hip_filename (string, lengths 5–84) | hip_content (string, lengths 79–9.69M) | cuda_filename (string, lengths 4–83) | cuda_content (string, lengths 19–9.69M)
---|---|---|---|
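Each row below pairs a HIP source file emitted by the hipify tool with the CUDA file it was generated from. The recurring transformation visible in these pairs is mechanical: the CUDA runtime header and `cuda*` API calls become `hip/hip_runtime.h` and `hip*` calls, and the triple-chevron kernel launch becomes an explicit `hipLaunchKernelGGL` call. A minimal sketch of that mapping, using a hypothetical `scale` kernel rather than any file from the table:

```cpp
// Hypothetical CUDA kernel used only for illustration.
__global__ void scale(float* data, float factor, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;
}

// CUDA host side (original):
//   cudaMalloc(&d_data, n * sizeof(float));
//   scale<<<blocks, threads>>>(d_data, 2.0f, n);
//   cudaMemcpy(h_data, d_data, n * sizeof(float), cudaMemcpyDeviceToHost);
//
// hipify output, as seen throughout the rows below:
//   #include "hip/hip_runtime.h"
//   hipMalloc(&d_data, n * sizeof(float));
//   hipLaunchKernelGGL((scale), dim3(blocks), dim3(threads), 0, 0, d_data, 2.0f, n);
//   hipMemcpy(h_data, d_data, n * sizeof(float), hipMemcpyDeviceToHost);
```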
2eec3512251e4dc49efd43ee996fd60a991d1c9c.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
int *arr2;
int result2;
hipMalloc(&arr2,size);
hipMemcpy(arr2,a,size,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( std_dev), dim3(1),dim3(n/2), 0, 0, arr2,avg);
hipMemcpy(&result2,arr2,sizeof(int),hipMemcpyDeviceToHost);
cout<<"****"<<result2;
float result3 = result2/n;
result3 = sqrt(result3);
cout<<endl;
cout<<"The standard deviation is :- "<<result3;
hipFree(arr1);
hipFree(arr2);
__global__ void std_dev(int *input,int a)
{
int step_size = 1;
int tid = threadIdx.x;
int num_threads = blockDim.x;
while(num_threads > 0)
{
if(tid < num_threads)
{
int first = tid*step_size*2;
int second = first+step_size;
input[first] = ((input[first]-a) * (input[first]-a));
input[second] = ((input[second]- a) * (input[second] - a));
input[first] += input[second];
}
step_size *= 2;
num_threads /= 2;
}
}
|
2eec3512251e4dc49efd43ee996fd60a991d1c9c.cu
|
int *arr2;
int result2;
cudaMalloc(&arr2,size);
cudaMemcpy(arr2,a,size,cudaMemcpyHostToDevice);
std_dev<<<1,n/2>>>(arr2,avg);
cudaMemcpy(&result2,arr2,sizeof(int),cudaMemcpyDeviceToHost);
cout<<"****"<<result2;
float result3 = result2/n;
result3 = sqrt(result3);
cout<<endl;
cout<<"The standard deviation is :- "<<result3;
cudaFree(arr1);
cudaFree(arr2);
__global__ void std_dev(int *input,int a)
{
int step_size = 1;
int tid = threadIdx.x;
int num_threads = blockDim.x;
while(num_threads > 0)
{
if(tid < num_threads)
{
int first = tid*step_size*2;
int second = first+step_size;
input[first] = ((input[first]-a) * (input[first]-a));
input[second] = ((input[second]- a) * (input[second] - a));
input[first] += input[second];
}
step_size *= 2;
num_threads /= 2;
}
}
|
9c0c0a17a71da7026f4b0458fb005df91a0c0899.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file indexing_op.cu
* \brief
* \author Siyi Li, Chi Zhang
*/
#include "./indexing_op.h"
#include "./util/tensor_util-inl.cuh"
namespace mxnet {
namespace op {
/*! \brief If there are out-of-bound indices, out will be assigned to 1.
*/
struct is_valid_check {
template<typename DType>
MSHADOW_XINLINE static void Map(int i, int32_t* out, const DType* data,
const DType min, const DType max) {
if (data[i] < min || data[i] > max) *out = 1;
}
};
struct AddTakeGradRspGPUKernel {
template<typename DType, typename IType>
__device__ __forceinline__ static void Map(int tid,
DType* out,
const nnvm::dim_t* prefix_sum,
const IType* data,
const DType* ograd,
const nnvm::dim_t row_length) {
using nnvm::dim_t;
const dim_t data_i = tid / row_length;
const dim_t grad_i = tid % row_length;
const dim_t irow = static_cast<dim_t>(data[data_i]);
const dim_t rsp_row = prefix_sum[irow] - 1;
const DType val = ograd[data_i * row_length + grad_i];
atomicAdd(static_cast<DType *>(&(out[rsp_row*row_length+grad_i])), val);
}
};
template<>
void SparseEmbeddingOpForwardRspImpl<gpu>(mshadow::Stream<gpu>* s,
const TBlob& data,
const NDArray& weight,
const OpReqType req,
const TBlob& output) {
if (req == kNullOp) return;
using namespace rowsparse;
using namespace mxnet_op;
// zeros weight
if (req == kWriteTo && !weight.storage_initialized()) {
size_t out_size = output.shape_.Size();
MSHADOW_TYPE_SWITCH(output.type_flag_, DType, {
Fill<false>(s, TBlob(output.dptr<DType>(), mshadow::Shape1(out_size),
gpu::kDevMask), kWriteTo, 0);
})
return;
}
// check out-of-bound indices
int32_t is_valid = 0;
MSHADOW_TYPE_SWITCH(data.type_flag_, DType, {
DType min = 0;
DType max = static_cast<DType>(weight.shape()[0] - 1);
DType* data_ptr = data.dptr<DType>();
size_t data_size = data.shape_.Size();
int32_t* is_valid_ptr = NULL;
CUDA_CALL(hipMalloc(&is_valid_ptr, sizeof(int32_t)));
Kernel<set_zero, gpu>::Launch(s, 1, is_valid_ptr);
Kernel<is_valid_check, gpu>::Launch(s, data_size, is_valid_ptr, data_ptr, min, max);
CUDA_CALL(hipMemcpy(&is_valid, is_valid_ptr, sizeof(int32_t),
hipMemcpyDeviceToHost));
})
CHECK_EQ(is_valid, 0) << "SparseEmbedding input contains data out of bound";
// the weight is actually dense
if (weight.aux_shape(kIdx)[0] == weight.shape()[0]) {
EmbeddingOpForwardDnsImpl<gpu>(s, data, weight.data(), req, output);
} else {
EmbeddingOpForwardRspImpl<gpu>(s, data, weight, req, output);
}
}
template<>
inline void SparseEmbeddingOpBackwardRspImpl<gpu>(const OpContext& ctx,
const TBlob& ograd,
const TBlob& data,
const OpReqType req,
const NDArray& output) {
using namespace mshadow;
using namespace mxnet_op;
using namespace mshadow::expr;
using namespace rowsparse;
using nnvm::dim_t;
if (req == kNullOp) return;
CHECK_EQ(req, kWriteTo) << "SparseEmbedding layer doesn't support "
<< "weight gradient calculation with req != write";
// Request temporary storage for marking non-zero rows and prefix sum
Stream<gpu> *s = ctx.get_stream<gpu>();
dim_t num_rows = output.shape()[0];
dim_t row_length = output.shape()[1];
dim_t data_size = static_cast<dim_t>(data.shape_.Size());
dim_t num_threads;
MSHADOW_TYPE_SWITCH(data.type_flag_, IType, {
MSHADOW_SGL_DBL_TYPE_SWITCH(ograd.type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(output.aux_type(kIdx), RType, {
dim_t* prefix_sum = NULL;
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
hipcub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
num_rows,
Stream<gpu>::GetStream(s));
Tensor<gpu, 1, char> workspace = ctx.requested[0]
.get_space_typed<gpu, 1, char>(Shape1(num_rows * sizeof(dim_t) +
temp_storage_bytes), s);
prefix_sum = reinterpret_cast<dim_t*>(workspace.dptr_);
d_temp_storage = workspace.dptr_ + num_rows*sizeof(dim_t);
num_threads = num_rows;
Fill<false>(s, TBlob(prefix_sum, Shape1(num_threads), gpu::kDevMask), kWriteTo, 0);
Kernel<MarkRowFlgKernel, gpu>::Launch(s, data_size, prefix_sum, data.dptr<IType>());
hipcub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
num_rows,
mshadow::Stream<gpu>::GetStream(s));
dim_t nnr = 0;
CUDA_CALL(hipMemcpy(&nnr, &prefix_sum[num_rows-1], sizeof(dim_t),
hipMemcpyDeviceToHost));
if (nnr == 0) {
FillZerosRspImpl(s, output);
return;
}
output.CheckAndAlloc({Shape1(nnr)});
RType* grad_row_idx = output.aux_data(kIdx).dptr<RType>();
// fill row_idx array of output matrix, using the row_flg values
Kernel<FillRspRowIdxKernel, gpu>::Launch(s, num_rows,
grad_row_idx, prefix_sum, num_rows);
// prefill with zeros
DType* grad_data = output.data().dptr<DType>();
Fill<false>(s, TBlob(grad_data, Shape1(nnr * row_length), gpu::kDevMask),
kWriteTo, 0);
// add the final gradients
num_threads = row_length * data_size;
Kernel<AddTakeGradRspGPUKernel, gpu>::Launch(s, num_threads, grad_data, prefix_sum,
data.dptr<IType>(), ograd.dptr<DType>(), row_length);
});
});
});
}
struct backward_gather_nd_gpu {
template<typename DType, typename IType>
MSHADOW_XINLINE static void Map(int i, int N, int M, int K,
const mshadow::Shape<10> strides,
DType* out, const DType* data,
const IType* indices) {
int offset = 0;
for (int j = 0; j < M; ++j) {
offset += strides[j] * static_cast<int>(indices[j*N + i]);
}
for (int j = 0; j < K; ++j) {
atomicAdd(out + (offset + j), data[i * K + j]);
}
}
};
template<typename DType, typename IType>
inline void GatherNDBackwardImpl(int N, int M, int K,
const mshadow::Shape<10> strides,
DType* out,
const DType* data,
const IType* indices,
mshadow::Stream<gpu> *s) {
mxnet_op::Kernel<backward_gather_nd_gpu, gpu>::Launch(s, N, N, M, K, strides, out, data, indices);
}
NNVM_REGISTER_OP(Embedding)
.set_attr<FCompute>("FCompute<gpu>", EmbeddingOpForward<gpu>);
NNVM_REGISTER_OP(_contrib_SparseEmbedding)
.set_attr<FComputeEx>("FComputeEx<gpu>", SparseEmbeddingOpForwardEx<gpu>);
NNVM_REGISTER_OP(_backward_Embedding)
.set_attr<FCompute>("FCompute<gpu>", EmbeddingOpBackward<gpu>);
NNVM_REGISTER_OP(_backward_SparseEmbedding)
.set_attr<FComputeEx>("FComputeEx<gpu>", SparseEmbeddingOpBackwardEx<gpu>);
NNVM_REGISTER_OP(take)
.set_attr<FCompute>("FCompute<gpu>", TakeOpForward<gpu>);
NNVM_REGISTER_OP(_backward_take)
.set_attr<FCompute>("FCompute<gpu>", TakeOpBackward<gpu>);
NNVM_REGISTER_OP(batch_take)
.set_attr<FCompute>("FCompute<gpu>", BatchTakeOpForward<gpu>);
NNVM_REGISTER_OP(one_hot)
.set_attr<FCompute>("FCompute<gpu>", OneHotOpForward<gpu>);
NNVM_REGISTER_OP(gather_nd)
.set_attr<FCompute>("FCompute<gpu>", GatherNDForward<gpu>);
NNVM_REGISTER_OP(scatter_nd)
.set_attr<FCompute>("FCompute<gpu>", ScatterNDForward<gpu>);
NNVM_REGISTER_OP(_backward_gather_nd)
.set_attr<FCompute>("FCompute<gpu>", GatherNDBackward<gpu>);
NNVM_REGISTER_OP(_scatter_set_nd)
.set_attr<FCompute>("FCompute<gpu>", ScatterSetNDForward<gpu>);
} // namespace op
} // namespace mxnet
|
9c0c0a17a71da7026f4b0458fb005df91a0c0899.cu
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* Copyright (c) 2017 by Contributors
* \file indexing_op.cu
* \brief
* \author Siyi Li, Chi Zhang
*/
#include "./indexing_op.h"
#include "./util/tensor_util-inl.cuh"
namespace mxnet {
namespace op {
/*! \brief If there are out-of-bound indices, out will be assigned to 1.
*/
struct is_valid_check {
template<typename DType>
MSHADOW_XINLINE static void Map(int i, int32_t* out, const DType* data,
const DType min, const DType max) {
if (data[i] < min || data[i] > max) *out = 1;
}
};
struct AddTakeGradRspGPUKernel {
template<typename DType, typename IType>
__device__ __forceinline__ static void Map(int tid,
DType* out,
const nnvm::dim_t* prefix_sum,
const IType* data,
const DType* ograd,
const nnvm::dim_t row_length) {
using nnvm::dim_t;
const dim_t data_i = tid / row_length;
const dim_t grad_i = tid % row_length;
const dim_t irow = static_cast<dim_t>(data[data_i]);
const dim_t rsp_row = prefix_sum[irow] - 1;
const DType val = ograd[data_i * row_length + grad_i];
atomicAdd(static_cast<DType *>(&(out[rsp_row*row_length+grad_i])), val);
}
};
template<>
void SparseEmbeddingOpForwardRspImpl<gpu>(mshadow::Stream<gpu>* s,
const TBlob& data,
const NDArray& weight,
const OpReqType req,
const TBlob& output) {
if (req == kNullOp) return;
using namespace rowsparse;
using namespace mxnet_op;
// zeros weight
if (req == kWriteTo && !weight.storage_initialized()) {
size_t out_size = output.shape_.Size();
MSHADOW_TYPE_SWITCH(output.type_flag_, DType, {
Fill<false>(s, TBlob(output.dptr<DType>(), mshadow::Shape1(out_size),
gpu::kDevMask), kWriteTo, 0);
})
return;
}
// check out-of-bound indices
int32_t is_valid = 0;
MSHADOW_TYPE_SWITCH(data.type_flag_, DType, {
DType min = 0;
DType max = static_cast<DType>(weight.shape()[0] - 1);
DType* data_ptr = data.dptr<DType>();
size_t data_size = data.shape_.Size();
int32_t* is_valid_ptr = NULL;
CUDA_CALL(cudaMalloc(&is_valid_ptr, sizeof(int32_t)));
Kernel<set_zero, gpu>::Launch(s, 1, is_valid_ptr);
Kernel<is_valid_check, gpu>::Launch(s, data_size, is_valid_ptr, data_ptr, min, max);
CUDA_CALL(cudaMemcpy(&is_valid, is_valid_ptr, sizeof(int32_t),
cudaMemcpyDeviceToHost));
})
CHECK_EQ(is_valid, 0) << "SparseEmbedding input contains data out of bound";
// the weight is actually dense
if (weight.aux_shape(kIdx)[0] == weight.shape()[0]) {
EmbeddingOpForwardDnsImpl<gpu>(s, data, weight.data(), req, output);
} else {
EmbeddingOpForwardRspImpl<gpu>(s, data, weight, req, output);
}
}
template<>
inline void SparseEmbeddingOpBackwardRspImpl<gpu>(const OpContext& ctx,
const TBlob& ograd,
const TBlob& data,
const OpReqType req,
const NDArray& output) {
using namespace mshadow;
using namespace mxnet_op;
using namespace mshadow::expr;
using namespace rowsparse;
using nnvm::dim_t;
if (req == kNullOp) return;
CHECK_EQ(req, kWriteTo) << "SparseEmbedding layer doesn't support "
<< "weight gradient calculation with req != write";
// Request temporary storage for marking non-zero rows and prefix sum
Stream<gpu> *s = ctx.get_stream<gpu>();
dim_t num_rows = output.shape()[0];
dim_t row_length = output.shape()[1];
dim_t data_size = static_cast<dim_t>(data.shape_.Size());
dim_t num_threads;
MSHADOW_TYPE_SWITCH(data.type_flag_, IType, {
MSHADOW_SGL_DBL_TYPE_SWITCH(ograd.type_flag_, DType, {
MSHADOW_IDX_TYPE_SWITCH(output.aux_type(kIdx), RType, {
dim_t* prefix_sum = NULL;
void* d_temp_storage = NULL;
size_t temp_storage_bytes = 0;
cub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
num_rows,
Stream<gpu>::GetStream(s));
Tensor<gpu, 1, char> workspace = ctx.requested[0]
.get_space_typed<gpu, 1, char>(Shape1(num_rows * sizeof(dim_t) +
temp_storage_bytes), s);
prefix_sum = reinterpret_cast<dim_t*>(workspace.dptr_);
d_temp_storage = workspace.dptr_ + num_rows*sizeof(dim_t);
num_threads = num_rows;
Fill<false>(s, TBlob(prefix_sum, Shape1(num_threads), gpu::kDevMask), kWriteTo, 0);
Kernel<MarkRowFlgKernel, gpu>::Launch(s, data_size, prefix_sum, data.dptr<IType>());
cub::DeviceScan::InclusiveSum(d_temp_storage,
temp_storage_bytes,
prefix_sum,
prefix_sum,
num_rows,
mshadow::Stream<gpu>::GetStream(s));
dim_t nnr = 0;
CUDA_CALL(cudaMemcpy(&nnr, &prefix_sum[num_rows-1], sizeof(dim_t),
cudaMemcpyDeviceToHost));
if (nnr == 0) {
FillZerosRspImpl(s, output);
return;
}
output.CheckAndAlloc({Shape1(nnr)});
RType* grad_row_idx = output.aux_data(kIdx).dptr<RType>();
// fill row_idx array of output matrix, using the row_flg values
Kernel<FillRspRowIdxKernel, gpu>::Launch(s, num_rows,
grad_row_idx, prefix_sum, num_rows);
// prefill with zeros
DType* grad_data = output.data().dptr<DType>();
Fill<false>(s, TBlob(grad_data, Shape1(nnr * row_length), gpu::kDevMask),
kWriteTo, 0);
// add the final gradients
num_threads = row_length * data_size;
Kernel<AddTakeGradRspGPUKernel, gpu>::Launch(s, num_threads, grad_data, prefix_sum,
data.dptr<IType>(), ograd.dptr<DType>(), row_length);
});
});
});
}
struct backward_gather_nd_gpu {
template<typename DType, typename IType>
MSHADOW_XINLINE static void Map(int i, int N, int M, int K,
const mshadow::Shape<10> strides,
DType* out, const DType* data,
const IType* indices) {
int offset = 0;
for (int j = 0; j < M; ++j) {
offset += strides[j] * static_cast<int>(indices[j*N + i]);
}
for (int j = 0; j < K; ++j) {
atomicAdd(out + (offset + j), data[i * K + j]);
}
}
};
template<typename DType, typename IType>
inline void GatherNDBackwardImpl(int N, int M, int K,
const mshadow::Shape<10> strides,
DType* out,
const DType* data,
const IType* indices,
mshadow::Stream<gpu> *s) {
mxnet_op::Kernel<backward_gather_nd_gpu, gpu>::Launch(s, N, N, M, K, strides, out, data, indices);
}
NNVM_REGISTER_OP(Embedding)
.set_attr<FCompute>("FCompute<gpu>", EmbeddingOpForward<gpu>);
NNVM_REGISTER_OP(_contrib_SparseEmbedding)
.set_attr<FComputeEx>("FComputeEx<gpu>", SparseEmbeddingOpForwardEx<gpu>);
NNVM_REGISTER_OP(_backward_Embedding)
.set_attr<FCompute>("FCompute<gpu>", EmbeddingOpBackward<gpu>);
NNVM_REGISTER_OP(_backward_SparseEmbedding)
.set_attr<FComputeEx>("FComputeEx<gpu>", SparseEmbeddingOpBackwardEx<gpu>);
NNVM_REGISTER_OP(take)
.set_attr<FCompute>("FCompute<gpu>", TakeOpForward<gpu>);
NNVM_REGISTER_OP(_backward_take)
.set_attr<FCompute>("FCompute<gpu>", TakeOpBackward<gpu>);
NNVM_REGISTER_OP(batch_take)
.set_attr<FCompute>("FCompute<gpu>", BatchTakeOpForward<gpu>);
NNVM_REGISTER_OP(one_hot)
.set_attr<FCompute>("FCompute<gpu>", OneHotOpForward<gpu>);
NNVM_REGISTER_OP(gather_nd)
.set_attr<FCompute>("FCompute<gpu>", GatherNDForward<gpu>);
NNVM_REGISTER_OP(scatter_nd)
.set_attr<FCompute>("FCompute<gpu>", ScatterNDForward<gpu>);
NNVM_REGISTER_OP(_backward_gather_nd)
.set_attr<FCompute>("FCompute<gpu>", GatherNDBackward<gpu>);
NNVM_REGISTER_OP(_scatter_set_nd)
.set_attr<FCompute>("FCompute<gpu>", ScatterSetNDForward<gpu>);
} // namespace op
} // namespace mxnet
|
679768e9af9177f4fd19f939803ea76633fda639.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hiprtc.h"
#ifndef CMAKE_CUDA_TOOLKIT_IS_SYSTEM
# error "Failed to specify the CUDA Toolkit includes as system"
#endif
|
679768e9af9177f4fd19f939803ea76633fda639.cu
|
#include "nvrtc.h"
#ifndef CMAKE_CUDA_TOOLKIT_IS_SYSTEM
# error "Failed to specify the CUDA Toolkit includes as system"
#endif
|
be5ce3cb756ad76ca9cdb314d846fa8897eb2211.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "../common/point_cloud.h"
#include "transform.h"
#include <hip/hip_runtime_api.h>
#include <device_launch_parameters.h>
#include <math.h>
constexpr int THREADS_PER_BLOCK = 512;
__global__ void shifting(PointXYZI *d_points, double *d_shift, size_t size) {
long idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) {
d_points[idx].x += d_shift[0];
d_points[idx].y += d_shift[1];
d_points[idx].z += d_shift[2];
}
}
__global__ void rotate(PointXYZI *d_points, Quaternion *d_quaternion, size_t size) {
long idx = blockIdx.x * blockDim.x + threadIdx.x;
// bounds check, as in shifting: the grid may launch more threads than there are points
if (idx < size) {
Quaternion q(d_points[idx]);
Quaternion q_prime = *d_quaternion * q * d_quaternion->inverse();
d_points[idx].x = q_prime.x;
d_points[idx].y = q_prime.y;
d_points[idx].z = q_prime.z;
}
}
PointCloud<PointXYZI> shift_points(PointCloud<PointXYZI> &h_cloud, std::vector<double> shift) {
PointXYZI *d_points;
double *d_shift;
PointXYZI *h_points = &(h_cloud.points[0]);
double *h_shift = &(shift[0]);
hipMalloc((void**) &d_points, h_cloud.points.size() * sizeof(PointXYZI));
hipMalloc((void**) &d_shift, shift.size() * sizeof(double));
hipMemcpy(d_points, h_points, h_cloud.points.size() * sizeof(PointXYZI), hipMemcpyHostToDevice);
hipMemcpy(d_shift, h_shift, shift.size() * sizeof(double), hipMemcpyHostToDevice);
long N = h_cloud.points.size();
int NUM_BLOCKS = ceil(float(N) / THREADS_PER_BLOCK);
hipLaunchKernelGGL(( shifting), dim3(NUM_BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, d_points, d_shift, N);
hipDeviceSynchronize();
hipMemcpy(h_points, d_points, h_cloud.points.size() * sizeof(PointXYZI), hipMemcpyDeviceToHost);
hipMemcpy(h_shift, d_shift, shift.size() * sizeof(double), hipMemcpyDeviceToHost);
hipFree(d_points);
hipFree(d_shift);
return h_cloud;
}
PointCloud<PointXYZI> rotate_points(PointCloud<PointXYZI> &h_cloud, Quaternion &h_quaternion){
PointXYZI *d_points;
PointXYZI *h_points = &(h_cloud.points[0]);
Quaternion *d_quaternion;
hipMalloc((void**) &d_points, h_cloud.points.size() * sizeof(PointXYZI));
hipMalloc((void**) &d_quaternion, sizeof(Quaternion));
hipMemcpy(d_points, h_points, h_cloud.points.size() * sizeof(PointXYZI), hipMemcpyHostToDevice);
hipMemcpy(d_quaternion, &h_quaternion, sizeof(Quaternion), hipMemcpyHostToDevice);
long N = h_cloud.points.size();
int NUM_BLOCKS = ceil(float(N) / THREADS_PER_BLOCK);
hipLaunchKernelGGL(( rotate), dim3(NUM_BLOCKS),dim3(THREADS_PER_BLOCK), 0, 0, d_points, d_quaternion, N);
hipDeviceSynchronize();
hipMemcpy(h_points, d_points, h_cloud.points.size() * sizeof(PointXYZI), hipMemcpyDeviceToHost);
hipFree(d_points);
hipFree(d_quaternion);
return h_cloud;
}
|
be5ce3cb756ad76ca9cdb314d846fa8897eb2211.cu
|
#include "../common/point_cloud.h"
#include "transform.h"
#include <cuda_runtime_api.h>
#include <device_launch_parameters.h>
#include <math.h>
constexpr int THREADS_PER_BLOCK = 512;
__global__ void shifting(PointXYZI *d_points, double *d_shift, size_t size) {
long idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < size) {
d_points[idx].x += d_shift[0];
d_points[idx].y += d_shift[1];
d_points[idx].z += d_shift[2];
}
}
__global__ void rotate(PointXYZI *d_points, Quaternion *d_quaternion, size_t size) {
long idx = blockIdx.x * blockDim.x + threadIdx.x;
// bounds check, as in shifting: the grid may launch more threads than there are points
if (idx < size) {
Quaternion q(d_points[idx]);
Quaternion q_prime = *d_quaternion * q * d_quaternion->inverse();
d_points[idx].x = q_prime.x;
d_points[idx].y = q_prime.y;
d_points[idx].z = q_prime.z;
}
}
PointCloud<PointXYZI> shift_points(PointCloud<PointXYZI> &h_cloud, std::vector<double> shift) {
PointXYZI *d_points;
double *d_shift;
PointXYZI *h_points = &(h_cloud.points[0]);
double *h_shift = &(shift[0]);
cudaMalloc((void**) &d_points, h_cloud.points.size() * sizeof(PointXYZI));
cudaMalloc((void**) &d_shift, shift.size() * sizeof(double));
cudaMemcpy(d_points, h_points, h_cloud.points.size() * sizeof(PointXYZI), cudaMemcpyHostToDevice);
cudaMemcpy(d_shift, h_shift, shift.size() * sizeof(double), cudaMemcpyHostToDevice);
long N = h_cloud.points.size();
int NUM_BLOCKS = ceil(float(N) / THREADS_PER_BLOCK);
shifting<<<NUM_BLOCKS,THREADS_PER_BLOCK>>>(d_points, d_shift, N);
cudaDeviceSynchronize();
cudaMemcpy(h_points, d_points, h_cloud.points.size() * sizeof(PointXYZI), cudaMemcpyDeviceToHost);
cudaMemcpy(h_shift, d_shift, shift.size() * sizeof(double), cudaMemcpyDeviceToHost);
cudaFree(d_points);
cudaFree(d_shift);
return h_cloud;
}
PointCloud<PointXYZI> rotate_points(PointCloud<PointXYZI> &h_cloud, Quaternion &h_quaternion){
PointXYZI *d_points;
PointXYZI *h_points = &(h_cloud.points[0]);
Quaternion *d_quaternion;
cudaMalloc((void**) &d_points, h_cloud.points.size() * sizeof(PointXYZI));
cudaMalloc((void**) &d_quaternion, sizeof(Quaternion));
cudaMemcpy(d_points, h_points, h_cloud.points.size() * sizeof(PointXYZI), cudaMemcpyHostToDevice);
cudaMemcpy(d_quaternion, &h_quaternion, sizeof(Quaternion), cudaMemcpyHostToDevice);
long N = h_cloud.points.size();
int NUM_BLOCKS = ceil(float(N) / THREADS_PER_BLOCK);
rotate<<<NUM_BLOCKS,THREADS_PER_BLOCK>>>(d_points, d_quaternion, N);
cudaDeviceSynchronize();
cudaMemcpy(h_points, d_points, h_cloud.points.size() * sizeof(PointXYZI), cudaMemcpyDeviceToHost);
cudaFree(d_points);
cudaFree(d_quaternion);
return h_cloud;
}
|
d634d11d0b0db21e76c15a3bc04e88241991b49b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/layers/softmax_interpolation_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxInterpolationGPUForward(const int n,
const Dtype* bottom_data, const Dtype* interpolation_data,
const int softmax_dim, const int inner_dim,
Dtype* top_data) {
CUDA_KERNEL_LOOP(index, n) {
const int i = index / inner_dim;
const int j = index % inner_dim;
Dtype sum = 0;
for (int c = 0; c < softmax_dim; c++) {
sum += bottom_data[(i * softmax_dim + c) * inner_dim + j] *
interpolation_data[c];
}
top_data[index] = sum;
}
}
template <typename Dtype>
void SoftmaxInterpolationLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* interpolation_data = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int softmax_dim = bottom[1]->count();
const int top_count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( SoftmaxInterpolationGPUForward<Dtype>), dim3(CAFFE_GET_BLOCKS(top_count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, top_count, bottom_data, interpolation_data,
softmax_dim, inner_num_, top_data);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FORWARD(SoftmaxInterpolationLayer);
} // namespace caffe
|
d634d11d0b0db21e76c15a3bc04e88241991b49b.cu
|
#include <vector>
#include "caffe/layers/softmax_interpolation_layer.hpp"
namespace caffe {
template <typename Dtype>
__global__ void SoftmaxInterpolationGPUForward(const int n,
const Dtype* bottom_data, const Dtype* interpolation_data,
const int softmax_dim, const int inner_dim,
Dtype* top_data) {
CUDA_KERNEL_LOOP(index, n) {
const int i = index / inner_dim;
const int j = index % inner_dim;
Dtype sum = 0;
for (int c = 0; c < softmax_dim; c++) {
sum += bottom_data[(i * softmax_dim + c) * inner_dim + j] *
interpolation_data[c];
}
top_data[index] = sum;
}
}
template <typename Dtype>
void SoftmaxInterpolationLayer<Dtype>::Forward_gpu(
const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
const Dtype* interpolation_data = bottom[1]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
const int softmax_dim = bottom[1]->count();
const int top_count = top[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
SoftmaxInterpolationGPUForward<Dtype><<<CAFFE_GET_BLOCKS(top_count),
CAFFE_CUDA_NUM_THREADS>>>(top_count, bottom_data, interpolation_data,
softmax_dim, inner_num_, top_data);
CUDA_POST_KERNEL_CHECK;
}
INSTANTIATE_LAYER_GPU_FORWARD(SoftmaxInterpolationLayer);
} // namespace caffe
|
e0f9fc87575fc7a5a9638a68130c0e3c82197ff8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "bce_cost.hh"
#include "nn_exception.hh"
#include <math.h>
#include <iostream>
#include <assert.h>
__global__ void binaryCrossEntropyCost(float* predictions, float* target,
int size, float* cost) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
float partial_cost = target[index] * logf(predictions[index])
+ (1.0f - target[index]) * logf(1.0f - predictions[index]);
atomicAdd(cost, - partial_cost / size);
}
}
__global__ void dBinaryCrossEntropyCost(float* predictions, float* target, float* dY,
int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
dY[index] = -1.0 * ( target[index]/predictions[index] - (1 - target[index])/(1 - predictions[index]) );
}
}
float BCECost::cost(Matrix predictions, Matrix target) {
assert(predictions.shape.x == target.shape.x);
float* cost;
hipMallocManaged(&cost, sizeof(float));
*cost = 0.0f;
dim3 block_size(256);
dim3 num_of_blocks((predictions.shape.x + block_size.x - 1) / block_size.x);
hipLaunchKernelGGL(( binaryCrossEntropyCost), dim3(num_of_blocks), dim3(block_size), 0, 0, predictions.data.get(),
target.data.get(),
predictions.shape.x, cost);
hipDeviceSynchronize();
NNException::throwIfDeviceErrorsOccurred("Cannot compute binary cross entropy cost.");
float cost_value = *cost;
hipFree(cost);
return cost_value;
}
Matrix BCECost::dCost(Matrix predictions, Matrix target, Matrix dY) {
assert(predictions.shape.x == target.shape.x);
dim3 block_size(256);
dim3 num_of_blocks((predictions.shape.x + block_size.x - 1) / block_size.x);
hipLaunchKernelGGL(( dBinaryCrossEntropyCost), dim3(num_of_blocks), dim3(block_size), 0, 0, predictions.data.get(),
target.data.get(),
dY.data.get(),
predictions.shape.x);
NNException::throwIfDeviceErrorsOccurred("Cannot compute derivative for binary cross entropy.");
return dY;
}
|
e0f9fc87575fc7a5a9638a68130c0e3c82197ff8.cu
|
#include "bce_cost.hh"
#include "nn_exception.hh"
#include <math.h>
#include <iostream>
#include <assert.h>
__global__ void binaryCrossEntropyCost(float* predictions, float* target,
int size, float* cost) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
float partial_cost = target[index] * logf(predictions[index])
+ (1.0f - target[index]) * logf(1.0f - predictions[index]);
atomicAdd(cost, - partial_cost / size);
}
}
__global__ void dBinaryCrossEntropyCost(float* predictions, float* target, float* dY,
int size) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
if (index < size) {
dY[index] = -1.0 * ( target[index]/predictions[index] - (1 - target[index])/(1 - predictions[index]) );
}
}
float BCECost::cost(Matrix predictions, Matrix target) {
assert(predictions.shape.x == target.shape.x);
float* cost;
cudaMallocManaged(&cost, sizeof(float));
*cost = 0.0f;
dim3 block_size(256);
dim3 num_of_blocks((predictions.shape.x + block_size.x - 1) / block_size.x);
binaryCrossEntropyCost<<<num_of_blocks, block_size>>>(predictions.data.get(),
target.data.get(),
predictions.shape.x, cost);
cudaDeviceSynchronize();
NNException::throwIfDeviceErrorsOccurred("Cannot compute binary cross entropy cost.");
float cost_value = *cost;
cudaFree(cost);
return cost_value;
}
Matrix BCECost::dCost(Matrix predictions, Matrix target, Matrix dY) {
assert(predictions.shape.x == target.shape.x);
dim3 block_size(256);
dim3 num_of_blocks((predictions.shape.x + block_size.x - 1) / block_size.x);
dBinaryCrossEntropyCost<<<num_of_blocks, block_size>>>(predictions.data.get(),
target.data.get(),
dY.data.get(),
predictions.shape.x);
NNException::throwIfDeviceErrorsOccurred("Cannot compute derivative for binary cross entropy.");
return dY;
}
|
b733a39cbdc00167e8a5881dc3c0b1359185721e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* BSD 2-Clause License
*
* Copyright (c) 2019, Christoph Neuhauser, Stefan Haas, Paul Ng
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cstring>
#include <iostream>
#include "BoundaryValuesCuda.hpp"
#include "UvwCuda.hpp"
#include "SorSolverCuda.hpp"
#include "CfdSolverCuda.hpp"
#include "CudaDefines.hpp"
#include "../../Defines.hpp"
CfdSolverCuda::CfdSolverCuda(int gpuId, int blockSizeX, int blockSizeY, int blockSizeZ, int blockSize1D) {
this->gpuId = gpuId;
this->blockSizeX = blockSizeX;
this->blockSizeY = blockSizeY;
this->blockSizeZ = blockSizeZ;
this->blockSize1D = blockSize1D;
}
void CfdSolverCuda::initialize(
const std::string &scenarioName, LinearSystemSolverType linearSystemSolverType, bool shallWriteOutput,
Real Re, Real Pr, Real omg, Real eps, int itermax, Real alpha, Real beta, Real dt, Real tau,
Real GX, Real GY, Real GZ, bool useTemperature, Real T_h, Real T_c,
int imax, int jmax, int kmax, Real dx, Real dy, Real dz,
Real *U, Real *V, Real *W, Real *P, Real *T, uint32_t *Flag) {
this->scenarioName = scenarioName;
this->linearSystemSolverType = linearSystemSolverType;
this->shallWriteOutput = shallWriteOutput;
this->Re = Re;
this->Pr = Pr;
this->omg = omg;
this->eps = eps;
this->itermax = itermax;
this->alpha = alpha;
this->beta = beta;
this->dt = dt;
this->tau = tau;
this->GX = GX;
this->GY = GY;
this->GZ = GZ;
this->useTemperature = useTemperature;
this->T_h = T_h;
this->T_c = T_c;
this->imax = imax;
this->jmax = jmax;
this->kmax = kmax;
this->dx = dx;
this->dy = dy;
this->dz = dz;
int numDevices = 0;
hipGetDeviceCount(&numDevices);
if (numDevices == 0) {
std::cerr << "Fatal error in CfdSolverCuda::initialize: No CUDA devices were found." << std::endl;
exit(1);
}
if (gpuId >= numDevices) {
std::cerr << "Error in CfdSolverCuda::initialize: Invalid device ID specified. Setting device ID to 0."
<< std::endl;
gpuId = 0;
}
hipSetDevice(gpuId);
// Create all arrays for the simulation.
checkCudaError(hipMalloc(&this->U, (imax+1)*(jmax+2)*(kmax+2)*sizeof(Real)));
checkCudaError(hipMalloc(&this->V, (imax+2)*(jmax+1)*(kmax+2)*sizeof(Real)));
checkCudaError(hipMalloc(&this->W, (imax+2)*(jmax+2)*(kmax+1)*sizeof(Real)));
checkCudaError(hipMalloc(&this->P, (imax+2)*(jmax+2)*(kmax+2)*sizeof(Real)));
checkCudaError(hipMalloc(&this->P_temp, (imax+2)*(jmax+2)*(kmax+2)*sizeof(Real)));
checkCudaError(hipMalloc(&this->T, (imax+2)*(jmax+2)*(kmax+2)*sizeof(Real)));
checkCudaError(hipMalloc(&this->T_temp, (imax+2)*(jmax+2)*(kmax+2)*sizeof(Real)));
checkCudaError(hipMalloc(&this->F, (imax+1)*(jmax+1)*(kmax+1)*sizeof(Real)));
checkCudaError(hipMalloc(&this->G, (imax+1)*(jmax+1)*(kmax+1)*sizeof(Real)));
checkCudaError(hipMalloc(&this->H, (imax+1)*(jmax+1)*(kmax+1)*sizeof(Real)));
checkCudaError(hipMalloc(&this->RS, (imax+1)*(jmax+1)*(kmax+1)*sizeof(Real)));
checkCudaError(hipMalloc(&this->Flag, (imax+2)*(jmax+2)*(kmax+2)*sizeof(unsigned int)));
int cudaReductionArrayUSize = iceil((imax+1)*(jmax+2)*(kmax+2), blockSize1D*2);
int cudaReductionArrayVSize = iceil((imax+1)*(jmax+2)*(kmax+2), blockSize1D*2);
int cudaReductionArrayWSize = iceil((imax+1)*(jmax+2)*(kmax+2), blockSize1D*2);
int cudaReductionArrayResidualSize1 = iceil(imax*jmax*kmax, blockSize1D*2)*blockSize1D*2;
int cudaReductionArrayResidualSize2 = iceil(imax*jmax*kmax, blockSize1D*2);
checkCudaError(hipMalloc(&cudaReductionArrayU1, cudaReductionArrayUSize*sizeof(Real)));
checkCudaError(hipMalloc(&cudaReductionArrayU2, cudaReductionArrayUSize*sizeof(Real)));
checkCudaError(hipMalloc(&cudaReductionArrayV1, cudaReductionArrayVSize*sizeof(Real)));
checkCudaError(hipMalloc(&cudaReductionArrayV2, cudaReductionArrayVSize*sizeof(Real)));
checkCudaError(hipMalloc(&cudaReductionArrayW1, cudaReductionArrayWSize*sizeof(Real)));
checkCudaError(hipMalloc(&cudaReductionArrayW2, cudaReductionArrayWSize*sizeof(Real)));
checkCudaError(hipMalloc(
&cudaReductionArrayResidual1, cudaReductionArrayResidualSize1*sizeof(Real)));
checkCudaError(hipMalloc(
&cudaReductionArrayResidual2, cudaReductionArrayResidualSize2*sizeof(Real)));
checkCudaError(hipMalloc(
&cudaReductionArrayNumCells1, cudaReductionArrayResidualSize1*sizeof(unsigned int)));
checkCudaError(hipMalloc(
&cudaReductionArrayNumCells2, cudaReductionArrayResidualSize2*sizeof(unsigned int)));
// Copy the content of U, V, W, P, T and Flag to the internal representation.
checkCudaError(hipMemcpy(
this->U, U, sizeof(Real)*(imax+1)*(jmax+2)*(kmax+2), hipMemcpyHostToDevice));
checkCudaError(hipMemcpy(
this->V, V, sizeof(Real)*(imax+2)*(jmax+1)*(kmax+2), hipMemcpyHostToDevice));
checkCudaError(hipMemcpy(
this->W, W, sizeof(Real)*(imax+2)*(jmax+2)*(kmax+1), hipMemcpyHostToDevice));
checkCudaError(hipMemcpy(
this->P, P, sizeof(Real)*(imax+2)*(jmax+2)*(kmax+2), hipMemcpyHostToDevice));
checkCudaError(hipMemcpy(
this->T, T, sizeof(Real)*(imax+2)*(jmax+2)*(kmax+2), hipMemcpyHostToDevice));
checkCudaError(hipMemcpy(
this->Flag, Flag, sizeof(unsigned int)*(imax+2)*(jmax+2)*(kmax+2), hipMemcpyHostToDevice));
}
CfdSolverCuda::~CfdSolverCuda() {
checkCudaError(hipFree(U));
checkCudaError(hipFree(V));
checkCudaError(hipFree(W));
checkCudaError(hipFree(P));
checkCudaError(hipFree(P_temp));
checkCudaError(hipFree(T));
checkCudaError(hipFree(T_temp));
checkCudaError(hipFree(F));
checkCudaError(hipFree(G));
checkCudaError(hipFree(H));
checkCudaError(hipFree(RS));
checkCudaError(hipFree(Flag));
checkCudaError(hipFree(cudaReductionArrayU1));
checkCudaError(hipFree(cudaReductionArrayU2));
checkCudaError(hipFree(cudaReductionArrayV1));
checkCudaError(hipFree(cudaReductionArrayV2));
checkCudaError(hipFree(cudaReductionArrayW1));
checkCudaError(hipFree(cudaReductionArrayW2));
checkCudaError(hipFree(cudaReductionArrayResidual1));
checkCudaError(hipFree(cudaReductionArrayResidual2));
checkCudaError(hipFree(cudaReductionArrayNumCells1));
checkCudaError(hipFree(cudaReductionArrayNumCells2));
}
void CfdSolverCuda::setBoundaryValues() {
setBoundaryValuesCuda(
T_h, T_c, imax, jmax, kmax, blockSizeX, blockSizeY, blockSizeZ, U, V, W, T, Flag);
}
void CfdSolverCuda::setBoundaryValuesScenarioSpecific() {
setBoundaryValuesScenarioSpecificCuda(
scenarioName, imax, jmax, kmax, blockSizeX, blockSizeY, blockSizeZ, U, V, W, Flag);
}
Real CfdSolverCuda::calculateDt() {
calculateDtCuda(
Re, Pr, tau, dt, dx, dy, dz, imax, jmax, kmax, blockSize1D, U, V, W,
cudaReductionArrayU1, cudaReductionArrayU2,
cudaReductionArrayV1, cudaReductionArrayV2,
cudaReductionArrayW1, cudaReductionArrayW2,
useTemperature);
return dt;
}
void CfdSolverCuda::calculateTemperature() {
Real *temp = T;
T = T_temp;
T_temp = temp;
dim3 dimBlock(blockSizeX, blockSizeY, blockSizeZ);
dim3 dimGrid(iceil(kmax,dimBlock.x),iceil(jmax,dimBlock.y),iceil(imax,dimBlock.z));
hipLaunchKernelGGL(( calculateTemperatureCuda), dim3(dimGrid),dim3(dimBlock), 0, 0,
Re, Pr, alpha, dt, dx, dy, dz, imax, jmax, kmax, U, V, W, T, T_temp, Flag);
}
void CfdSolverCuda::calculateFgh() {
calculateFghCuda(
Re, GX, GY, GZ, alpha, beta, dt, dx, dy, dz, imax, jmax, kmax,
blockSizeX, blockSizeY, blockSizeZ, U, V, W, T, F, G, H, Flag);
}
void CfdSolverCuda::calculateRs() {
dim3 dimBlock(blockSizeX, blockSizeY, blockSizeZ);
dim3 dimGrid(iceil(kmax,dimBlock.x),iceil(jmax,dimBlock.y),iceil(imax,dimBlock.z));
hipLaunchKernelGGL(( calculateRsCuda), dim3(dimGrid),dim3(dimBlock), 0, 0, dt, dx, dy, dz, imax, jmax, kmax, F, G, H, RS);
}
void CfdSolverCuda::executeSorSolver() {
sorSolverCuda(
omg, eps, itermax, linearSystemSolverType, shallWriteOutput,
dx, dy, dz, imax, jmax, kmax,
blockSizeX, blockSizeY, blockSizeZ, blockSize1D, P, P_temp, RS, Flag,
cudaReductionArrayResidual1, cudaReductionArrayResidual2,
cudaReductionArrayNumCells1, cudaReductionArrayNumCells2);
}
void CfdSolverCuda::calculateUvw() {
dim3 dimBlock(blockSizeX, blockSizeY, blockSizeZ);
dim3 dimGrid(iceil(kmax,dimBlock.x),iceil(jmax,dimBlock.y),iceil(imax,dimBlock.z));
hipLaunchKernelGGL(( calculateUvwCuda), dim3(dimGrid),dim3(dimBlock), 0, 0, dt, dx, dy, dz, imax, jmax, kmax, U, V, W, F, G, H, P, Flag);
}
void CfdSolverCuda::getDataForOutput(Real *U, Real *V, Real *W, Real *P, Real *T) {
// Copy the content of U, V, W, P, T in the internal representation to the specified output arrays.
checkCudaError(hipMemcpy(
U, this->U, sizeof(Real)*(imax+1)*(jmax+2)*(kmax+2), hipMemcpyDeviceToHost));
checkCudaError(hipMemcpy(
V, this->V, sizeof(Real)*(imax+2)*(jmax+1)*(kmax+2), hipMemcpyDeviceToHost));
checkCudaError(hipMemcpy(
W, this->W, sizeof(Real)*(imax+2)*(jmax+2)*(kmax+1), hipMemcpyDeviceToHost));
checkCudaError(hipMemcpy(
P, this->P, sizeof(Real)*(imax+2)*(jmax+2)*(kmax+2), hipMemcpyDeviceToHost));
checkCudaError(hipMemcpy(
T, this->T, sizeof(Real)*(imax+2)*(jmax+2)*(kmax+2), hipMemcpyDeviceToHost));
}
|
b733a39cbdc00167e8a5881dc3c0b1359185721e.cu
|
/*
* BSD 2-Clause License
*
* Copyright (c) 2019, Christoph Neuhauser, Stefan Haas, Paul Ng
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cstring>
#include <iostream>
#include "BoundaryValuesCuda.hpp"
#include "UvwCuda.hpp"
#include "SorSolverCuda.hpp"
#include "CfdSolverCuda.hpp"
#include "CudaDefines.hpp"
#include "../../Defines.hpp"
CfdSolverCuda::CfdSolverCuda(int gpuId, int blockSizeX, int blockSizeY, int blockSizeZ, int blockSize1D) {
this->gpuId = gpuId;
this->blockSizeX = blockSizeX;
this->blockSizeY = blockSizeY;
this->blockSizeZ = blockSizeZ;
this->blockSize1D = blockSize1D;
}
void CfdSolverCuda::initialize(
const std::string &scenarioName, LinearSystemSolverType linearSystemSolverType, bool shallWriteOutput,
Real Re, Real Pr, Real omg, Real eps, int itermax, Real alpha, Real beta, Real dt, Real tau,
Real GX, Real GY, Real GZ, bool useTemperature, Real T_h, Real T_c,
int imax, int jmax, int kmax, Real dx, Real dy, Real dz,
Real *U, Real *V, Real *W, Real *P, Real *T, uint32_t *Flag) {
this->scenarioName = scenarioName;
this->linearSystemSolverType = linearSystemSolverType;
this->shallWriteOutput = shallWriteOutput;
this->Re = Re;
this->Pr = Pr;
this->omg = omg;
this->eps = eps;
this->itermax = itermax;
this->alpha = alpha;
this->beta = beta;
this->dt = dt;
this->tau = tau;
this->GX = GX;
this->GY = GY;
this->GZ = GZ;
this->useTemperature = useTemperature;
this->T_h = T_h;
this->T_c = T_c;
this->imax = imax;
this->jmax = jmax;
this->kmax = kmax;
this->dx = dx;
this->dy = dy;
this->dz = dz;
int numDevices = 0;
cudaGetDeviceCount(&numDevices);
if (numDevices == 0) {
std::cerr << "Fatal error in CfdSolverCuda::initialize: No CUDA devices were found." << std::endl;
exit(1);
}
if (gpuId >= numDevices) {
std::cerr << "Error in CfdSolverCuda::initialize: Invalid device ID specified. Setting device ID to 0."
<< std::endl;
gpuId = 0;
}
cudaSetDevice(gpuId);
// Create all arrays for the simulation.
checkCudaError(cudaMalloc(&this->U, (imax+1)*(jmax+2)*(kmax+2)*sizeof(Real)));
checkCudaError(cudaMalloc(&this->V, (imax+2)*(jmax+1)*(kmax+2)*sizeof(Real)));
checkCudaError(cudaMalloc(&this->W, (imax+2)*(jmax+2)*(kmax+1)*sizeof(Real)));
checkCudaError(cudaMalloc(&this->P, (imax+2)*(jmax+2)*(kmax+2)*sizeof(Real)));
checkCudaError(cudaMalloc(&this->P_temp, (imax+2)*(jmax+2)*(kmax+2)*sizeof(Real)));
checkCudaError(cudaMalloc(&this->T, (imax+2)*(jmax+2)*(kmax+2)*sizeof(Real)));
checkCudaError(cudaMalloc(&this->T_temp, (imax+2)*(jmax+2)*(kmax+2)*sizeof(Real)));
checkCudaError(cudaMalloc(&this->F, (imax+1)*(jmax+1)*(kmax+1)*sizeof(Real)));
checkCudaError(cudaMalloc(&this->G, (imax+1)*(jmax+1)*(kmax+1)*sizeof(Real)));
checkCudaError(cudaMalloc(&this->H, (imax+1)*(jmax+1)*(kmax+1)*sizeof(Real)));
checkCudaError(cudaMalloc(&this->RS, (imax+1)*(jmax+1)*(kmax+1)*sizeof(Real)));
checkCudaError(cudaMalloc(&this->Flag, (imax+2)*(jmax+2)*(kmax+2)*sizeof(unsigned int)));
int cudaReductionArrayUSize = iceil((imax+1)*(jmax+2)*(kmax+2), blockSize1D*2);
int cudaReductionArrayVSize = iceil((imax+1)*(jmax+2)*(kmax+2), blockSize1D*2);
int cudaReductionArrayWSize = iceil((imax+1)*(jmax+2)*(kmax+2), blockSize1D*2);
int cudaReductionArrayResidualSize1 = iceil(imax*jmax*kmax, blockSize1D*2)*blockSize1D*2;
int cudaReductionArrayResidualSize2 = iceil(imax*jmax*kmax, blockSize1D*2);
checkCudaError(cudaMalloc(&cudaReductionArrayU1, cudaReductionArrayUSize*sizeof(Real)));
checkCudaError(cudaMalloc(&cudaReductionArrayU2, cudaReductionArrayUSize*sizeof(Real)));
checkCudaError(cudaMalloc(&cudaReductionArrayV1, cudaReductionArrayVSize*sizeof(Real)));
checkCudaError(cudaMalloc(&cudaReductionArrayV2, cudaReductionArrayVSize*sizeof(Real)));
checkCudaError(cudaMalloc(&cudaReductionArrayW1, cudaReductionArrayWSize*sizeof(Real)));
checkCudaError(cudaMalloc(&cudaReductionArrayW2, cudaReductionArrayWSize*sizeof(Real)));
checkCudaError(cudaMalloc(
&cudaReductionArrayResidual1, cudaReductionArrayResidualSize1*sizeof(Real)));
checkCudaError(cudaMalloc(
&cudaReductionArrayResidual2, cudaReductionArrayResidualSize2*sizeof(Real)));
checkCudaError(cudaMalloc(
&cudaReductionArrayNumCells1, cudaReductionArrayResidualSize1*sizeof(unsigned int)));
checkCudaError(cudaMalloc(
&cudaReductionArrayNumCells2, cudaReductionArrayResidualSize2*sizeof(unsigned int)));
// Copy the content of U, V, W, P, T and Flag to the internal representation.
checkCudaError(cudaMemcpy(
this->U, U, sizeof(Real)*(imax+1)*(jmax+2)*(kmax+2), cudaMemcpyHostToDevice));
checkCudaError(cudaMemcpy(
this->V, V, sizeof(Real)*(imax+2)*(jmax+1)*(kmax+2), cudaMemcpyHostToDevice));
checkCudaError(cudaMemcpy(
this->W, W, sizeof(Real)*(imax+2)*(jmax+2)*(kmax+1), cudaMemcpyHostToDevice));
checkCudaError(cudaMemcpy(
this->P, P, sizeof(Real)*(imax+2)*(jmax+2)*(kmax+2), cudaMemcpyHostToDevice));
checkCudaError(cudaMemcpy(
this->T, T, sizeof(Real)*(imax+2)*(jmax+2)*(kmax+2), cudaMemcpyHostToDevice));
checkCudaError(cudaMemcpy(
this->Flag, Flag, sizeof(unsigned int)*(imax+2)*(jmax+2)*(kmax+2), cudaMemcpyHostToDevice));
}
CfdSolverCuda::~CfdSolverCuda() {
checkCudaError(cudaFree(U));
checkCudaError(cudaFree(V));
checkCudaError(cudaFree(W));
checkCudaError(cudaFree(P));
checkCudaError(cudaFree(P_temp));
checkCudaError(cudaFree(T));
checkCudaError(cudaFree(T_temp));
checkCudaError(cudaFree(F));
checkCudaError(cudaFree(G));
checkCudaError(cudaFree(H));
checkCudaError(cudaFree(RS));
checkCudaError(cudaFree(Flag));
checkCudaError(cudaFree(cudaReductionArrayU1));
checkCudaError(cudaFree(cudaReductionArrayU2));
checkCudaError(cudaFree(cudaReductionArrayV1));
checkCudaError(cudaFree(cudaReductionArrayV2));
checkCudaError(cudaFree(cudaReductionArrayW1));
checkCudaError(cudaFree(cudaReductionArrayW2));
checkCudaError(cudaFree(cudaReductionArrayResidual1));
checkCudaError(cudaFree(cudaReductionArrayResidual2));
checkCudaError(cudaFree(cudaReductionArrayNumCells1));
checkCudaError(cudaFree(cudaReductionArrayNumCells2));
}
void CfdSolverCuda::setBoundaryValues() {
setBoundaryValuesCuda(
T_h, T_c, imax, jmax, kmax, blockSizeX, blockSizeY, blockSizeZ, U, V, W, T, Flag);
}
void CfdSolverCuda::setBoundaryValuesScenarioSpecific() {
setBoundaryValuesScenarioSpecificCuda(
scenarioName, imax, jmax, kmax, blockSizeX, blockSizeY, blockSizeZ, U, V, W, Flag);
}
Real CfdSolverCuda::calculateDt() {
calculateDtCuda(
Re, Pr, tau, dt, dx, dy, dz, imax, jmax, kmax, blockSize1D, U, V, W,
cudaReductionArrayU1, cudaReductionArrayU2,
cudaReductionArrayV1, cudaReductionArrayV2,
cudaReductionArrayW1, cudaReductionArrayW2,
useTemperature);
return dt;
}
void CfdSolverCuda::calculateTemperature() {
Real *temp = T;
T = T_temp;
T_temp = temp;
dim3 dimBlock(blockSizeX, blockSizeY, blockSizeZ);
dim3 dimGrid(iceil(kmax,dimBlock.x),iceil(jmax,dimBlock.y),iceil(imax,dimBlock.z));
calculateTemperatureCuda<<<dimGrid,dimBlock>>>(
Re, Pr, alpha, dt, dx, dy, dz, imax, jmax, kmax, U, V, W, T, T_temp, Flag);
}
void CfdSolverCuda::calculateFgh() {
calculateFghCuda(
Re, GX, GY, GZ, alpha, beta, dt, dx, dy, dz, imax, jmax, kmax,
blockSizeX, blockSizeY, blockSizeZ, U, V, W, T, F, G, H, Flag);
}
void CfdSolverCuda::calculateRs() {
dim3 dimBlock(blockSizeX, blockSizeY, blockSizeZ);
dim3 dimGrid(iceil(kmax,dimBlock.x),iceil(jmax,dimBlock.y),iceil(imax,dimBlock.z));
calculateRsCuda<<<dimGrid,dimBlock>>>(dt, dx, dy, dz, imax, jmax, kmax, F, G, H, RS);
}
void CfdSolverCuda::executeSorSolver() {
sorSolverCuda(
omg, eps, itermax, linearSystemSolverType, shallWriteOutput,
dx, dy, dz, imax, jmax, kmax,
blockSizeX, blockSizeY, blockSizeZ, blockSize1D, P, P_temp, RS, Flag,
cudaReductionArrayResidual1, cudaReductionArrayResidual2,
cudaReductionArrayNumCells1, cudaReductionArrayNumCells2);
}
void CfdSolverCuda::calculateUvw() {
dim3 dimBlock(blockSizeX, blockSizeY, blockSizeZ);
dim3 dimGrid(iceil(kmax,dimBlock.x),iceil(jmax,dimBlock.y),iceil(imax,dimBlock.z));
calculateUvwCuda<<<dimGrid,dimBlock>>>(dt, dx, dy, dz, imax, jmax, kmax, U, V, W, F, G, H, P, Flag);
}
void CfdSolverCuda::getDataForOutput(Real *U, Real *V, Real *W, Real *P, Real *T) {
// Copy the content of U, V, W, P, T in the internal representation to the specified output arrays.
checkCudaError(cudaMemcpy(
U, this->U, sizeof(Real)*(imax+1)*(jmax+2)*(kmax+2), cudaMemcpyDeviceToHost));
checkCudaError(cudaMemcpy(
V, this->V, sizeof(Real)*(imax+2)*(jmax+1)*(kmax+2), cudaMemcpyDeviceToHost));
checkCudaError(cudaMemcpy(
W, this->W, sizeof(Real)*(imax+2)*(jmax+2)*(kmax+1), cudaMemcpyDeviceToHost));
checkCudaError(cudaMemcpy(
P, this->P, sizeof(Real)*(imax+2)*(jmax+2)*(kmax+2), cudaMemcpyDeviceToHost));
checkCudaError(cudaMemcpy(
T, this->T, sizeof(Real)*(imax+2)*(jmax+2)*(kmax+2), cudaMemcpyDeviceToHost));
}
|
eee1cc26b0ffad97a20baa8d636415b1fe80a008.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
*Create:24 7 2018
*@author:haili
* dim block(x,y)
**/
//----------------------------------------------------------------
#include"dct.h"
__global__ void dct2(const float* A,const int m,const int n,float* B){
__shared__ float mat[32][32+1];
const int tid_x=threadIdx.x;
const int tid_y=threadIdx.y;
const int t_x=blockDim.x;
const int t_y=blockDim.y;
const int bid_x=blockIdx.x;
float temp=0;
int tidx=tid_x;
int tidy=tid_y;
for(tidy=tid_y;tidy<n;tidy+=t_y){
temp=0;
for(tidx=tid_x;tidx<n*m;tidx+=t_x){
int row=tidx/n;
int col=tidx%n;
temp+=A[row*n+col]*cos(pi*(row+0.5)*bid_x/m)*cos(pi*(col+0.5)*tidy/n);
}
mat[tid_y][tid_x]=temp;
__syncthreads();
int k=t_x/2;
while(k!=0){
if(tid_x<k){
mat[tid_y][tid_x]+=mat[tid_y][tid_x+k];
}
__syncthreads();
k/=2;
}
float a=0;
float b=0;
if(bid_x==0){
a=(float)1/sqrt(float(m));
}else{
a=sqrt((float)2)/sqrt(float(m));
}
if(tidy==0){
b=(float)1/sqrt(float(n));
}else{
b=sqrt((float)2)/sqrt(float(n));
}
if(tid_x==0){
B[bid_x*n+tidy]=a*b*mat[tid_y][tid_x];
}
}
}
__global__ void idct2(const float* A,const int m,const int n,float* B){
__shared__ float mat[32][32+1];
const int tid_x=threadIdx.x;
const int tid_y=threadIdx.y;
const int t_x=blockDim.x;
const int t_y=blockDim.y;
const int bid_x=blockIdx.x;
float temp=0;
int tidx=tid_x;
int tidy=tid_y;
for(tidy=tid_y;tidy<n;tidy+=t_y){
temp=0;
for(tidx=tid_x;tidx<n*m;tidx+=t_x){
int row=tidx/n;
int col=tidx%n;
float a=0;
float b=0;
if(row==0){
a=(float)1/sqrt(float(m));
}else{
a=sqrt((float)2)/sqrt(float(m));
}
if(col==0){
b=(float)1/sqrt(float(n));
}else{
b=sqrt((float)2)/sqrt(float(n));
}
temp+=a*b*A[row*n+col]*cos(pi*(bid_x+0.5)*row/m)*cos(pi*(tidy+0.5)*col/n);
}
mat[tid_y][tid_x]=temp;
__syncthreads();
int k=t_x/2;
while(k!=0){
if(tid_x<k){
mat[tid_y][tid_x]+=mat[tid_y][tid_x+k];
}
__syncthreads();
k/=2;
}
if(tid_x==0){
B[bid_x*n+tidy]=mat[tid_y][tid_x];
}
}
}
|
eee1cc26b0ffad97a20baa8d636415b1fe80a008.cu
|
/**
*Create:24 7 2018
*@author:haili
* dim block(x,y)
**/
//----------------------------------------------------------------
#include"dct.h"
__global__ void dct2(const float* A,const int m,const int n,float* B){
__shared__ float mat[32][32+1];
const int tid_x=threadIdx.x;
const int tid_y=threadIdx.y;
const int t_x=blockDim.x;
const int t_y=blockDim.y;
const int bid_x=blockIdx.x;
float temp=0;
int tidx=tid_x;
int tidy=tid_y;
for(tidy=tid_y;tidy<n;tidy+=t_y){
temp=0;
for(tidx=tid_x;tidx<n*m;tidx+=t_x){
int row=tidx/n;
int col=tidx%n;
temp+=A[row*n+col]*cos(pi*(row+0.5)*bid_x/m)*cos(pi*(col+0.5)*tidy/n);
}
mat[tid_y][tid_x]=temp;
__syncthreads();
int k=t_x/2;
while(k!=0){
if(tid_x<k){
mat[tid_y][tid_x]+=mat[tid_y][tid_x+k];
}
__syncthreads();
k/=2;
}
float a=0;
float b=0;
if(bid_x==0){
a=(float)1/sqrt(float(m));
}else{
a=sqrt((float)2)/sqrt(float(m));
}
if(tidy==0){
b=(float)1/sqrt(float(n));
}else{
b=sqrt((float)2)/sqrt(float(n));
}
if(tid_x==0){
B[bid_x*n+tidy]=a*b*mat[tid_y][tid_x];
}
}
}
__global__ void idct2(const float* A,const int m,const int n,float* B){
__shared__ float mat[32][32+1];
const int tid_x=threadIdx.x;
const int tid_y=threadIdx.y;
const int t_x=blockDim.x;
const int t_y=blockDim.y;
const int bid_x=blockIdx.x;
float temp=0;
int tidx=tid_x;
int tidy=tid_y;
for(tidy=tid_y;tidy<n;tidy+=t_y){
temp=0;
for(tidx=tid_x;tidx<n*m;tidx+=t_x){
int row=tidx/n;
int col=tidx%n;
float a=0;
float b=0;
if(row==0){
a=(float)1/sqrt(float(m));
}else{
a=sqrt((float)2)/sqrt(float(m));
}
if(col==0){
b=(float)1/sqrt(float(n));
}else{
b=sqrt((float)2)/sqrt(float(n));
}
temp+=a*b*A[row*n+col]*cos(pi*(bid_x+0.5)*row/m)*cos(pi*(tidy+0.5)*col/n);
}
mat[tid_y][tid_x]=temp;
__syncthreads();
int k=t_x/2;
while(k!=0){
if(tid_x<k){
mat[tid_y][tid_x]+=mat[tid_y][tid_x+k];
}
__syncthreads();
k/=2;
}
if(tid_x==0){
B[bid_x*n+tidy]=mat[tid_y][tid_x];
}
}
}
|
53e201ab6e10dbfeff993bca5e568b038d9a9b0e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
__global__ void childKernel()
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == 0)
{
printf("Hello ");
}
}
__global__ void parentKernel()
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid != 0)
{
return;
}
// launch child
hipLaunchKernelGGL(( childKernel), dim3(10), dim3(32), 0, 0, );
if (hipSuccess != hipGetLastError())
{
printf("Child kernel failed to lauch\n");
return;
}
// wait for child to complete
if (hipSuccess != hipDeviceSynchronize())
{
printf("Child kernel failed to lauch\n");
return;
}
printf("World!\n");
}
int main(int argc, char *argv[])
{
// launch parent
hipLaunchKernelGGL(( parentKernel), dim3(10), dim3(32), 0, 0, );
if (hipSuccess != hipGetLastError())
{
return 1;
}
// wait for parent to complete
if (hipSuccess != hipDeviceSynchronize())
{
return 2;
}
return 0;
}
|
53e201ab6e10dbfeff993bca5e568b038d9a9b0e.cu
|
#include <stdio.h>
#include <stdlib.h>
__global__ void childKernel()
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid == 0)
{
printf("Hello ");
}
}
__global__ void parentKernel()
{
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid != 0)
{
return;
}
// launch child
childKernel<<<10, 32>>>();
if (cudaSuccess != cudaGetLastError())
{
printf("Child kernel failed to lauch\n");
return;
}
// wait for child to complete
if (cudaSuccess != cudaDeviceSynchronize())
{
printf("Child kernel failed to lauch\n");
return;
}
printf("World!\n");
}
int main(int argc, char *argv[])
{
// launch parent
parentKernel<<<10, 32>>>();
if (cudaSuccess != cudaGetLastError())
{
return 1;
}
// wait for parent to complete
if (cudaSuccess != cudaDeviceSynchronize())
{
return 2;
}
return 0;
}
|
cbe24f3f2ba803dac7d0547a0e3c136a5e11b58b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <dlfcn.h>
#include <stdlib.h>
hipError_t cuDeviceTotalMem(size_t* bytes, hipDevice_t dev) {
void *handle;
handle = dlopen("/usr/lib/x86_64-linux-gnu/libcuda.so.1", RTLD_LAZY);
printf("%s\n", "I just want to tell you that cuDeviceTotalMem is STILL hijacked!");
hipError_t (*ori_cu_device_total_mem)(size_t*, hipDevice_t);
ori_cu_device_total_mem = (hipError_t (*)(size_t *, hipDevice_t))dlsym(handle, "hipDeviceTotalMem");
hipError_t res = ori_cu_device_total_mem(bytes, dev);
dlclose(handle);
return res;
}
|
cbe24f3f2ba803dac7d0547a0e3c136a5e11b58b.cu
|
#include <cuda.h>
#include <stdio.h>
#include <dlfcn.h>
#include <stdlib.h>
CUresult cuDeviceTotalMem(size_t* bytes, CUdevice dev) {
void *handle;
handle = dlopen("/usr/lib/x86_64-linux-gnu/libcuda.so.1", RTLD_LAZY);
printf("%s\n", "I just want to tell you that cuDeviceTotalMem is STILL hijacked!");
CUresult (*ori_cu_device_total_mem)(size_t*, CUdevice);
ori_cu_device_total_mem = (CUresult (*)(size_t *, CUdevice))dlsym(handle, "cuDeviceTotalMem_v2");
CUresult res = ori_cu_device_total_mem(bytes, dev);
dlclose(handle);
return res;
}
|
2d11cf723b2151d07e24fbf2edded80484558cbc.hip
|
// !!! This is a file automatically generated by hipify!!!
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/conv_grad_grad_kernel.h"
#include "paddle/fluid/framework/eigen.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#ifdef PADDLE_WITH_HIP
#include "paddle/fluid/operators/conv_miopen_helper.h"
#else
#include "paddle/fluid/operators/conv_cudnn_helper.h"
#endif
#include "paddle/fluid/platform/cudnn_workspace_helper.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/profiler.h"
#include "paddle/phi/common/bfloat16.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/kernels/cpu/conv_util.h"
#include "paddle/phi/kernels/funcs/batch_norm_utils.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/padding.h"
#include "paddle/phi/kernels/impl/conv_cudnn_impl.h"
namespace phi {
template <typename T, typename Context>
void ConvCudnnGradGradKernel(
const Context& ctx,
const DenseTensor& input,
const DenseTensor& filter,
const DenseTensor& out_grad,
const paddle::optional<DenseTensor>& input_grad_grad,
const paddle::optional<DenseTensor>& filter_grad_grad,
const std::vector<int>& strides,
const std::vector<int>& paddings_t,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations_t,
const std::string& data_format,
bool use_addto,
int workspace_size_MB,
bool exhaustive_search_t,
DenseTensor* input_grad,
DenseTensor* filter_grad,
DenseTensor* out_grad_grad) {
auto X = &input;
auto W = &filter;
auto dO = &out_grad;
auto ddX = input_grad_grad.get_ptr();
auto ddW = filter_grad_grad.get_ptr();
auto ddO = out_grad_grad;
auto dW = filter_grad;
auto dX = input_grad;
if (ddO) {
ctx.template Alloc<T>(ddO);
phi::funcs::SetConstant<Context, T> set_zero;
set_zero(ctx, ddO, static_cast<T>(0));
}
if (dW) {
ctx.template Alloc<T>(dW);
}
if (dX) {
ctx.template Alloc<T>(dX);
}
// const T* x = X->data<T>();
const T* dy = dO->data<T>();
const T* w = W->data<T>();
const T* ddx = nullptr;
const T* ddw = nullptr;
T *dw, *dx, *ddy;
dw = dx = ddy = nullptr;
T* transformed_dx = nullptr;
std::vector<int> dilations = dilations_t;
bool exhaustive_search = FLAGS_cudnn_exhaustive_search || exhaustive_search_t;
bool deterministic = FLAGS_cudnn_deterministic;
auto exhaustive_deterministic = exhaustive_search && deterministic;
PADDLE_ENFORCE_EQ(exhaustive_deterministic,
false,
phi::errors::InvalidArgument(
"Cann't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time."));
std::vector<int> paddings = paddings_t;
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
// transform Tensors to channel first-----------
DenseTensor transformed_X_channel(X->type());
DenseTensor transformed_dO_channel(dO->type());
DenseTensor transformed_ddX_channel(X->type());
DenseTensor transformed_ddO_channel(dO->type());
DenseTensor transformed_dX_channel(X->type());
if (channel_last) {
ResizeToChannelFirst<Context, T>(ctx, X, &transformed_X_channel);
TransToChannelFirst<Context, T>(ctx, X, &transformed_X_channel);
ResizeToChannelFirst<Context, T>(ctx, dO, &transformed_dO_channel);
TransToChannelFirst<Context, T>(ctx, dO, &transformed_dO_channel);
if (ddX) {
ResizeToChannelFirst<Context, T>(ctx, ddX, &transformed_ddX_channel);
TransToChannelFirst<Context, T>(ctx, ddX, &transformed_ddX_channel);
}
if (ddO) {
ResizeToChannelFirst<Context, T>(ctx, ddO, &transformed_ddO_channel);
}
if (dX) {
ResizeToChannelFirst<Context, T>(ctx, dX, &transformed_dX_channel);
ctx.template Alloc<T>(&transformed_dX_channel);
}
} else {
transformed_X_channel = *X;
transformed_dO_channel = *dO;
if (ddX) {
transformed_ddX_channel = *ddX;
}
if (ddO) {
transformed_ddO_channel.ShareDataWith(*ddO);
}
if (dX) {
transformed_dX_channel.ShareDataWith(*dX);
}
}
auto in_dims = transformed_X_channel.dims();
auto filter_dims = W->dims();
DDim in_data_dims = slice_ddim(in_dims, 2, in_dims.size());
DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(
&paddings, &dilations, padding_algorithm, in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = funcs::IsSymmetricPadding(paddings, data_dim);
DenseTensor transformed_X(X->type());
DenseTensor transformed_ddX(X->type());
DenseTensor transformed_dX(X->type());
std::vector<int> padding_common(data_dim, 0);
std::vector<int> input_pad(X->dims().size() * 2, 0);
if (!is_sys_pad) {
// get pad
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_X_channel.dims()[0];
new_input_shape_vec[1] = transformed_X_channel.dims()[1];
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
new_input_shape_vec[i + 2] =
transformed_X_channel.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
}
DDim new_input_shape(make_ddim(new_input_shape_vec));
transformed_X.Resize(new_input_shape);
transformed_ddX.Resize(new_input_shape);
transformed_dX.Resize(new_input_shape);
ctx.template Alloc<T>(&transformed_X);
if (ddX) {
ctx.template Alloc<T>(&transformed_ddX);
}
if (dX) {
ctx.template Alloc<T>(&transformed_dX);
}
// pad for input
const int rank = X->dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
funcs::PadFunction<Context, T, 4>(
ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
if (ddX) {
funcs::PadFunction<Context, T, 4>(ctx,
input_pad,
transformed_ddX_channel,
pad_value,
&transformed_ddX);
}
} break;
case 5: {
funcs::PadFunction<Context, T, 5>(
ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
if (ddX) {
funcs::PadFunction<Context, T, 5>(ctx,
input_pad,
transformed_ddX_channel,
pad_value,
&transformed_ddX);
}
} break;
default:
PADDLE_THROW(phi::errors::InvalidArgument(
"ConvOp only support tensors with 4 or 5 dimensions."));
}
} else {
transformed_X.ShareDataWith(transformed_X_channel);
if (ddX) {
transformed_ddX.ShareDataWith(transformed_ddX_channel);
}
if (dX) {
transformed_dX.ShareDataWith(transformed_dX_channel);
}
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* x = transformed_X.data<T>();
int iwo_group = groups;
int c_group = 1;
#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1)
iwo_group = 1;
c_group = groups;
groups = 1;
#endif
auto dtype = paddle::platform::CudnnDataType<T>::type;
auto handle = ctx.cudnn_handle();
auto layout = paddle::platform::GetCudnnTensorFormat(
paddle::platform::DataLayout::kNCHW);
paddle::operators::ConvArgs args1{&transformed_ddX,
W,
&transformed_ddO_channel,
strides,
padding_common,
dilations,
dtype,
groups,
paddle::platform::DataLayout::kNCHW};
paddle::operators::ConvArgs args2{&transformed_X,
ddW,
&transformed_ddO_channel,
strides,
padding_common,
dilations,
dtype,
groups,
paddle::platform::DataLayout::kNCHW};
paddle::operators::ConvArgs args3{&transformed_ddX,
dW,
&transformed_dO_channel,
strides,
padding_common,
dilations,
dtype,
groups,
paddle::platform::DataLayout::kNCHW};
paddle::operators::ConvArgs args4{&transformed_dX,
ddW,
&transformed_dO_channel,
strides,
padding_common,
dilations,
dtype,
groups,
paddle::platform::DataLayout::kNCHW};
#ifdef PADDLE_WITH_HIP
paddle::operators::SearchResult<miopenConvFwdAlgorithm_t> fwd_result1;
paddle::operators::SearchResult<miopenConvFwdAlgorithm_t> fwd_result2;
paddle::operators::SearchResult<miopenConvBwdDataAlgorithm_t> data_result;
paddle::operators::SearchResult<miopenConvBwdWeightsAlgorithm_t>
filter_result;
#else
paddle::operators::SearchResult<cudnnConvolutionFwdAlgo_t> fwd_result1;
paddle::operators::SearchResult<cudnnConvolutionFwdAlgo_t> fwd_result2;
paddle::operators::SearchResult<cudnnConvolutionBwdDataAlgo_t> data_result;
paddle::operators::SearchResult<cudnnConvolutionBwdFilterAlgo_t>
filter_result;
#endif
// ddo = conv(ddI, W) + conv(I, ddW)
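// In the same notation, the remaining double-grad terms computed further down
// are (editor's summary, not part of the original comment):
//   dW = conv_bwd_filter(ddI, dO)  -- set up via args3
//   dI = conv_bwd_data(dO, ddW)    -- set up via args4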
size_t workspace_size = 0;
T* transformed_ddy_channel = nullptr;
if (ddO) {
ddy = ddO->data<T>();
transformed_ddy_channel = transformed_ddO_channel.data<T>();
if (ddX) {
args1.handle = handle;
args1.idesc.set(transformed_ddX, iwo_group);
args1.wdesc.set(*W, layout, iwo_group);
args1.odesc.set(transformed_ddO_channel, iwo_group);
args1.cdesc.set(dtype,
padding_common,
strides,
dilations,
paddle::platform::AllowTF32Cudnn(),
c_group);
#ifdef PADDLE_WITH_HIP
using search1 =
paddle::operators::SearchAlgorithm<miopenConvFwdAlgorithm_t>;
workspace_size = search1::GetWorkspaceSize(args1);
fwd_result1.algo = search1::Find<T>(
args1, exhaustive_search, false, workspace_size, ctx);
#else
using search1 =
paddle::operators::SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
fwd_result1 = search1::Find<T>(args1, exhaustive_search, false, ctx);
workspace_size = search1::GetWorkspaceSize(args1, fwd_result1.algo);
#endif
}
if (ddW) {
ddw = ddW->data<T>();
args2.handle = handle;
args2.idesc.set(transformed_X, iwo_group);
args2.wdesc.set(*ddW, layout, iwo_group);
args2.odesc.set(transformed_ddO_channel, iwo_group);
args2.cdesc.set(dtype,
padding_common,
strides,
dilations,
paddle::platform::AllowTF32Cudnn(),
c_group);
#ifdef PADDLE_WITH_HIP
using search2 =
paddle::operators::SearchAlgorithm<miopenConvFwdAlgorithm_t>;
workspace_size =
std::max(workspace_size, search2::GetWorkspaceSize(args2));
fwd_result2.algo = search2::Find<T>(
args2, exhaustive_search, false, workspace_size, ctx);
#else
using search2 =
paddle::operators::SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
fwd_result2 = search2::Find<T>(args2, exhaustive_search, false, ctx);
workspace_size = std::max(
workspace_size, search2::GetWorkspaceSize(args2, fwd_result2.algo));
#endif
}
}
if (dW && ddX) {
dw = dW->data<T>();
args3.handle = handle;
args3.idesc.set(transformed_ddX, iwo_group);
args3.wdesc.set(*dW, layout, iwo_group);
args3.odesc.set(transformed_dO_channel, iwo_group);
args3.cdesc.set(dtype,
padding_common,
strides,
dilations,
paddle::platform::AllowTF32Cudnn(),
c_group);
#ifdef PADDLE_WITH_HIP
using search3 =
paddle::operators::SearchAlgorithm<miopenConvBwdWeightsAlgorithm_t>;
workspace_size = std::max(workspace_size, search3::GetWorkspaceSize(args3));
filter_result.algo = search3::Find<T>(
args3, exhaustive_search, deterministic, workspace_size, ctx);
#else
using search3 =
paddle::operators::SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
filter_result =
search3::Find<T>(args3, exhaustive_search, deterministic, ctx);
workspace_size = std::max(
workspace_size, search3::GetWorkspaceSize(args3, filter_result.algo));
#endif
}
if (ddW && dX) {
transformed_dx = transformed_dX.data<T>();
args4.handle = handle;
args4.idesc.set(transformed_dX, iwo_group);
args4.wdesc.set(*ddW, layout, iwo_group);
args4.odesc.set(transformed_dO_channel, iwo_group);
args4.cdesc.set(dtype,
padding_common,
strides,
dilations,
paddle::platform::AllowTF32Cudnn(),
c_group);
#ifdef PADDLE_WITH_HIP
using search4 =
paddle::operators::SearchAlgorithm<miopenConvBwdDataAlgorithm_t>;
workspace_size = std::max(workspace_size, search4::GetWorkspaceSize(args4));
data_result.algo = search4::Find<T>(
args4, exhaustive_search, deterministic, workspace_size, ctx);
#else
using search4 =
paddle::operators::SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
data_result =
search4::Find<T>(args4, exhaustive_search, deterministic, ctx);
workspace_size = std::max(
workspace_size, search4::GetWorkspaceSize(args4, data_result.algo));
#endif
}
int i_n, i_c, i_d, i_h, i_w;
GetNCDHW(
transformed_X.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h, &i_w);
int o_n, o_c, o_d, o_h, o_w;
GetNCDHW(transformed_dO_channel.dims(),
DataLayout::kNCHW,
&o_n,
&o_c,
&o_d,
&o_h,
&o_w);
int group_offset_in = i_c / groups * i_h * i_w * i_d;
int group_offset_out = o_c / groups * o_h * o_w * o_d;
int group_offset_filter = W->numel() / groups;
paddle::operators::ScalingParamType<T> alpha = 1.0f;
paddle::operators::ScalingParamType<T> beta = 0.0f;
// NOTE(zhiqiu): inplace addto is not supported in double grad yet.
// ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 1.0f :
// 0.0f;
// VLOG(4) << "Conv_grad_grad: use_addto = " << ctx.Attr<bool>("use_addto");
auto wkspace_handle = ctx.cudnn_workspace_handle();
if (ddO) {
if (ddX) {
ddx = transformed_ddX.data<T>();
#ifdef PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::miopenConvolutionForward(
handle,
&alpha,
args1.idesc.desc(),
ddx,
args1.wdesc.desc(),
w,
args1.cdesc.desc(),
fwd_result1.algo,
&beta,
args1.odesc.desc(),
transformed_ddy_channel,
workspace_ptr,
workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnConvolutionForward(
handle,
&alpha,
args1.idesc.desc(),
ddx + i * group_offset_in,
args1.wdesc.desc(),
w + i * group_offset_filter,
args1.cdesc.desc(),
fwd_result1.algo,
workspace_ptr,
workspace_size,
&beta,
args1.odesc.desc(),
transformed_ddy_channel + i * group_offset_out));
},
workspace_size);
}
#endif
}
if (ddW) {
#ifdef PADDLE_WITH_HIP
// MIOPEN only supports beta == 0.0f
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::miopenConvolutionForward(
handle,
&alpha,
args2.idesc.desc(),
x,
args2.wdesc.desc(),
ddw,
args2.cdesc.desc(),
fwd_result2.algo,
&beta,
args2.odesc.desc(),
transformed_ddy_channel,
workspace_ptr,
workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnConvolutionForward(
handle,
&alpha,
args2.idesc.desc(),
x + i * group_offset_in,
args2.wdesc.desc(),
ddw + i * group_offset_filter,
args2.cdesc.desc(),
fwd_result2.algo,
workspace_ptr,
workspace_size,
&alpha,
args2.odesc.desc(),
transformed_ddy_channel + i * group_offset_out));
},
workspace_size);
}
#endif
}
if (channel_last) {
TransToChannelLast<Context, T>(ctx, &transformed_ddO_channel, ddO);
}
}
T* transformed_dy_channel = transformed_dO_channel.data<T>();
if (dW && ddX) {
ddx = transformed_ddX.data<T>();
#ifdef PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::miopenConvolutionBackwardWeights(
handle,
&alpha,
args3.odesc.desc(),
transformed_dy_channel,
args3.idesc.desc(),
ddx,
args3.cdesc.desc(),
filter_result.algo,
&beta,
args3.wdesc.desc(),
dw,
workspace_ptr,
workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnConvolutionBackwardFilter(
handle,
&alpha,
args3.idesc.desc(),
ddx + i * group_offset_in,
args3.odesc.desc(),
transformed_dy_channel + i * group_offset_out,
args3.cdesc.desc(),
filter_result.algo,
workspace_ptr,
workspace_size,
&beta,
args3.wdesc.desc(),
dw + i * group_offset_filter));
},
workspace_size);
}
#endif
}
if (dX && ddW) {
ddw = ddW->data<T>();
#ifdef PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::miopenConvolutionBackwardData(
handle,
&alpha,
args4.odesc.desc(),
transformed_dy_channel,
args4.wdesc.desc(),
ddw,
args4.cdesc.desc(),
data_result.algo,
&beta,
args4.idesc.desc(),
transformed_dx,
workspace_ptr,
workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnConvolutionBackwardData(
handle,
&alpha,
args4.wdesc.desc(),
ddw + i * group_offset_filter,
args4.odesc.desc(),
transformed_dy_channel + i * group_offset_out,
args4.cdesc.desc(),
data_result.algo,
workspace_ptr,
workspace_size,
&beta,
args4.idesc.desc(),
transformed_dx + i * group_offset_in));
},
workspace_size);
}
#endif
if (!is_sys_pad) {
// reverse padded input
std::vector<int> starts(X->dims().size(), 0);
std::vector<int> axes(X->dims().size(), 0);
for (size_t i = 0; i < X->dims().size(); ++i) {
starts[i] = input_pad[2 * i];
axes[i] = i;
}
if (X->dims().size() == 4) {
paddle::operators::RemovePaddingSlice<Context, T, 4>(
ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
} else {
paddle::operators::RemovePaddingSlice<Context, T, 5>(
ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
}
}
if (channel_last) {
TransToChannelLast<Context, T>(ctx, &transformed_dX_channel, dX);
}
}
}
template <typename T, typename Context>
void DepthwiseConvDoubleGradGPUDNNKernel(
const Context& ctx,
const DenseTensor& input,
const DenseTensor& filter,
const DenseTensor& out_grad,
const paddle::optional<DenseTensor>& input_grad_grad,
const paddle::optional<DenseTensor>& filter_grad_grad,
const std::vector<int>& strides,
const std::vector<int>& paddings_t,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations_t,
const std::string& data_format,
bool use_addto,
int workspace_size_MB,
bool exhaustive_search_t,
bool fuse_relu,
DenseTensor* input_grad,
DenseTensor* filter_grad,
DenseTensor* out_grad_grad) {
ConvCudnnGradGradKernel<T>(ctx,
input,
filter,
out_grad,
input_grad_grad,
filter_grad_grad,
strides,
paddings_t,
padding_algorithm,
groups,
dilations_t,
data_format,
use_addto,
workspace_size_MB,
exhaustive_search_t,
input_grad,
filter_grad,
out_grad_grad);
}
template <typename T, typename Context>
void Conv3DCudnnGradGradKernel(
const Context& ctx,
const DenseTensor& input,
const DenseTensor& filter,
const DenseTensor& out_grad,
const paddle::optional<DenseTensor>& input_grad_grad,
const paddle::optional<DenseTensor>& filter_grad_grad,
const std::vector<int>& strides,
const std::vector<int>& paddings_t,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations_t,
const std::string& data_format,
bool use_addto,
int workspace_size_MB,
bool exhaustive_search_t,
DenseTensor* input_grad,
DenseTensor* filter_grad,
DenseTensor* out_grad_grad) {
ConvCudnnGradGradKernel<T>(ctx,
input,
filter,
out_grad,
input_grad_grad,
filter_grad_grad,
strides,
paddings_t,
padding_algorithm,
groups,
dilations_t,
data_format,
use_addto,
workspace_size_MB,
exhaustive_search_t,
input_grad,
filter_grad,
out_grad_grad);
}
} // namespace phi
#ifdef PADDLE_WITH_HIP
PD_REGISTER_KERNEL(conv2d_grad_grad,
GPUDNN,
ALL_LAYOUT,
phi::ConvCudnnGradGradKernel,
float,
phi::dtype::float16) {}
PD_REGISTER_KERNEL(conv3d_grad_grad,
GPUDNN,
ALL_LAYOUT,
phi::Conv3DCudnnGradGradKernel,
float,
phi::dtype::float16) {}
PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad,
GPU,
ALL_LAYOUT,
phi::DepthwiseConvDoubleGradGPUDNNKernel,
float,
phi::dtype::float16) {}
#else
#if CUDNN_VERSION_MIN(8, 1, 0)
PD_REGISTER_KERNEL(conv2d_grad_grad,
GPUDNN,
ALL_LAYOUT,
phi::ConvCudnnGradGradKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {}
PD_REGISTER_KERNEL(conv3d_grad_grad,
GPUDNN,
ALL_LAYOUT,
phi::Conv3DCudnnGradGradKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {}
PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad,
GPU,
ALL_LAYOUT,
phi::DepthwiseConvDoubleGradGPUDNNKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {}
#else
PD_REGISTER_KERNEL(conv2d_grad_grad,
GPUDNN,
ALL_LAYOUT,
phi::ConvCudnnGradGradKernel,
float,
double,
phi::dtype::float16) {}
PD_REGISTER_KERNEL(conv3d_grad_grad,
GPUDNN,
ALL_LAYOUT,
phi::Conv3DCudnnGradGradKernel,
float,
double,
phi::dtype::float16) {}
PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad,
GPU,
ALL_LAYOUT,
phi::DepthwiseConvDoubleGradGPUDNNKernel,
float,
double,
phi::dtype::float16) {}
#endif
#endif
|
2d11cf723b2151d07e24fbf2edded80484558cbc.cu
|
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "paddle/phi/kernels/conv_grad_grad_kernel.h"
#include "paddle/fluid/framework/eigen.h"
#include "paddle/phi/backends/gpu/gpu_context.h"
#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/kernel_registry.h"
#ifdef PADDLE_WITH_HIP
#include "paddle/fluid/operators/conv_miopen_helper.h"
#else
#include "paddle/fluid/operators/conv_cudnn_helper.h"
#endif
#include "paddle/fluid/platform/cudnn_workspace_helper.h"
#include "paddle/fluid/platform/float16.h"
#include "paddle/fluid/platform/profiler.h"
#include "paddle/phi/common/bfloat16.h"
#include "paddle/phi/common/float16.h"
#include "paddle/phi/kernels/cpu/conv_util.h"
#include "paddle/phi/kernels/funcs/batch_norm_utils.h"
#include "paddle/phi/kernels/funcs/math_function.h"
#include "paddle/phi/kernels/funcs/padding.h"
#include "paddle/phi/kernels/impl/conv_cudnn_impl.h"
namespace phi {
template <typename T, typename Context>
void ConvCudnnGradGradKernel(
const Context& ctx,
const DenseTensor& input,
const DenseTensor& filter,
const DenseTensor& out_grad,
const paddle::optional<DenseTensor>& input_grad_grad,
const paddle::optional<DenseTensor>& filter_grad_grad,
const std::vector<int>& strides,
const std::vector<int>& paddings_t,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations_t,
const std::string& data_format,
bool use_addto,
int workspace_size_MB,
bool exhaustive_search_t,
DenseTensor* input_grad,
DenseTensor* filter_grad,
DenseTensor* out_grad_grad) {
auto X = &input;
auto W = &filter;
auto dO = &out_grad;
auto ddX = input_grad_grad.get_ptr();
auto ddW = filter_grad_grad.get_ptr();
auto ddO = out_grad_grad;
auto dW = filter_grad;
auto dX = input_grad;
if (ddO) {
ctx.template Alloc<T>(ddO);
phi::funcs::SetConstant<Context, T> set_zero;
set_zero(ctx, ddO, static_cast<T>(0));
}
if (dW) {
ctx.template Alloc<T>(dW);
}
if (dX) {
ctx.template Alloc<T>(dX);
}
// const T* x = X->data<T>();
const T* dy = dO->data<T>();
const T* w = W->data<T>();
const T* ddx = nullptr;
const T* ddw = nullptr;
T *dw, *dx, *ddy;
dw = dx = ddy = nullptr;
T* transformed_dx = nullptr;
std::vector<int> dilations = dilations_t;
bool exhaustive_search = FLAGS_cudnn_exhaustive_search || exhaustive_search_t;
bool deterministic = FLAGS_cudnn_deterministic;
auto exhaustive_deterministic = exhaustive_search && deterministic;
PADDLE_ENFORCE_EQ(exhaustive_deterministic,
false,
phi::errors::InvalidArgument(
"Cann't set exhaustive_search True and "
"FLAGS_cudnn_deterministic True at same time."));
std::vector<int> paddings = paddings_t;
const bool channel_last = (data_format == "NHWC" || data_format == "NDHWC");
// transform Tensors to channel first-----------
DenseTensor transformed_X_channel(X->type());
DenseTensor transformed_dO_channel(dO->type());
DenseTensor transformed_ddX_channel(X->type());
DenseTensor transformed_ddO_channel(dO->type());
DenseTensor transformed_dX_channel(X->type());
if (channel_last) {
ResizeToChannelFirst<Context, T>(ctx, X, &transformed_X_channel);
TransToChannelFirst<Context, T>(ctx, X, &transformed_X_channel);
ResizeToChannelFirst<Context, T>(ctx, dO, &transformed_dO_channel);
TransToChannelFirst<Context, T>(ctx, dO, &transformed_dO_channel);
if (ddX) {
ResizeToChannelFirst<Context, T>(ctx, ddX, &transformed_ddX_channel);
TransToChannelFirst<Context, T>(ctx, ddX, &transformed_ddX_channel);
}
if (ddO) {
ResizeToChannelFirst<Context, T>(ctx, ddO, &transformed_ddO_channel);
}
if (dX) {
ResizeToChannelFirst<Context, T>(ctx, dX, &transformed_dX_channel);
ctx.template Alloc<T>(&transformed_dX_channel);
}
} else {
transformed_X_channel = *X;
transformed_dO_channel = *dO;
if (ddX) {
transformed_ddX_channel = *ddX;
}
if (ddO) {
transformed_ddO_channel.ShareDataWith(*ddO);
}
if (dX) {
transformed_dX_channel.ShareDataWith(*dX);
}
}
auto in_dims = transformed_X_channel.dims();
auto filter_dims = W->dims();
DDim in_data_dims = slice_ddim(in_dims, 2, in_dims.size());
DDim filter_data_dims = slice_ddim(filter_dims, 2, filter_dims.size());
std::vector<int> ksize = vectorize<int>(filter_data_dims);
UpdatePaddingAndDilation(
&paddings, &dilations, padding_algorithm, in_data_dims, strides, ksize);
int data_dim = strides.size(); // 2d or 3d
bool is_sys_pad = funcs::IsSymmetricPadding(paddings, data_dim);
DenseTensor transformed_X(X->type());
DenseTensor transformed_ddX(X->type());
DenseTensor transformed_dX(X->type());
std::vector<int> padding_common(data_dim, 0);
std::vector<int> input_pad(X->dims().size() * 2, 0);
if (!is_sys_pad) {
// get pad
std::vector<int> padding_diff(data_dim);
std::vector<int> new_input_shape_vec(data_dim + 2);
new_input_shape_vec[0] = transformed_X_channel.dims()[0];
new_input_shape_vec[1] = transformed_X_channel.dims()[1];
for (size_t i = 0; i < data_dim; ++i) {
padding_diff[i] = std::abs(paddings[2 * i] - paddings[2 * i + 1]);
padding_common[i] = std::min(paddings[2 * i], paddings[2 * i + 1]);
new_input_shape_vec[i + 2] =
transformed_X_channel.dims()[i + 2] + padding_diff[i];
input_pad[2 * i + 4] = paddings[2 * i] - padding_common[i];
input_pad[2 * i + 4 + 1] = paddings[2 * i + 1] - padding_common[i];
}
DDim new_input_shape(make_ddim(new_input_shape_vec));
transformed_X.Resize(new_input_shape);
transformed_ddX.Resize(new_input_shape);
transformed_dX.Resize(new_input_shape);
ctx.template Alloc<T>(&transformed_X);
if (ddX) {
ctx.template Alloc<T>(&transformed_ddX);
}
if (dX) {
ctx.template Alloc<T>(&transformed_dX);
}
// pad for input
const int rank = X->dims().size();
T pad_value(0.0);
switch (rank) {
case 4: {
funcs::PadFunction<Context, T, 4>(
ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
if (ddX) {
funcs::PadFunction<Context, T, 4>(ctx,
input_pad,
transformed_ddX_channel,
pad_value,
&transformed_ddX);
}
} break;
case 5: {
funcs::PadFunction<Context, T, 5>(
ctx, input_pad, transformed_X_channel, pad_value, &transformed_X);
if (ddX) {
funcs::PadFunction<Context, T, 5>(ctx,
input_pad,
transformed_ddX_channel,
pad_value,
&transformed_ddX);
}
} break;
default:
PADDLE_THROW(phi::errors::InvalidArgument(
"ConvOp only support tensors with 4 or 5 dimensions."));
}
} else {
transformed_X.ShareDataWith(transformed_X_channel);
if (ddX) {
transformed_ddX.ShareDataWith(transformed_ddX_channel);
}
if (dX) {
transformed_dX.ShareDataWith(transformed_dX_channel);
}
if (paddings.size() == data_dim) {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[i];
}
} else {
for (size_t i = 0; i < data_dim; ++i) {
padding_common[i] = paddings[2 * i];
}
}
}
const T* x = transformed_X.data<T>();
int iwo_group = groups;
int c_group = 1;
#if defined(PADDLE_WITH_HIP) || CUDNN_VERSION_MIN(7, 0, 1)
iwo_group = 1;
c_group = groups;
groups = 1;
#endif
auto dtype = paddle::platform::CudnnDataType<T>::type;
auto handle = ctx.cudnn_handle();
auto layout = paddle::platform::GetCudnnTensorFormat(
paddle::platform::DataLayout::kNCHW);
paddle::operators::ConvArgs args1{&transformed_ddX,
W,
&transformed_ddO_channel,
strides,
padding_common,
dilations,
dtype,
groups,
paddle::platform::DataLayout::kNCHW};
paddle::operators::ConvArgs args2{&transformed_X,
ddW,
&transformed_ddO_channel,
strides,
padding_common,
dilations,
dtype,
groups,
paddle::platform::DataLayout::kNCHW};
paddle::operators::ConvArgs args3{&transformed_ddX,
dW,
&transformed_dO_channel,
strides,
padding_common,
dilations,
dtype,
groups,
paddle::platform::DataLayout::kNCHW};
paddle::operators::ConvArgs args4{&transformed_dX,
ddW,
&transformed_dO_channel,
strides,
padding_common,
dilations,
dtype,
groups,
paddle::platform::DataLayout::kNCHW};
#ifdef PADDLE_WITH_HIP
paddle::operators::SearchResult<miopenConvFwdAlgorithm_t> fwd_result1;
paddle::operators::SearchResult<miopenConvFwdAlgorithm_t> fwd_result2;
paddle::operators::SearchResult<miopenConvBwdDataAlgorithm_t> data_result;
paddle::operators::SearchResult<miopenConvBwdWeightsAlgorithm_t>
filter_result;
#else
paddle::operators::SearchResult<cudnnConvolutionFwdAlgo_t> fwd_result1;
paddle::operators::SearchResult<cudnnConvolutionFwdAlgo_t> fwd_result2;
paddle::operators::SearchResult<cudnnConvolutionBwdDataAlgo_t> data_result;
paddle::operators::SearchResult<cudnnConvolutionBwdFilterAlgo_t>
filter_result;
#endif
// ddo = conv(ddI, W) + conv(I, ddW)
size_t workspace_size = 0;
T* transformed_ddy_channel = nullptr;
if (ddO) {
ddy = ddO->data<T>();
transformed_ddy_channel = transformed_ddO_channel.data<T>();
if (ddX) {
args1.handle = handle;
args1.idesc.set(transformed_ddX, iwo_group);
args1.wdesc.set(*W, layout, iwo_group);
args1.odesc.set(transformed_ddO_channel, iwo_group);
args1.cdesc.set(dtype,
padding_common,
strides,
dilations,
paddle::platform::AllowTF32Cudnn(),
c_group);
#ifdef PADDLE_WITH_HIP
using search1 =
paddle::operators::SearchAlgorithm<miopenConvFwdAlgorithm_t>;
workspace_size = search1::GetWorkspaceSize(args1);
fwd_result1.algo = search1::Find<T>(
args1, exhaustive_search, false, workspace_size, ctx);
#else
using search1 =
paddle::operators::SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
fwd_result1 = search1::Find<T>(args1, exhaustive_search, false, ctx);
workspace_size = search1::GetWorkspaceSize(args1, fwd_result1.algo);
#endif
}
if (ddW) {
ddw = ddW->data<T>();
args2.handle = handle;
args2.idesc.set(transformed_X, iwo_group);
args2.wdesc.set(*ddW, layout, iwo_group);
args2.odesc.set(transformed_ddO_channel, iwo_group);
args2.cdesc.set(dtype,
padding_common,
strides,
dilations,
paddle::platform::AllowTF32Cudnn(),
c_group);
#ifdef PADDLE_WITH_HIP
using search2 =
paddle::operators::SearchAlgorithm<miopenConvFwdAlgorithm_t>;
workspace_size =
std::max(workspace_size, search2::GetWorkspaceSize(args2));
fwd_result2.algo = search2::Find<T>(
args2, exhaustive_search, false, workspace_size, ctx);
#else
using search2 =
paddle::operators::SearchAlgorithm<cudnnConvolutionFwdAlgoPerf_t>;
fwd_result2 = search2::Find<T>(args2, exhaustive_search, false, ctx);
workspace_size = std::max(
workspace_size, search2::GetWorkspaceSize(args2, fwd_result2.algo));
#endif
}
}
if (dW && ddX) {
dw = dW->data<T>();
args3.handle = handle;
args3.idesc.set(transformed_ddX, iwo_group);
args3.wdesc.set(*dW, layout, iwo_group);
args3.odesc.set(transformed_dO_channel, iwo_group);
args3.cdesc.set(dtype,
padding_common,
strides,
dilations,
paddle::platform::AllowTF32Cudnn(),
c_group);
#ifdef PADDLE_WITH_HIP
using search3 =
paddle::operators::SearchAlgorithm<miopenConvBwdWeightsAlgorithm_t>;
workspace_size = std::max(workspace_size, search3::GetWorkspaceSize(args3));
filter_result.algo = search3::Find<T>(
args3, exhaustive_search, deterministic, workspace_size, ctx);
#else
using search3 =
paddle::operators::SearchAlgorithm<cudnnConvolutionBwdFilterAlgoPerf_t>;
filter_result =
search3::Find<T>(args3, exhaustive_search, deterministic, ctx);
workspace_size = std::max(
workspace_size, search3::GetWorkspaceSize(args3, filter_result.algo));
#endif
}
if (ddW && dX) {
transformed_dx = transformed_dX.data<T>();
args4.handle = handle;
args4.idesc.set(transformed_dX, iwo_group);
args4.wdesc.set(*ddW, layout, iwo_group);
args4.odesc.set(transformed_dO_channel, iwo_group);
args4.cdesc.set(dtype,
padding_common,
strides,
dilations,
paddle::platform::AllowTF32Cudnn(),
c_group);
#ifdef PADDLE_WITH_HIP
using search4 =
paddle::operators::SearchAlgorithm<miopenConvBwdDataAlgorithm_t>;
workspace_size = std::max(workspace_size, search4::GetWorkspaceSize(args4));
data_result.algo = search4::Find<T>(
args4, exhaustive_search, deterministic, workspace_size, ctx);
#else
using search4 =
paddle::operators::SearchAlgorithm<cudnnConvolutionBwdDataAlgoPerf_t>;
data_result =
search4::Find<T>(args4, exhaustive_search, deterministic, ctx);
workspace_size = std::max(
workspace_size, search4::GetWorkspaceSize(args4, data_result.algo));
#endif
}
int i_n, i_c, i_d, i_h, i_w;
GetNCDHW(
transformed_X.dims(), DataLayout::kNCHW, &i_n, &i_c, &i_d, &i_h, &i_w);
int o_n, o_c, o_d, o_h, o_w;
GetNCDHW(transformed_dO_channel.dims(),
DataLayout::kNCHW,
&o_n,
&o_c,
&o_d,
&o_h,
&o_w);
int group_offset_in = i_c / groups * i_h * i_w * i_d;
int group_offset_out = o_c / groups * o_h * o_w * o_d;
int group_offset_filter = W->numel() / groups;
paddle::operators::ScalingParamType<T> alpha = 1.0f;
paddle::operators::ScalingParamType<T> beta = 0.0f;
// NOTE(zhiqiu): inplace addto is not supported in double grad yet.
// ScalingParamType<T> beta = ctx.Attr<bool>("use_addto") ? 1.0f :
// 0.0f;
// VLOG(4) << "Conv_grad_grad: use_addto = " << ctx.Attr<bool>("use_addto");
auto wkspace_handle = ctx.cudnn_workspace_handle();
if (ddO) {
if (ddX) {
ddx = transformed_ddX.data<T>();
#ifdef PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::miopenConvolutionForward(
handle,
&alpha,
args1.idesc.desc(),
ddx,
args1.wdesc.desc(),
w,
args1.cdesc.desc(),
fwd_result1.algo,
&beta,
args1.odesc.desc(),
transformed_ddy_channel,
workspace_ptr,
workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnConvolutionForward(
handle,
&alpha,
args1.idesc.desc(),
ddx + i * group_offset_in,
args1.wdesc.desc(),
w + i * group_offset_filter,
args1.cdesc.desc(),
fwd_result1.algo,
workspace_ptr,
workspace_size,
&beta,
args1.odesc.desc(),
transformed_ddy_channel + i * group_offset_out));
},
workspace_size);
}
#endif
}
if (ddW) {
#ifdef PADDLE_WITH_HIP
// MIOPEN only supports beta == 0.0f
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::miopenConvolutionForward(
handle,
&alpha,
args2.idesc.desc(),
x,
args2.wdesc.desc(),
ddw,
args2.cdesc.desc(),
fwd_result2.algo,
&beta,
args2.odesc.desc(),
transformed_ddy_channel,
workspace_ptr,
workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnConvolutionForward(
handle,
&alpha,
args2.idesc.desc(),
x + i * group_offset_in,
args2.wdesc.desc(),
ddw + i * group_offset_filter,
args2.cdesc.desc(),
fwd_result2.algo,
workspace_ptr,
workspace_size,
&alpha,
args2.odesc.desc(),
transformed_ddy_channel + i * group_offset_out));
},
workspace_size);
}
#endif
}
if (channel_last) {
TransToChannelLast<Context, T>(ctx, &transformed_ddO_channel, ddO);
}
}
T* transformed_dy_channel = transformed_dO_channel.data<T>();
if (dW && ddX) {
ddx = transformed_ddX.data<T>();
#ifdef PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::miopenConvolutionBackwardWeights(
handle,
&alpha,
args3.odesc.desc(),
transformed_dy_channel,
args3.idesc.desc(),
ddx,
args3.cdesc.desc(),
filter_result.algo,
&beta,
args3.wdesc.desc(),
dw,
workspace_ptr,
workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnConvolutionBackwardFilter(
handle,
&alpha,
args3.idesc.desc(),
ddx + i * group_offset_in,
args3.odesc.desc(),
transformed_dy_channel + i * group_offset_out,
args3.cdesc.desc(),
filter_result.algo,
workspace_ptr,
workspace_size,
&beta,
args3.wdesc.desc(),
dw + i * group_offset_filter));
},
workspace_size);
}
#endif
}
if (dX && ddW) {
ddw = ddW->data<T>();
#ifdef PADDLE_WITH_HIP
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::miopenConvolutionBackwardData(
handle,
&alpha,
args4.odesc.desc(),
transformed_dy_channel,
args4.wdesc.desc(),
ddw,
args4.cdesc.desc(),
data_result.algo,
&beta,
args4.idesc.desc(),
transformed_dx,
workspace_ptr,
workspace_size));
},
workspace_size);
#else
for (int i = 0; i < groups; i++) {
wkspace_handle.RunFunc(
[&](void* workspace_ptr) {
PADDLE_ENFORCE_GPU_SUCCESS(
paddle::platform::dynload::cudnnConvolutionBackwardData(
handle,
&alpha,
args4.wdesc.desc(),
ddw + i * group_offset_filter,
args4.odesc.desc(),
transformed_dy_channel + i * group_offset_out,
args4.cdesc.desc(),
data_result.algo,
workspace_ptr,
workspace_size,
&beta,
args4.idesc.desc(),
transformed_dx + i * group_offset_in));
},
workspace_size);
}
#endif
if (!is_sys_pad) {
// reverse padded input
std::vector<int> starts(X->dims().size(), 0);
std::vector<int> axes(X->dims().size(), 0);
for (size_t i = 0; i < X->dims().size(); ++i) {
starts[i] = input_pad[2 * i];
axes[i] = i;
}
if (X->dims().size() == 4) {
paddle::operators::RemovePaddingSlice<Context, T, 4>(
ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
} else {
paddle::operators::RemovePaddingSlice<Context, T, 5>(
ctx, &transformed_dX, &transformed_dX_channel, starts, axes);
}
}
if (channel_last) {
TransToChannelLast<Context, T>(ctx, &transformed_dX_channel, dX);
}
}
}
template <typename T, typename Context>
void DepthwiseConvDoubleGradGPUDNNKernel(
const Context& ctx,
const DenseTensor& input,
const DenseTensor& filter,
const DenseTensor& out_grad,
const paddle::optional<DenseTensor>& input_grad_grad,
const paddle::optional<DenseTensor>& filter_grad_grad,
const std::vector<int>& strides,
const std::vector<int>& paddings_t,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations_t,
const std::string& data_format,
bool use_addto,
int workspace_size_MB,
bool exhaustive_search_t,
bool fuse_relu,
DenseTensor* input_grad,
DenseTensor* filter_grad,
DenseTensor* out_grad_grad) {
ConvCudnnGradGradKernel<T>(ctx,
input,
filter,
out_grad,
input_grad_grad,
filter_grad_grad,
strides,
paddings_t,
padding_algorithm,
groups,
dilations_t,
data_format,
use_addto,
workspace_size_MB,
exhaustive_search_t,
input_grad,
filter_grad,
out_grad_grad);
}
template <typename T, typename Context>
void Conv3DCudnnGradGradKernel(
const Context& ctx,
const DenseTensor& input,
const DenseTensor& filter,
const DenseTensor& out_grad,
const paddle::optional<DenseTensor>& input_grad_grad,
const paddle::optional<DenseTensor>& filter_grad_grad,
const std::vector<int>& strides,
const std::vector<int>& paddings_t,
const std::string& padding_algorithm,
int groups,
const std::vector<int>& dilations_t,
const std::string& data_format,
bool use_addto,
int workspace_size_MB,
bool exhaustive_search_t,
DenseTensor* input_grad,
DenseTensor* filter_grad,
DenseTensor* out_grad_grad) {
ConvCudnnGradGradKernel<T>(ctx,
input,
filter,
out_grad,
input_grad_grad,
filter_grad_grad,
strides,
paddings_t,
padding_algorithm,
groups,
dilations_t,
data_format,
use_addto,
workspace_size_MB,
exhaustive_search_t,
input_grad,
filter_grad,
out_grad_grad);
}
} // namespace phi
#ifdef PADDLE_WITH_HIP
PD_REGISTER_KERNEL(conv2d_grad_grad,
GPUDNN,
ALL_LAYOUT,
phi::ConvCudnnGradGradKernel,
float,
phi::dtype::float16) {}
PD_REGISTER_KERNEL(conv3d_grad_grad,
GPUDNN,
ALL_LAYOUT,
phi::Conv3DCudnnGradGradKernel,
float,
phi::dtype::float16) {}
PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad,
GPU,
ALL_LAYOUT,
phi::DepthwiseConvDoubleGradGPUDNNKernel,
float,
phi::dtype::float16) {}
#else
#if CUDNN_VERSION_MIN(8, 1, 0)
PD_REGISTER_KERNEL(conv2d_grad_grad,
GPUDNN,
ALL_LAYOUT,
phi::ConvCudnnGradGradKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {}
PD_REGISTER_KERNEL(conv3d_grad_grad,
GPUDNN,
ALL_LAYOUT,
phi::Conv3DCudnnGradGradKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {}
PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad,
GPU,
ALL_LAYOUT,
phi::DepthwiseConvDoubleGradGPUDNNKernel,
float,
double,
phi::dtype::float16,
phi::dtype::bfloat16) {}
#else
PD_REGISTER_KERNEL(conv2d_grad_grad,
GPUDNN,
ALL_LAYOUT,
phi::ConvCudnnGradGradKernel,
float,
double,
phi::dtype::float16) {}
PD_REGISTER_KERNEL(conv3d_grad_grad,
GPUDNN,
ALL_LAYOUT,
phi::Conv3DCudnnGradGradKernel,
float,
double,
phi::dtype::float16) {}
PD_REGISTER_KERNEL(depthwise_conv2d_grad_grad,
GPU,
ALL_LAYOUT,
phi::DepthwiseConvDoubleGradGPUDNNKernel,
float,
double,
phi::dtype::float16) {}
#endif
#endif
|
cu-kernels.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// cudamatrix/cu-kernels.cu
// Copyright 2009-2012 Karel Vesely
// 2013 Ehsan Variani
// 2013 Johns Hopkins University (author: Daniel Povey)
// 2013 Hainan Xu
// 2013 Xiaohui Zhang
// 2013-2015 Guoguo Chen
// 2016-2018 Shiyin Kang
// 2017 Hossein Hadian, Daniel Galvez
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABLITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.
// In this file is the CUDA code of the CUDA kernels, plus the ANSI-C wrappers
#include <cfloat>
#include <limits>
#include <math_constants.h>
#include "cudamatrix/cu-kernels-ansi.h"
/***********************************************************************
* Generic __device__ functions
*/
template<typename Real>
__device__
static Real _sum_reduce(Real buffer[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (sum)
while (nTotalThreads > 1) {
int32_cuda halfPoint = ((1 + nTotalThreads) >> 1); // divide by two
// only the first half of the threads will be active.
if (threadIdx.x >= halfPoint) { // was <
// Get the shared value stored by another thread
Real temp = 0.0;
if (threadIdx.x < nTotalThreads) { // was +halfPoint
temp = buffer[threadIdx.x]; // was +halfPoint
}
buffer[threadIdx.x - halfPoint] += temp;
}
__syncthreads();
nTotalThreads = ((1 + nTotalThreads) >> 1); // divide by two.
}
// the result
return buffer[0];
}
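// (Editor's illustration, not in the original source: with blockDim.x == 8 the
// active width shrinks 8 -> 4 -> 2 -> 1, so after three iterations buffer[0]
// holds the sum of all eight entries.)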
/***********************************************************************
* CUDA kernels
* the functions are templated to have the float/double operations
*/
/*
* CuMatrix
*/
template<typename Real>
__global__
static void _copy_low_upp(Real* A, MatrixDim dimA) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i <= j || i >= dimA.rows)
return;
int index_1 = i * dimA.stride + j;
int index_2 = j * dimA.stride + i;
A[index_2] = A[index_1];
}
template<typename Real>
__global__
static void _copy_upp_low(Real* A, MatrixDim dimA) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (j <= i || j >= dimA.rows)
return;
int index_1 = i * dimA.stride + j;
int index_2 = j * dimA.stride + i;
A[index_2] = A[index_1];
}
// mat += diag(vec) * mat2.
template<typename Real>
__global__
static void _add_diag_vec_mat(Real alpha, Real *mat, MatrixDim mat_dim,
const Real *vec, const Real *mat2,
int mat2_row_stride, int mat2_col_stride,
Real beta) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // column index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
int index = j * mat_dim.stride + i, index2 = j * mat2_row_stride
+ i * mat2_col_stride;
if (i < mat_dim.cols && j < mat_dim.rows) {
mat[index] = alpha * vec[j] * mat2[index2] + beta * mat[index];
}
}
template<typename Real, typename OtherReal>
__global__
static void _copy_from_tp(Real* A, const OtherReal* B, MatrixDim dmat) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dmat.cols && j < dmat.rows) {
int32_cuda index_B = (j * (j + 1) / 2) + i;
int32_cuda index_A = j * dmat.stride + i;
if (i <= j) {
A[index_A] = B[index_B];
} else {
A[index_A] = 0.0;
}
}
}
template<typename Real, typename OtherReal>
__global__
static void _copy_from_tp_trans(Real* A, const OtherReal* B, MatrixDim dmat) {
// we interpret these indexes oppositely from normal, but it doesn't
// matter as it's invoked in a symmetric way.
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
// transpose the indices used to index the source TpMatrix.
if (i < dmat.rows && j < dmat.cols) {
int32_cuda index_B = (j * (j + 1) / 2) + i;
int32_cuda index_A = i * dmat.stride + j;
if (i <= j) {
A[index_A] = B[index_B];
} else {
A[index_A] = 0.0;
}
}
}
template<typename Real, typename OtherReal>
__global__
static void _copy_from_mat(Real* mat_out, const OtherReal* mat_in,
MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col-index
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row-index.
int32_cuda index_out = i + j * d_out.stride;
int32_cuda index_in = i + j * d_in.stride;
if (i < d_out.cols && j < d_out.rows)
mat_out[index_out] = static_cast<Real>(mat_in[index_in]);
}
template<int TileDim, typename Real, typename OtherReal>
__global__
static void _copy_from_mat_trans(Real* mat_out, const OtherReal* mat_in,
MatrixDim d_out, MatrixDim d_in) {
// Use shared memory to achieve both coalesced memory reading and writing
// '+1' to avoid bank conflict when reading sbuf
__shared__ Real sbuf[TileDim][TileDim + 1];
const int32_cuda i_in = blockIdx.y * TileDim + threadIdx.y; // row-index
const int32_cuda j_in = blockIdx.x * TileDim + threadIdx.x; // col-index
const int32_cuda tile_stride_in = CU1DBLOCK / TileDim * d_in.stride;
int32_cuda index_in = i_in * d_in.stride + j_in;
# pragma unroll
for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) {
if (i_in + i < d_in.rows && j_in < d_in.cols) {
sbuf[threadIdx.y + i][threadIdx.x] = static_cast<Real>(mat_in[index_in]);
}
index_in += tile_stride_in;
}
__syncthreads();
// Grid is transposed, but block is not yet.
// Warp (blockDim.x) is always along the row-dim.
const int32_cuda i_out = blockIdx.x * TileDim + threadIdx.y;
const int32_cuda j_out = blockIdx.y * TileDim + threadIdx.x;
const int32_cuda tile_stride_out = CU1DBLOCK / TileDim * d_out.stride;
int32_cuda index_out = i_out * d_out.stride + j_out;
# pragma unroll
for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) {
if (i_out + i < d_out.rows && j_out < d_out.cols) {
// block is transposed when reading sbuf
mat_out[index_out] = sbuf[threadIdx.x][threadIdx.y + i];
}
index_out += tile_stride_out;
}
}
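// (Editor's note, not in the original source: the "+1" padding above makes each
// sbuf row TileDim + 1 elements wide, so consecutive elements of a tile column
// fall into different shared-memory banks and the transposed read back from
// sbuf avoids bank conflicts.)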
// Copy from CSR sparse matrix to dense matrix
//
// We use warpSize threads per row to access only the nnz elements.
// Every CU1DBLOCK/warpSize rows share one thread block.
// 1D grid to cover all rows.
template<typename Real, typename OtherReal>
__global__
static void _copy_from_smat(Real* mat, MatrixDim mat_dim,
const int* smat_row_ptr, const int* smat_col_idx,
const OtherReal* smat_val) {
const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx
if (i < mat_dim.rows) {
const int nz_start = smat_row_ptr[i];
const int nz_end = smat_row_ptr[i + 1];
for (int nz_id = nz_start + threadIdx.x; nz_id < nz_end; nz_id +=
warpSize) {
const int j = smat_col_idx[nz_id]; // col idx
mat[i * mat_dim.stride + j] = static_cast<Real>(smat_val[nz_id]);
}
}
}
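// (Editor's sketch, not in the original source: a host-side launch consistent
// with the comment above uses one warp per row and CU1DBLOCK/warpSize rows per
// thread block, e.g. assuming a 32-thread warp and CU1DBLOCK == 256:
//   dim3 block(32, 256 / 32);  // lanes within a row x rows per block
//   dim3 grid((mat_dim.rows + block.y - 1) / block.y);  // 1D grid over rows
//   hipLaunchKernelGGL(_copy_from_smat, grid, block, 0, 0,
//                      mat, mat_dim, row_ptr, col_idx, val);
// The actual wrappers live in Kaldi's cu-kernels code and may differ.)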
/// Select a subset of the rows of a CSR SparseMatrix.
/// Sets 'out' to only the rows of 'in' that are listed
/// in 'row_indexes'. 'row_indexes' must be sorted and unique,
/// and satisfy 0 <= row_indexes[i] < in.size().
///
/// Note: 'out_row_ptr' is an input parameter that is calculated before
/// calling this kernel function
///
/// We use warpSize threads per row to access only the nnz elements.
/// Every CU1DBLOCK/warpSize rows share one thread block.
/// 1D grid to cover all selected rows.
template<typename Real>
__global__
static void _select_rows(const int* out_row_ptr, int* out_col_idx,
Real* out_val, const int* row_indexes,
const int num_selected_rows, const int* in_row_ptr,
const int* in_col_idx, const Real* in_val) {
const int out_i = blockIdx.x * blockDim.y + threadIdx.y; // out row idx
if (out_i < num_selected_rows) {
const int in_i = row_indexes[out_i];
const int in_row_start = in_row_ptr[in_i];
const int out_row_start = out_row_ptr[out_i];
const int row_length = in_row_ptr[in_i + 1] - in_row_start;
for (int k = threadIdx.x; k < row_length; k += warpSize) {
const int in_n = in_row_start + k;
const int out_n = out_row_start + k;
out_col_idx[out_n] = in_col_idx[in_n];
out_val[out_n] = in_val[in_n];
}
}
}
// mat += alpha * smat
//
// We use warpSize threads per row to access only the nonzero elements.
// Every CU1DBLOCK/warpSize rows share one thread block.
// 1D grid to cover all rows of smat.
template<typename Real>
__global__
static void _add_smat(Real* mat, MatrixDim mat_dim, Real alpha,
const int* smat_row_ptr, const int* smat_col_idx,
const Real* smat_val) {
const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx
if (i < mat_dim.rows) {
const int row_start = smat_row_ptr[i];
const int row_end = smat_row_ptr[i + 1];
for (int n = row_start + threadIdx.x; n < row_end; n += warpSize) {
const int j = smat_col_idx[n]; // col idx of smat
mat[i * mat_dim.stride + j] += alpha * smat_val[n];
}
}
}
// mat += alpha * smat^T
//
// We use warpSize threads per row to access only the nonzero elements.
// Every CU1DBLOCK/warpSize rows share one thread block.
// 1D grid to cover all rows of smat.
template<typename Real>
__global__
static void _add_smat_trans(Real* mat, MatrixDim mat_dim, Real alpha,
const int* smat_row_ptr, const int* smat_col_idx,
const Real* smat_val) {
const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx
if (i < mat_dim.cols) {
const int row_start = smat_row_ptr[i];
const int row_end = smat_row_ptr[i + 1];
for (int n = row_start + threadIdx.x; n < row_end; n += warpSize) {
const int j = smat_col_idx[n]; // col idx of smat
mat[j * mat_dim.stride + i] += alpha * smat_val[n];
}
}
}
/// For each element x of the matrix, set it to
/// (x < 0 ? exp(x) : x + 1).
/// Use block/grid sizes for simple matrix ops
template<typename T>
__global__
static void _apply_exp_special(T* out, MatrixDim out_dim, const T* in,
int in_stride) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < out_dim.rows && j < out_dim.cols) {
T x = in[i * in_stride + j];
if (x < T(0)) {
out[i * out_dim.stride + j] = exp(x);
} else {
out[i * out_dim.stride + j] = x + T(1);
}
}
}
/// Fill the array 'data' with the sequence [base ... base + length)
/// Use 1D block and 1D grid
template<typename T>
__global__
static void _sequence(T* data, int length, T base) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < length) {
data[i] = base + T(i);
}
}
// Copy from CSR sparse matrix to transposed dense matrix
//
// We use warpSize threads per row to access only the nnz elements.
// Every CU1DBLOCK/warpSize rows share one thread block.
// 1D grid to cover all rows.
template<typename Real, typename OtherReal>
__global__
static void _copy_from_smat_trans(Real* mat, MatrixDim mat_dim,
const int* smat_row_ptr,
const int* smat_col_idx,
const OtherReal* smat_val) {
const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx of smat
if (i < mat_dim.cols) {
const int nz_start = smat_row_ptr[i];
const int nz_end = smat_row_ptr[i + 1];
for (int nz_id = nz_start + threadIdx.x; nz_id < nz_end; nz_id +=
warpSize) {
const int j = smat_col_idx[nz_id]; // col idx of smat
mat[j * mat_dim.stride + i] = static_cast<Real>(smat_val[nz_id]);
}
}
}
// First stage of trace(mat * smat^T)
// We use warpSize threads per row to access only the nnz elements.
// Every CU1DBLOCK/warpSize rows share one thread block.
// 1D grid to cover all rows of smat.
template<typename Real>
__global__
static void _trace_mat_smat_trans(const Real* mat, MatrixDim mat_dim,
const int* smat_row_ptr,
const int* smat_col_idx, const Real* smat_val,
Real* trace_vec) {
const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx of smat
if (i < mat_dim.rows) {
const int nz_start = smat_row_ptr[i];
const int nz_end = smat_row_ptr[i + 1];
for (int nz_id = nz_start + threadIdx.x; nz_id < nz_end; nz_id +=
warpSize) {
const int j = smat_col_idx[nz_id]; // col idx of smat
trace_vec[nz_id] = mat[i * mat_dim.stride + j] * smat_val[nz_id];
}
}
}
// First stage of trace(mat * smat)
// We use warpSize threads per row to access only the nnz elements.
// Every CU1DBLOCK/warpSize rows share one thread block.
// 1D grid to cover all rows of smat.
template<typename Real>
__global__
static void _trace_mat_smat(const Real* mat, MatrixDim mat_dim,
const int* smat_row_ptr, const int* smat_col_idx,
const Real* smat_val, Real* trace_vec) {
const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx of smat
if (i < mat_dim.cols) {
const int nz_start = smat_row_ptr[i];
const int nz_end = smat_row_ptr[i + 1];
for (int nz_id = nz_start + threadIdx.x; nz_id < nz_end; nz_id +=
warpSize) {
const int j = smat_col_idx[nz_id]; // col idx of smat
trace_vec[nz_id] = mat[j * mat_dim.stride + i] * smat_val[nz_id];
}
}
}
template<typename Real>
__global__
static void _apply_exp(Real* mat, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows) {
mat[index] = exp(mat[index]);
}
}
template<typename Real>
__global__
static void _apply_exp_limited(Real* mat, MatrixDim d,
Real lower_limit, Real upper_limit) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows) {
Real x = mat[index];
// I'm writing !(x >= lower_limit) instead of (x < lower_limit) so that
// nan's will be set to the lower-limit.
if (!(x >= lower_limit))
x = lower_limit;
else if (x > upper_limit)
x = upper_limit;
mat[index] = exp(x);
}
}
template<typename Real>
__global__
static void _scale_diag_packed(Real* mat, Real value, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = ((i + 1) * (i + 2) / 2) - 1;
if (i < dim) {
mat[index] = value * mat[index];
}
}
template<typename Real>
__global__
static void _set_diag(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = i + i * d.stride;
if (i < d.rows && i < d.cols) {
mat[index] = value;
}
}
template<typename Real>
__global__
static void _set_diag_packed(Real* mat, Real value, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = ((i + 1) * (i + 2) / 2) - 1;
if (i < dim) {
mat[index] = value;
}
}
template<typename Real>
__global__
static void _add_diag_packed(Real* mat, Real value, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = ((i + 1) * (i + 2) / 2) - 1;
if (i < dim) {
mat[index] = mat[index] + value;
}
}
template<typename Real>
__global__
static void _set_const(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // column
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
mat[index] = value;
}
template<typename Real>
__global__
static void _set_zero_above_diag(Real* mat, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < i)
mat[index] = 0.0;
}
template<typename Real>
__global__
static void _add(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
mat[index] = mat[index] + value;
}
template<typename Real>
__global__
static void _scale(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
mat[index] = mat[index] * value;
}
template<typename Real>
__global__
static void _apply_log(Real* mat, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
mat[index] = log(mat[index]);
}
template<typename Real>
__global__
static void _mul_elements(Real* mat, const Real* A, MatrixDim dst_d,
int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda dst_index = i + j * dst_d.stride, src_index = i + j * src_stride;
if (i < dst_d.cols && j < dst_d.rows)
mat[dst_index] = mat[dst_index] * A[src_index];
}
template<typename Real>
__global__
static void _div_elements(Real* mat, const Real* A, MatrixDim dst_d,
int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda dst_index = i + j * dst_d.stride, src_index = i + j * src_stride;
if (i < dst_d.cols && j < dst_d.rows)
mat[dst_index] = mat[dst_index] / A[src_index];
}
template<typename Real>
__global__
static void _max(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda dst_index = i + j * dst_d.stride, src_index = i + j * src_stride;
if (i < dst_d.cols && j < dst_d.rows) {
Real a = mat[dst_index], b = A[src_index];
mat[dst_index] = fmax(a, b);
}
}
template<typename Real>
__global__
static void _min(Real* mat, const Real* other, MatrixDim mat_d,
int other_stride) {
int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda i = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda mat_index = i * mat_d.stride + j;
int32_cuda other_index = i * other_stride + j;
if (j < mat_d.cols && i < mat_d.rows) {
Real a = mat[mat_index], b = other[other_index];
mat[mat_index] = fmin(a, b);
}
}
template<typename Real>
__global__
static void _vec_mul_elements(Real* v, const Real* a, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dim)
v[i] = v[i] * a[i];
}
template<typename Real>
__global__
static void _mul_cols_vec(Real* mat, const Real* scale, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
mat[index] *= scale[i];
}
template<typename Real>
__global__
static void _mul_rows_vec(Real* mat, const Real* scale, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
mat[index] *= scale[j];
}
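// y has group_size times as many columns as x; this multiplies each element
// y(j, i) by x(j, i / group_size), i.e. every group of group_size consecutive
// columns of y is scaled elementwise by the corresponding single column of x.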
template<typename Real>
__global__
static void _mul_rows_group_mat(Real *y, const Real *x, MatrixDim d,
int src_stride, int group_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (j < d.rows && i < d.cols) {
int dst_index = i + j * d.stride;
int src_index = i / group_size + j * src_stride;
y[dst_index] *= x[src_index];
}
}
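// Back-propagation through the group p-norm: given the inputs iv, the group
// p-norms ov and the output derivatives od, computes the input derivatives
// id = od * d(ov)/d(iv) elementwise, using
// d||x||_p / d x_j = sign(x_j) * |x_j|^(p-1) * ||x||_p^(1-p),
// with the cases p = 1, p = 2 and p = infinity handled separately below.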
template<typename Real>
__global__
void _diff_group_pnorm(Real *id, const Real *iv, const Real *ov, const Real* od,
MatrixDim id_dim, int iv_stride, int ov_stride,
int od_stride, int group_size, Real power) {
const int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < id_dim.cols) {
const int grid_stride = gridDim.y * blockDim.y;
const int src_j = j / group_size;
int i = blockIdx.y * blockDim.y + threadIdx.y;
for (; i < id_dim.rows; i += grid_stride) {
const int iv_index = j + i * iv_stride;
Real iv_ij = iv[iv_index];
Real ans;
if (power == Real(2)) {
const int ov_index = src_j + i * ov_stride;
Real ov_ij = ov[ov_index];
ans = ov_ij <= 0.0 ? 0.0 : iv_ij / ov_ij;
} else if (power == Real(1)) {
Real iv_ij_sign = (iv_ij >= 0 ? 1 : -1);
ans = (iv_ij == Real(0) ? 0.0 : iv_ij_sign);
} else if (power
== (sizeof(Real) == sizeof(float) ? CUDART_INF_F : CUDART_INF)) {
const int ov_index = src_j + i * ov_stride;
Real ov_ij = ov[ov_index];
Real iv_ij_sign = (iv_ij >= 0 ? 1 : -1);
ans =
ov_ij <= 0.0 ?
0.0 : (iv_ij_sign * (abs(iv_ij) == ov_ij ? 1.0 : 0.0));
} else {
const int ov_index = src_j + i * ov_stride;
Real ov_ij = ov[ov_index];
Real iv_ij_sign = (iv_ij >= 0 ? 1 : -1);
if (ov_ij <= 0.0) {
ans = 0.0; // The derivative is either 0 or undefined at the origin.
} else {
ans = iv_ij_sign * pow(std::abs(iv_ij), power - 1)
* pow(ov_ij, 1 - power);
}
}
const int od_index = src_j + i * od_stride;
const int id_index = j + i * id_dim.stride;
id[id_index] = ans * od[od_index];
}
}
}
/// deriv is the derivative we will output; vec is the input we're computing
/// the group max on; "maxv" is the previously computed group max.
template<typename Real>
__global__
static void _calc_group_max_deriv(Real *deriv, const Real *vec,
const Real *maxv, MatrixDim deriv_dim,
int vec_stride, int maxv_stride,
int group_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (j < deriv_dim.rows && i < deriv_dim.cols) {
int deriv_index = i + j * deriv_dim.stride;
int vec_index = i + j * vec_stride;
int maxv_index = i / group_size + j * maxv_stride;
Real vec_element = vec[vec_index], // The element of the original vector.
max_element = maxv[maxv_index]; // this is the max value
Real ans = (max_element == vec_element ? 1.0 : 0.0);
deriv[deriv_index] = ans;
}
}
/// Set each element to y = (x == orig ? changed : x).
template<typename Real>
__global__
static void _replace_value(Real *vec, int dim, Real orig, Real changed) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dim)
if (vec[i] == orig)
vec[i] = changed;
}
template<typename Real>
__global__
static void _div_rows_vec(Real* mat, const Real* vec_div, MatrixDim d) {
const int32_cuda i = blockIdx.y * blockDim.y + threadIdx.y;
if (i < d.rows) {
const int32_cuda start = i * d.stride;
const Real scale = Real(1) / vec_div[i];
const int32_cuda grid_stride = blockDim.x * gridDim.x;
for (int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x; j < d.cols; j +=
grid_stride) {
mat[start + j] *= scale;
}
}
}
template<typename Real>
__global__
static void _add_mat(Real alpha, const Real* src, Real* dst, MatrixDim d,
int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // column index
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row index
int32_cuda index = i + j * d.stride;
int32_cuda index_src = i + j * src_stride;
if (i < d.cols && j < d.rows)
dst[index] = alpha * src[index_src] + dst[index];
}
template<typename Real>
__global__
static void _add_mat_trans(Real alpha, const Real* src, Real* dst, MatrixDim d,
int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
int32_cuda index_src = j + i * src_stride;
if (i < d.cols && j < d.rows)
dst[index] = alpha * src[index_src] + dst[index];
}
template<typename Real>
__global__
static void _add_mat_blocks(Real alpha, const Real* src,
int32_cuda num_row_blocks,
int32_cuda num_col_blocks, Real* dst, MatrixDim d,
int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
int32_cuda index_src = i + j * src_stride;
if (i < d.cols && j < d.rows)
for (int32_cuda p = 0; p < num_row_blocks; p++) {
for (int32_cuda q = 0; q < num_col_blocks; q++) {
dst[index] = alpha
* src[index_src + p * src_stride * d.rows + q * d.cols]
+ dst[index];
}
}
}
template<typename Real>
__global__
static void _add_mat_repeated(Real alpha, const Real* src,
MatrixDim src_dim, Real* dst,
MatrixDim dst_dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda src_i = i % src_dim.cols,
src_j = j % src_dim.rows,
dst_index = i + j * dst_dim.stride,
src_index = src_i + src_j * src_dim.stride;
if (i < dst_dim.cols && j < dst_dim.rows)
dst[dst_index] += alpha * src[src_index];
}
template<typename Real>
__global__
static void _add_mat_blocks_trans(Real alpha, const Real* src,
int32_cuda num_row_blocks,
int32_cuda num_col_blocks, Real* dst,
MatrixDim d, int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
int32_cuda index_src = j + i * src_stride;
if (i < d.cols && j < d.rows)
for (int32_cuda p = 0; p < num_row_blocks; p++) {
for (int32_cuda q = 0; q < num_col_blocks; q++) {
dst[index] = alpha
* src[index_src + p * src_stride * d.cols + q * d.rows]
+ dst[index];
}
}
}
template<typename Real>
__global__
static void _set_mat_mat_div_mat(const Real* A, const Real* B, const Real* C,
Real* dst, MatrixDim d, int stride_a,
int stride_b, int stride_c) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride, a_index = i + j * stride_a, b_index = i
+ j * stride_b, c_index = i + j * stride_c;
if (i < d.cols && j < d.rows)
if (C[c_index] == 0)
dst[index] = A[a_index];
else
dst[index] = A[a_index] * B[b_index] / C[c_index];
}
// Given a matrix input S (not packed!) and a lower-triangular matrix L, this
// function does S = beta S + alpha * L^T L. This is used in PSD matrix
// inversion. The i index is the row of the destination S and the j the column
// (although of course the output is symmetric so it doesn't matter in a sense).
// The main point of this is to make use of various symmetries and zero-ness.
template<typename Real>
__global__
static void _sy_add_tr2(Real alpha, Real beta, const Real *T, MatrixDim tdim,
Real *S, MatrixDim sdim) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= sdim.rows || j > i)
return;
// this thread computes the dot-product of the i'th column of
// L with the j'th column of L. The values we're multiplying
// are only nonzero for row-index k greater or equal to
// max(i, j), which equals i.
Real sum = 0.0;
for (int k = i; k < sdim.rows; k++) {
int i_index = i + tdim.stride * k, j_index = j + tdim.stride * k;
sum += T[i_index] * T[j_index];
}
int output_index1 = i * sdim.stride + j, output_index2 = j * sdim.stride + i;
S[output_index1] = alpha * sum + beta * S[output_index1];
S[output_index2] = alpha * sum + beta * S[output_index2];
}
template<typename Real>
__global__
static void _add_vec_to_cols(Real alpha, const Real* col, Real beta, Real* dst,
MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
dst[index] = alpha * col[j] + beta * dst[index];
}
template<typename Real>
__global__
static void _add_vec_to_rows(Real alpha, const Real* row, Real beta, Real* dst,
MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
dst[index] = alpha * row[i] + beta * dst[index];
}
template<typename Real>
__global__
static void _apply_mask(Real* mat, const char* mask, MatrixDim dmat,
MatrixDim dmask) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * dmat.stride;
int32_cuda index2 = i + j * dmask.stride;
if (i < dmat.cols && j < dmat.rows)
if (mask[index2] == 0)
mat[index] = 0;
}
template<typename Real>
__global__
static void _add_mat_diag_vec(Real alpha, Real *mat, MatrixDim mat_dim,
const Real *mat2, int mat2_row_stride,
int mat2_col_stride, const Real *vec, Real beta) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // column index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
int index = i + j * mat_dim.stride, index2 = i * mat2_col_stride
+ j * mat2_row_stride;
if (j < mat_dim.rows && i < mat_dim.cols)
mat[index] = alpha * mat2[index2] * vec[i] + beta * mat[index];
}
template<typename Real>
__global__
static void _add_mat_mat_elements(Real *data, const Real *srcA_data,
const Real *srcB_data, MatrixDim dim,
int srcA_stride, int srcB_stride, Real alpha,
Real beta) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda tgt_index = i + j * dim.stride;
int32_cuda srcA_index = i + j * srcA_stride;
int32_cuda srcB_index = i + j * srcB_stride;
if (i < dim.cols && j < dim.rows) {
data[tgt_index] = alpha * srcA_data[srcA_index] * srcB_data[srcB_index]
+ beta * data[tgt_index];
}
}
/*
* CuVector
*/
// very limited application!
template<typename Real>
__global__
static void _set_bias_params(Real* v, const Real* a, Real param_1, Real param_2,
Real param_3, int* flag, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dim) {
Real ratio = a[i] / param_3;
if ((ratio < 0.0) || (ratio >= 1.01)) {
*flag = 1;
return;
}
if (ratio < param_1) {
Real factor = ((param_1 / ratio) > param_2) ? param_2 : (param_1 / ratio);
v[i] = v[i] / factor;
} else if (ratio > param_1) {
Real factor = ((ratio / param_1) > param_2) ? param_2 : (ratio / param_1);
v[i] = v[i] * factor;
}
}
}
template<typename Real, typename OtherReal>
__global__
static void _cublas_copy_kaldi(int n, const Real* x, int incx, OtherReal* y,
int incy) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
y[i * incy] = static_cast<OtherReal>(x[i * incx]);
}
}
// This kernel writes a copy of the vector "v_in" to each row of the matrix
// "m_out". the dimension of v_in should be equal to the #columns of m_out.
template<typename Real>
__global__
static void _copy_rows_from_vec(Real* m_out, MatrixDim d, const Real* v_in) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // column index.
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index.
if (i < d.cols && j < d.rows) {
int index = i + j * d.stride;
m_out[index] = v_in[i];
}
}
// This kernel writes a copy of the vector "v_in" to each col of the matrix
// "m_out". the dimension of v_in should be equal to the #row of m_out.
template<typename Real>
__global__
static void _copy_cols_from_vec(Real* m_out, MatrixDim d, const Real* v_in) {
int i = blockIdx.y * blockDim.y + threadIdx.y; // row id
int j = blockIdx.x * blockDim.x + threadIdx.x; // col id
if (i < d.rows && j < d.cols) {
m_out[i * d.stride + j] = v_in[i];
}
}
// _trace_mat_mat reduces the partial sums to
// value[blockIdx.y * gridDim.x + blockIdx.x].
// It uses shared memory to transpose matrix B so that memory accesses are coalesced.
template<int TileDim, typename Real>
__global__
static void _trace_mat_mat(const Real* A, const Real* B, MatrixDim dA,
int B_stride, Real* value) {
// Reuse shared mem and make indexing easier. "+1" to avoid bank conflict
__shared__ union {
Real trans[TileDim][TileDim + 1];
Real sum[CU1DBLOCK];
} smem;
// linear thread id;
const int32_cuda tid = threadIdx.y * blockDim.x + threadIdx.x;
const int32_cuda grid_height = gridDim.y * TileDim;
const int32_cuda ja = blockIdx.x * TileDim + threadIdx.x;
const int32_cuda ib = blockIdx.x * TileDim + threadIdx.y;
int32_cuda ia = blockIdx.y * TileDim + threadIdx.y;
int32_cuda jb = blockIdx.y * TileDim + threadIdx.x;
// Grid reduce
Real tsum = Real(0);
for (int32_cuda i0 = 0; i0 < dA.rows; i0 += grid_height) {
// Load from B, transpose the block and store in shared mem
if (jb < dA.rows) {
# pragma unroll
for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) {
if (ib + i < dA.cols) {
smem.trans[threadIdx.x][threadIdx.y + i] =
B[(ib + i) * B_stride + jb];
}
}
}
__syncthreads();
// Load from A, sum up the product.
if (ja < dA.cols) {
# pragma unroll
for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) {
if (ia + i < dA.rows) {
tsum += A[(ia + i) * dA.stride + ja]
* smem.trans[threadIdx.y + i][threadIdx.x];
}
}
}
__syncthreads();
ia += grid_height;
jb += grid_height;
}
smem.sum[tid] = tsum;
__syncthreads();
// Block reduce
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift)
smem.sum[tid] += smem.sum[tid + shift];
__syncthreads();
}
// Warp reduce. Implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
smem.sum[tid] += smem.sum[tid + shift];
}
}
// output 1 sum per thread block
if (tid == 0) {
value[blockIdx.y * gridDim.x + blockIdx.x] = smem.sum[0];
}
}
// _trace_mat_mat_trans reduces the partial sums to
// value[blockIdx.y * gridDim.x + blockIdx.x].
template<typename Real>
__global__
static void _trace_mat_mat_trans(const Real* A, const Real* B, MatrixDim dA,
int B_stride, Real* value) {
__shared__ Real ssum[CU1DBLOCK];
// linear thread id;
const int32_cuda tid = threadIdx.y * blockDim.x + threadIdx.x;
const int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x;
const int32_cuda grid_height = gridDim.y * blockDim.y;
int32_cuda i = blockIdx.y * blockDim.y + threadIdx.y;
// Grid reduce
Real tsum = Real(0);
if (j < dA.cols) {
while (i < dA.rows) {
tsum += A[i * dA.stride + j] * B[i * B_stride + j];
i += grid_height;
}
}
ssum[tid] = tsum;
__syncthreads();
// Block reduce
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift)
ssum[tid] += ssum[tid + shift];
__syncthreads();
}
// Warp reduce. Implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
ssum[tid] += ssum[tid + shift];
}
}
// output 1 sum per thread block
if (tid == 0) {
value[blockIdx.y * gridDim.x + blockIdx.x] = ssum[0];
}
}
// v = alpha * diag(M * N^T) + beta * v
template<typename Real>
__global__
static void _add_diag_mat_mat_MNT(const Real alpha, const Real* M,
const MatrixDim dim_M, const Real* N,
const int stride_N, const Real beta,
Real* v) {
__shared__ Real ssum[CU1DBLOCK];
const int tid = threadIdx.x;
const int i = blockIdx.x;
const int m_start = i * dim_M.stride;
const int n_start = i * stride_N;
// Loop along the matrix row. Reduce to CU1DBLOCK elements per row.
Real tsum = Real(0);
for (int j = tid; j < dim_M.cols; j += CU1DBLOCK) {
tsum += M[m_start + j] * N[n_start + j];
}
ssum[tid] = tsum;
__syncthreads();
// Tree reduce to 2x warpSize elements.
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift)
ssum[tid] += ssum[tid + shift];
__syncthreads();
}
// Warp reduce to 1 element. Threads implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
ssum[tid] += ssum[tid + shift];
}
}
// output 1 sum per thread block
if (tid == 0) {
v[i] = alpha * ssum[0] + beta * v[i];
}
}
// v = alpha * diag(M^T * N) + beta * v
template<int TileDim, typename Real>
__global__
static void _add_diag_mat_mat_MTN(const Real alpha, const Real* M,
const int stride_M, const Real* N,
const MatrixDim dim_N, const Real beta,
Real* v, const int stride_v) {
__shared__ Real ssum[CU1DBLOCK];
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j >= dim_N.cols)
return;
// Loop along the matrix column.
// Reduce to gridDim.y * CU1DBLOCK / TileDim elements per column.
Real tsum = Real(0);
const int grid_stride_y = blockDim.y * gridDim.y;
for (int i = blockIdx.y * blockDim.y + threadIdx.y; i < dim_N.rows; i +=
grid_stride_y) {
tsum += M[i * stride_M + j] * N[i * dim_N.stride + j];
}
ssum[tid] = tsum;
__syncthreads();
// Tree reduce to 2x warpSize / TileDim elements per column.
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize && shift >= TileDim;
shift >>= 1) {
if (tid < shift) {
ssum[tid] += ssum[tid + shift];
}
__syncthreads();
}
// Warp reduce to 1 element per column.
// Threads implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift >= TileDim; shift >>= 1) {
ssum[tid] += ssum[tid + shift];
}
}
// output TileDim sums per thread block
if (tid < TileDim) {
if (beta != Real(0)) {
v[blockIdx.y * stride_v + j] = alpha * ssum[tid]
+ beta * v[blockIdx.y * stride_v + j];
} else {
v[blockIdx.y * stride_v + j] = alpha * ssum[tid];
}
}
}
// v = alpha * diag(M * N) + beta * v
template<int TileDim, typename Real>
__global__
static void _add_diag_mat_mat_MN(const Real alpha, const Real* M,
const int stride_M, const Real* N,
const MatrixDim dim_N, const Real beta,
Real* v) {
// Reuse shared mem and make indexing easier. "+1" to avoid bank conflict
__shared__ union {
Real trans[TileDim][TileDim + 1];
Real sum[CU1DBLOCK];
} smem;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int i_m = blockIdx.x * TileDim + threadIdx.y;
const int j_n = blockIdx.x * TileDim + threadIdx.x;
int i_n = threadIdx.y;
int j_m = threadIdx.x;
// Loop along the matrix column.
// Reduce to CU1DBLOCK / TileDim elements per column.
Real tsum = Real(0);
for (int block_i_n = 0; block_i_n < dim_N.rows; block_i_n += TileDim) {
// Load, transpose and store M to shared mem.
if (j_m < dim_N.rows) {
# pragma unroll
for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) {
if (i_m + i < dim_N.cols) {
smem.trans[threadIdx.x][threadIdx.y + i] = M[(i_m + i) * stride_M
+ j_m];
}
}
}
__syncthreads();
// Load N, sum up the product.
if (j_n < dim_N.cols) {
# pragma unroll
for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) {
if (i_n + i < dim_N.rows) {
tsum += N[(i_n + i) * dim_N.stride + j_n]
* smem.trans[threadIdx.y + i][threadIdx.x];
}
}
}
__syncthreads();
i_n += TileDim;
j_m += TileDim;
}
smem.sum[tid] = tsum;
__syncthreads();
// Tree reduce to 2x warpSize / TileDim elements per column.
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize && shift >= TileDim;
shift >>= 1) {
if (tid < shift) {
smem.sum[tid] += smem.sum[tid + shift];
}
__syncthreads();
}
// Warp reduce to 1 element per column.
// Threads implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift >= TileDim; shift >>= 1) {
smem.sum[tid] += smem.sum[tid + shift];
}
}
// output TileDim sums per thread block
if (tid < TileDim && j_n < dim_N.cols) {
v[j_n] = alpha * smem.sum[tid] + beta * v[j_n];
}
}
template<typename Real>
__global__
static void _add_vec_vec(Real alpha, Real* v, const Real* x, const Real* y,
Real beta, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
// if (blockIdx.y > 0) return;
if (i < dim)
v[i] = alpha * x[i] * y[i] + beta * v[i];
}
template<typename Real>
__global__
static void _copy_col_from_mat_df(double* v, int col, const Real* mat,
MatrixDim dmat, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = col + i * dmat.stride;
// if (blockIdx.y > 0) return;
if (i < dim)
v[i] = (double) mat[index];
}
template<typename Real>
__global__
static void _copy_col_from_mat_fd(float* v, int col, const Real* mat,
MatrixDim dmat, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = col + i * dmat.stride;
// if (blockIdx.y > 0) return;
if (i < dim)
v[i] = (float) mat[index];
}
template<typename Real>
__global__
static void _vec_apply_exp(Real* v, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
// if (blockIdx.y > 0) return;
if (i < dim) {
v[i] = exp(v[i]);
}
}
template<typename Real>
__global__
static void _vec_apply_log(Real* v, Real* flag, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
// if (blockIdx.y > 0) return;
if (i < dim) {
if (v[i] < 0) {
*flag = 1;
return;
}
v[i] = log(v[i]);
}
}
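// Accumulates, over the supervision elements x[0..s-1] (each a (row, column,
// weight) triple), the total weighted log-probability into t[0] and the total
// weight into t[1], and adds weight / prob to the corresponding element of the
// derivative matrix z2; z holds the probabilities, z2 the derivative.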
template<typename Real>
__global__
static void _cuda_comp_obj_deriv(MatrixElement<Real> *x, int s, const Real* z,
MatrixDim d, Real* z2, MatrixDim d2, Real* t) {
int i = threadIdx.x;
__shared__ Real tot_objf[CU1DBLOCK];
__shared__ Real tot_weight[CU1DBLOCK];
Real tmp_weight_sum = 0;
Real tmp_tot_objf = 0;
  int size = s / CU1DBLOCK; // minimum number of elements each thread handles
  int threshold = s - size * CU1DBLOCK; // threads with index < threshold handle one extra element
int loop_start;
int loop_end;
if (i < threshold) {
loop_start = i * (size + 1);
loop_end = (i + 1) * (size + 1);
} else {
loop_start = threshold + i * size;
loop_end = threshold + (i + 1) * size;
}
for (int j = loop_start; j < loop_end; j++) {
//* ((int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )) );
int m = (x + j)->row;
//*(int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )+ sizeof(int));
int label = (x + j)->column;
// *(Real*) ((size_t)x + j*(2*sizeof(int) + sizeof(Real)) + 2*sizeof(int));
Real weight = (x + j)->weight;
tmp_weight_sum += weight;
Real this_prob = *(z + m * d.stride + label);
tmp_tot_objf += weight * log(this_prob);
// there might be problems here....
*(z2 + m * d2.stride + label) += weight / this_prob;
}
tot_objf[i] = tmp_tot_objf;
tot_weight[i] = tmp_weight_sum;
__syncthreads();
*t = _sum_reduce(tot_objf);
__syncthreads();
*(t + 1) = _sum_reduce(tot_weight);
return;
}
template<typename Real>
__global__
static void _cuda_vector_copy_elements(Real *data, int dim,
const Real *src_mat, int mat_stride,
bool transpose,
const MatrixIndexT_cuda* elements) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= dim)
return;
int j = elements[i];
int mat_index;
if (transpose)
mat_index = i + j * mat_stride;
else
mat_index = j + i * mat_stride;
data[i] = src_mat[mat_index];
}
template<typename Real>
__global__
static void _cuda_matrix_add_elements(Real *data, MatrixDim dim, Real alpha,
MatrixElement<Real>* x,
int num_elements) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= num_elements)
return;
data[x[i].row * dim.stride + x[i].column] += alpha * x[i].weight;
}
template<typename Real>
__global__
static void _cuda_matrix_add_indexed_values(MatrixDim dim, Real alpha,
const Int32Pair* indices,
const Real* x, int s, Real* data) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= s)
return;
int data_i = indices[i].first * dim.stride + indices[i].second;
data[data_i] += alpha * x[i];
}
template<typename Real>
__global__
static void _cuda_matrix_add_to_elements(Real alpha,
Real* mat, MatrixDim dim,
const MatrixIndexT_cuda* elements) {
int row = blockIdx.x * blockDim.x + threadIdx.x;
if (row < dim.rows) {
int col = elements[row];
if (col >= 0) {
int index = col + row * dim.stride;
mat[index] += alpha;
}
}
}
template<typename Real>
__global__
static void _matrix_lookup(const Real *data, MatrixDim dim,
const Int32Pair *indices, int indices_size,
Real *output) {
int ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= indices_size)
return;
int data_ind = indices[ind].first * dim.stride + indices[ind].second;
output[ind] = data[data_ind];
}
template<typename Real>
__global__
static void _equal_element_mask(const Real *mat1, const Real *mat2, Real *mask,
MatrixDim mat1_dim, int mat2_stride,
int mask_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row
int32_cuda index_mat1 = i + j * mat1_dim.stride;
int32_cuda index_mat2 = i + j * mat2_stride;
int32_cuda index_mask = i + j * mask_stride;
if (i < mat1_dim.cols && j < mat1_dim.rows)
mask[index_mask] = (mat1[index_mat1] == mat2[index_mat2] ? 1.0 : 0.0);
}
enum EnumTransformReduce {
SUMAB, SUM, MAX, MIN, LINFNORM, L2NORM, L1NORM, L0NORM, LPNORM
};
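// Generic transform-reduce operator. The primary template below is only a
// placeholder (all member functions return 0); the specializations that follow
// supply, for each EnumTransformReduce value, the initial value, the
// per-element Transform(), the binary Reduce() and the final PostReduce() step.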
template<EnumTransformReduce TransReduceType, typename Real>
struct TransReduceOp {
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return Real(0);
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return Real(0);
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return Real(0);
}
};
template<typename Real>
struct TransReduceOp<SUMAB, Real> {
const Real alpha_;
const Real beta_;
TransReduceOp(const Real& a, const Real& b) :
alpha_(a), beta_(b) {
}
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return x;
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return a + b;
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
if (beta_ == Real(0)) {
return alpha_ * x;
} else {
return alpha_ * x + beta_ * output;
}
}
};
template<typename Real>
struct TransReduceOp<SUM, Real> {
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return x;
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return a + b;
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return x;
}
};
template<typename Real>
struct TransReduceOp<MAX, Real> {
__forceinline__
__device__ Real InitValue() const {
return sizeof(Real) == sizeof(float) ? -CUDART_INF_F : -CUDART_INF;
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return x;
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return fmax(a, b);
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return x;
}
};
template<typename Real>
struct TransReduceOp<MIN, Real> {
__forceinline__
__device__ Real InitValue() const {
return sizeof(Real) == sizeof(float) ? CUDART_INF_F : CUDART_INF;
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return x;
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return min(a, b);
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return x;
}
};
template<typename Real>
struct TransReduceOp<LINFNORM, Real> {
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return abs(x);
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return fmax(a, b);
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return x;
}
};
template<typename Real>
struct TransReduceOp<L2NORM, Real> {
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return x * x;
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return a + b;
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return sqrt(x);
}
};
template<typename Real>
struct TransReduceOp<L1NORM, Real> {
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return abs(x);
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return a + b;
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return x;
}
};
template<typename Real>
struct TransReduceOp<L0NORM, Real> {
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return Real(x == Real(0) ? 0 : 1);
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return a + b;
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return x;
}
};
template<typename Real>
struct TransReduceOp<LPNORM, Real> {
const Real power_;
TransReduceOp(const Real& p) :
power_(p) {
}
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return pow(abs(x), power_);
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return a + b;
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return pow(x, Real(1) / power_);
}
};
// Vector reduce.
template<EnumTransformReduce TransReduceType, typename Real>
__global__
static void _vec_transform_reduce(
const Real* v, Real* result, const int dim, const int inc,
const TransReduceOp<TransReduceType, Real> op) {
__shared__ Real sdata[CU1DBLOCK];
Real tdata = op.InitValue();
const int tid = threadIdx.x;
const int vec_len = dim * inc;
const int grid_stride = gridDim.x * blockDim.x * inc;
int i = (blockIdx.x * blockDim.x + tid) * inc;
// Grid reduce. Loop over the whole vector v.
for (; i < vec_len; i += grid_stride) {
tdata = op.Reduce(tdata, op.Transform(v[i]));
}
sdata[tid] = tdata;
__syncthreads();
// Tree reduce
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]);
}
__syncthreads();
}
// Reduce last warp. Threads implicitly synchronized within a warp.
if (tid < warpSize) {
for (int shift = warpSize; shift > 0; shift >>= 1) {
sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]);
}
}
// Output to vector result.
if (tid == 0)
result[blockIdx.x] = op.PostReduce(sdata[0], result[blockIdx.x]);
}
// Reduce a matrix 'mat' to a column vector 'result'
template<EnumTransformReduce TransReduceType, typename Real>
__global__
static void _transform_reduce_mat_cols(
Real *result, const Real *mat, const MatrixDim d,
const TransReduceOp<TransReduceType, Real> op) {
__shared__ Real sdata[CU1DBLOCK];
const int tid = threadIdx.x;
const int i = blockIdx.x;
const int row_start = i * d.stride;
Real tdata = op.InitValue();
for (int j = tid; j < d.cols; j += CU1DBLOCK) {
tdata = op.Reduce(tdata, op.Transform(mat[row_start + j]));
}
sdata[tid] = tdata;
__syncthreads();
// Tree reduce
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift)
sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]);
__syncthreads();
}
// Reduce last warp. Threads implicitly synchronized within a warp.
if (tid < warpSize) {
for (int shift = warpSize; shift > 0; shift >>= 1)
sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]);
}
// Output to vector result.
if (tid == 0) {
result[i] = op.PostReduce(sdata[0], result[i]);
}
}
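// Reduces each group of group_size consecutive elements of a row of x into one
// element of the corresponding row of y, using the given transform-reduce op.
// Each thread block handles one row (blockIdx.x); threadIdx.x cooperates within
// a group, and threadIdx.y indexes the groups handled concurrently by the block.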
template<EnumTransformReduce TransReduceType, typename Real>
__global__
static void _group_transform_reduce(
Real *y, const Real *x, const MatrixDim d, const int src_stride,
const int group_size, const TransReduceOp<TransReduceType, Real> op) {
__shared__ Real sreduction[CU1DBLOCK];
const int i = blockIdx.x;
const int x_start = i * src_stride;
const int y_start = i * d.stride;
const int threads_per_group = blockDim.x;
// Reduce n groups per thread block
const int n = blockDim.y;
const int len = group_size * n;
// linear thread id
const int tid = threadIdx.y * threads_per_group + threadIdx.x;
int j = threadIdx.y * group_size + threadIdx.x; // col-id of *x
int group_id = threadIdx.y; // col-id of *y
int group_end = x_start + (group_id + 1) * group_size;
while (group_id < d.cols) {
// reduce to threads_per_group elements per group
int x_idx = x_start + j;
Real treduction = op.Transform(x[x_idx]);
x_idx += threads_per_group;
while (x_idx < group_end) {
treduction = op.Reduce(treduction, op.Transform(x[x_idx]));
x_idx += threads_per_group;
}
sreduction[tid] = treduction;
if (threads_per_group > warpSize) {
__syncthreads();
}
// tree-reduce to 2x warpSize elements per group
# pragma unroll
for (int shift = threads_per_group / 2; shift > warpSize; shift >>= 1) {
if (threadIdx.x < shift) {
sreduction[tid] = op.Reduce(sreduction[tid], sreduction[tid + shift]);
}
__syncthreads();
}
// Warp-reduce to 1 element per group.
// Threads implicitly synchronized within the warp.
const int warp_reduce_size =
threads_per_group / 2 < warpSize ? threads_per_group / 2 : warpSize;
if (threadIdx.x < warp_reduce_size) {
# pragma unroll
for (int shift = warp_reduce_size; shift > 0; shift >>= 1) {
sreduction[tid] = op.Reduce(sreduction[tid], sreduction[tid + shift]);
}
}
// Store the result.
if (threadIdx.x == 0) {
y[y_start + group_id] = op.PostReduce(sreduction[tid],
y[y_start + group_id]);
}
j += len;
group_end += len;
group_id += n;
}
}
template<typename Real>
__global__
static void _vec_apply_floor(Real *v, Real floor_val, float *count, int dim) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dim) {
if (v[i] < floor_val) {
v[i] = floor_val;
count[i] = 1;
} else {
count[i] = 0;
}
}
}
template<typename Real>
__global__
static void _vec_apply_ceiling(Real *v, Real ceiling_val, float *count,
int dim) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dim) {
if (v[i] > ceiling_val) {
v[i] = ceiling_val;
count[i] = 1;
} else {
count[i] = 0;
}
}
}
template<typename Real>
__global__
static void _apply_pow(Real* mat, Real power, MatrixDim d) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
int index = i + j * d.stride;
if (i < d.cols && j < d.rows) {
if (power == 1.0)
return;
if (power == 2.0) {
mat[index] = mat[index] * mat[index];
} else if (power == 0.5) {
if (!(mat[index] >= 0.0))
return;
mat[index] = sqrt(mat[index]);
} else {
mat[index] = pow(mat[index], power);
}
}
}
template<typename Real>
__global__
static void _apply_pow_abs(Real* mat, Real power, bool include_sign,
MatrixDim d) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
int index = i + j * d.stride;
if (i < d.cols && j < d.rows) {
if (include_sign == true && mat[index] < 0) {
      if (power == 1.0)
        mat[index] = -std::abs(mat[index]);
      else if (power == 2.0) {
mat[index] = -mat[index] * mat[index];
} else if (power == 0.5) {
mat[index] = -sqrt(std::abs(mat[index]));
} else {
mat[index] = -pow(std::abs(mat[index]), power);
}
} else {
      if (power == 1.0)
        mat[index] = std::abs(mat[index]);
      else if (power == 2.0) {
mat[index] = mat[index] * mat[index];
} else if (power == 0.5) {
mat[index] = sqrt(std::abs(mat[index]));
} else if (power < 0.0 && mat[index] == 0.0) {
mat[index] = 0.0;
} else {
mat[index] = pow(std::abs(mat[index]), power);
}
}
}
}
template<typename Real>
__global__
static void _apply_heaviside(Real* mat, MatrixDim d) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
int index = i + j * d.stride;
if (i < d.cols && j < d.rows)
mat[index] = (mat[index] > 0.0 ? 1.0 : 0.0);
}
template<typename Real>
__global__
static void _apply_floor(Real* mat, Real floor_val, MatrixDim d) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
int index = i + j * d.stride;
if (i < d.cols && j < d.rows) {
mat[index] = max(mat[index], floor_val);
}
}
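// The reorder-based kernels below (_copy_cols, _add_cols, _copy_rows,
// _add_rows, _mul_rows, _add_to_rows) use the convention that a negative index
// in 'reorder' means "no source": the copy kernels write zero in that case,
// while the add/mul variants leave the destination unchanged (or, for
// _add_to_rows, skip that source row).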
template<typename Real>
__global__
static void _copy_cols(Real* dst, const Real *src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dst_dim.cols && j < dst_dim.rows) {
int index = reorder[i], dst_index = j * dst_dim.stride + i;
if (index >= 0) {
      int src_index = j * src_stride + index;
Real val = src[src_index];
dst[dst_index] = val;
} else {
dst[dst_index] = 0.0;
}
}
}
template<typename Real>
__global__
static void _add_cols(Real* dst, const Real *src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dst_dim.cols && j < dst_dim.rows) {
int index = reorder[i], dst_index = j * dst_dim.stride + i;
if (index >= 0) {
int src_index = j * src_stride + index;
Real val = src[src_index];
dst[dst_index] += val;
}
}
}
template<typename Real>
__global__
static void _copy_rows(Real* dst, const Real *src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dst_dim.cols && j < dst_dim.rows) {
int index = reorder[j], dst_index = j * dst_dim.stride + i;
if (index >= 0) {
      int src_index = index * src_stride + i;
Real val = src[src_index];
dst[dst_index] = val;
} else {
dst[dst_index] = 0;
}
}
}
template<typename Real>
__global__
static void _copy_rows(Real* dst, const Real * const *src, MatrixDim dst_dim) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dst_dim.cols && j < dst_dim.rows) {
int dst_index = j * dst_dim.stride + i;
const Real *pointer = src[j];
if (pointer != NULL) {
dst[dst_index] = pointer[i];
} else {
dst[dst_index] = 0;
}
}
}
template<typename Real>
__global__
static void _copy_to_rows(Real* const * dst, const Real *src,
MatrixDim src_dim) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < src_dim.cols && j < src_dim.rows) {
Real *pointer = dst[j];
if (pointer != NULL) {
pointer[i] = src[j * src_dim.stride + i];
}
}
}
template<typename Real>
__global__
static void _add_rows(Real alpha, Real* dst, const Real *src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dst_dim.cols && j < dst_dim.rows) {
int dst_index = j * dst_dim.stride + i;
if (reorder[j] >= 0) {
int src_index = reorder[j] * src_stride + i;
dst[dst_index] += alpha * src[src_index];
}
}
}
template<typename Real>
__global__
static void _mul_rows(Real* dst, const Real *src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dst_dim.cols && j < dst_dim.rows) {
int dst_index = j * dst_dim.stride + i;
if (reorder[j] >= 0) {
int src_index = reorder[j] * src_stride + i;
dst[dst_index] *= src[src_index];
}
}
}
template<typename Real>
__global__
static void _add_rows(Real alpha, Real* dst, const Real * const *src,
MatrixDim dst_dim) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dst_dim.cols && j < dst_dim.rows) {
int dst_index = j * dst_dim.stride + i;
if (src[j] != NULL) {
dst[dst_index] += alpha * src[j][i];
}
}
}
template<typename Real>
__global__
static void _add_to_rows(Real alpha, Real* dst, const Real *src,
const MatrixIndexT_cuda* reorder, MatrixDim src_dim,
int dst_stride) {
int c = blockIdx.x * blockDim.x + threadIdx.x; // col index
int r = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (c < src_dim.cols && r < src_dim.rows) {
int src_index = r * src_dim.stride + c;
if (reorder[r] >= 0) {
int dst_index = reorder[r] * dst_stride + c;
dst[dst_index] += alpha * src[src_index];
}
}
}
template<typename Real>
__global__
static void _add_to_rows(Real alpha, Real* const * dst, const Real *src,
MatrixDim src_dim) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < src_dim.cols && j < src_dim.rows) {
if (dst[j] != NULL) {
dst[j][i] += alpha * src[j * src_dim.stride + i];
}
}
}
template<typename Real>
__global__
static void _apply_ceiling(Real* mat, Real ceiling_val, MatrixDim d) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int index = i + j * d.stride;
if (i < d.cols && j < d.rows) {
mat[index] = min(mat[index], ceiling_val);
}
}
template<typename Real>
__global__
static void _invert_elements(Real* data, MatrixDim d) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int index = i + j * d.stride;
if (i < d.cols && j < d.rows)
data[index] = 1.0 / data[index];
}
// matrix-wise, do data = alpha * A * B^T + beta * data,
// where B is a block matrix.
template<typename Real>
__global__
static void _add_mat_blockmat_trans(Real *data, MatrixDim dim,
const Real *A_data, int A_num_rows,
int A_num_cols, int A_row_stride,
int A_col_stride,
const CuBlockMatrixData *B_cu_data,
int B_num_blocks, Real alpha, Real beta) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data"
int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B.
if (i >= A_num_rows || j >= B_num_blocks)
return;
const CuBlockMatrixData &cu_data = B_cu_data[j];
// BT means B transposed.
int BT_row_start = cu_data.col_offset, BT_col_start = cu_data.row_offset,
BT_num_rows = cu_data.matrix_dim.cols, BT_num_cols =
cu_data.matrix_dim.rows, BT_col_stride = cu_data.matrix_dim.stride;
// Cast from void;
const Real *B_data = static_cast<Real*>(cu_data.matrix_data);
// we avoided a bunch of hassle by doing this (relates to Ansi-C requirement).
for (int k = 0; k < BT_num_cols; k++) {
const Real *this_BT_col = B_data + k * BT_col_stride;
const Real *this_A_row = A_data + i * A_row_stride
+ BT_row_start * A_col_stride;
// this_A_row points to the element A[i][BT_row_start], it's really just
// part of this row of A.
Real sum = 0.0;
for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B.
sum += this_BT_col[l] * this_A_row[l * A_col_stride];
int index = i * dim.stride + (k + BT_col_start);
data[index] = alpha * sum + beta * data[index];
}
}
template<typename Real>
__global__
static void _add_mat_blockmat(Real *data, MatrixDim dim, const Real *A_data,
int A_num_rows, int A_num_cols, int A_row_stride,
int A_col_stride,
const CuBlockMatrixData *B_cu_data,
int B_num_blocks, Real alpha, Real beta) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data"
int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B.
if (i >= A_num_rows || j >= B_num_blocks)
return;
const CuBlockMatrixData &block_data = B_cu_data[j];
int B_row_start = block_data.row_offset, B_col_start = block_data.col_offset,
B_num_rows = block_data.matrix_dim.rows, B_num_cols =
block_data.matrix_dim.cols, B_row_stride =
block_data.matrix_dim.stride;
// Cast from void;
const Real *B_data = static_cast<Real*>(block_data.matrix_data);
// we avoided a bunch of hassle by doing this (relates to Ansi-C requirement).
for (int k = 0; k < B_num_cols; k++) {
const Real *this_B_col = B_data + k;
const Real *this_A_row = A_data + i * A_row_stride
+ B_row_start * A_col_stride;
// this_A_row points to the element A[i][B_row_start], it's really just
// part of this row of A.
Real sum = 0.0;
for (int l = 0; l < B_num_rows; l++) // l indexes rows of B.
sum += this_B_col[l * B_row_stride] * this_A_row[l * A_col_stride];
int index = i * dim.stride + (k + B_col_start);
data[index] = alpha * sum + beta * data[index];
}
}
// For a block matrix B, does B = alpha * C * D + beta * B.
// the (x,y,z) indices are the block index, then the row
// and column indices within the block. Note: transposition of C and D
// is handled by swapping the (num_rows,num_cols) and (row_stride,col_stride),
// so it's invisible to this code. The num-cols and num-rows of C and D
// are only provided to the extent that they are not already determined
// by other quantities.
template<typename Real>
__global__
static void _block_add_mat_mat(CuBlockMatrixData *B_cu_data, int num_blocks,
const Real *C_data, int C_num_cols,
int C_row_stride, int C_col_stride,
const Real *D_data, int D_row_stride,
int D_col_stride, Real alpha, Real beta) {
int b = blockIdx.x * blockDim.x + threadIdx.x; // block-index into B.
int i = blockIdx.y * blockDim.y + threadIdx.y; // row-index into b'th block
int j = blockIdx.z * blockDim.z + threadIdx.z; // col-index into b'th block
if (b >= num_blocks)
return;
const CuBlockMatrixData &block_data = B_cu_data[b];
if (i >= block_data.matrix_dim.rows || j >= block_data.matrix_dim.cols)
return; // we're outside the dimensions of the b'th block.
// B_elem is the element of B we're writing to.
Real *B_elem = reinterpret_cast<Real*>(block_data.matrix_data)
+ i * block_data.matrix_dim.stride + j;
Real B_val = *B_elem;
// B_row and B_col are the (row, col) index into the full matrix B.
int B_row = block_data.row_offset + i, B_col = block_data.col_offset + j;
const Real *C_row_data = C_data + C_row_stride * B_row, *D_col_data = D_data
+ D_col_stride * B_col;
Real sum = 0.0;
for (int k = 0; k < C_num_cols; k++) {
sum += C_row_data[k * C_col_stride] * D_col_data[k * D_row_stride];
}
*B_elem = alpha * sum + beta * B_val;
}
template<typename Real>
__global__
static void _blockadd_mat_blockmat_trans(Real *data, MatrixDim dim,
const Real *A_data, int A_num_rows,
int A_num_cols, int A_row_stride,
int A_col_stride,
const CuBlockMatrixData *B_cu_data,
int B_num_blocks, Real alpha,
Real beta) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data"
int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B.
if (i >= A_num_rows || j >= B_num_blocks)
return;
const CuBlockMatrixData &cu_data = B_cu_data[j];
// BT means B transposed.
int BT_row_start = cu_data.col_offset, BT_col_start = cu_data.row_offset,
BT_num_rows = cu_data.matrix_dim.cols, BT_num_cols =
cu_data.matrix_dim.rows, BT_col_stride = cu_data.matrix_dim.stride;
// Cast from void;
const Real *B_data = static_cast<Real*>(cu_data.matrix_data);
// we avoided a bunch of hassle by doing this (relates to Ansi-C requirement).
for (int k = 0; k < BT_num_cols; k++) {
const Real *this_BT_col = B_data + k * BT_col_stride;
const Real *this_A_row = A_data + i * A_row_stride
+ BT_row_start * A_col_stride;
// this_A_row points to the element A[i][BT_row_start], it's really just
// part of this row of A.
Real sum = 0.0;
for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B.
sum += this_BT_col[l] * this_A_row[l * A_col_stride];
int index = i * dim.stride + (k + BT_col_start);
data[index] = alpha * sum + beta * data[index];
}
}
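// For each output element (row, col), sums the elements of 'src_data' in the
// same row whose column indices lie in the half-open range
// [indices[col].first, indices[col].second).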
template<typename Real>
__global__
static void _sum_column_ranges(Real *data, MatrixDim dim, const Real *src_data,
MatrixDim src_dim, const Int32Pair *indices) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (row >= dim.rows || col >= dim.cols)
return;
int dst_index = row * dim.stride + col, src_start_index = row * src_dim.stride
+ indices[col].first, src_end_index = row * src_dim.stride
+ indices[col].second;
Real sum = 0.0;
for (int index = src_start_index; index < src_end_index; index++)
sum += src_data[index];
data[dst_index] = sum;
}
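// For each output element (row, col), adds the elements of 'src_data' in
// column 'col' whose row indices lie in the half-open range
// [indexes[row].first, indexes[row].second).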
template<typename Real>
__global__
static void _add_row_ranges(Real *data, MatrixDim dim, const Real *src_data,
MatrixDim src_dim, const Int32Pair *indexes) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (row >= dim.rows || col >= dim.cols)
return;
int dst_index = row * dim.stride + col;
int src_index_start = indexes[row].first, src_index_end = indexes[row].second;
for (int row_index = src_index_start; row_index < src_index_end; row_index++)
data[dst_index] += src_data[row_index * src_dim.stride + col];
}
template<typename Real>
__global__
static void _soft_hinge(Real*y, const Real*x, MatrixDim d, int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride, src_index = i + j * src_stride;
// compute the function y[index] = log(1 + exp(x[index]))
if (i < d.cols && j < d.rows) {
Real val = x[src_index], result;
if (val >= 10.0)
result = val; // function approaches y=x as x gets large
else
result = log1p(exp(val));
y[dst_index] = result;
}
}
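// Computes y(j, i) = p-norm of the group of group_size consecutive input
// columns x(j, i * group_size .. (i + 1) * group_size - 1). If the direct
// computation yields NaN, it is redone with the group rescaled by its largest
// absolute value, which avoids problematic intermediate values.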
template<typename Real>
__global__
static void _group_pnorm(Real *y, const Real *x, MatrixDim d, int src_stride,
int group_size, Real power) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (j < d.rows && i < d.cols) {
int dst_index = i + j * d.stride;
Real tmp = 0;
int src_begin_index = i * group_size + j * src_stride;
int src_end_index = src_begin_index + group_size;
for (int src_index = src_begin_index; src_index < src_end_index;
src_index++) {
tmp += pow(std::abs(x[src_index]), power);
}
tmp = pow(tmp, Real(1.0 / power));
if (!isnan(tmp)) {
y[dst_index] = tmp;
} else {
Real max_value = x[src_begin_index], min_value = max_value;
for (int src_index = src_begin_index + 1; src_index < src_end_index;
src_index++) {
if (x[src_index] > max_value)
max_value = x[src_index];
if (x[src_index] < min_value)
min_value = x[src_index];
}
tmp = 0.0;
// let max_value be the largest abs(value)
Real max_abs_value = (max_value > -min_value ? max_value : -min_value);
if (max_abs_value == 0) {
y[dst_index] = 0.0;
} else {
for (int src_index = src_begin_index; src_index < src_end_index;
src_index++) {
Real x_scaled = x[src_index] / max_abs_value;
tmp += pow(std::abs(x_scaled), Real(power));
}
y[dst_index] = pow(tmp, Real(1.0 / power)) * max_abs_value;
}
}
}
}
/*
* cu::
*/
template<typename Real>
__global__
static void _sigmoid(Real*y, const Real*x, MatrixDim d, int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride, src_index = i + j * src_stride;
if (i < d.cols && j < d.rows) {
Real res = 1.0 / (1.0 + exp(-x[src_index]));
y[dst_index] = res;
}
}
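// Back-propagation through the sigmoid: eout = y * (1 - y) * e, where y is the
// sigmoid output and e is the derivative w.r.t. y.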
template<typename Real>
__global__
static void _diff_sigmoid(Real*eout, const Real*e, const Real*y, MatrixDim d,
int e_stride, int y_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride;
int e_index = i + j * e_stride;
int y_index = i + j * y_stride;
if (i < d.cols && j < d.rows)
eout[dst_index] = y[y_index] * (1.0 - y[y_index]) * e[e_index];
}
template<typename Real>
__global__
static void _tanh(Real*y, const Real*x, MatrixDim d, int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride, src_index = i + j * src_stride;
if (i < d.cols && j < d.rows) {
Real exp_2x = exp(2.0 * x[src_index]);
Real res;
if (isinf(exp_2x)) {
res = 1.0;
} else {
res = (exp_2x - 1.0) / (exp_2x + 1.0);
}
y[dst_index] = res;
}
}
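// Back-propagation through tanh: eout = (1 - y^2) * e, where y is the tanh
// output and e is the derivative w.r.t. y.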
template<typename Real>
__global__
static void _diff_tanh(Real*eout, const Real*e, const Real*y, MatrixDim d,
int e_stride, int y_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride;
int e_index = i + j * e_stride;
int y_index = i + j * y_stride;
if (i < d.cols && j < d.rows)
eout[dst_index] = (1.0 - y[y_index] * y[y_index]) * e[e_index];
}
/*
This function copies x to y while bounding the elements
away from zero using the scalar function:
y = x if x <= -epsilon or x >= +epsilon
+epsilon if 0 <= x < epsilon
-epsilon if -epsilon < x < 0.
where:
x is the source matrix, of dimension and stride given by d
epsilon > 0
y is the destination matrix, with the num-rows and num-cols
given by d, but stride given by y_stride.
*/
template<typename Real>
__global__
static void _ensure_nonzero(const Real *x, MatrixDim d, Real epsilon,
int y_stride, Real *y) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int x_index = i + j * d.stride,
y_index = i + j * y_stride;
  const bool in_range = (i < d.cols && j < d.rows);
  Real dst = Real(0);
  if (in_range) {
    Real src = x[x_index];
    if (src <= -epsilon || src >= epsilon)
      dst = src;
    else if (src >= 0)
      dst = epsilon;
    else
      dst = -epsilon;
  }
  // Synchronize outside the bounds check (calling __syncthreads() under
  // divergent control flow is undefined behavior); this also groups the writes
  // below so they can coalesce, which should improve speed.
  __syncthreads();
  if (in_range)
    y[y_index] = dst;
}
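// Parametric ReLU with per-column slopes:
// y(j, i) = a[i] * x(j, i) if x(j, i) > 0, else b[i] * x(j, i).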
template<typename Real>
__global__
static void _parametric_relu(Real* y, const Real* x, MatrixDim d, int src_stride,
const Real* a, const Real* b) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride,
src_index = i + j * src_stride;
if (i < d.cols && j < d.rows) {
Real res = (x[src_index] > 0.0) ? a[i] * x[src_index] : b[i] * x[src_index];
y[dst_index] = res;
}
}
template<typename Real>
__global__
static void _diff_parametric_relu(Real* eout, const Real* e, const Real* y,
MatrixDim d, int e_stride, int y_stride,
const Real* a, const Real* b) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride;
int e_index = i + j * e_stride;
int y_index = i + j * y_stride;
if (i < d.cols && j < d.rows )
eout[dst_index] = (y[y_index] > 0.0 ? a[i] * e[e_index] : b[i] * e[e_index]);
}
template<typename Real>
__global__
static void _heaviside(Real*y, const Real*x, MatrixDim d, int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride, src_index = i + j * src_stride;
if (i < d.cols && j < d.rows) {
Real res = (x[src_index] > 0.0 ? 1.0 : 0.0);
y[dst_index] = res;
}
}
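// Row-wise softmax. Each thread block handles one row (blockIdx.x) with
// CU1DBLOCK threads striding over the columns; the row maximum is subtracted
// before exponentiation for numerical stability. An illustrative launch,
// assuming the grid size equals the number of rows (the actual configuration
// is chosen by the host-side wrappers, not in this file), would be:
//   _softmax_reduce<<<num_rows, CU1DBLOCK>>>(y, x, d, src_stride);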
template<typename Real>
__global__
static void _softmax_reduce(Real*y, const Real*x, MatrixDim d, int src_stride) {
__shared__ Real smem[CU1DBLOCK];
const int i = blockIdx.x;
const int x_start = i * src_stride;
const int y_start = i * d.stride;
const int tid = threadIdx.x;
// find max element of the row
// reduce to CU1DBLOCK elements per row.
Real tmax = sizeof(Real) == sizeof(float) ? -CUDART_INF_F : -CUDART_INF;
for (int j = tid; j < d.cols; j += CU1DBLOCK) {
tmax = fmax(tmax, x[x_start + j]);
}
smem[tid] = tmax;
__syncthreads();
// reduce to 2x warpSize elements per row
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
smem[tid] = fmax(smem[tid], smem[tid + shift]);
}
__syncthreads();
}
// reduce to 1 element per row
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
smem[tid] = fmax(smem[tid], smem[tid + shift]);
}
}
// broadcast max to all threads
__syncthreads();
Real max = smem[0];
// sum_j(exp(x(i,j)-max))
// reduce to CU1DBLOCK elements per row.
Real tsum = Real(0);
for (int j = tid; j < d.cols; j += CU1DBLOCK) {
tsum += exp(x[x_start + j] - max);
}
smem[tid] = tsum;
__syncthreads();
// reduce to 2x warpSize elements per row
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
__syncthreads();
}
// reduce to 1 element per row
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
smem[tid] += smem[tid + shift];
}
}
// broadcast sum to all threads
__syncthreads();
Real inv_sum = Real(1) / smem[0];
// normalize the row
for (int j = tid; j < d.cols; j += CU1DBLOCK) {
y[y_start + j] = exp(x[x_start + j] - max) * inv_sum;
}
}
// The output y_i = scale * x_i,
// and we want to RMS value of the y_i to equal target_rms,
// so y^t y = D * target_rms^2 (if y is one row of the input).
// we need to have scale = 1.0 / sqrt(x^t x / (D * target_rms^2)).
// there is also flooring involved, to avoid division-by-zero
// problems. It's important for the backprop, that the floor's
// square root is exactly representable as float.
// If add_log_stddev is true, log(max(epsi, sqrt(x^t x / D)))
// is an extra dimension of the output.
//
// A 1D grid is used, with one 1D block of CU1DBLOCK (256) threads per row of
// the data matrix. Strided memory access is used if the row is longer than
// 256 elements.
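// Worked example (illustrative): with D = 4, target_rms = 1.0 and input row
// x = {2, 2, 2, 2}, x^t x = 16, so scale = 1.0 / sqrt(16 / 4) = 0.5 and the
// output row is {1, 1, 1, 1}; with add_log_stddev, the extra column holds
// log(sqrt(16 / 4)) = log(2).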
template<typename Real>
__global__
static void _normalize_per_row(Real *y, int y_stride, const Real *x,
MatrixDim x_d, Real target_rms,
bool add_log_stddev) {
const int i = blockIdx.x;
const int tid = threadIdx.x;
const Real* x_row = x + i * x_d.stride;
__shared__ Real ssum[CU1DBLOCK];
// Reduce x_j^2 to CU1DBLOCK elements per row
Real tsum = Real(0);
for (int j = tid; j < x_d.cols; j += CU1DBLOCK) {
tsum += x_row[j] * x_row[j];
}
ssum[tid] = tsum;
__syncthreads();
// Tree reduce to 2x warpSize elements per row
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift)
ssum[tid] += ssum[tid + shift];
__syncthreads();
}
// Reduce last warp to 1 element per row.
// Threads implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
ssum[tid] += ssum[tid + shift];
}
}
const Real kSquaredNormFloor = 1.3552527156068805425e-20; // 2^-66
if (tid == 0) {
ssum[0] = sqrt(
fmax(ssum[0] / (target_rms * target_rms * x_d.cols), kSquaredNormFloor));
}
// Broadcast floored stddev to all threads.
__syncthreads();
const Real stddev_div_target_rms = ssum[0];
const Real scale = Real(1) / stddev_div_target_rms;
// Store normalized input to output
Real* y_row = y + i * y_stride;
for (int j = tid; j < x_d.cols; j += CU1DBLOCK) {
y_row[j] = x_row[j] * scale;
}
if (tid == 0 && add_log_stddev) {
y_row[x_d.cols] = log(stddev_div_target_rms * target_rms);
}
}
template<typename Real>
__global__
static void _diff_normalize_per_row(Real *id, int id_stride, const Real *iv,
MatrixDim iv_dim, const Real* od,
int od_stride, Real target_rms,
bool add_log_stddev) {
const Real kSquaredNormFloor = 1.3552527156068805425e-20; // 2^-66
const Real kInvNormFloor = 8589934592.0;
const int tid = threadIdx.x;
const int i = blockIdx.x;
const Real* iv_row = iv + i * iv_dim.stride;
const Real* od_row = od + i * od_stride;
// reduce to CU1DBLOCK elements per row
Real dot_products = Real(0);
Real in_norm = Real(0);
for (int j = tid; j < iv_dim.cols; j += CU1DBLOCK) {
const Real iv_ij = iv_row[j];
dot_products += iv_ij * od_row[j];
in_norm += iv_ij * iv_ij;
}
__shared__ Real sprod[CU1DBLOCK];
__shared__ Real snorm[CU1DBLOCK];
sprod[tid] = dot_products;
snorm[tid] = in_norm;
__syncthreads();
// reduce to 2x warpSize elements per row
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
sprod[tid] += sprod[tid + shift];
snorm[tid] += snorm[tid + shift];
}
__syncthreads();
}
// reduce to 1 element per row
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
sprod[tid] += sprod[tid + shift];
snorm[tid] += snorm[tid + shift];
}
}
// broadcast the sum results
__syncthreads();
dot_products = sprod[0];
in_norm = snorm[0];
Real log_stddev_deriv;
if (add_log_stddev) {
log_stddev_deriv = Real(1) / max(in_norm, iv_dim.cols * kSquaredNormFloor)
* od_row[iv_dim.cols];
}
const Real inv_d_scaled = Real(1) / (iv_dim.cols * target_rms * target_rms);
in_norm = Real(1) / sqrt(max(in_norm * inv_d_scaled, kSquaredNormFloor));
const Real f = in_norm == kInvNormFloor ? Real(0) : in_norm;
dot_products *= f * f * f * inv_d_scaled;
for (int j = tid; j < iv_dim.cols; j += CU1DBLOCK) {
const Real iv_ij = iv_row[j];
Real id_ij = id[i * id_stride + j];
if (add_log_stddev) {
id_ij += log_stddev_deriv * iv_ij;
}
if (id != od) {
id_ij += in_norm * od_row[j];
} else {
id_ij *= in_norm;
}
id_ij -= dot_products * iv_ij;
id[i * id_stride + j] = id_ij;
}
}
// Per-row log-softmax operation on 'x', with writing to 'y'.
// note, x and y may point to the same memory. This is equivalent to setting
// matrix y to matrix x and then, for each row of y, subtracting the offset that
// will make exp(y.row[j]) sum to 1 for each row j.
//
// It expects to be called with CU1DBLOCK threads.
// The number of blocks [i.e. the gridDim] equals y_dim.rows,
// so one block of threads processes each row. x and y are
// expected to have the same dimension, but possibly different row strides.
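// Worked example (illustrative): for the row x = {1, 2, 3}: max = 3,
// sum_j exp(x_j - max) = exp(-2) + exp(-1) + exp(0) ~= 1.5032, so
// log_sum ~= 0.4076 and y ~= {-2.41, -1.41, -0.41} (exp(y) sums to 1).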
template<typename Real>
__global__
static void _log_softmax_reduce(Real* y, const Real* x, MatrixDim y_dim,
int x_stride) {
__shared__ Real smem[CU1DBLOCK];
const int i = blockIdx.x;
const int x_start = i * x_stride;
const int y_start = i * y_dim.stride;
const int tid = threadIdx.x;
// find max element of the row
// reduce to CU1DBLOCK elements per row.
Real tmax = -1e20;
for (int j = tid; j < y_dim.cols; j += CU1DBLOCK) {
tmax = fmax(tmax, x[x_start + j]);
}
smem[tid] = tmax;
__syncthreads();
// reduce to 2x warpSize elements per row
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
smem[tid] = fmax(smem[tid], smem[tid + shift]);
}
__syncthreads();
}
// reduce to 1 element per row
if (tid < warpSize) {
for (int shift = warpSize; shift > 0; shift >>= 1) {
smem[tid] = fmax(smem[tid], smem[tid + shift]);
}
}
// broadcast max to all threads
__syncthreads();
Real max = smem[0];
// sum_j(exp(x(i,j)-max))
// reduce to CU1DBLOCK elements per row.
Real tsum = Real(0);
for (int j = tid; j < y_dim.cols; j += CU1DBLOCK) {
tsum += exp(x[x_start + j] - max);
}
smem[tid] = tsum;
__syncthreads();
// reduce to 2x warpSize elements per row
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
__syncthreads();
}
// reduce to 1 element per row
if (tid < warpSize) {
for (int shift = warpSize; shift > 0; shift >>= 1) {
smem[tid] += smem[tid + shift];
}
}
// broadcast sum to all threads
__syncthreads();
Real log_sum = log(smem[0]);
// normalize the row
for (int j = tid; j < y_dim.cols; j += CU1DBLOCK) {
y[y_start + j] = x[x_start + j] - max - log_sum;
}
}
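// _splice builds each output row by concatenating several row-shifted copies
// of the input: output element (j, i) is taken from input column i % d_in.cols
// of input row j + off[i / d_in.cols], with the source row clamped to
// [0, d_in.rows - 1] at the edges. For example (illustrative), with
// d_in.cols = 40 and off = {-1, 0, +1}, output row j is the concatenation of
// input rows j-1, j and j+1, so d_out.cols = 120.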
template<typename Real>
__global__
static void _splice(Real* y, const Real* x, const int32_cuda* off,
MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d_out.stride;
if (i < d_out.cols && j < d_out.rows) {
int32_cuda src_col = i % d_in.cols;
int32_cuda src_row = j + off[i / d_in.cols];
if (src_row < 0)
src_row = 0;
if (src_row >= d_in.rows)
src_row = d_in.rows - 1;
y[index] = x[src_col + src_row * d_in.stride];
}
}
template<typename Real>
__global__
static void _take_mean(const Real* x, Real* y, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index1 = i + j * d_in.stride;
int32_cuda index2 = j + i * d_in.stride;
if (i <= j && j < d_in.rows) {
int32_cuda index_sp = (j * (j + 1) / 2) + i;
y[index_sp] = 0.5 * (x[index1] + x[index2]);
}
}
template<typename Real>
__global__
static void _take_lower(const Real* x, Real* y, MatrixDim d_in) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index
int j = blockIdx.y * blockDim.y + threadIdx.y; // col-index
if (j > i || i >= d_in.rows)
return;
int index = i * d_in.stride + j;
Real val = x[index];
int index_sp = (i * (i + 1) / 2) + j;
y[index_sp] = val;
}
template<typename Real>
__global__
static void _take_upper(const Real* x, Real* y, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index
if (j < i || j >= d_in.rows)
return;
int32_cuda index = i * d_in.stride + j;
int32_cuda index_sp = (j * (j + 1) / 2) + i;
y[index_sp] = x[index];
}
template<typename Real>
__global__
static void _vec_copy_diag_from_packed(Real* y, const Real* x, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = ((i + 1) * (i + 2) / 2) - 1;
if (i < dim) {
y[i] = x[index];
}
}
template<typename Real>
__global__
static void _copy_from_sp(const Real* x, Real* y, MatrixDim dim) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // column index
  int j = blockIdx.y * blockDim.y + threadIdx.y;  // row index
if (i < dim.cols && j < dim.rows) {
int dst_index = i + j * dim.stride, src_index;
if (j <= i) { // no transpose
src_index = (i * (i + 1) / 2) + j;
} else { // transpose.
src_index = (j * (j + 1) / 2) + i;
}
y[dst_index] = x[src_index];
}
}
template<typename Real>
__global__
static void _copy(Real* y, const Real* x, const int32_cuda* copy_from,
MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d_out.stride;
if (i < d_out.cols && j < d_out.rows) {
int32_cuda src_col = copy_from[i];
if (src_col >= 0 && src_col < d_in.cols) {
y[index] = x[src_col + j * d_in.stride];
} else {
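      // Out-of-range source column: store +inf (1.0 / 0.0) in the output.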
y[index] = 1.0 / 0.0;
}
}
}
template<typename Real>
__global__
static void _one(Real* x, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dim) {
x[i] = 1.0;
}
}
template<typename Real>
__global__
static void _randomize(Real* y, const Real* x, const int32_cuda* copy_from,
MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d_out.stride;
if (i < d_out.cols && j < d_out.rows) {
int32_cuda src_row = copy_from[j];
y[index] = x[i + src_row * d_in.stride];
}
}
template<typename Real>
__global__
static void _regularize_l1(Real* wei, Real* grad, Real l1, Real lr, MatrixDim d,
int stride_grad) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride, grad_index = i + j * stride_grad;
if (i < d.cols && j < d.rows) {
if (wei[index] == 0.0)
return; //skip L1 if zero weight!
Real l1_signed = l1;
if (wei[index] < 0.0) //flip sign
l1_signed = -l1;
Real before = wei[index];
//simulate update
Real after = wei[index] - lr * grad[grad_index] - l1_signed;
if ((after > 0.0) ^ (before > 0.0)) { //sign changed?
wei[index] = 0.0;
grad[grad_index] = 0.0;
} else {
wei[index] -= l1_signed;
}
}
}
template<typename Real>
__global__
static void _find_row_max_id(const Real* mat, Real* vec_val, int32_cuda* vec_id,
MatrixDim d) {
const int32_cuda i = blockIdx.x;
const int32_cuda base = i * d.stride;
const int32_cuda tid = threadIdx.x;
__shared__ Real smax[CU1DBLOCK];
__shared__ int32_cuda sidx[CU1DBLOCK];
Real tmax = -1e20;
int32_cuda tidx = -1;
// Loop over blocks for coalesced memory access.
for (int32_cuda j = tid; j < d.cols; j += CU1DBLOCK) {
const Real val = mat[base + j];
if (val > tmax) {
tmax = val;
tidx = j;
}
}
smax[tid] = tmax;
sidx[tid] = tidx;
// Parallel reduce
#pragma unroll
for (int32_cuda num_working_threads = CU1DBLOCK / 2;
num_working_threads >= warpSize; num_working_threads >>= 1) {
__syncthreads();
if (tid < num_working_threads) {
if (smax[tid + num_working_threads] > smax[tid]) {
smax[tid] = smax[tid + num_working_threads];
sidx[tid] = sidx[tid + num_working_threads];
}
}
}
// Warp reduce without __syncthreads()
  // (note: threads within a warp are implicitly synchronized, so no explicit
  //  sync is needed here)
if (tid < warpSize / 2) {
#pragma unroll
for (int32_cuda num_working_threads = warpSize / 2; num_working_threads > 0;
num_working_threads >>= 1) {
if (smax[tid + num_working_threads] > smax[tid]) {
smax[tid] = smax[tid + num_working_threads];
sidx[tid] = sidx[tid + num_working_threads];
}
}
}
if (tid == 0) {
if (vec_val) {
vec_val[i] = smax[0];
}
vec_id[i] = sidx[0];
}
}
template<typename Real>
__global__
static void _diff_xent(const int32_cuda* vec_tgt, Real* mat_net_out,
Real* vec_log_post, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
if (i > 0)
return;
if (j < d.rows) {
int32_cuda index = vec_tgt[j] + j * d.stride;
Real value = mat_net_out[index];
if (value < 1e-20)
value = 1e-20;
vec_log_post[j] = log(value);
mat_net_out[index] -= 1.0;
}
}
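// Backprop through the softmax function. With y ('value') the softmax output
// and e ('diff') the derivative w.r.t. that output, this computes, for each
// row i,
//   x(i,j) = y(i,j) * (e(i,j) - sum_k y(i,k) * e(i,k)).
// It expects to be called with CU1DBLOCK threads, one block per matrix row.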
template<typename Real>
__global__
static void _diff_softmax(Real* x, const MatrixDim dim, const Real* value,
const int value_stride, const Real* diff,
const int diff_stride) {
__shared__ Real ssum[CU1DBLOCK];
const int tid = threadIdx.x;
const int i = blockIdx.x;
const int value_start = i * value_stride;
const int diff_start = i * diff_stride;
const int x_start = i * dim.stride;
// Loop along the matrix row. Reduce to CU1DBLOCK elements per row.
Real tsum = Real(0);
for (int j = tid; j < dim.cols; j += CU1DBLOCK) {
tsum += value[value_start + j] * diff[diff_start + j];
}
ssum[tid] = tsum;
__syncthreads();
// Tree reduce to 2x warpSize elements.
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
ssum[tid] += ssum[tid + shift];
}
__syncthreads();
}
// Warp reduce to 1 element. Threads implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
ssum[tid] += ssum[tid + shift];
}
}
// Broadcast result to all threads
__syncthreads();
const Real pe = ssum[0];
// Apply element-wise x = value * (diff - pe)
for (int j = tid; j < dim.cols; j += CU1DBLOCK) {
x[x_start + j] = value[value_start + j] * (diff[diff_start + j] - pe);
}
}
// Differentiate backward through the log softmax function.
// "out_value" is the log softmax output. Does, for each row i,
// in_deriv(i) = out_deriv(i) - sum(out_deriv(i)) .* exp(out_value(i))
// ???(i) is row-vector.
// CUDA thread layout: 1 thread block (CU1DBLOCK == 256 threads) per matrix-row.
template<typename Real>
__global__
static void _diff_log_softmax(const MatrixDim in_deriv_dim,
const Real* out_value, const int out_value_stride,
const Real* out_deriv, const int out_deriv_stride,
Real* in_deriv) {
__shared__ Real ssum[CU1DBLOCK];
const int tid = threadIdx.x;
const int i = blockIdx.x;
const int out_value_start = i * out_value_stride;
const int out_deriv_start = i * out_deriv_stride;
const int in_deriv_start = i * in_deriv_dim.stride;
// Loop along the matrix row. Reduce to CU1DBLOCK elements per row.
Real tsum = Real(0);
for (int j = tid; j < in_deriv_dim.cols; j += CU1DBLOCK) {
tsum += out_deriv[out_deriv_start + j];
}
ssum[tid] = tsum;
__syncthreads();
// Tree reduce to 2x warpSize elements.
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
ssum[tid] += ssum[tid + shift];
}
__syncthreads();
}
// Warp reduce to 1 element. Threads implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
ssum[tid] += ssum[tid + shift];
}
}
// Broadcast result to all threads
__syncthreads();
const Real sum_e = ssum[0];
// Apply element-wise x = out_deriv - exp(value) * sum_e
for (int j = tid; j < in_deriv_dim.cols; j += CU1DBLOCK) {
in_deriv[in_deriv_start + j] = out_deriv[out_deriv_start + j]
- exp(out_value[out_value_start + j]) * sum_e;
}
}
/**
this function computes the core part of the LSTM nonlinearity.
@param [in] in A matrix, of dimension num_rows by 5*cell_dim
(i.e. its num-cols must be a multiple of 5).
The column-space is interpreted as 5
consecutive blocks, each of dimension cell_dim,
which we name:
(i_part, f_part, c_part, o_part, c_{t-1}).
If 'have_dropout_mask' is nonzero, each row of
'in' will have 3 extra elements, interpreted
as dropout masks/scales for i_t, f_t and o_t.
@param [in] params A matrix, of dimension 3 by cell_dim,
with rows containing the 3 diagonal parameter matrices
used in LSTMs, namely
w_{ic}, w_{fc} and w_{oc}.
@param [out] out A matrix, of dimension num_rows by 2*cell_dim.
The quantities c_t and m_t respectively are put there
(in two blocks of column-dimension cell_dim),
according to the following equations:
i_t = Sigmoid(i_part + w_{ic}*c_{t-1})
f_t = Sigmoid(f_part + w_{fc}*c_{t-1})
c_t = f_t*c_{t-1} + i_t * Tanh(c_part)
o_t = Sigmoid(o_part + w_{oc}*c_t)
m_t = o_t * Tanh(c_t)
   We use a 1D thread block with CU1DBLOCK threads; this works best when
   cell_dim is a multiple of CU1DBLOCK. We use a 1D grid, with each block
   working on one row of the 'in' and 'out' matrices.
*/
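// Worked example (illustrative only), for a single cell with all diagonal
// parameters zero, i_part = f_part = o_part = 0, c_part = 1, c_{t-1} = 2 and
// no dropout mask:
//   i_t = f_t = Sigmoid(0) = 0.5
//   c_t = 0.5 * 2 + 0.5 * Tanh(1) ~= 1.381
//   o_t = Sigmoid(0) = 0.5
//   m_t = 0.5 * Tanh(1.381) ~= 0.441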
template<typename Real>
__global__
static void _lstm_nonlinearity(const Real* in, const int in_stride,
const Real* params, const int params_stride,
const int out_stride, const int cell_dim,
const int have_dropout_mask, const int num_rows,
Real* out) {
const int tid = threadIdx.x;
const int i = blockIdx.x;
const Real* i_part = in + i * in_stride;
const Real* f_part = in + i * in_stride + cell_dim;
const Real* c_part = in + i * in_stride + cell_dim * 2;
const Real* o_part = in + i * in_stride + cell_dim * 3;
const Real* c_tm1 = in + i * in_stride + cell_dim * 4;
const Real* w_ic = params;
const Real* w_fc = params + params_stride;
const Real* w_oc = params + params_stride * 2;
Real* c_t = out + i * out_stride;
Real* m_t = out + i * out_stride + cell_dim;
Real i_scale = (have_dropout_mask ? in[i * in_stride + cell_dim * 5] : 1),
f_scale = (have_dropout_mask ? in[i * in_stride + cell_dim * 5 + 1] : 1),
o_scale = (have_dropout_mask ? in[i * in_stride + cell_dim * 5 + 2] : 1);
for (int j = tid; j < cell_dim; j += CU1DBLOCK) {
Real c_tm1_j = c_tm1[j];
Real i_t_j = Real(1) / (Real(1) + exp(-i_part[j] - w_ic[j] * c_tm1_j));
Real f_t_j = Real(1) / (Real(1) + exp(-f_part[j] - w_fc[j] * c_tm1_j));
Real c_t_j = f_t_j * f_scale * c_tm1_j + i_t_j * i_scale * tanh(c_part[j]);
Real o_t_j = Real(1) / (Real(1) + exp(-o_part[j] - w_oc[j] * c_t_j));
c_t[j] = c_t_j;
m_t[j] = o_t_j * o_scale * tanh(c_t_j);
}
}
/**
This function does the 'backward' pass corresponding to the function
ComputeLstmNonlinearity. It's a little more complicated than you might
expect because of the 'self-repair' mechanism that we use to prevent the
sigmoid and tanh nonlinearities oversaturating, and because of the
average-activation and average-derivative stats that we store for these
nonlinearites (these stats are used both to control the self-repair
mechanism, and for diagnostic purposes).
Because the forward pass computes various intermediate values that are not
output, this function actually has to do the same computations as the
forward pass before it actually does the backprop.
In the following description, `C` is for `cell_dim`, `N` is for `num_rows`.
@param [in] input The same as in ComputeLstmNonlinearity().
A matrix, of dimension N by 5C (i.e. its num-cols must be
a multiple of 5). The column-space is interpreted as 5
consecutive blocks, each of dimension C, which we name:
(i_part, f_part, c_part, o_part, c_{t-1}).
If 'have_dropout_mask' is nonzero, each row of
'in' will have 3 extra elements, interpreted
as dropout masks/scales for i_t, f_t and o_t.
@param [in] params The same as in ComputeLstmNonlinearity().
A matrix, of dimension 3 by C, with rows containing the
three diagonal parameter matrices used in LSTMs, namely
w_{ic}, w_{fc} and w_{oc}.
@param [in] output_deriv
A matrix, of dimension N by 2C, containing the derivative
of the objective function we're backpropagating,
w.r.t. the quantities c_t and m_t (in two blocks of
column-dimension C).
@param [in] deriv_sum_in
This is used in the self-repair code to identify
oversaturated nonlinearities.
It is a matrix, of dimension 5 by C, corresponding to
the totals of the derivatives of the 5 sigmoid and tanh
                       nonlinearities, in the order they appear in the equations
in the documentation of ComputeLstmNonlinearity()
respectively,
they appear in the equations for (i_t, f_t, c_t, o_t, m_t).
This will be divided by 'count_in' to get the average
derivative value so far, for each of the nonlinearities.
@param [in] self_repair_config
A vector of dimension 10, containing the configuration of
the self-repair to be used for the 5 nonlinearities.
The first 5 elements are the self_repair_lower_threshold
values (typically 0.05 for sigmoid and 0.2 for tanh),
and the next 5 elements are the corresponding
self-repair-scales (typically 10^-5).
@param [in] count_in The data-count that corresponds to the stats in
'deriv_sum_in' at entry to the function.
This function should tolerate the count being zero
(in that case, it is free to do the self-repair or not,
as this should only happen on the 1st minibatch of each
training job).
@param [out] input_deriv
May be NULL; if not, this function writes, to this
location, the backpropagated derivative of the objective
function w.r.t. the 'input' matrix. This matrix should
have the same dimension as 'input' i.e. N by 5C. In
addition to the regular backpropagated derivative, the
output will include small values relating to 'self-repair'.
@param [out] params_deriv
May be NULL; if not, this is where this function *writes*
[not adds] the backpropagated derivative of the objective
function w.r.t. 'params'; it should have the same dimension
as 'params' (3 by C). (This matrix will then be processed
by the natural gradient code and added to the appropriate
copy of the parameter matrix, outside this function).
@param [out] value_sum_out
Must be NULL if params_deriv is NULL; if not, a matrix of
dimension 5 by C. This function *adds* to this location
the total value of each of the sigmoid/tanh nonlinearities
that it computes (this is for diagnostic purposes).
@param [out] deriv_sum_out
Must be NULL if params_deriv is NULL; if not, a matrix of
dimension 5 by C; this function *adds* to this location the
total of the derivative of each of the sigmoid/tanh
nonlinearities that it computes (this is for diagnostic
purposes and to control the self-repair). This function
should tolerate the case when 'deriv_sum_out' points to the
same data as 'deriv_sum_in'.
@param [out] self_repair_sum_out
Must be NULL if params_deriv is NULL; if not, a matrix of
dimension 5 by C; this function *writes* to this location
the sum of the number of times the self-repair code was
activated (integer values 0 <= k <= N). This will be
processed outside this function into self-repair stats for
diagnostics.
// Use 2D block (8x32 threads) as we need to compute column sum.
// Use 1D grid to cover the data matrix `cell_dim`.
*/
template<typename Real>
__global__
static void _diff_lstm_nonlinearity(const int cell_dim, const int have_dropout_mask,
const int num_rows,
const Real* input, const int input_stride,
const Real* params, const int params_stride,
const Real* output_deriv,
const int output_deriv_stride,
const double* deriv_sum_in,
const int deriv_sum_in_stride,
const Real* self_repair_config,
double count, Real* input_deriv,
const int input_deriv_stride,
Real* params_deriv,
const int params_deriv_stride,
double* value_sum_out,
const int value_sum_out_stride,
double* deriv_sum_out,
const int deriv_sum_out_stride,
Real* self_repair_sum_out,
const int self_repair_sum_out_stride) {
__shared__ Real smem[CU1DBLOCK];
const int j = blockIdx.x * blockDim.x + threadIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int grid_stride = gridDim.y * blockDim.y;
const int i0 = blockIdx.y * blockDim.y + threadIdx.y;
Real w_ic_deriv_sum = 0;
Real w_fc_deriv_sum = 0;
Real w_oc_deriv_sum = 0;
Real i_t_value_sum = 0, i_t_deriv_sum = 0;
Real f_t_value_sum = 0, f_t_deriv_sum = 0;
Real c_part_value_sum = 0, c_part_deriv_sum = 0;
Real o_t_value_sum = 0, o_t_deriv_sum = 0;
Real c_t_value_sum = 0, c_t_deriv_sum = 0;
bool update_sr[5];
if (j < cell_dim) {
const Real w_ic = params[j];
const Real w_fc = params[params_stride + j];
const Real w_oc = params[2 * params_stride + j];
const Real* sr_config = self_repair_config;
# pragma unroll
for (int i = 0; i < 5; i++) {
update_sr[i] =
deriv_sum_in[i * deriv_sum_in_stride + j] < sr_config[i] * count;
}
const Real i_t_self_repair = (update_sr[0] ? sr_config[5] : 0);
const Real f_t_self_repair = (update_sr[1] ? sr_config[6] : 0);
const Real c_part_self_repair = (update_sr[2] ? sr_config[7] : 0);
const Real o_t_self_repair = (update_sr[3] ? sr_config[8] : 0);
const Real c_t_self_repair = (update_sr[4] ? sr_config[9] : 0);
for (int i = i0; i < num_rows; i += grid_stride) {
const Real i_part = input[i * input_stride + j];
const Real f_part = input[i * input_stride + j + cell_dim];
const Real c_part = input[i * input_stride + j + 2 * cell_dim];
const Real o_part = input[i * input_stride + j + 3 * cell_dim];
const Real c_prev = input[i * input_stride + j + 4 * cell_dim];
const Real i_scale = (have_dropout_mask ?
input[i * input_stride + cell_dim * 5] : 1),
f_scale = (have_dropout_mask ?
input[i * input_stride + cell_dim * 5 + 1] :1),
o_scale = (have_dropout_mask ?
input[i * input_stride + cell_dim * 5 + 2] :1);
const Real i_t = Real(1) / (1 + exp(-i_part - w_ic * c_prev));
const Real f_t = Real(1) / (1 + exp(-f_part - w_fc * c_prev));
const Real tanh_c_part = tanh(c_part);
const Real c_t = f_t * f_scale * c_prev + i_t * i_scale * tanh_c_part;
const Real o_t = 1 / (1 + exp(-o_part - w_oc * c_t));
const Real tanh_c_t = tanh(c_t);
const Real i_t_deriv = i_t * (1 - i_t);
const Real f_t_deriv = f_t * (1 - f_t);
const Real c_part_deriv = 1 - tanh_c_part * tanh_c_part;
const Real o_t_deriv = o_t * (1 - o_t);
const Real c_t_deriv = 1 - tanh_c_t * tanh_c_t;
if (params_deriv) {
i_t_value_sum += i_t;
f_t_value_sum += f_t;
c_part_value_sum += tanh_c_part;
o_t_value_sum += o_t;
c_t_value_sum += tanh_c_t;
i_t_deriv_sum += i_t_deriv;
f_t_deriv_sum += f_t_deriv;
c_part_deriv_sum += c_part_deriv;
o_t_deriv_sum += o_t_deriv;
c_t_deriv_sum += c_t_deriv;
}
const Real dc_t_out = output_deriv[i * output_deriv_stride + j];
const Real dm_t = output_deriv[i * output_deriv_stride + j + cell_dim];
const Real dtanh_c_t = o_t * o_scale * dm_t;
const Real do_t = o_scale * tanh_c_t * dm_t;
const Real do_t_input = (o_t_deriv * do_t
- (2 * o_t - 1) * o_t_self_repair);
const Real dc_t = (c_t_deriv * dtanh_c_t + dc_t_out + do_t_input * w_oc)
- tanh_c_t * c_t_self_repair;
const Real dtanh_c_part = i_t * i_scale * dc_t;
const Real df_t = dc_t * f_scale * c_prev;
const Real df_t_input = (df_t * f_t_deriv
- (2 * f_t - 1) * f_t_self_repair);
const Real di_t = dc_t * i_scale * tanh_c_part;
const Real di_t_input = (di_t * i_t_deriv
- (2 * i_t - 1) * i_t_self_repair);
if (params_deriv) {
w_ic_deriv_sum += c_prev * di_t_input;
w_fc_deriv_sum += c_prev * df_t_input;
w_oc_deriv_sum += c_t * do_t_input;
}
const Real dc_prev = w_ic * di_t_input + w_fc * df_t_input + f_t * f_scale * dc_t;
const Real do_part = do_t_input;
const Real dc_part = (c_part_deriv * dtanh_c_part
- tanh_c_part * c_part_self_repair);
const Real df_part = df_t_input;
const Real di_part = di_t_input;
if (input_deriv) {
input_deriv[i * input_deriv_stride + j] = di_part;
input_deriv[i * input_deriv_stride + j + cell_dim] = df_part;
input_deriv[i * input_deriv_stride + j + cell_dim * 2] = dc_part;
input_deriv[i * input_deriv_stride + j + cell_dim * 3] = do_part;
input_deriv[i * input_deriv_stride + j + cell_dim * 4] = dc_prev;
}
}
}
if (params_deriv) {
// compute params_deriv
smem[tid] = w_ic_deriv_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
params_deriv[j] = smem[tid];
}
__syncthreads();
smem[tid] = w_fc_deriv_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
params_deriv[params_deriv_stride + j] = smem[tid];
}
__syncthreads();
smem[tid] = w_oc_deriv_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
params_deriv[2 * params_deriv_stride + j] = smem[tid];
}
// compute value_sum_out
__syncthreads();
smem[tid] = i_t_value_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
value_sum_out[j] += smem[tid];
}
__syncthreads();
smem[tid] = f_t_value_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
value_sum_out[value_sum_out_stride + j] += smem[tid];
}
__syncthreads();
smem[tid] = c_part_value_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
value_sum_out[2 * value_sum_out_stride + j] += smem[tid];
}
__syncthreads();
smem[tid] = o_t_value_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
value_sum_out[3 * value_sum_out_stride + j] += smem[tid];
}
__syncthreads();
smem[tid] = c_t_value_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
value_sum_out[4 * value_sum_out_stride + j] += smem[tid];
}
// need to update self_repair_sum_out before deriv_sum_out, because
// deriv_sum_out and deriv_sum_in might point to the same memory.
if (i0 < 5 && j < cell_dim) {
self_repair_sum_out[i0 * self_repair_sum_out_stride + j] =
update_sr[i0] ? num_rows : 0;
}
    // compute deriv_sum_out
__syncthreads();
smem[tid] = i_t_deriv_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
deriv_sum_out[j] += smem[tid];
}
__syncthreads();
smem[tid] = f_t_deriv_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
deriv_sum_out[deriv_sum_out_stride + j] += smem[tid];
}
__syncthreads();
smem[tid] = c_part_deriv_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
deriv_sum_out[2 * deriv_sum_out_stride + j] += smem[tid];
}
__syncthreads();
smem[tid] = o_t_deriv_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
deriv_sum_out[3 * deriv_sum_out_stride + j] += smem[tid];
}
__syncthreads();
smem[tid] = c_t_deriv_sum;
__syncthreads();
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
deriv_sum_out[4 * deriv_sum_out_stride + j] += smem[tid];
}
}
}
__global__
static void _cuda_compress_uint8_sign(const float *src, MatrixDim dim,
unsigned char *dest, int dest_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dest_index = i + j * dest_stride,
src_index = i + j * dim.stride;
if (i < dim.cols && j < dim.rows) {
float f = src[src_index];
dest[dest_index] = (f > 0.0 ? (unsigned char)1 : (unsigned char)0);
}
}
// The following inline templated functions are a workaround for the
// fact that (I believe) std::numeric_limits is not available in CUDA;
// they allow us to access the minimum and maximum values of certain
// integer types from templated code.
template <typename I> __device__ static inline int minimum_integer_value();
template <typename I> __device__ static inline int maximum_integer_value();
template<> __device__ int maximum_integer_value<int8_t>() { return 127; }
template<> __device__ int minimum_integer_value<int8_t>() { return -128; }
template<> __device__ int maximum_integer_value<uint8_t>() { return 255; }
template<> __device__ int minimum_integer_value<uint8_t>() { return 0; }
template<> __device__ int maximum_integer_value<int16_t>() { return 32767; }
template<> __device__ int minimum_integer_value<int16_t>() { return -32768; }
template<> __device__ int maximum_integer_value<uint16_t>() { return 65535; }
template<> __device__ int minimum_integer_value<uint16_t>() { return 0; }
template <typename I>
__global__
static void _cuda_compress_bounds_check(const float *src, MatrixDim dim,
I *dest, int dest_stride, float inv_scale) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dest_index = i + j * dest_stride,
src_index = i + j * dim.stride;
const int min_value = minimum_integer_value<I>(),
max_value = maximum_integer_value<I>();
int compressed_value;
int ok = (i < dim.cols && j < dim.rows);
if (ok) {
float f = src[src_index];
// note: I'm not sure what __float2int_rn does if input is outside of
// integer range, but it doesn't matter much as in the situations where this
// type of compression would make sense, the input should be well inside the
// range of 'int', and if it fails, we've probably already catastrophically
// diverged.
int i = __float2int_rn(f * inv_scale);
if (i < min_value) compressed_value = min_value;
else if (i > max_value) compressed_value = max_value;
else compressed_value = i;
}
__syncthreads();
if (ok) {
dest[dest_index] = compressed_value;
}
}
template <typename I>
__global__
static void _cuda_compress_no_bounds_check(const float *src, MatrixDim dim,
I *dest, int dest_stride,
float inv_scale) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dest_index = i + j * dest_stride,
src_index = i + j * dim.stride;
if (i < dim.cols && j < dim.rows) {
float f = src[src_index];
int i = __float2int_rn(f * inv_scale);
I s = i;
dest[dest_index] = s;
}
}
template <typename I>
__global__
static void _cuda_uncompress(float *dest, MatrixDim dim,
const I *src, int src_stride,
float scale) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int src_index = i + j * src_stride,
dest_index = i + j * dim.stride;
if (i < dim.cols && j < dim.rows) {
I s = src[src_index];
dest[dest_index] = float(s * scale);
}
}
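// Illustrative roundtrip (assuming inv_scale == 1 / scale): compressing with
// inv_scale = 100 maps f = 0.1234 to __float2int_rn(12.34) = 12, and
// uncompressing with scale = 0.01 gives back 0.12, i.e. the value is quantized
// to the nearest multiple of 'scale' (subject to the range of the integer
// type I).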
/***********************************************************************
* ANSI-C wrappers of CUDA kernels
*/
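/*
 * Illustrative usage sketch (an assumption, not code called from this file):
 * host code typically rounds the matrix dimensions up to whole thread blocks
 * before invoking one of these wrappers, e.g. with a 16x16 block over a
 * matrix of dimension d:
 *
 *   dim3 Bl(16, 16);
 *   dim3 Gr((d.cols + Bl.x - 1) / Bl.x, (d.rows + Bl.y - 1) / Bl.y);
 *   cuda_int32_set_const(Gr, Bl, mat, 0, d);
 *
 * The 16x16 block shape here is only an example; the real callers choose
 * their own grid/block configuration.
 */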
/*
* "int32"
*/
void cuda_int32_set_const(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value,
MatrixDim d) {
hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cuda_int32_add(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value,
MatrixDim d) {
hipLaunchKernelGGL(( _add), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cuda_int32_sequence(dim3 Gr, dim3 Bl, int32_cuda* data, int length,
int32_cuda base) {
hipLaunchKernelGGL(( _sequence), dim3(Gr), dim3(Bl), 0, 0, data, length, base);
}
/*
* "float"
*/
/*
* CuMatrix
*/
void cudaF_copy_upp_low(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) {
hipLaunchKernelGGL(( _copy_upp_low), dim3(Gr),dim3(Bl), 0, 0, A,dimA);}
void cudaF_copy_low_upp(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) {
hipLaunchKernelGGL(( _copy_low_upp), dim3(Gr),dim3(Bl), 0, 0, A,dimA);}
void cudaF_add_diag_vec_mat(dim3 Gr, dim3 Bl, float alpha, float *mat,
MatrixDim mat_dim, const float *vec,
const float *mat2, int mat2_row_stride,
int mat2_col_stride, float beta) {
hipLaunchKernelGGL(( _add_diag_vec_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, vec, mat2, mat2_row_stride,
mat2_col_stride, beta);
}
void cudaF_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const float* B,
MatrixDim dmat) {
hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat);
}
void cudaFD_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const double* B,
MatrixDim dmat) {
hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat);
}
void cudaF_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const float* B,
MatrixDim dmat) {
hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat);
}
void cudaFD_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const double* B,
MatrixDim dmat) {
hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat);
}
void cudaF_apply_exp(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
hipLaunchKernelGGL(( _apply_exp), dim3(Gr),dim3(Bl), 0, 0, mat,d);
}
void cudaF_apply_exp_limited(dim3 Gr, dim3 Bl, float* mat, MatrixDim d,
float lower_limit, float upper_limit) {
hipLaunchKernelGGL(( _apply_exp_limited), dim3(Gr),dim3(Bl), 0, 0, mat, d, lower_limit, upper_limit);
}
void cudaF_apply_pow(dim3 Gr, dim3 Bl, float* mat, float power, MatrixDim d) {
hipLaunchKernelGGL(( _apply_pow), dim3(Gr),dim3(Bl), 0, 0, mat, power, d);
}
void cudaF_apply_pow_abs(dim3 Gr, dim3 Bl, float* mat, float power,
bool include_sign, MatrixDim d) {
hipLaunchKernelGGL(( _apply_pow_abs), dim3(Gr),dim3(Bl), 0, 0, mat, power, include_sign, d);
}
void cudaF_apply_heaviside(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
hipLaunchKernelGGL(( _apply_heaviside), dim3(Gr),dim3(Bl), 0, 0, mat, d);
}
void cudaF_copy_cols(dim3 Gr, dim3 Bl, float* dst, const float* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
hipLaunchKernelGGL(( _copy_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride);
}
void cudaF_add_cols(dim3 Gr, dim3 Bl, float* dst, const float* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
hipLaunchKernelGGL(( _add_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride);
}
void cudaF_copy_rows(dim3 Gr, dim3 Bl, float* dst, const float* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride);
}
void cudaF_copy_rows_direct(dim3 Gr, dim3 Bl, float* dst,
const float* const * src, MatrixDim dst_dim) {
hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, dst_dim);
}
void cudaF_copy_to_rows_direct(dim3 Gr, dim3 Bl, float* const * dst,
const float* src, MatrixDim src_dim) {
hipLaunchKernelGGL(( _copy_to_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, src_dim);
}
void cudaF_add_rows(dim3 Gr, dim3 Bl, float alpha, float* dst, const float* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
hipLaunchKernelGGL(( _add_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, reorder, dst_dim, src_stride);
}
void cudaF_mul_rows(dim3 Gr, dim3 Bl, float* dst, const float* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
hipLaunchKernelGGL(( _mul_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride);
}
void cudaF_add_rows_direct(dim3 Gr, dim3 Bl, float alpha, float* dst,
const float* const * src, MatrixDim dst_dim) {
hipLaunchKernelGGL(( _add_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, dst_dim);
}
void cudaF_add_to_rows(dim3 Gr, dim3 Bl, float alpha,
float* dst, const float* src, const MatrixIndexT_cuda* reorder,
MatrixDim src_dim, int dst_stride) {
hipLaunchKernelGGL(( _add_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, reorder, src_dim, dst_stride);
}
void cudaF_add_to_rows_direct(dim3 Gr, dim3 Bl, float alpha, float* const * dst,
const float* src, MatrixDim src_dim) {
hipLaunchKernelGGL(( _add_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, src_dim);
}
void cudaF_apply_floor(dim3 Gr, dim3 Bl, float* mat, float floor_val,
MatrixDim d) {
hipLaunchKernelGGL(( _apply_floor), dim3(Gr),dim3(Bl), 0, 0, mat, floor_val, d);
}
void cudaF_apply_ceiling(dim3 Gr, dim3 Bl, float* mat, float ceiling_val,
MatrixDim d) {
hipLaunchKernelGGL(( _apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, mat, ceiling_val, d);
}
void cudaF_set_diag(int Gr, int Bl, float* mat, float value, MatrixDim d) {
hipLaunchKernelGGL(( _set_diag), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaF_set_diag_packed(int Gr, int Bl, float* mat, float value, int dim) {
hipLaunchKernelGGL(( _set_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim);
}
void cudaF_add_diag_packed(int Gr, int Bl, float* mat, float value, int dim) {
hipLaunchKernelGGL(( _add_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim);
}
void cudaF_set_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaF_set_zero_above_diag(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
hipLaunchKernelGGL(( _set_zero_above_diag), dim3(Gr),dim3(Bl), 0, 0, mat, d);
}
void cudaF_add(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
hipLaunchKernelGGL(( _add), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaF_scale_diag_packed(int Gr, int Bl, float* mat, float value, int dim) {
hipLaunchKernelGGL(( _scale_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim);
}
void cudaF_scale(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
hipLaunchKernelGGL(( _scale), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaF_apply_log(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
hipLaunchKernelGGL(( _apply_log), dim3(Gr),dim3(Bl), 0, 0, mat,d);
}
void cudaF_mul_elements(dim3 Gr, dim3 Bl, float* mat, const float* A,
MatrixDim dst_d, int src_stride) {
hipLaunchKernelGGL(( _mul_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride);
}
void cudaF_div_elements(dim3 Gr, dim3 Bl, float* mat, const float* A,
MatrixDim dst_d, int src_stride) {
hipLaunchKernelGGL(( _div_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride);
}
void cudaF_max(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d,
int src_stride) {
hipLaunchKernelGGL(( _max), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride);
}
void cudaF_min(dim3 Gr, dim3 Bl, float* mat, const float* other,
MatrixDim mat_d, int other_stride) {
hipLaunchKernelGGL(( _min), dim3(Gr),dim3(Bl), 0, 0, mat,other,mat_d,other_stride);
}
void cudaF_mul_cols_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale,
MatrixDim d) {
hipLaunchKernelGGL(( _mul_cols_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d);
}
void cudaF_mul_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale,
MatrixDim d) {
hipLaunchKernelGGL(( _mul_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d);
}
void cudaF_mul_rows_group_mat(dim3 Gr, dim3 Bl, float *y, const float *x,
MatrixDim d, int src_stride, int group_size) {
hipLaunchKernelGGL(( _mul_rows_group_mat), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size);
}
void cudaF_diff_group_pnorm(dim3 Gr, dim3 Bl, float *id, const float *iv,
const float *ov, const float* od, MatrixDim id_dim,
int iv_stride, int ov_stride, int od_stride,
int group_size, float power) {
hipLaunchKernelGGL(( _diff_group_pnorm), dim3(Gr), dim3(Bl), 0, 0, id, iv, ov, od, id_dim, iv_stride, ov_stride,
od_stride, group_size, power);
}
void cudaF_calc_group_max_deriv(dim3 Gr, dim3 Bl, float *y, const float *x1,
const float *x2, MatrixDim y_dim, int x1_stride,
int x2_stride, int group_size) {
hipLaunchKernelGGL(( _calc_group_max_deriv), dim3(Gr),dim3(Bl), 0, 0, y, x1, x2, y_dim, x1_stride, x2_stride,
group_size);
}
void cudaF_div_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* vec_div,
MatrixDim d) {
hipLaunchKernelGGL(( _div_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat, vec_div, d);
}
void cudaF_add_mat(dim3 Gr, dim3 Bl, float alpha, const float* src, float* dst,
MatrixDim d, int src_stride, int A_trans) {
if (A_trans) {
hipLaunchKernelGGL(( _add_mat_trans), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride);
} else {
hipLaunchKernelGGL(( _add_mat), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride);
}
}
void cudaF_add_mat_blocks(dim3 Gr, dim3 Bl, float alpha, const float* src,
int32_cuda num_row_blocks, int32_cuda num_col_blocks,
float* dst, MatrixDim d, int src_stride,
int A_trans) {
if (A_trans) {
hipLaunchKernelGGL(( _add_mat_blocks_trans), dim3(Gr),dim3(Bl), 0, 0, alpha, src, num_row_blocks, num_col_blocks,
dst, d, src_stride);
} else {
hipLaunchKernelGGL(( _add_mat_blocks), dim3(Gr),dim3(Bl), 0, 0, alpha, src, num_row_blocks, num_col_blocks, dst,
d, src_stride);
}
}
void cudaF_add_mat_repeated(dim3 Gr, dim3 Bl, float alpha, const float* src,
MatrixDim src_dim, float *dst, MatrixDim dst_dim) {
hipLaunchKernelGGL(( _add_mat_repeated), dim3(Gr),dim3(Bl), 0, 0, alpha, src, src_dim, dst, dst_dim);
}
void cudaF_set_mat_mat_div_mat(dim3 Gr, dim3 Bl, const float *A, const float *B,
const float *C, float *dst, MatrixDim d,
int stride_a, int stride_b, int stride_c) {
hipLaunchKernelGGL(( _set_mat_mat_div_mat), dim3(Gr),dim3(Bl), 0, 0, A,B,C,dst,d, stride_a, stride_b, stride_c);
}
void cudaF_sy_add_tr2(dim3 Gr, dim3 Bl, float alpha, float beta, const float* T,
MatrixDim tdim, float *S, MatrixDim sdim) {
hipLaunchKernelGGL(( _sy_add_tr2), dim3(Gr),dim3(Bl), 0, 0, alpha, beta, T, tdim, S, sdim);
}
void cudaF_add_vec_to_cols(dim3 Gr, dim3 Bl, float alpha, const float* col,
float beta, float* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_vec_to_cols), dim3(Gr),dim3(Bl), 0, 0, alpha,col,beta,dst,d);
}
void cudaF_add_vec_to_rows(dim3 Gr, dim3 Bl, float alpha, const float* row,
float beta, float* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_vec_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,row,beta,dst,d);
}
void cudaF_add_mat_diag_vec(dim3 Gr, dim3 Bl, float alpha, float *mat,
MatrixDim mat_dim, const float *mat2,
int mat2_row_stride, int mat2_col_stride,
const float *vec, float beta) {
hipLaunchKernelGGL(( _add_mat_diag_vec), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, mat2, mat2_row_stride,
mat2_col_stride, vec, beta);
}
void cudaF_add_mat_mat_elements(dim3 Gr, dim3 Bl, float *data,
const float *srcA_data, const float *srcB_data,
MatrixDim dim, int srcA_stride, int srcB_stride,
float alpha, float beta) {
hipLaunchKernelGGL(( _add_mat_mat_elements), dim3(Gr), dim3(Bl), 0, 0, data, srcA_data, srcB_data, dim,
srcA_stride, srcB_stride, alpha, beta);
}
// CURRENTLY UNUSED...
void cudaF_apply_mask(dim3 Gr, dim3 Bl, float* mat, const char* mask,
MatrixDim dmat, MatrixDim dmask) {
hipLaunchKernelGGL(( _apply_mask), dim3(Gr),dim3(Bl), 0, 0, mat,mask,dmat,dmask);
}
/*
* CuVector
*/
void cudaF_max_mat_cols(int Gr, int Bl, float* result, const float* mat,
const MatrixDim d) {
hipLaunchKernelGGL(( _transform_reduce_mat_cols), dim3(Gr),dim3(Bl), 0, 0, result,mat,d,
TransReduceOp<MAX,float>());
}
void cudaF_min_mat_cols(int Gr, int Bl, float* result, const float* mat,
const MatrixDim d) {
hipLaunchKernelGGL(( _transform_reduce_mat_cols), dim3(Gr),dim3(Bl), 0, 0, result,mat,d,
TransReduceOp<MIN,float>());
}
void cudaF_sum_mat_cols(int Gr, int Bl, float* result, const float* mat,
const MatrixDim d) {
hipLaunchKernelGGL(( _transform_reduce_mat_cols), dim3(Gr),dim3(Bl), 0, 0, result,mat,d,
TransReduceOp<SUM,float>());
}
void cudaF_add_col_sum_mat(int Gr, int Bl, float* result, const float* mat,
const MatrixDim d, const float alpha,
const float beta) {
hipLaunchKernelGGL(( _transform_reduce_mat_cols), dim3(Gr), dim3(Bl), 0, 0, result, mat, d,
TransReduceOp<SUMAB, float>(alpha, beta));
}
void cudaF_replace_value(int Gr, int Bl, float *v, int dim, float orig,
float changed) {
hipLaunchKernelGGL(( _replace_value), dim3(Gr),dim3(Bl), 0, 0, v, dim, orig, changed);
}
void cudaF_set_bias_params(int Gr, int Bl, float* v, const float* a,
float param_1, float param_2, float param_3,
int* flag, int dim) {
hipLaunchKernelGGL(( _set_bias_params), dim3(Gr),dim3(Bl), 0, 0, v,a,param_1,param_2,param_3,flag,dim);
}
void cublas_copy_kaldi_fd(int Gr, int Bl, int n, const float* x, int incx,
double* y, int incy) {
hipLaunchKernelGGL(( _cublas_copy_kaldi), dim3(Gr),dim3(Bl), 0, 0, n, x, incx, y, incy);
}
void cublas_copy_kaldi_df(int Gr, int Bl, int n, const double* x, int incx,
float* y, int incy) {
hipLaunchKernelGGL(( _cublas_copy_kaldi), dim3(Gr),dim3(Bl), 0, 0, n, x, incx, y, incy);
}
void cudaF_vec_mul_elements(int Gr, int Bl, float* v, const float* a, int dim) {
hipLaunchKernelGGL(( _vec_mul_elements), dim3(Gr),dim3(Bl), 0, 0, v, a, dim);
}
void cudaF_vec_min(int Gr, int Bl, const float* v, float* value, int dim,
int inc) {
hipLaunchKernelGGL(( _vec_transform_reduce), dim3(Gr),dim3(Bl), 0, 0, v, value, dim, inc,
TransReduceOp<MIN, float>());
}
void cudaF_vec_max(int Gr, int Bl, const float* v, float* value, int dim,
int inc) {
hipLaunchKernelGGL(( _vec_transform_reduce), dim3(Gr),dim3(Bl), 0, 0, v, value, dim, inc,
TransReduceOp<MAX, float>());
}
void cudaF_trace_mat_mat_trans(dim3 Gr, dim3 Bl, const float* A, const float* B,
MatrixDim dA, int B_stride, float* value) {
hipLaunchKernelGGL(( _trace_mat_mat_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dA,B_stride,value);
}
void cudaF_trace_mat_mat(dim3 Gr, dim3 Bl, const float* A, const float* B,
MatrixDim dA, int B_stride, float* value) {
hipLaunchKernelGGL(( _trace_mat_mat<32>) , dim3(Gr),dim3(Bl), 0, 0, A,B,dA,B_stride,value);
}
void cudaF_add_diag_mat_mat_MNT(int Gr, int Bl, const float alpha,
const float* M, const MatrixDim dim_M,
const float* N, const int stride_N,
const float beta, float* v) {
hipLaunchKernelGGL(( _add_diag_mat_mat_MNT), dim3(Gr),dim3(Bl), 0, 0, alpha,M,dim_M,N,stride_N,beta,v);
}
void cudaF_add_diag_mat_mat_MTN(dim3 Gr, dim3 Bl, const float alpha,
const float* M, const int stride_M,
const float* N, const MatrixDim dim_N,
const float beta, float* v,
const int stride_v) {
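  // Note: only block widths Bl.x == 16 and Bl.x == 32 are handled here (and in
  // cudaF_add_diag_mat_mat_MN below); any other block width launches nothing.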
if (Bl.x == 16) {
hipLaunchKernelGGL(( _add_diag_mat_mat_MTN<16>) , dim3(Gr), dim3(Bl), 0, 0, alpha, M, stride_M, N, dim_N, beta,
v, stride_v);
} else if (Bl.x == 32) {
hipLaunchKernelGGL(( _add_diag_mat_mat_MTN<32>) , dim3(Gr), dim3(Bl), 0, 0, alpha, M, stride_M, N, dim_N, beta,
v, stride_v);
}
}
void cudaF_add_diag_mat_mat_MN(dim3 Gr, dim3 Bl, const float alpha,
const float* M, const int stride_M,
const float* N, const MatrixDim dim_N,
const float beta, float* v) {
if (Bl.x == 16) {
hipLaunchKernelGGL(( _add_diag_mat_mat_MN<16>) , dim3(Gr),dim3(Bl), 0, 0, alpha,M,stride_M,N,dim_N,beta,v);
} else if (Bl.x==32) {
hipLaunchKernelGGL(( _add_diag_mat_mat_MN<32>), dim3(Gr),dim3(Bl), 0, 0, alpha,M,stride_M,N,dim_N,beta,v);
}
}
void cudaF_add_vec_vec(int Gr, int Bl, float alpha, float* v, const float* x,
const float* y, float beta, int dim) {
hipLaunchKernelGGL(( _add_vec_vec), dim3(Gr),dim3(Bl), 0, 0, alpha,v,x,y,beta,dim);
}
void cudaF_vec_sum(int Gr, int Bl, float* v, float* value, int dim, int inc) {
hipLaunchKernelGGL(( _vec_transform_reduce), dim3(Gr),dim3(Bl), 0, 0, v, value, dim, inc,
TransReduceOp<SUM, float>());
}
void cudaF_matrix_add_elements(dim3 Gr, dim3 Bl, float *data, MatrixDim dim,
float alpha, MatrixElement<float>* x,
int num_elements) {
hipLaunchKernelGGL(( _cuda_matrix_add_elements), dim3(Gr), dim3(Bl), 0, 0, data, dim, alpha, x, num_elements);
}
void cudaF_matrix_add_indexed_values(dim3 Gr, dim3 Bl, MatrixDim dim,
float alpha, const Int32Pair* indices,
const float* x, int s, float* data) {
hipLaunchKernelGGL(( _cuda_matrix_add_indexed_values), dim3(Gr), dim3(Bl), 0, 0, dim, alpha, indices, x, s, data);
}
void cudaF_matrix_add_to_elements(dim3 Gr, dim3 Bl, float alpha,
float* mat, MatrixDim dim,
const MatrixIndexT_cuda* elements) {
hipLaunchKernelGGL(( _cuda_matrix_add_to_elements), dim3(Gr), dim3(Bl), 0, 0, alpha, mat, dim, elements);
}
void cudaF_vector_copy_elements(dim3 Gr, dim3 Bl, float *data, int dim,
const float *src_mat, int mat_stride,
bool transpose,
const MatrixIndexT_cuda* elements) {
hipLaunchKernelGGL(( _cuda_vector_copy_elements), dim3(Gr), dim3(Bl), 0, 0, data, dim, src_mat, mat_stride,
transpose, elements);
}
void cudaF_comp_obj_deriv(dim3 Gr, dim3 Bl, MatrixElement<float>* x, int s,
const float* z, MatrixDim d, float* z2, MatrixDim d2,
float* t) {
hipLaunchKernelGGL(( _cuda_comp_obj_deriv), dim3(Gr),dim3(Bl), 0, 0, x,s,z,d,z2,d2,t);
}
void cudaD_comp_obj_deriv(dim3 Gr, dim3 Bl, MatrixElement<double>* x, int s,
const double* z, MatrixDim d, double* z2,
MatrixDim d2, double* t) {
hipLaunchKernelGGL(( _cuda_comp_obj_deriv), dim3(Gr),dim3(Bl), 0, 0, x,s,z,d,z2,d2,t);
}
void cudaF_vec_copy_diag_from_packed(int Gr, int Bl, float *dst,
const float *src, int dim) {
hipLaunchKernelGGL(( _vec_copy_diag_from_packed), dim3(Gr),dim3(Bl), 0, 0, dst,src,dim);
}
void cudaF_vec_apply_floor(int Gr, int Bl, float* v, float floor_val,
float *count, int dim) {
hipLaunchKernelGGL(( _vec_apply_floor), dim3(Gr),dim3(Bl), 0, 0, v,floor_val,count,dim);
}
void cudaF_vec_apply_ceiling(int Gr, int Bl, float* v, float ceiling_val,
float *count, int dim) {
hipLaunchKernelGGL(( _vec_apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, v, ceiling_val,count,dim);
}
void cudaF_vec_apply_exp(int Gr, int Bl, float* v, int dim) {
hipLaunchKernelGGL(( _vec_apply_exp), dim3(Gr),dim3(Bl), 0, 0, v,dim);
}
void cudaF_vec_apply_log(int Gr, int Bl, float* v, float* flag, int dim) {
hipLaunchKernelGGL(( _vec_apply_log), dim3(Gr),dim3(Bl), 0, 0, v,flag,dim);
}
void cudaF_invert_elements(dim3 Gr, dim3 Bl, float* data, MatrixDim d) {
hipLaunchKernelGGL(( _invert_elements), dim3(Gr),dim3(Bl), 0, 0, data, d);
}
void cudaF_add_mat_blockmat(dim3 Gr, dim3 Bl, float *data, MatrixDim d,
const float *Adata, int A_num_rows, int A_num_cols,
int A_row_stride, int A_col_stride,
const CuBlockMatrixData *B_cu_data,
int B_num_blocks, float alpha, float beta,
int B_trans) {
if (B_trans) {
hipLaunchKernelGGL(( _add_mat_blockmat_trans), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols,
A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta);
} else {
hipLaunchKernelGGL(( _add_mat_blockmat), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols,
A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta);
}
}
void cudaF_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data,
int num_blocks, const float *C_data,
int C_num_cols, int C_row_stride, int C_col_stride,
const float *D_data, int D_row_stride,
int D_col_stride, float alpha, float beta) {
hipLaunchKernelGGL(( _block_add_mat_mat), dim3(Gr),dim3(Bl), 0, 0, B_cu_data, num_blocks, C_data, C_num_cols,
C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride, alpha,
beta);
}
/*
* cu::
*/
void cudaF_soft_hinge(dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d,
int src_stride) {
hipLaunchKernelGGL(( _soft_hinge), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaF_group_pnorm(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d,
int src_stride, int group_size, float power) {
hipLaunchKernelGGL(( _group_pnorm), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size, power);
}
void cudaF_group_spec_pnorm(dim3 Gr, dim3 Bl, float* y, const float* x,
MatrixDim d, int src_stride, int group_size,
float power) {
if (power == float(0)) {
hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size,
TransReduceOp<L0NORM, float>());
} else if (power == float(1)) {
hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size,
TransReduceOp<L1NORM, float>());
} else if (power == float(2)) {
hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size,
TransReduceOp<L2NORM, float>());
} else if (power == std::numeric_limits<float>::infinity()) {
hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size,
TransReduceOp<LINFNORM, float>());
} else {
hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size,
TransReduceOp<LPNORM, float>(power));
}
}
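// Note on the dispatch above (mirrored in the double-precision wrapper later
// in this file): powers 0, 1, 2 and +infinity are special-cased so the
// reduction can use the dedicated L0/L1/L2/L-inf operators rather than the
// general TransReduceOp<LPNORM> path that handles an arbitrary power.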
void cudaF_group_max(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d,
int src_stride, int group_size) {
hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size,
TransReduceOp<MAX, float>());
}
void cudaF_sigmoid(dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d,
int src_stride) {
hipLaunchKernelGGL(( _sigmoid), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaF_diff_sigmoid(dim3 Gr, dim3 Bl, float* eout, const float* e,
const float* y, MatrixDim d, int e_stride,
int y_stride) {
hipLaunchKernelGGL(( _diff_sigmoid), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride);
}
void cudaF_tanh(dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d,
int src_stride) {
hipLaunchKernelGGL(( _tanh), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaF_diff_tanh(dim3 Gr, dim3 Bl, float* eout, const float* e,
const float* y, MatrixDim d, int e_stride, int y_stride) {
hipLaunchKernelGGL(( _diff_tanh), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride);
}
void cudaF_ensure_nonzero(dim3 Gr, dim3 Bl, const float *x, MatrixDim d,
float epsilon, int y_stride, float *y) {
hipLaunchKernelGGL(( _ensure_nonzero), dim3(Gr),dim3(Bl), 0, 0, x, d, epsilon, y_stride, y);
}
void cudaF_parametric_relu(dim3 Gr, dim3 Bl, float* y, const float* x,
MatrixDim d, int src_stride,
const float* a, const float* b) {
hipLaunchKernelGGL(( _parametric_relu), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, a, b);
}
void cudaF_diff_parametric_relu(dim3 Gr, dim3 Bl, float* eout, const float* e,
const float* y, MatrixDim d, int e_stride,
int y_stride, const float* a, const float* b) {
hipLaunchKernelGGL(( _diff_parametric_relu), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride, a, b);
}
void cudaF_heaviside(dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d,
int src_stride) {
hipLaunchKernelGGL(( _heaviside), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaF_softmax_reduce(size_t Gr, size_t Bl, float* y, const float* x,
MatrixDim d, int src_stride) {
hipLaunchKernelGGL(( _softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaF_log_softmax_reduce(size_t Gr, size_t Bl, float* y, const float* x,
MatrixDim y_dim, int x_stride) {
hipLaunchKernelGGL(( _log_softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, y_dim, x_stride);
}
void cudaF_splice(dim3 Gr, dim3 Bl, float* y, const float* x,
const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _splice), dim3(Gr),dim3(Bl), 0, 0, y,x,off,d_out,d_in);
}
void cudaF_normalize_per_row(size_t Gr, size_t Bl, float *y, int y_stride,
const float *x, MatrixDim x_d, float target_rms,
bool add_log_stddev) {
hipLaunchKernelGGL(( _normalize_per_row), dim3(Gr), dim3(Bl), 0, 0, y, y_stride, x, x_d, target_rms, add_log_stddev);
}
void cudaF_one(int Gr, int Bl, float* x, int dim) {
hipLaunchKernelGGL(( _one), dim3(Gr),dim3(Bl), 0, 0, x,dim);
}
void cudaF_take_mean(dim3 Gr, dim3 Bl, const float* x, float* y,
MatrixDim d_in) {
hipLaunchKernelGGL(( _take_mean), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in);
}
void cudaF_take_lower(dim3 Gr, dim3 Bl, const float* x, float* y,
MatrixDim d_in) {
hipLaunchKernelGGL(( _take_lower), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in);
}
void cudaF_take_upper(dim3 Gr, dim3 Bl, const float* x, float* y,
MatrixDim d_in) {
hipLaunchKernelGGL(( _take_upper), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in);
}
void cudaF_copy_from_sp(dim3 Gr, dim3 Bl, const float* x, float* y,
MatrixDim dim) {
hipLaunchKernelGGL(( _copy_from_sp), dim3(Gr),dim3(Bl), 0, 0, x, y, dim);
}
void cudaF_copy(dim3 Gr, dim3 Bl, float* y, const float* x,
const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _copy), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaF_randomize(dim3 Gr, dim3 Bl, float* y, const float* x,
const int32_cuda* copy_from, MatrixDim d_out,
MatrixDim d_in) {
hipLaunchKernelGGL(( _randomize), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float* wei, float* grad, float l1,
float lr, MatrixDim d, int stride_grad) {
hipLaunchKernelGGL(( _regularize_l1), dim3(Gr),dim3(Bl), 0, 0, wei,grad,l1,lr,d,stride_grad);
}
void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float* mat, float* vec_val,
int32_cuda* vec_id, MatrixDim d) {
hipLaunchKernelGGL(( _find_row_max_id), dim3(Gr),dim3(Bl), 0, 0, mat, vec_val, vec_id, d);
}
void cudaF_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt,
float* mat_net_out, float* vec_log_post, MatrixDim d) {
hipLaunchKernelGGL(( _diff_xent), dim3(Gr),dim3(Bl), 0, 0, vec_tgt,mat_net_out,vec_log_post,d);
}
void cudaF_diff_softmax(dim3 Gr, dim3 Bl, float* x, const MatrixDim dim,
const float* value, const int value_stride,
const float* diff, const int diff_stride) {
hipLaunchKernelGGL(( _diff_softmax), dim3(Gr), dim3(Bl), 0, 0, x, dim, value, value_stride, diff, diff_stride);
}
void cudaF_copy_rows_from_vec(dim3 Gr, dim3 Bl, float *mat_out, MatrixDim d_out,
const float *v_in) {
hipLaunchKernelGGL(( _copy_rows_from_vec), dim3(Gr),dim3(Bl), 0, 0, mat_out, d_out, v_in);
}
void cudaF_diff_log_softmax(dim3 Gr, dim3 Bl, const MatrixDim in_deriv_dim,
const float* out_value, const int out_value_stride,
const float* out_deriv, const int out_deriv_stride,
float* in_deriv) {
hipLaunchKernelGGL(( _diff_log_softmax), dim3(Gr), dim3(Bl), 0, 0, in_deriv_dim, out_value, out_value_stride,
out_deriv, out_deriv_stride, in_deriv);
}
void cudaF_copy_col_from_mat_df(int Gr, int Bl, double* v, int col,
const float* mat, MatrixDim dmat, int dim) {
hipLaunchKernelGGL(( _copy_col_from_mat_df), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim);
}
void cudaF_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col,
const float* mat, MatrixDim dmat, int dim) {
hipLaunchKernelGGL(( _copy_col_from_mat_fd), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim);
}
void cudaF_sum_column_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim,
const float *src_data, MatrixDim src_dim,
const Int32Pair *indices) {
hipLaunchKernelGGL(( _sum_column_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indices);
}
void cudaF_add_row_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim,
const float *src_data, MatrixDim src_dim,
const Int32Pair *indexes) {
hipLaunchKernelGGL(( _add_row_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indexes);
}
void cudaF_matrix_lookup(dim3 Gr, dim3 Bl, const float *data, MatrixDim dim,
const Int32Pair *indices, int indices_size,
float *output) {
hipLaunchKernelGGL(( _matrix_lookup), dim3(Gr),dim3(Bl), 0, 0, data, dim, indices, indices_size, output);
}
void cudaF_equal_element_mask(dim3 Gr, dim3 Bl, const float *mat1,
const float *mat2, float *mask,
MatrixDim mat1_dim, int mat2_stride,
int mask_stride) {
hipLaunchKernelGGL(( _equal_element_mask), dim3(Gr),dim3(Bl), 0, 0, mat1, mat2, mask, mat1_dim, mat2_stride,
mask_stride);
}
/*
* "double"
*/
/*
* CuMatrix
*/
void cudaD_copy_upp_low(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) {
hipLaunchKernelGGL(( _copy_upp_low), dim3(Gr),dim3(Bl), 0, 0, A,dimA);}
void cudaD_copy_low_upp(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) {
hipLaunchKernelGGL(( _copy_low_upp), dim3(Gr),dim3(Bl), 0, 0, A,dimA);}
void cudaD_add_diag_vec_mat(dim3 Gr, dim3 Bl, double alpha, double *mat,
MatrixDim mat_dim, const double *vec,
const double *mat2, int mat2_row_stride,
int mat2_col_stride, double beta) {
hipLaunchKernelGGL(( _add_diag_vec_mat), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, vec, mat2, mat2_row_stride,
mat2_col_stride, beta);
}
void cudaD_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const double* B,
MatrixDim dmat) {
hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat);
}
void cudaDF_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const float* B,
MatrixDim dmat) {
hipLaunchKernelGGL(( _copy_from_tp_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat);
}
void cudaD_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const double* B,
MatrixDim dmat) {
hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat);
}
void cudaDF_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const float* B,
MatrixDim dmat) {
hipLaunchKernelGGL(( _copy_from_tp), dim3(Gr),dim3(Bl), 0, 0, A,B,dmat);
}
void cudaD_apply_exp(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
hipLaunchKernelGGL(( _apply_exp), dim3(Gr),dim3(Bl), 0, 0, mat,d);
}
void cudaD_apply_exp_limited(dim3 Gr, dim3 Bl, double* mat, MatrixDim d,
double lower_limit, double upper_limit) {
hipLaunchKernelGGL(( _apply_exp_limited), dim3(Gr),dim3(Bl), 0, 0, mat, d, lower_limit, upper_limit);
}
void cudaD_apply_pow(dim3 Gr, dim3 Bl, double* mat, double power, MatrixDim d) {
hipLaunchKernelGGL(( _apply_pow), dim3(Gr),dim3(Bl), 0, 0, mat, power, d);
}
void cudaD_apply_pow_abs(dim3 Gr, dim3 Bl, double* mat, double power,
bool include_sign, MatrixDim d) {
hipLaunchKernelGGL(( _apply_pow_abs), dim3(Gr),dim3(Bl), 0, 0, mat, power, include_sign, d);
}
void cudaD_apply_heaviside(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
hipLaunchKernelGGL(( _apply_heaviside), dim3(Gr),dim3(Bl), 0, 0, mat, d);
}
void cudaD_copy_cols(dim3 Gr, dim3 Bl, double* dst, const double* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
hipLaunchKernelGGL(( _copy_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride);
}
void cudaD_add_cols(dim3 Gr, dim3 Bl, double* dst, const double* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
hipLaunchKernelGGL(( _add_cols), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride);
}
void cudaD_copy_rows(dim3 Gr, dim3 Bl, double* dst, const double* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride);
}
void cudaD_copy_rows_direct(dim3 Gr, dim3 Bl, double* dst,
const double* const * src, MatrixDim dst_dim) {
hipLaunchKernelGGL(( _copy_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, dst_dim);
}
void cudaD_copy_to_rows_direct(dim3 Gr, dim3 Bl, double* const * dst,
const double* src, MatrixDim src_dim) {
hipLaunchKernelGGL(( _copy_to_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, src_dim);
}
void cudaD_add_rows(dim3 Gr, dim3 Bl, double alpha, double* dst,
const double* src, const MatrixIndexT_cuda* reorder,
MatrixDim dst_dim, int src_stride) {
hipLaunchKernelGGL(( _add_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, reorder, dst_dim, src_stride);
}
void cudaD_mul_rows(dim3 Gr, dim3 Bl, double* dst,
const double* src, const MatrixIndexT_cuda* reorder,
MatrixDim dst_dim, int src_stride) {
hipLaunchKernelGGL(( _mul_rows), dim3(Gr),dim3(Bl), 0, 0, dst, src, reorder, dst_dim, src_stride);
}
void cudaD_add_rows_direct(dim3 Gr, dim3 Bl, double alpha, double* dst,
const double* const * src, MatrixDim dst_dim) {
hipLaunchKernelGGL(( _add_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, dst_dim);
}
void cudaD_add_to_rows(dim3 Gr, dim3 Bl, double alpha,
double* dst, const double* src, const MatrixIndexT_cuda* reorder,
MatrixDim src_dim, int dst_stride) {
hipLaunchKernelGGL(( _add_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, reorder, src_dim, dst_stride);
}
void cudaD_add_to_rows_direct(dim3 Gr, dim3 Bl, double alpha,
double* const * dst, const double* src,
MatrixDim src_dim) {
hipLaunchKernelGGL(( _add_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha, dst, src, src_dim);
}
void cudaD_apply_floor(dim3 Gr, dim3 Bl, double* mat, double floor_val,
MatrixDim d) {
hipLaunchKernelGGL(( _apply_floor), dim3(Gr),dim3(Bl), 0, 0, mat, floor_val, d);
}
void cudaD_apply_ceiling(dim3 Gr, dim3 Bl, double* mat, double ceiling_val,
MatrixDim d) {
hipLaunchKernelGGL(( _apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, mat, ceiling_val, d);
}
void cudaD_set_diag(int Gr, int Bl, double* mat, double value, MatrixDim d) {
hipLaunchKernelGGL(( _set_diag), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaD_set_diag_packed(int Gr, int Bl, double* mat, double value, int dim) {
hipLaunchKernelGGL(( _set_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim);
}
void cudaD_add_diag_packed(int Gr, int Bl, double* mat, double value, int dim) {
hipLaunchKernelGGL(( _add_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim);
}
void cudaD_set_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
hipLaunchKernelGGL(( _set_const), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaD_set_zero_above_diag(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
hipLaunchKernelGGL(( _set_zero_above_diag), dim3(Gr),dim3(Bl), 0, 0, mat, d);
}
void cudaD_add(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
hipLaunchKernelGGL(( _add), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaD_scale_diag_packed(int Gr, int Bl, double* mat, double value,
int dim) {
hipLaunchKernelGGL(( _scale_diag_packed), dim3(Gr),dim3(Bl), 0, 0, mat,value,dim);
}
void cudaD_scale(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
hipLaunchKernelGGL(( _scale), dim3(Gr),dim3(Bl), 0, 0, mat,value,d);
}
void cudaD_apply_log(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
hipLaunchKernelGGL(( _apply_log), dim3(Gr),dim3(Bl), 0, 0, mat,d);
}
void cudaD_mul_elements(dim3 Gr, dim3 Bl, double* mat, const double* A,
MatrixDim dst_d, int src_stride) {
hipLaunchKernelGGL(( _mul_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride);
}
void cudaD_div_elements(dim3 Gr, dim3 Bl, double* mat, const double* A,
MatrixDim dst_d, int src_stride) {
hipLaunchKernelGGL(( _div_elements), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride);
}
void cudaD_max(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d,
int src_stride) {
hipLaunchKernelGGL(( _max), dim3(Gr),dim3(Bl), 0, 0, mat,A,dst_d,src_stride);
}
void cudaD_min(dim3 Gr, dim3 Bl, double* mat, const double* other, MatrixDim mat_d,
int other_stride) {
hipLaunchKernelGGL(( _min), dim3(Gr),dim3(Bl), 0, 0, mat,other,mat_d,other_stride);
}
void cudaD_mul_cols_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale,
MatrixDim d) {
hipLaunchKernelGGL(( _mul_cols_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d);
}
void cudaD_mul_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale,
MatrixDim d) {
hipLaunchKernelGGL(( _mul_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat,scale,d);
}
void cudaD_mul_rows_group_mat(dim3 Gr, dim3 Bl, double* y, const double* x,
MatrixDim d, int src_stride, int group_size) {
hipLaunchKernelGGL(( _mul_rows_group_mat), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size);
}
void cudaD_diff_group_pnorm(dim3 Gr, dim3 Bl, double *id, const double *iv,
const double *ov, const double* od,
MatrixDim id_dim, int iv_stride, int ov_stride,
int od_stride, int group_size, double power) {
hipLaunchKernelGGL(( _diff_group_pnorm), dim3(Gr), dim3(Bl), 0, 0, id, iv, ov, od, id_dim, iv_stride, ov_stride,
od_stride, group_size, power);
}
void cudaD_calc_group_max_deriv(dim3 Gr, dim3 Bl, double*y, const double* x1,
const double* x2, MatrixDim y_dim,
int x1_stride, int x2_stride, int group_size) {
hipLaunchKernelGGL(( _calc_group_max_deriv), dim3(Gr),dim3(Bl), 0, 0, y, x1, x2, y_dim, x1_stride, x2_stride,
group_size);
}
void cudaD_div_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* vec_div,
MatrixDim d) {
hipLaunchKernelGGL(( _div_rows_vec), dim3(Gr),dim3(Bl), 0, 0, mat, vec_div, d);
}
void cudaD_add_mat(dim3 Gr, dim3 Bl, double alpha, const double* src,
double* dst, MatrixDim d, int src_stride, int A_trans) {
if (A_trans) {
hipLaunchKernelGGL(( _add_mat_trans), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride);
} else {
hipLaunchKernelGGL(( _add_mat), dim3(Gr),dim3(Bl), 0, 0, alpha,src,dst,d,src_stride);
}
}
void cudaD_add_mat_blocks(dim3 Gr, dim3 Bl, double alpha, const double* src,
int32_cuda num_row_blocks, int32_cuda num_col_blocks,
double* dst, MatrixDim d, int src_stride,
int A_trans) {
if (A_trans) {
hipLaunchKernelGGL(( _add_mat_blocks_trans), dim3(Gr),dim3(Bl), 0, 0, alpha, src, num_row_blocks, num_col_blocks,
dst, d, src_stride);
} else {
hipLaunchKernelGGL(( _add_mat_blocks), dim3(Gr),dim3(Bl), 0, 0, alpha, src, num_row_blocks, num_col_blocks, dst,
d, src_stride);
}
}
void cudaD_add_mat_repeated(dim3 Gr, dim3 Bl, double alpha, const double* src,
MatrixDim src_dim, double *dst, MatrixDim dst_dim) {
hipLaunchKernelGGL(( _add_mat_repeated), dim3(Gr),dim3(Bl), 0, 0, alpha, src, src_dim, dst, dst_dim);
}
void cudaD_set_mat_mat_div_mat(dim3 Gr, dim3 Bl, const double *A,
const double *B, const double *C, double *dst,
MatrixDim d, int stride_a, int stride_b,
int stride_c) {
hipLaunchKernelGGL(( _set_mat_mat_div_mat), dim3(Gr),dim3(Bl), 0, 0, A,B,C,dst,d,stride_a,stride_b,stride_c);
}
void cudaD_sy_add_tr2(dim3 Gr, dim3 Bl, double alpha, double beta,
const double* T, MatrixDim tdim, double *S,
MatrixDim sdim) {
hipLaunchKernelGGL(( _sy_add_tr2), dim3(Gr),dim3(Bl), 0, 0, alpha, beta, T, tdim, S, sdim);
}
void cudaD_add_vec_to_cols(dim3 Gr, dim3 Bl, double alpha, const double* col,
double beta, double* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_vec_to_cols), dim3(Gr),dim3(Bl), 0, 0, alpha,col,beta,dst,d);
}
void cudaD_add_vec_to_rows(dim3 Gr, dim3 Bl, double alpha, const double* row,
double beta, double* dst, MatrixDim d) {
hipLaunchKernelGGL(( _add_vec_to_rows), dim3(Gr),dim3(Bl), 0, 0, alpha,row,beta,dst,d);
}
void cudaD_add_mat_diag_vec(dim3 Gr, dim3 Bl, double alpha, double *mat,
MatrixDim mat_dim, const double *mat2,
int mat2_row_stride, int mat2_col_stride,
const double *vec, double beta) {
hipLaunchKernelGGL(( _add_mat_diag_vec), dim3(Gr),dim3(Bl), 0, 0, alpha, mat, mat_dim, mat2, mat2_row_stride,
mat2_col_stride, vec, beta);
}
void cudaD_add_mat_mat_elements(dim3 Gr, dim3 Bl, double *data,
const double *srcA_data,
const double *srcB_data, MatrixDim dim,
int srcA_stride, int srcB_stride, double alpha,
double beta) {
hipLaunchKernelGGL(( _add_mat_mat_elements), dim3(Gr), dim3(Bl), 0, 0, data, srcA_data, srcB_data, dim,
srcA_stride, srcB_stride, alpha, beta);
}
// CURRENTLY UNUSED...
void cudaD_apply_mask(dim3 Gr, dim3 Bl, double* mat, const char* mask,
MatrixDim dmat, MatrixDim dmask) {
hipLaunchKernelGGL(( _apply_mask), dim3(Gr),dim3(Bl), 0, 0, mat,mask,dmat,dmask);
}
/*
* CuVector
*/
void cudaD_max_mat_cols(int Gr, int Bl, double* result, const double* mat,
const MatrixDim d) {
hipLaunchKernelGGL(( _transform_reduce_mat_cols), dim3(Gr),dim3(Bl), 0, 0, result,mat,d,
TransReduceOp<MAX,double>());
}
void cudaD_min_mat_cols(int Gr, int Bl, double* result, const double* mat,
const MatrixDim d) {
hipLaunchKernelGGL(( _transform_reduce_mat_cols), dim3(Gr),dim3(Bl), 0, 0, result,mat,d,
TransReduceOp<MIN,double>());
}
void cudaD_sum_mat_cols(int Gr, int Bl, double* result, const double* mat,
const MatrixDim d) {
hipLaunchKernelGGL(( _transform_reduce_mat_cols), dim3(Gr),dim3(Bl), 0, 0, result,mat,d,
TransReduceOp<SUM,double>());
}
void cudaD_add_col_sum_mat(int Gr, int Bl, double* result, const double* mat,
const MatrixDim d, const double alpha,
const double beta) {
hipLaunchKernelGGL(( _transform_reduce_mat_cols), dim3(Gr), dim3(Bl), 0, 0, result, mat, d,
TransReduceOp<SUMAB, double>(alpha, beta));
}
void cudaD_replace_value(int Gr, int Bl, double *v, int dim, double orig,
double changed) {
hipLaunchKernelGGL(( _replace_value), dim3(Gr),dim3(Bl), 0, 0, v, dim, orig, changed);
}
void cudaD_set_bias_params(int Gr, int Bl, double* v, const double* a,
double param_1, double param_2, double param_3,
int* flag, int dim) {
hipLaunchKernelGGL(( _set_bias_params), dim3(Gr),dim3(Bl), 0, 0, v,a,param_1,param_2,param_3,flag,dim);
}
void cudaD_vec_mul_elements(int Gr, int Bl, double* v, const double* a,
int dim) {
hipLaunchKernelGGL(( _vec_mul_elements), dim3(Gr),dim3(Bl), 0, 0, v, a, dim);
}
void cudaD_vec_min(int Gr, int Bl, const double* v, double* value, int dim,
int inc) {
hipLaunchKernelGGL(( _vec_transform_reduce), dim3(Gr),dim3(Bl), 0, 0, v, value, dim, inc,
TransReduceOp<MIN, double>());
}
void cudaD_vec_max(int Gr, int Bl, const double* v, double* value, int dim,
int inc) {
hipLaunchKernelGGL(( _vec_transform_reduce), dim3(Gr),dim3(Bl), 0, 0, v, value, dim, inc,
TransReduceOp<MAX, double>());
}
void cudaD_trace_mat_mat_trans(dim3 Gr, dim3 Bl, const double* A,
const double* B, MatrixDim dA, int B_stride,
double* value) {
hipLaunchKernelGGL(( _trace_mat_mat_trans), dim3(Gr),dim3(Bl), 0, 0, A,B,dA,B_stride,value);
}
void cudaD_trace_mat_mat(dim3 Gr, dim3 Bl, const double* A, const double* B,
MatrixDim dA, int B_stride, double* value) {
hipLaunchKernelGGL(( _trace_mat_mat<32>) , dim3(Gr),dim3(Bl), 0, 0, A,B,dA,B_stride,value);
}
void cudaD_add_diag_mat_mat_MNT(int Gr, int Bl, const double alpha,
const double* M, const MatrixDim dim_M,
const double* N, const int stride_N,
const double beta, double* v) {
hipLaunchKernelGGL(( _add_diag_mat_mat_MNT), dim3(Gr),dim3(Bl), 0, 0, alpha,M,dim_M,N,stride_N,beta,v);
}
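// The two wrappers below only launch the <16> or <32> instantiation of the
// kernel; any other Bl.x value falls through without launching anything, so
// callers are expected to use a block width of 16 or 32.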
void cudaD_add_diag_mat_mat_MTN(dim3 Gr, dim3 Bl, const double alpha,
const double* M, const int stride_M,
const double* N, const MatrixDim dim_N,
const double beta, double* v,
const int stride_v) {
if (Bl.x == 16) {
hipLaunchKernelGGL(( _add_diag_mat_mat_MTN<16>) , dim3(Gr), dim3(Bl), 0, 0, alpha, M, stride_M, N, dim_N, beta,
v, stride_v);
} else if (Bl.x == 32) {
hipLaunchKernelGGL(( _add_diag_mat_mat_MTN<32>) , dim3(Gr), dim3(Bl), 0, 0, alpha, M, stride_M, N, dim_N, beta,
v, stride_v);
}
}
void cudaD_add_diag_mat_mat_MN(dim3 Gr, dim3 Bl, const double alpha,
const double* M, const int stride_M,
const double* N, const MatrixDim dim_N,
const double beta, double* v) {
if (Bl.x == 16) {
hipLaunchKernelGGL(( _add_diag_mat_mat_MN<16>) , dim3(Gr),dim3(Bl), 0, 0, alpha,M,stride_M,N,dim_N,beta,v);
} else if (Bl.x==32) {
hipLaunchKernelGGL(( _add_diag_mat_mat_MN<32>), dim3(Gr),dim3(Bl), 0, 0, alpha,M,stride_M,N,dim_N,beta,v);
}
}
void cudaD_add_vec_vec(int Gr, int Bl, double alpha, double* v, const double* x,
const double* y, double beta, int dim) {
hipLaunchKernelGGL(( _add_vec_vec), dim3(Gr),dim3(Bl), 0, 0, alpha,v,x,y,beta,dim);
}
void cudaD_copy_col_from_mat_df(int Gr, int Bl, double* v, int col,
const double* mat, MatrixDim dmat, int dim) {
hipLaunchKernelGGL(( _copy_col_from_mat_df), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim);
}
void cudaD_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col,
const double* mat, MatrixDim dmat, int dim) {
hipLaunchKernelGGL(( _copy_col_from_mat_fd), dim3(Gr),dim3(Bl), 0, 0, v,col,mat,dmat,dim);
}
void cudaD_vec_sum(int Gr, int Bl, double* v, double* value, int dim, int inc) {
hipLaunchKernelGGL(( _vec_transform_reduce), dim3(Gr),dim3(Bl), 0, 0, v,value,dim,inc,
TransReduceOp<SUM, double>());
}
void cudaD_matrix_add_elements(dim3 Gr, dim3 Bl, double *data, MatrixDim dim,
double alpha, MatrixElement<double>* x,
int num_elements) {
hipLaunchKernelGGL(( _cuda_matrix_add_elements), dim3(Gr), dim3(Bl), 0, 0, data, dim, alpha, x, num_elements);
}
void cudaD_vector_copy_elements(dim3 Gr, dim3 Bl, double *data, int dim,
const double *src_mat, int mat_stride,
bool transpose,
const MatrixIndexT_cuda* elements) {
hipLaunchKernelGGL(( _cuda_vector_copy_elements), dim3(Gr), dim3(Bl), 0, 0, data, dim, src_mat, mat_stride,
transpose, elements);
}
void cudaD_matrix_add_indexed_values(dim3 Gr, dim3 Bl, MatrixDim dim,
double alpha, const Int32Pair* indices,
const double* x, int s, double* data) {
hipLaunchKernelGGL(( _cuda_matrix_add_indexed_values), dim3(Gr), dim3(Bl), 0, 0, dim, alpha, indices, x, s, data);
}
void cudaD_matrix_add_to_elements(dim3 Gr, dim3 Bl, double alpha,
double* mat, MatrixDim dim,
const MatrixIndexT_cuda* elements) {
hipLaunchKernelGGL(( _cuda_matrix_add_to_elements), dim3(Gr), dim3(Bl), 0, 0, alpha, mat, dim, elements);
}
void cudaD_vec_copy_diag_from_packed(int Gr, int Bl, double *dst,
const double *src, int dim) {
hipLaunchKernelGGL(( _vec_copy_diag_from_packed), dim3(Gr),dim3(Bl), 0, 0, dst,src,dim);
}
void cudaD_vec_apply_floor(int Gr, int Bl, double* v, double floor_val,
float *count, int dim) {
hipLaunchKernelGGL(( _vec_apply_floor), dim3(Gr),dim3(Bl), 0, 0, v,floor_val,count,dim);
}
void cudaD_vec_apply_ceiling(int Gr, int Bl, double* v, double ceiling_val,
float *count, int dim) {
hipLaunchKernelGGL(( _vec_apply_ceiling), dim3(Gr),dim3(Bl), 0, 0, v,ceiling_val,count,dim);
}
void cudaD_vec_apply_exp(int Gr, int Bl, double* v, int dim) {
hipLaunchKernelGGL(( _vec_apply_exp), dim3(Gr),dim3(Bl), 0, 0, v,dim);
}
void cudaD_vec_apply_log(int Gr, int Bl, double* v, double* flag, int dim) {
hipLaunchKernelGGL(( _vec_apply_log), dim3(Gr),dim3(Bl), 0, 0, v,flag,dim);
}
void cudaD_invert_elements(dim3 Gr, dim3 Bl, double* data, MatrixDim d) {
hipLaunchKernelGGL(( _invert_elements), dim3(Gr),dim3(Bl), 0, 0, data, d);
}
void cudaD_add_mat_blockmat(dim3 Gr, dim3 Bl, double *data, MatrixDim d,
const double *Adata, int A_num_rows, int A_num_cols,
int A_row_stride, int A_col_stride,
const CuBlockMatrixData *B_cu_data,
int B_num_blocks, double alpha, double beta,
int B_trans) {
if (B_trans) {
hipLaunchKernelGGL(( _add_mat_blockmat_trans), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols,
A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta);
} else {
hipLaunchKernelGGL(( _add_mat_blockmat), dim3(Gr),dim3(Bl), 0, 0, data, d, Adata, A_num_rows, A_num_cols,
A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta);
}
}
void cudaD_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data,
int num_blocks, const double *C_data,
int C_num_cols, int C_row_stride, int C_col_stride,
const double *D_data, int D_row_stride,
int D_col_stride, double alpha, double beta) {
hipLaunchKernelGGL(( _block_add_mat_mat), dim3(Gr),dim3(Bl), 0, 0, B_cu_data, num_blocks, C_data, C_num_cols,
C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride,
alpha, beta);
}
/*
* cu::
*/
void cudaD_soft_hinge(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d,
int src_stride) {
hipLaunchKernelGGL(( _soft_hinge), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaD_group_pnorm(dim3 Gr, dim3 Bl, double* y, const double* x,
MatrixDim d, int src_stride, int group_size,
double power) {
hipLaunchKernelGGL(( _group_pnorm), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size, power);
}
void cudaD_group_spec_pnorm(dim3 Gr, dim3 Bl, double* y, const double* x,
MatrixDim d, int src_stride, int group_size,
double power) {
if (power == double(0)) {
hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size,
TransReduceOp<L0NORM, double>());
} else if (power == double(1)) {
hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size,
TransReduceOp<L1NORM, double>());
} else if (power == double(2)) {
hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size,
TransReduceOp<L2NORM, double>());
} else if (power == std::numeric_limits<double>::infinity()) {
hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size,
TransReduceOp<LINFNORM, double>());
} else {
hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr), dim3(Bl), 0, 0, y, x, d, src_stride, group_size,
TransReduceOp<LPNORM, double>(power));
}
}
void cudaD_group_max(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d,
int src_stride, int group_size) {
hipLaunchKernelGGL(( _group_transform_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, group_size,
TransReduceOp<MAX, double>());
}
void cudaD_sigmoid(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d,
int src_stride) {
hipLaunchKernelGGL(( _sigmoid), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaD_diff_sigmoid(dim3 Gr, dim3 Bl, double* eout, const double* e,
const double* y, MatrixDim d, int e_stride,
int y_stride) {
hipLaunchKernelGGL(( _diff_sigmoid), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride);
}
void cudaD_tanh(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d,
int src_stride) {
hipLaunchKernelGGL(( _tanh), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaD_diff_tanh(dim3 Gr, dim3 Bl, double* eout, const double* e,
const double* y, MatrixDim d, int e_stride, int y_stride) {
hipLaunchKernelGGL(( _diff_tanh), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride);
}
void cudaD_ensure_nonzero(dim3 Gr, dim3 Bl, const double *x, MatrixDim d,
double epsilon, int y_stride, double *y) {
hipLaunchKernelGGL(( _ensure_nonzero), dim3(Gr),dim3(Bl), 0, 0, x, d, epsilon, y_stride, y);
}
void cudaD_parametric_relu(dim3 Gr, dim3 Bl, double* y, const double* x,
MatrixDim d, int src_stride,
const double* a, const double* b) {
hipLaunchKernelGGL(( _parametric_relu), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride, a, b);
}
void cudaD_diff_parametric_relu(dim3 Gr, dim3 Bl, double* eout, const double* e,
const double* y, MatrixDim d, int e_stride,
int y_stride, const double* a, const double* b) {
hipLaunchKernelGGL(( _diff_parametric_relu), dim3(Gr),dim3(Bl), 0, 0, eout, e, y, d, e_stride, y_stride, a, b);
}
void cudaD_heaviside(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d,
int src_stride) {
hipLaunchKernelGGL(( _heaviside), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaD_softmax_reduce(size_t Gr, size_t Bl, double* y, const double* x,
MatrixDim d, int src_stride) {
hipLaunchKernelGGL(( _softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, d, src_stride);
}
void cudaD_log_softmax_reduce(size_t Gr, size_t Bl, double* y, const double* x,
MatrixDim y_dim, int x_stride) {
hipLaunchKernelGGL(( _log_softmax_reduce), dim3(Gr),dim3(Bl), 0, 0, y, x, y_dim, x_stride);
}
void cudaD_normalize_per_row(size_t Gr, size_t Bl, double *y, int y_stride,
const double *x, MatrixDim x_d, double target_rms,
bool add_log_stddev) {
hipLaunchKernelGGL(( _normalize_per_row), dim3(Gr), dim3(Bl), 0, 0, y, y_stride, x, x_d, target_rms, add_log_stddev);
}
void cudaD_splice(dim3 Gr, dim3 Bl, double* y, const double* x,
const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _splice), dim3(Gr),dim3(Bl), 0, 0, y,x,off,d_out,d_in);
}
void cudaD_one(int Gr, int Bl, double* x, int dim) {
hipLaunchKernelGGL(( _one), dim3(Gr),dim3(Bl), 0, 0, x,dim);
}
void cudaD_take_mean(dim3 Gr, dim3 Bl, const double* x, double* y,
MatrixDim d_in) {
hipLaunchKernelGGL(( _take_mean), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in);
}
void cudaD_take_lower(dim3 Gr, dim3 Bl, const double* x, double* y,
MatrixDim d_in) {
hipLaunchKernelGGL(( _take_lower), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in);
}
void cudaD_take_upper(dim3 Gr, dim3 Bl, const double* x, double* y,
MatrixDim d_in) {
hipLaunchKernelGGL(( _take_upper), dim3(Gr),dim3(Bl), 0, 0, x,y,d_in);
}
void cudaD_copy_from_sp(dim3 Gr, dim3 Bl, const double* x, double* y,
MatrixDim d_out) {
hipLaunchKernelGGL(( _copy_from_sp), dim3(Gr),dim3(Bl), 0, 0, x,y,d_out);
}
void cudaD_copy(dim3 Gr, dim3 Bl, double* y, const double* x,
const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
hipLaunchKernelGGL(( _copy), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaD_randomize(dim3 Gr, dim3 Bl, double* y, const double* x,
const int32_cuda* copy_from, MatrixDim d_out,
MatrixDim d_in) {
hipLaunchKernelGGL(( _randomize), dim3(Gr),dim3(Bl), 0, 0, y,x,copy_from,d_out,d_in);
}
void cudaD_regularize_l1(dim3 Gr, dim3 Bl, double* wei, double* grad, double l1,
double lr, MatrixDim d, int stride_grad) {
hipLaunchKernelGGL(( _regularize_l1), dim3(Gr),dim3(Bl), 0, 0, wei,grad,l1,lr,d,stride_grad);
}
void cudaD_find_row_max_id(dim3 Gr, dim3 Bl, const double* mat, double* vec_val,
int32_cuda* vec_id, MatrixDim d) {
hipLaunchKernelGGL(( _find_row_max_id), dim3(Gr),dim3(Bl), 0, 0, mat, vec_val, vec_id, d);
}
void cudaD_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt,
double* mat_net_out, double* vec_log_post, MatrixDim d) {
hipLaunchKernelGGL(( _diff_xent), dim3(Gr),dim3(Bl), 0, 0, vec_tgt,mat_net_out,vec_log_post,d);
}
void cudaD_diff_softmax(dim3 Gr, dim3 Bl, double* x, const MatrixDim dim,
const double* value, const int value_stride,
const double* diff, const int diff_stride) {
hipLaunchKernelGGL(( _diff_softmax), dim3(Gr), dim3(Bl), 0, 0, x, dim, value, value_stride, diff, diff_stride);
}
void cudaD_diff_log_softmax(dim3 Gr, dim3 Bl, const MatrixDim in_deriv_dim,
const double* out_value, const int out_value_stride,
const double* out_deriv, const int out_deriv_stride,
double* in_deriv) {
hipLaunchKernelGGL(( _diff_log_softmax), dim3(Gr), dim3(Bl), 0, 0, in_deriv_dim, out_value, out_value_stride,
out_deriv, out_deriv_stride, in_deriv);
}
void cudaD_copy_rows_from_vec(dim3 Gr, dim3 Bl, double *mat_out,
MatrixDim d_out, const double *v_in) {
hipLaunchKernelGGL(( _copy_rows_from_vec), dim3(Gr),dim3(Bl), 0, 0, mat_out, d_out, v_in);
}
void cudaD_sum_column_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim,
const double *src_data, MatrixDim src_dim,
const Int32Pair *indices) {
hipLaunchKernelGGL(( _sum_column_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indices);
}
void cudaD_add_row_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim,
const double *src_data, MatrixDim src_dim,
const Int32Pair *indexes) {
hipLaunchKernelGGL(( _add_row_ranges), dim3(Gr),dim3(Bl), 0, 0, data, dim, src_data, src_dim, indexes);
}
void cudaD_matrix_lookup(dim3 Gr, dim3 Bl, const double *data, MatrixDim dim,
const Int32Pair *indices, int indices_size,
double *output) {
hipLaunchKernelGGL(( _matrix_lookup), dim3(Gr),dim3(Bl), 0, 0, data, dim, indices, indices_size, output);
}
void cudaD_equal_element_mask(dim3 Gr, dim3 Bl, const double *mat1,
const double *mat2, double *mask,
MatrixDim mat1_dim, int mat2_stride,
int mask_stride) {
hipLaunchKernelGGL(( _equal_element_mask), dim3(Gr),dim3(Bl), 0, 0, mat1, mat2, mask, mat1_dim, mat2_stride,
mask_stride);
}
// Some conversion kernels for which it's more convenient
// to not name them F or D.
void cuda_copy_from_mat_df(dim3 Gr, dim3 Bl, double* mat_out,
const float* mat_in, MatrixDim d_out,
MatrixDim d_in) {
hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_ff(dim3 Gr, dim3 Bl, float* mat_out,
const float* mat_in, MatrixDim d_out,
MatrixDim d_in) {
hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_fd(dim3 Gr, dim3 Bl, float *mat_out,
const double* mat_in, MatrixDim d_out,
MatrixDim d_in) {
hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_dd(dim3 Gr, dim3 Bl, double *mat_out,
const double* mat_in, MatrixDim d_out,
MatrixDim d_in) {
hipLaunchKernelGGL(( _copy_from_mat), dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_df_trans(dim3 Gr, dim3 Bl, double* mat_out,
const float* mat_in, MatrixDim d_out,
MatrixDim d_in) {
hipLaunchKernelGGL(( _copy_from_mat_trans<32>) , dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out,
const float* mat_in, MatrixDim d_out,
MatrixDim d_in) {
hipLaunchKernelGGL(( _copy_from_mat_trans<32>) , dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_fd_trans(dim3 Gr, dim3 Bl, float *mat_out,
const double* mat_in, MatrixDim d_out,
MatrixDim d_in) {
hipLaunchKernelGGL(( _copy_from_mat_trans<32>) , dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_dd_trans(dim3 Gr, dim3 Bl, double *mat_out,
const double* mat_in, MatrixDim d_out,
MatrixDim d_in) {
hipLaunchKernelGGL(( _copy_from_mat_trans<32>) , dim3(Gr),dim3(Bl), 0, 0, mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_smat_ff(dim3 Gr, dim3 Bl, float* mat, MatrixDim mat_dim,
const int* smat_row_ptr, const int* smat_col_idx,
const float* smat_val) {
hipLaunchKernelGGL(( _copy_from_smat), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cuda_copy_from_smat_fd(dim3 Gr, dim3 Bl, float* mat, MatrixDim mat_dim,
const int* smat_row_ptr, const int* smat_col_idx,
const double* smat_val) {
hipLaunchKernelGGL(( _copy_from_smat), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cuda_copy_from_smat_df(dim3 Gr, dim3 Bl, double* mat, MatrixDim mat_dim,
const int* smat_row_ptr, const int* smat_col_idx,
const float* smat_val) {
hipLaunchKernelGGL(( _copy_from_smat), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cuda_copy_from_smat_dd(dim3 Gr, dim3 Bl, double* mat, MatrixDim mat_dim,
const int* smat_row_ptr, const int* smat_col_idx,
const double* smat_val) {
hipLaunchKernelGGL(( _copy_from_smat), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cuda_copy_from_smat_ff_trans(dim3 Gr, dim3 Bl, float* mat,
MatrixDim mat_dim, const int* smat_row_ptr,
const int* smat_col_idx,
const float* smat_val) {
hipLaunchKernelGGL(( _copy_from_smat_trans), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cuda_copy_from_smat_fd_trans(dim3 Gr, dim3 Bl, float* mat,
MatrixDim mat_dim, const int* smat_row_ptr,
const int* smat_col_idx,
const double* smat_val) {
hipLaunchKernelGGL(( _copy_from_smat_trans), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cuda_copy_from_smat_df_trans(dim3 Gr, dim3 Bl, double* mat,
MatrixDim mat_dim, const int* smat_row_ptr,
const int* smat_col_idx,
const float* smat_val) {
hipLaunchKernelGGL(( _copy_from_smat_trans), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cuda_copy_from_smat_dd_trans(dim3 Gr, dim3 Bl, double* mat,
MatrixDim mat_dim, const int* smat_row_ptr,
const int* smat_col_idx,
const double* smat_val) {
hipLaunchKernelGGL(( _copy_from_smat_trans), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cudaF_trace_mat_smat(dim3 Gr, dim3 Bl, const float* mat, MatrixDim mat_dim,
const int* smat_row_ptr, const int* smat_col_idx,
const float* smat_val, float* trace_vec) {
hipLaunchKernelGGL(( _trace_mat_smat), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val, trace_vec);
}
void cudaF_trace_mat_smat_trans(dim3 Gr, dim3 Bl, const float* mat,
MatrixDim mat_dim, const int* smat_row_ptr,
const int* smat_col_idx, const float* smat_val,
float* trace_vec) {
hipLaunchKernelGGL(( _trace_mat_smat_trans), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val, trace_vec);
}
void cudaD_trace_mat_smat(dim3 Gr, dim3 Bl, const double* mat,
MatrixDim mat_dim, const int* smat_row_ptr,
const int* smat_col_idx, const double* smat_val,
double* trace_vec) {
hipLaunchKernelGGL(( _trace_mat_smat), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val, trace_vec);
}
void cudaD_trace_mat_smat_trans(dim3 Gr, dim3 Bl, const double* mat,
MatrixDim mat_dim, const int* smat_row_ptr,
const int* smat_col_idx, const double* smat_val,
double* trace_vec) {
hipLaunchKernelGGL(( _trace_mat_smat_trans), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val, trace_vec);
}
void cudaD_lstm_nonlinearity(dim3 Gr, dim3 Bl, const double* in,
const int in_stride, const double* params,
const int params_stride, const int out_stride,
const int cell_dim, const int have_dropout_mask,
const int num_rows, double* out) {
hipLaunchKernelGGL(( _lstm_nonlinearity), dim3(Gr), dim3(Bl), 0, 0,
in, in_stride, params, params_stride,
out_stride, cell_dim, have_dropout_mask, num_rows, out);
}
void cudaF_lstm_nonlinearity(dim3 Gr, dim3 Bl, const float* in,
const int in_stride, const float* params,
const int params_stride, const int out_stride,
const int cell_dim, const int have_dropout_mask,
const int num_rows, float* out) {
hipLaunchKernelGGL(( _lstm_nonlinearity), dim3(Gr), dim3(Bl), 0, 0,
in, in_stride, params, params_stride,
out_stride, cell_dim, have_dropout_mask, num_rows, out);
}
void cudaD_diff_lstm_nonlinearity(dim3 Gr, dim3 Bl, const int cell_dim,
const int have_dropout_mask,
const int num_rows, const double* input,
const int input_stride, const double* params,
const int params_stride,
const double* output_deriv,
const int output_deriv_stride,
const double* deriv_sum_in,
const int deriv_sum_in_stride,
const double* self_repair_config,
double count, double* input_deriv,
const int input_deriv_stride,
double* params_deriv,
const int params_deriv_stride,
double* value_sum_out,
const int value_sum_out_stride,
double* deriv_sum_out,
const int deriv_sum_out_stride,
double* self_repair_sum_out,
const int self_repair_sum_out_stride) {
hipLaunchKernelGGL(( _diff_lstm_nonlinearity), dim3(Gr), dim3(Bl), 0, 0,
cell_dim, have_dropout_mask, num_rows, input,
input_stride, params, params_stride, output_deriv, output_deriv_stride,
deriv_sum_in, deriv_sum_in_stride, self_repair_config, count, input_deriv,
input_deriv_stride, params_deriv, params_deriv_stride, value_sum_out,
value_sum_out_stride, deriv_sum_out, deriv_sum_out_stride,
self_repair_sum_out, self_repair_sum_out_stride);
}
void cudaF_diff_lstm_nonlinearity(dim3 Gr, dim3 Bl, const int cell_dim,
const int have_dropout_mask,
const int num_rows, const float* input,
const int input_stride, const float* params,
const int params_stride,
const float* output_deriv,
const int output_deriv_stride,
const double* deriv_sum_in,
const int deriv_sum_in_stride,
const float* self_repair_config, double count,
float* input_deriv,
const int input_deriv_stride,
float* params_deriv,
const int params_deriv_stride,
double* value_sum_out,
const int value_sum_out_stride,
double* deriv_sum_out,
const int deriv_sum_out_stride,
float* self_repair_sum_out,
const int self_repair_sum_out_stride) {
hipLaunchKernelGGL(( _diff_lstm_nonlinearity), dim3(Gr), dim3(Bl), 0, 0,
cell_dim, have_dropout_mask, num_rows, input,
input_stride, params, params_stride, output_deriv, output_deriv_stride,
deriv_sum_in, deriv_sum_in_stride, self_repair_config, count, input_deriv,
input_deriv_stride, params_deriv, params_deriv_stride, value_sum_out,
value_sum_out_stride, deriv_sum_out, deriv_sum_out_stride,
self_repair_sum_out, self_repair_sum_out_stride);
}
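// Note for the float wrapper above: the running statistics (deriv_sum_in,
// value_sum_out, deriv_sum_out) and 'count' stay in double precision even
// though the activations and parameter derivatives are float, presumably so
// the long-lived accumulators keep their accuracy; only self_repair_sum_out
// remains float.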
void cudaD_copy_cols_from_vec(dim3 Gr, dim3 Bl, double *mat_out,
MatrixDim d_out, const double *v_in) {
hipLaunchKernelGGL(( _copy_cols_from_vec), dim3(Gr), dim3(Bl), 0, 0, mat_out, d_out, v_in);
}
void cudaF_copy_cols_from_vec(dim3 Gr, dim3 Bl, float *mat_out, MatrixDim d_out,
const float *v_in) {
hipLaunchKernelGGL(( _copy_cols_from_vec), dim3(Gr), dim3(Bl), 0, 0, mat_out, d_out, v_in);
}
void cudaF_diff_normalize_per_row(size_t Gr, size_t Bl, float *id,
int id_stride, const float *iv,
MatrixDim iv_dim, const float* od,
int od_stride, float target_rms,
bool add_log_stddev) {
hipLaunchKernelGGL(( _diff_normalize_per_row), dim3(Gr), dim3(Bl), 0, 0, id, id_stride, iv, iv_dim, od, od_stride,
target_rms, add_log_stddev);
}
void cudaD_diff_normalize_per_row(size_t Gr, size_t Bl, double *id,
int id_stride, const double *iv,
MatrixDim iv_dim, const double* od,
int od_stride, double target_rms,
bool add_log_stddev) {
hipLaunchKernelGGL(( _diff_normalize_per_row), dim3(Gr), dim3(Bl), 0, 0, id, id_stride, iv, iv_dim, od, od_stride,
target_rms, add_log_stddev);
}
void cudaD_select_rows(dim3 Gr, dim3 Bl, const int* out_row_ptr,
int* out_col_idx, double* out_val,
const int* row_indexes, const int num_selected_rows,
const int* in_row_ptr, const int* in_col_idx,
const double* in_val) {
hipLaunchKernelGGL(( _select_rows), dim3(Gr), dim3(Bl), 0, 0, out_row_ptr, out_col_idx, out_val, row_indexes,
num_selected_rows, in_row_ptr, in_col_idx, in_val);
}
void cudaF_select_rows(dim3 Gr, dim3 Bl, const int* out_row_ptr,
int* out_col_idx, float* out_val, const int* row_indexes,
const int num_selected_rows, const int* in_row_ptr,
const int* in_col_idx, const float* in_val) {
hipLaunchKernelGGL(( _select_rows), dim3(Gr), dim3(Bl), 0, 0, out_row_ptr, out_col_idx, out_val, row_indexes,
num_selected_rows, in_row_ptr, in_col_idx, in_val);
}
void cudaD_add_smat(dim3 Gr, dim3 Bl, double* mat, MatrixDim mat_dim,
double alpha, const int* smat_row_ptr,
const int* smat_col_idx, const double* smat_val) {
hipLaunchKernelGGL(( _add_smat), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, alpha, smat_row_ptr, smat_col_idx,
smat_val);
}
void cudaF_add_smat(dim3 Gr, dim3 Bl, float* mat, MatrixDim mat_dim,
float alpha, const int* smat_row_ptr,
const int* smat_col_idx, const float* smat_val) {
hipLaunchKernelGGL(( _add_smat), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, alpha, smat_row_ptr, smat_col_idx,
smat_val);
}
void cudaD_add_smat_trans(dim3 Gr, dim3 Bl, double* mat, MatrixDim mat_dim,
double alpha, const int* smat_row_ptr,
const int* smat_col_idx, const double* smat_val) {
hipLaunchKernelGGL(( _add_smat_trans), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, alpha, smat_row_ptr, smat_col_idx,
smat_val);
}
void cudaF_add_smat_trans(dim3 Gr, dim3 Bl, float* mat, MatrixDim mat_dim,
float alpha, const int* smat_row_ptr,
const int* smat_col_idx, const float* smat_val) {
hipLaunchKernelGGL(( _add_smat_trans), dim3(Gr), dim3(Bl), 0, 0, mat, mat_dim, alpha, smat_row_ptr, smat_col_idx,
smat_val);
}
void cudaD_apply_exp_special(dim3 Gr, dim3 Bl, double* out, MatrixDim out_dim,
const double* in, int in_stride) {
hipLaunchKernelGGL(( _apply_exp_special), dim3(Gr), dim3(Bl), 0, 0, out, out_dim, in, in_stride);
}
void cudaF_apply_exp_special(dim3 Gr, dim3 Bl, float* out, MatrixDim out_dim,
const float* in, int in_stride) {
hipLaunchKernelGGL(( _apply_exp_special), dim3(Gr), dim3(Bl), 0, 0, out, out_dim, in, in_stride);
}
void cuda_compress_uint8_sign(dim3 Gr, dim3 Bl, const float *src, MatrixDim dim,
unsigned char *dest, int dest_stride) {
hipLaunchKernelGGL(( _cuda_compress_uint8_sign), dim3(Gr), dim3(Bl), 0, 0, src, dim, dest, dest_stride);
}
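// The fixed-point compress wrappers below (int16, uint16, int8, uint8) all
// forward to the same pair of kernels; the destination width is presumably
// picked up through a template parameter deduced from the 'dest' pointer
// type, so the bounds_check flag is the only thing that changes which kernel
// gets launched.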
void cuda_compress_int16(dim3 Gr, dim3 Bl, const float *src,
MatrixDim dim, int16_t *dest,
int dest_stride, float inv_scale,
bool bounds_check) {
if (bounds_check) {
hipLaunchKernelGGL(( _cuda_compress_bounds_check), dim3(Gr), dim3(Bl), 0, 0, src, dim, dest, dest_stride, inv_scale);
} else {
hipLaunchKernelGGL(( _cuda_compress_no_bounds_check), dim3(Gr), dim3(Bl), 0, 0, src, dim, dest, dest_stride, inv_scale);
}
}
void cuda_compress_uint16(dim3 Gr, dim3 Bl, const float *src,
MatrixDim dim, uint16_t *dest,
int dest_stride, float inv_scale,
bool bounds_check) {
if (bounds_check) {
hipLaunchKernelGGL(( _cuda_compress_bounds_check), dim3(Gr), dim3(Bl), 0, 0, src, dim, dest, dest_stride, inv_scale);
} else {
hipLaunchKernelGGL(( _cuda_compress_no_bounds_check), dim3(Gr), dim3(Bl), 0, 0, src, dim, dest, dest_stride, inv_scale);
}
}
void cuda_compress_int8(dim3 Gr, dim3 Bl, const float *src,
MatrixDim dim, int8_t *dest,
int dest_stride, float inv_scale,
bool bounds_check) {
if (bounds_check) {
hipLaunchKernelGGL(( _cuda_compress_bounds_check), dim3(Gr), dim3(Bl), 0, 0, src, dim, dest, dest_stride, inv_scale);
} else {
hipLaunchKernelGGL(( _cuda_compress_no_bounds_check), dim3(Gr), dim3(Bl), 0, 0, src, dim, dest, dest_stride, inv_scale);
}
}
void cuda_compress_uint8(dim3 Gr, dim3 Bl, const float *src,
MatrixDim dim, uint8_t *dest,
int dest_stride, float inv_scale,
bool bounds_check) {
if (bounds_check) {
hipLaunchKernelGGL(( _cuda_compress_bounds_check), dim3(Gr), dim3(Bl), 0, 0, src, dim, dest, dest_stride, inv_scale);
} else {
hipLaunchKernelGGL(( _cuda_compress_no_bounds_check), dim3(Gr), dim3(Bl), 0, 0, src, dim, dest, dest_stride, inv_scale);
}
}
void cuda_uncompress_uint8(dim3 Gr, dim3 Bl, float *dest,
MatrixDim dim, const uint8_t *src,
int src_stride, float scale) {
hipLaunchKernelGGL(( _cuda_uncompress), dim3(Gr), dim3(Bl), 0, 0, dest, dim, src, src_stride, scale);
}
void cuda_uncompress_int8(dim3 Gr, dim3 Bl, float *dest,
MatrixDim dim, const int8_t *src,
int src_stride, float scale) {
hipLaunchKernelGGL(( _cuda_uncompress), dim3(Gr), dim3(Bl), 0, 0, dest, dim, src, src_stride, scale);
}
void cuda_uncompress_uint16(dim3 Gr, dim3 Bl, float *dest,
MatrixDim dim, const uint16_t *src,
int src_stride, float scale) {
hipLaunchKernelGGL(( _cuda_uncompress), dim3(Gr), dim3(Bl), 0, 0, dest, dim, src, src_stride, scale);
}
void cuda_uncompress_int16(dim3 Gr, dim3 Bl, float *dest,
MatrixDim dim, const int16_t *src,
int src_stride, float scale) {
hipLaunchKernelGGL(( _cuda_uncompress), dim3(Gr), dim3(Bl), 0, 0, dest, dim, src, src_stride, scale);
}
// cudamatrix/cu-kernels.cu
// Copyright 2009-2012 Karel Vesely
// 2013 Ehsan Variani
// 2013 Johns Hopkins University (author: Daniel Povey)
// 2013 Hainan Xu
// 2013 Xiaohui Zhang
// 2013-2015 Guoguo Chen
// 2016-2018 Shiyin Kang
// 2017 Hossein Hadian, Daniel Galvez
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY IMPLIED
// WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR PURPOSE,
// MERCHANTABILITY OR NON-INFRINGEMENT.
// See the Apache 2 License for the specific language governing permissions and
// limitations under the License.
// This file contains the CUDA code of the CUDA kernels, plus their ANSI-C wrappers.
#include <cfloat>
#include <limits>
#include <math_constants.h>
#include "cudamatrix/cu-kernels-ansi.h"
/***********************************************************************
* Generic __device__ functions
*/
template<typename Real>
__device__
static Real _sum_reduce(Real buffer[]) {
// Total number of active threads
int32_cuda nTotalThreads = blockDim.x;
__syncthreads();
// perform tree-based reduction (sum)
while (nTotalThreads > 1) {
int32_cuda halfPoint = ((1 + nTotalThreads) >> 1); // divide by two
// only the upper half of the threads (threadIdx.x >= halfPoint) does the accumulation here.
if (threadIdx.x >= halfPoint) { // was <
// Get the shared value stored by another thread
Real temp = 0.0;
if (threadIdx.x < nTotalThreads) { // was +halfPoint
temp = buffer[threadIdx.x]; // was +halfPoint
}
buffer[threadIdx.x - halfPoint] += temp;
}
__syncthreads();
nTotalThreads = ((1 + nTotalThreads) >> 1); // divide by two.
}
// the result
return buffer[0];
}
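// Illustrative usage (a sketch only; the buffer size, variable names and the
// surrounding kernel are assumptions, not code from this file): a reduction
// kernel fills one shared-memory slot per thread, then lets _sum_reduce fold
// the slots down, and finally thread 0 writes the block's result.
//
//   __shared__ Real ssum[CU1DBLOCK];
//   ssum[threadIdx.x] = partial_sum;          // each thread's partial result
//   Real total = _sum_reduce(ssum);           // every thread must reach this
//   if (threadIdx.x == 0) out[blockIdx.x] = total;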
/***********************************************************************
* CUDA kernels
* the functions are templated to have the float/double operations
*/
/*
* CuMatrix
*/
template<typename Real>
__global__
static void _copy_low_upp(Real* A, MatrixDim dimA) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i <= j || i >= dimA.rows)
return;
int index_1 = i * dimA.stride + j;
int index_2 = j * dimA.stride + i;
A[index_2] = A[index_1];
}
template<typename Real>
__global__
static void _copy_upp_low(Real* A, MatrixDim dimA) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (j <= i || j >= dimA.rows)
return;
int index_1 = i * dimA.stride + j;
int index_2 = j * dimA.stride + i;
A[index_2] = A[index_1];
}
// mat += diag(vec) * mat2.
template<typename Real>
__global__
static void _add_diag_vec_mat(Real alpha, Real *mat, MatrixDim mat_dim,
const Real *vec, const Real *mat2,
int mat2_row_stride, int mat2_col_stride,
Real beta) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // column index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
int index = j * mat_dim.stride + i, index2 = j * mat2_row_stride
+ i * mat2_col_stride;
if (i < mat_dim.cols && j < mat_dim.rows) {
mat[index] = alpha * vec[j] * mat2[index2] + beta * mat[index];
}
}
template<typename Real, typename OtherReal>
__global__
static void _copy_from_tp(Real* A, const OtherReal* B, MatrixDim dmat) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dmat.cols && j < dmat.rows) {
int32_cuda index_B = (j * (j + 1) / 2) + i;
int32_cuda index_A = j * dmat.stride + i;
if (i <= j) {
A[index_A] = B[index_B];
} else {
A[index_A] = 0.0;
}
}
}
template<typename Real, typename OtherReal>
__global__
static void _copy_from_tp_trans(Real* A, const OtherReal* B, MatrixDim dmat) {
// we interpret these indexes oppositely from normal, but it doesn't
// matter as it's invoked in a symmetric way.
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
// transpose the indices used to index the source TpMatrix.
if (i < dmat.rows && j < dmat.cols) {
int32_cuda index_B = (j * (j + 1) / 2) + i;
int32_cuda index_A = i * dmat.stride + j;
if (i <= j) {
A[index_A] = B[index_B];
} else {
A[index_A] = 0.0;
}
}
}
template<typename Real, typename OtherReal>
__global__
static void _copy_from_mat(Real* mat_out, const OtherReal* mat_in,
MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col-index
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row-index.
int32_cuda index_out = i + j * d_out.stride;
int32_cuda index_in = i + j * d_in.stride;
if (i < d_out.cols && j < d_out.rows)
mat_out[index_out] = static_cast<Real>(mat_in[index_in]);
}
template<int TileDim, typename Real, typename OtherReal>
__global__
static void _copy_from_mat_trans(Real* mat_out, const OtherReal* mat_in,
MatrixDim d_out, MatrixDim d_in) {
// Use shared memory to achieve both coalesced memory reading and writing
// '+1' to avoid bank conflict when reading sbuf
__shared__ Real sbuf[TileDim][TileDim + 1];
const int32_cuda i_in = blockIdx.y * TileDim + threadIdx.y; // row-index
const int32_cuda j_in = blockIdx.x * TileDim + threadIdx.x; // col-index
const int32_cuda tile_stride_in = CU1DBLOCK / TileDim * d_in.stride;
int32_cuda index_in = i_in * d_in.stride + j_in;
# pragma unroll
for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) {
if (i_in + i < d_in.rows && j_in < d_in.cols) {
sbuf[threadIdx.y + i][threadIdx.x] = static_cast<Real>(mat_in[index_in]);
}
index_in += tile_stride_in;
}
__syncthreads();
// Grid is transposed, but block is not yet.
// Warp (blockDim.x) is always along the row-dim.
const int32_cuda i_out = blockIdx.x * TileDim + threadIdx.y;
const int32_cuda j_out = blockIdx.y * TileDim + threadIdx.x;
const int32_cuda tile_stride_out = CU1DBLOCK / TileDim * d_out.stride;
int32_cuda index_out = i_out * d_out.stride + j_out;
# pragma unroll
for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) {
if (i_out + i < d_out.rows && j_out < d_out.cols) {
// block is transposed when reading sbuf
mat_out[index_out] = sbuf[threadIdx.x][threadIdx.y + i];
}
index_out += tile_stride_out;
}
}
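// A plausible launch shape for the tiled transpose above (a sketch only; the
// real call sites choose their own configuration, which is not shown here):
// the block is TileDim x (CU1DBLOCK / TileDim) threads and the grid tiles the
// input matrix, e.g. for TileDim == 32:
//
//   dim3 Bl(32, CU1DBLOCK / 32);
//   dim3 Gr((d_in.cols + 31) / 32, (d_in.rows + 31) / 32);
//   hipLaunchKernelGGL((_copy_from_mat_trans<32>), dim3(Gr), dim3(Bl), 0, 0,
//                      mat_out, mat_in, d_out, d_in);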
// Copy from CSR sparse matrix to dense matrix
//
// We use warpSize threads per row to access only the nnz elements.
// Every CU1DBLOCK/warpSize rows share one thread block.
// 1D grid to cover all rows.
template<typename Real, typename OtherReal>
__global__
static void _copy_from_smat(Real* mat, MatrixDim mat_dim,
const int* smat_row_ptr, const int* smat_col_idx,
const OtherReal* smat_val) {
const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx
if (i < mat_dim.rows) {
const int nz_start = smat_row_ptr[i];
const int nz_end = smat_row_ptr[i + 1];
for (int nz_id = nz_start + threadIdx.x; nz_id < nz_end; nz_id +=
warpSize) {
const int j = smat_col_idx[nz_id]; // col idx
mat[i * mat_dim.stride + j] = static_cast<Real>(smat_val[nz_id]);
}
}
}
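// The warp-per-row CSR kernels in this file (this one, its transposed
// variant, _select_rows, _add_smat, ...) all assume the launch shape sketched
// below; the exact helper the callers use to compute it is an assumption
// here, not code from this file:
//
//   dim3 Bl(32 /* warpSize */, CU1DBLOCK / 32);
//   dim3 Gr((num_csr_rows + Bl.y - 1) / Bl.y);   // 1D grid over CSR rows
//
// so threadIdx.x walks the nonzeros of one row and threadIdx.y picks the row
// within the block.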
/// Select a subset of the rows of a CSR SparseMatrix.
/// Sets 'out' to only the rows of 'in' that are listed
/// in 'row_indexes'. 'row_indexes' must be sorted and unique,
/// and satisfy 0 <= row_indexes[i] < in.size().
///
/// Note: 'out_row_ptr' is an input parameter that is calculated before
/// calling this kernel function
///
/// We use warpSize threads per row to access only the nnz elements.
/// Every CU1DBLOCK/warpSize rows share one thread block.
/// 1D grid to cover all selected rows.
template<typename Real>
__global__
static void _select_rows(const int* out_row_ptr, int* out_col_idx,
Real* out_val, const int* row_indexes,
const int num_selected_rows, const int* in_row_ptr,
const int* in_col_idx, const Real* in_val) {
const int out_i = blockIdx.x * blockDim.y + threadIdx.y; // out row idx
if (out_i < num_selected_rows) {
const int in_i = row_indexes[out_i];
const int in_row_start = in_row_ptr[in_i];
const int out_row_start = out_row_ptr[out_i];
const int row_length = in_row_ptr[in_i + 1] - in_row_start;
for (int k = threadIdx.x; k < row_length; k += warpSize) {
const int in_n = in_row_start + k;
const int out_n = out_row_start + k;
out_col_idx[out_n] = in_col_idx[in_n];
out_val[out_n] = in_val[in_n];
}
}
}
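// Consistency requirement implied by the copy loop above: for every selected
// row k,
//   out_row_ptr[k + 1] - out_row_ptr[k]
//     == in_row_ptr[row_indexes[k] + 1] - in_row_ptr[row_indexes[k]],
// i.e. out_row_ptr must be the CSR row-pointer (prefix sum of the selected
// row lengths) that the caller computes before launching this kernel.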
// mat += alpha * smat
//
// We use warpSize threads per row to access only the nonzero elements.
// Every CU1DBLOCK/warpSize rows share one thread block.
// 1D grid to cover all rows of smat.
template<typename Real>
__global__
static void _add_smat(Real* mat, MatrixDim mat_dim, Real alpha,
const int* smat_row_ptr, const int* smat_col_idx,
const Real* smat_val) {
const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx
if (i < mat_dim.rows) {
const int row_start = smat_row_ptr[i];
const int row_end = smat_row_ptr[i + 1];
for (int n = row_start + threadIdx.x; n < row_end; n += warpSize) {
const int j = smat_col_idx[n]; // col idx of smat
mat[i * mat_dim.stride + j] += alpha * smat_val[n];
}
}
}
// mat += alpha * smat^T
//
// We use warpSize threads per row to access only the nonzero elements.
// Every CU1DBLOCK/warpSize rows share one thread block.
// 1D grid to cover all rows of smat.
template<typename Real>
__global__
static void _add_smat_trans(Real* mat, MatrixDim mat_dim, Real alpha,
const int* smat_row_ptr, const int* smat_col_idx,
const Real* smat_val) {
const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx
if (i < mat_dim.cols) {
const int row_start = smat_row_ptr[i];
const int row_end = smat_row_ptr[i + 1];
for (int n = row_start + threadIdx.x; n < row_end; n += warpSize) {
const int j = smat_col_idx[n]; // col idx of smat
mat[j * mat_dim.stride + i] += alpha * smat_val[n];
}
}
}
/// For each element x of the matrix, set it to
/// (x < 0 ? exp(x) : x + 1).
/// Use block/grid sizes for simple matrix ops
template<typename T>
__global__
static void _apply_exp_special(T* out, MatrixDim out_dim, const T* in,
int in_stride) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i < out_dim.rows && j < out_dim.cols) {
T x = in[i * in_stride + j];
if (x < T(0)) {
out[i * out_dim.stride + j] = exp(x);
} else {
out[i * out_dim.stride + j] = x + T(1);
}
}
}
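// Hedged launch sketch for the element-wise kernel above (illustrative only;
// a 16x16 block is just one plausible "simple matrix op" configuration):
//   dim3 block(16, 16);
//   dim3 grid((out_dim.rows + block.x - 1) / block.x,   // x covers rows here
//             (out_dim.cols + block.y - 1) / block.y);  // y covers columns
//   _apply_exp_special<<<grid, block>>>(out, out_dim, in, in_stride);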
/// Fill the array 'data' with the sequence [base ... base + length)
/// Use 1D block and 1D grid
template<typename T>
__global__
static void _sequence(T* data, int length, T base) {
const int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < length) {
data[i] = base + T(i);
}
}
// Copy from CSR sparse matrix to transposed dense matrix
//
// We use warpSize threads per row to access only the nnz elements.
// Every CU1DBLOCK/warpSize rows share one thread block.
// 1D grid to cover all rows.
template<typename Real, typename OtherReal>
__global__
static void _copy_from_smat_trans(Real* mat, MatrixDim mat_dim,
const int* smat_row_ptr,
const int* smat_col_idx,
const OtherReal* smat_val) {
const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx of smat
if (i < mat_dim.cols) {
const int nz_start = smat_row_ptr[i];
const int nz_end = smat_row_ptr[i + 1];
for (int nz_id = nz_start + threadIdx.x; nz_id < nz_end; nz_id +=
warpSize) {
const int j = smat_col_idx[nz_id]; // col idx of smat
mat[j * mat_dim.stride + i] = static_cast<Real>(smat_val[nz_id]);
}
}
}
// First stage of trace(mat * smat^T)
// We use warpSize threads per row to access only the nnz elements.
// Every CU1DBLOCK/warpSize rows share one thread block.
// 1D grid to cover all rows of smat.
template<typename Real>
__global__
static void _trace_mat_smat_trans(const Real* mat, MatrixDim mat_dim,
const int* smat_row_ptr,
const int* smat_col_idx, const Real* smat_val,
Real* trace_vec) {
const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx of smat
if (i < mat_dim.rows) {
const int nz_start = smat_row_ptr[i];
const int nz_end = smat_row_ptr[i + 1];
for (int nz_id = nz_start + threadIdx.x; nz_id < nz_end; nz_id +=
warpSize) {
const int j = smat_col_idx[nz_id]; // col idx of smat
trace_vec[nz_id] = mat[i * mat_dim.stride + j] * smat_val[nz_id];
}
}
}
// First stage of trace(mat * smat)
// We use warpSize threads per row to access only the nnz elements.
// Every CU1DBLOCK/warpSize rows share one thread block.
// 1D grid to cover all rows of smat.
template<typename Real>
__global__
static void _trace_mat_smat(const Real* mat, MatrixDim mat_dim,
const int* smat_row_ptr, const int* smat_col_idx,
const Real* smat_val, Real* trace_vec) {
const int i = blockIdx.x * blockDim.y + threadIdx.y; // row idx of smat
if (i < mat_dim.cols) {
const int nz_start = smat_row_ptr[i];
const int nz_end = smat_row_ptr[i + 1];
for (int nz_id = nz_start + threadIdx.x; nz_id < nz_end; nz_id +=
warpSize) {
const int j = smat_col_idx[nz_id]; // col idx of smat
trace_vec[nz_id] = mat[j * mat_dim.stride + i] * smat_val[nz_id];
}
}
}
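// Note on the two trace kernels above: they only compute the per-nonzero
// products trace_vec[nz_id] = mat(.,.) * smat_val[nz_id]; the actual trace is
// the sum over all nnz entries of trace_vec, which is assumed to be obtained
// by a separate vector-sum reduction on the caller's side.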
template<typename Real>
__global__
static void _apply_exp(Real* mat, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows) {
mat[index] = exp(mat[index]);
}
}
template<typename Real>
__global__
static void _apply_exp_limited(Real* mat, MatrixDim d,
Real lower_limit, Real upper_limit) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows) {
Real x = mat[index];
// I'm writing !(x >= lower_limit) instead of (x < lower_limit) so that
// nan's will be set to the lower-limit.
if (!(x >= lower_limit))
x = lower_limit;
else if (x > upper_limit)
x = upper_limit;
mat[index] = exp(x);
}
}
template<typename Real>
__global__
static void _scale_diag_packed(Real* mat, Real value, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = ((i + 1) * (i + 2) / 2) - 1;
if (i < dim) {
mat[index] = value * mat[index];
}
}
template<typename Real>
__global__
static void _set_diag(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = i + i * d.stride;
if (i < d.rows && i < d.cols) {
mat[index] = value;
}
}
template<typename Real>
__global__
static void _set_diag_packed(Real* mat, Real value, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = ((i + 1) * (i + 2) / 2) - 1;
if (i < dim) {
mat[index] = value;
}
}
template<typename Real>
__global__
static void _add_diag_packed(Real* mat, Real value, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = ((i + 1) * (i + 2) / 2) - 1;
if (i < dim) {
mat[index] = mat[index] + value;
}
}
template<typename Real>
__global__
static void _set_const(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // column
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
mat[index] = value;
}
template<typename Real>
__global__
static void _set_zero_above_diag(Real* mat, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < i)
mat[index] = 0.0;
}
template<typename Real>
__global__
static void _add(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
mat[index] = mat[index] + value;
}
template<typename Real>
__global__
static void _scale(Real* mat, Real value, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
mat[index] = mat[index] * value;
}
template<typename Real>
__global__
static void _apply_log(Real* mat, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
mat[index] = log(mat[index]);
}
template<typename Real>
__global__
static void _mul_elements(Real* mat, const Real* A, MatrixDim dst_d,
int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda dst_index = i + j * dst_d.stride, src_index = i + j * src_stride;
if (i < dst_d.cols && j < dst_d.rows)
mat[dst_index] = mat[dst_index] * A[src_index];
}
template<typename Real>
__global__
static void _div_elements(Real* mat, const Real* A, MatrixDim dst_d,
int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda dst_index = i + j * dst_d.stride, src_index = i + j * src_stride;
if (i < dst_d.cols && j < dst_d.rows)
mat[dst_index] = mat[dst_index] / A[src_index];
}
template<typename Real>
__global__
static void _max(Real* mat, const Real* A, MatrixDim dst_d, int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda dst_index = i + j * dst_d.stride, src_index = i + j * src_stride;
if (i < dst_d.cols && j < dst_d.rows) {
Real a = mat[dst_index], b = A[src_index];
mat[dst_index] = fmax(a, b);
}
}
template<typename Real>
__global__
static void _min(Real* mat, const Real* other, MatrixDim mat_d,
int other_stride) {
int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda i = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda mat_index = i * mat_d.stride + j;
int32_cuda other_index = i * other_stride + j;
if (j < mat_d.cols && i < mat_d.rows) {
Real a = mat[mat_index], b = other[other_index];
mat[mat_index] = fmin(a, b);
}
}
template<typename Real>
__global__
static void _vec_mul_elements(Real* v, const Real* a, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dim)
v[i] = v[i] * a[i];
}
template<typename Real>
__global__
static void _mul_cols_vec(Real* mat, const Real* scale, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
mat[index] *= scale[i];
}
template<typename Real>
__global__
static void _mul_rows_vec(Real* mat, const Real* scale, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
mat[index] *= scale[j];
}
template<typename Real>
__global__
static void _mul_rows_group_mat(Real *y, const Real *x, MatrixDim d,
int src_stride, int group_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (j < d.rows && i < d.cols) {
int dst_index = i + j * d.stride;
int src_index = i / group_size + j * src_stride;
y[dst_index] *= x[src_index];
}
}
template<typename Real>
__global__
void _diff_group_pnorm(Real *id, const Real *iv, const Real *ov, const Real* od,
MatrixDim id_dim, int iv_stride, int ov_stride,
int od_stride, int group_size, Real power) {
const int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j < id_dim.cols) {
const int grid_stride = gridDim.y * blockDim.y;
const int src_j = j / group_size;
int i = blockIdx.y * blockDim.y + threadIdx.y;
for (; i < id_dim.rows; i += grid_stride) {
const int iv_index = j + i * iv_stride;
Real iv_ij = iv[iv_index];
Real ans;
if (power == Real(2)) {
const int ov_index = src_j + i * ov_stride;
Real ov_ij = ov[ov_index];
ans = ov_ij <= 0.0 ? 0.0 : iv_ij / ov_ij;
} else if (power == Real(1)) {
Real iv_ij_sign = (iv_ij >= 0 ? 1 : -1);
ans = (iv_ij == Real(0) ? 0.0 : iv_ij_sign);
} else if (power
== (sizeof(Real) == sizeof(float) ? CUDART_INF_F : CUDART_INF)) {
const int ov_index = src_j + i * ov_stride;
Real ov_ij = ov[ov_index];
Real iv_ij_sign = (iv_ij >= 0 ? 1 : -1);
ans =
ov_ij <= 0.0 ?
0.0 : (iv_ij_sign * (abs(iv_ij) == ov_ij ? 1.0 : 0.0));
} else {
const int ov_index = src_j + i * ov_stride;
Real ov_ij = ov[ov_index];
Real iv_ij_sign = (iv_ij >= 0 ? 1 : -1);
if (ov_ij <= 0.0) {
ans = 0.0; // The derivative is either 0 or undefined at the origin.
} else {
ans = iv_ij_sign * pow(std::abs(iv_ij), power - 1)
* pow(ov_ij, 1 - power);
}
}
const int od_index = src_j + i * od_stride;
const int id_index = j + i * id_dim.stride;
id[id_index] = ans * od[od_index];
}
}
}
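// The branches above implement the derivative of the group p-norm
//   y = (sum_k |x_k|^p)^(1/p)
// with respect to a single input x:
//   dy/dx = sign(x) * |x|^(p-1) * y^(1-p),
// which specializes to x / y for p = 2, to sign(x) for p = 1, and to sign(x)
// times an indicator of |x| == y for p = infinity; it is set to 0 wherever
// y <= 0.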
/// deriv is the derivative we will output; vec is the input we're computing
/// the group max on, "maxv" is the previously computed group max.
template<typename Real>
__global__
static void _calc_group_max_deriv(Real *deriv, const Real *vec,
const Real *maxv, MatrixDim deriv_dim,
int vec_stride, int maxv_stride,
int group_size) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (j < deriv_dim.rows && i < deriv_dim.cols) {
int deriv_index = i + j * deriv_dim.stride;
int vec_index = i + j * vec_stride;
int maxv_index = i / group_size + j * maxv_stride;
Real vec_element = vec[vec_index], // The element of the original vector.
max_element = maxv[maxv_index]; // this is the max value
Real ans = (max_element == vec_element ? 1.0 : 0.0);
deriv[deriv_index] = ans;
}
}
/// Set each element to y = (x == orig ? changed : x).
template<typename Real>
__global__
static void _replace_value(Real *vec, int dim, Real orig, Real changed) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dim)
if (vec[i] == orig)
vec[i] = changed;
}
template<typename Real>
__global__
static void _div_rows_vec(Real* mat, const Real* vec_div, MatrixDim d) {
const int32_cuda i = blockIdx.y * blockDim.y + threadIdx.y;
if (i < d.rows) {
const int32_cuda start = i * d.stride;
const Real scale = Real(1) / vec_div[i];
const int32_cuda grid_stride = blockDim.x * gridDim.x;
for (int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x; j < d.cols; j +=
grid_stride) {
mat[start + j] *= scale;
}
}
}
template<typename Real>
__global__
static void _add_mat(Real alpha, const Real* src, Real* dst, MatrixDim d,
int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // column index
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row index
int32_cuda index = i + j * d.stride;
int32_cuda index_src = i + j * src_stride;
if (i < d.cols && j < d.rows)
dst[index] = alpha * src[index_src] + dst[index];
}
template<typename Real>
__global__
static void _add_mat_trans(Real alpha, const Real* src, Real* dst, MatrixDim d,
int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
int32_cuda index_src = j + i * src_stride;
if (i < d.cols && j < d.rows)
dst[index] = alpha * src[index_src] + dst[index];
}
template<typename Real>
__global__
static void _add_mat_blocks(Real alpha, const Real* src,
int32_cuda num_row_blocks,
int32_cuda num_col_blocks, Real* dst, MatrixDim d,
int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
int32_cuda index_src = i + j * src_stride;
if (i < d.cols && j < d.rows)
for (int32_cuda p = 0; p < num_row_blocks; p++) {
for (int32_cuda q = 0; q < num_col_blocks; q++) {
dst[index] = alpha
* src[index_src + p * src_stride * d.rows + q * d.cols]
+ dst[index];
}
}
}
template<typename Real>
__global__
static void _add_mat_repeated(Real alpha, const Real* src,
MatrixDim src_dim, Real* dst,
MatrixDim dst_dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda src_i = i % src_dim.cols,
src_j = j % src_dim.rows,
dst_index = i + j * dst_dim.stride,
src_index = src_i + src_j * src_dim.stride;
if (i < dst_dim.cols && j < dst_dim.rows)
dst[dst_index] += alpha * src[src_index];
}
template<typename Real>
__global__
static void _add_mat_blocks_trans(Real alpha, const Real* src,
int32_cuda num_row_blocks,
int32_cuda num_col_blocks, Real* dst,
MatrixDim d, int src_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
int32_cuda index_src = j + i * src_stride;
if (i < d.cols && j < d.rows)
for (int32_cuda p = 0; p < num_row_blocks; p++) {
for (int32_cuda q = 0; q < num_col_blocks; q++) {
dst[index] = alpha
* src[index_src + p * src_stride * d.cols + q * d.rows]
+ dst[index];
}
}
}
template<typename Real>
__global__
static void _set_mat_mat_div_mat(const Real* A, const Real* B, const Real* C,
Real* dst, MatrixDim d, int stride_a,
int stride_b, int stride_c) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride, a_index = i + j * stride_a, b_index = i
+ j * stride_b, c_index = i + j * stride_c;
if (i < d.cols && j < d.rows)
if (C[c_index] == 0)
dst[index] = A[a_index];
else
dst[index] = A[a_index] * B[b_index] / C[c_index];
}
// Given a matrix input S (not packed!) and a lower-triangular matrix T, this
// function does S = beta S + alpha * T^T T. This is used in PSD matrix
// inversion. The i index is the row of the destination S and the j the column
// (although of course the output is symmetric so it doesn't matter in a sense).
// The main point of this is to make use of various symmetries and zero-ness.
template<typename Real>
__global__
static void _sy_add_tr2(Real alpha, Real beta, const Real *T, MatrixDim tdim,
Real *S, MatrixDim sdim) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (i >= sdim.rows || j > i)
return;
  // this thread computes the dot-product of the i'th column of
  // T with the j'th column of T. The values we're multiplying
// are only nonzero for row-index k greater or equal to
// max(i, j), which equals i.
Real sum = 0.0;
for (int k = i; k < sdim.rows; k++) {
int i_index = i + tdim.stride * k, j_index = j + tdim.stride * k;
sum += T[i_index] * T[j_index];
}
int output_index1 = i * sdim.stride + j, output_index2 = j * sdim.stride + i;
S[output_index1] = alpha * sum + beta * S[output_index1];
S[output_index2] = alpha * sum + beta * S[output_index2];
}
template<typename Real>
__global__
static void _add_vec_to_cols(Real alpha, const Real* col, Real beta, Real* dst,
MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
dst[index] = alpha * col[j] + beta * dst[index];
}
template<typename Real>
__global__
static void _add_vec_to_rows(Real alpha, const Real* row, Real beta, Real* dst,
MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride;
if (i < d.cols && j < d.rows)
dst[index] = alpha * row[i] + beta * dst[index];
}
template<typename Real>
__global__
static void _apply_mask(Real* mat, const char* mask, MatrixDim dmat,
MatrixDim dmask) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * dmat.stride;
int32_cuda index2 = i + j * dmask.stride;
if (i < dmat.cols && j < dmat.rows)
if (mask[index2] == 0)
mat[index] = 0;
}
template<typename Real>
__global__
static void _add_mat_diag_vec(Real alpha, Real *mat, MatrixDim mat_dim,
const Real *mat2, int mat2_row_stride,
int mat2_col_stride, const Real *vec, Real beta) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // column index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
int index = i + j * mat_dim.stride, index2 = i * mat2_col_stride
+ j * mat2_row_stride;
if (j < mat_dim.rows && i < mat_dim.cols)
mat[index] = alpha * mat2[index2] * vec[i] + beta * mat[index];
}
template<typename Real>
__global__
static void _add_mat_mat_elements(Real *data, const Real *srcA_data,
const Real *srcB_data, MatrixDim dim,
int srcA_stride, int srcB_stride, Real alpha,
Real beta) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda tgt_index = i + j * dim.stride;
int32_cuda srcA_index = i + j * srcA_stride;
int32_cuda srcB_index = i + j * srcB_stride;
if (i < dim.cols && j < dim.rows) {
data[tgt_index] = alpha * srcA_data[srcA_index] * srcB_data[srcB_index]
+ beta * data[tgt_index];
}
}
/*
* CuVector
*/
// very limited application!
template<typename Real>
__global__
static void _set_bias_params(Real* v, const Real* a, Real param_1, Real param_2,
Real param_3, int* flag, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dim) {
Real ratio = a[i] / param_3;
if ((ratio < 0.0) || (ratio >= 1.01)) {
*flag = 1;
return;
}
if (ratio < param_1) {
Real factor = ((param_1 / ratio) > param_2) ? param_2 : (param_1 / ratio);
v[i] = v[i] / factor;
} else if (ratio > param_1) {
Real factor = ((ratio / param_1) > param_2) ? param_2 : (ratio / param_1);
v[i] = v[i] * factor;
}
}
}
template<typename Real, typename OtherReal>
__global__
static void _cublas_copy_kaldi(int n, const Real* x, int incx, OtherReal* y,
int incy) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < n) {
y[i * incy] = static_cast<OtherReal>(x[i * incx]);
}
}
// This kernel writes a copy of the vector "v_in" to each row of the matrix
// "m_out". The dimension of v_in should be equal to the #columns of m_out.
template<typename Real>
__global__
static void _copy_rows_from_vec(Real* m_out, MatrixDim d, const Real* v_in) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // column index.
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index.
if (i < d.cols && j < d.rows) {
int index = i + j * d.stride;
m_out[index] = v_in[i];
}
}
// This kernel writes a copy of the vector "v_in" to each column of the matrix
// "m_out". The dimension of v_in should be equal to the #rows of m_out.
template<typename Real>
__global__
static void _copy_cols_from_vec(Real* m_out, MatrixDim d, const Real* v_in) {
int i = blockIdx.y * blockDim.y + threadIdx.y; // row id
int j = blockIdx.x * blockDim.x + threadIdx.x; // col id
if (i < d.rows && j < d.cols) {
m_out[i * d.stride + j] = v_in[i];
}
}
// _trace_mat_mat reduces the partial sums to
// value[blockIdx.y * gridDim.x + blockIdx.x].
// It uses shared memory to transpose matrix B to ensure coalesced memory access.
template<int TileDim, typename Real>
__global__
static void _trace_mat_mat(const Real* A, const Real* B, MatrixDim dA,
int B_stride, Real* value) {
// Reuse shared mem and make indexing easier. "+1" to avoid bank conflict
__shared__ union {
Real trans[TileDim][TileDim + 1];
Real sum[CU1DBLOCK];
} smem;
// linear thread id;
const int32_cuda tid = threadIdx.y * blockDim.x + threadIdx.x;
const int32_cuda grid_height = gridDim.y * TileDim;
const int32_cuda ja = blockIdx.x * TileDim + threadIdx.x;
const int32_cuda ib = blockIdx.x * TileDim + threadIdx.y;
int32_cuda ia = blockIdx.y * TileDim + threadIdx.y;
int32_cuda jb = blockIdx.y * TileDim + threadIdx.x;
// Grid reduce
Real tsum = Real(0);
for (int32_cuda i0 = 0; i0 < dA.rows; i0 += grid_height) {
// Load from B, transpose the block and store in shared mem
if (jb < dA.rows) {
# pragma unroll
for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) {
if (ib + i < dA.cols) {
smem.trans[threadIdx.x][threadIdx.y + i] =
B[(ib + i) * B_stride + jb];
}
}
}
__syncthreads();
// Load from A, sum up the product.
if (ja < dA.cols) {
# pragma unroll
for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) {
if (ia + i < dA.rows) {
tsum += A[(ia + i) * dA.stride + ja]
* smem.trans[threadIdx.y + i][threadIdx.x];
}
}
}
__syncthreads();
ia += grid_height;
jb += grid_height;
}
smem.sum[tid] = tsum;
__syncthreads();
// Block reduce
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift)
smem.sum[tid] += smem.sum[tid + shift];
__syncthreads();
}
// Warp reduce. Implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
smem.sum[tid] += smem.sum[tid + shift];
}
}
// output 1 sum per thread block
if (tid == 0) {
value[blockIdx.y * gridDim.x + blockIdx.x] = smem.sum[0];
}
}
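// Usage note for _trace_mat_mat: each thread block leaves one partial sum in
// value[blockIdx.y * gridDim.x + blockIdx.x], so the final trace is the sum
// of those gridDim.x * gridDim.y partials, which is assumed to be computed by
// a follow-up reduction on the caller's side. gridDim.y may be chosen smaller
// than ceil(dA.rows / TileDim) because the kernel grid-strides over the rows.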
// _trace_mat_mat_trans reduces the partial sums to
// value[blockIdx.y * gridDim.x + blockIdx.x].
template<typename Real>
__global__
static void _trace_mat_mat_trans(const Real* A, const Real* B, MatrixDim dA,
int B_stride, Real* value) {
__shared__ Real ssum[CU1DBLOCK];
// linear thread id;
const int32_cuda tid = threadIdx.y * blockDim.x + threadIdx.x;
const int32_cuda j = blockIdx.x * blockDim.x + threadIdx.x;
const int32_cuda grid_height = gridDim.y * blockDim.y;
int32_cuda i = blockIdx.y * blockDim.y + threadIdx.y;
// Grid reduce
Real tsum = Real(0);
if (j < dA.cols) {
while (i < dA.rows) {
tsum += A[i * dA.stride + j] * B[i * B_stride + j];
i += grid_height;
}
}
ssum[tid] = tsum;
__syncthreads();
// Block reduce
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift)
ssum[tid] += ssum[tid + shift];
__syncthreads();
}
// Warp reduce. Implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
ssum[tid] += ssum[tid + shift];
}
}
// output 1 sum per thread block
if (tid == 0) {
value[blockIdx.y * gridDim.x + blockIdx.x] = ssum[0];
}
}
// v = alpha * diag(M * N^T) + beta * v
template<typename Real>
__global__
static void _add_diag_mat_mat_MNT(const Real alpha, const Real* M,
const MatrixDim dim_M, const Real* N,
const int stride_N, const Real beta,
Real* v) {
__shared__ Real ssum[CU1DBLOCK];
const int tid = threadIdx.x;
const int i = blockIdx.x;
const int m_start = i * dim_M.stride;
const int n_start = i * stride_N;
// Loop along the matrix row. Reduce to CU1DBLOCK elements per row.
Real tsum = Real(0);
for (int j = tid; j < dim_M.cols; j += CU1DBLOCK) {
tsum += M[m_start + j] * N[n_start + j];
}
ssum[tid] = tsum;
__syncthreads();
// Tree reduce to 2x warpSize elements.
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift)
ssum[tid] += ssum[tid + shift];
__syncthreads();
}
// Warp reduce to 1 element. Threads implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
ssum[tid] += ssum[tid + shift];
}
}
// output 1 sum per thread block
if (tid == 0) {
v[i] = alpha * ssum[0] + beta * v[i];
}
}
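// Hedged launch sketch for _add_diag_mat_mat_MNT (illustrative only): one
// CU1DBLOCK-thread block per output element, i.e.
//   _add_diag_mat_mat_MNT<<<dim_M.rows, CU1DBLOCK>>>(alpha, M, dim_M, N,
//                                                    stride_N, beta, v);
// Each block reduces the dot-product of row i of M with row i of N and then
// writes v[i] = alpha * dot + beta * v[i].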
// v = alpha * diag(M^T * N) + beta * v
template<int TileDim, typename Real>
__global__
static void _add_diag_mat_mat_MTN(const Real alpha, const Real* M,
const int stride_M, const Real* N,
const MatrixDim dim_N, const Real beta,
Real* v, const int stride_v) {
__shared__ Real ssum[CU1DBLOCK];
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int j = blockIdx.x * blockDim.x + threadIdx.x;
if (j >= dim_N.cols)
return;
// Loop along the matrix column.
// Reduce to gridDim.y * CU1DBLOCK / TileDim elements per column.
Real tsum = Real(0);
const int grid_stride_y = blockDim.y * gridDim.y;
for (int i = blockIdx.y * blockDim.y + threadIdx.y; i < dim_N.rows; i +=
grid_stride_y) {
tsum += M[i * stride_M + j] * N[i * dim_N.stride + j];
}
ssum[tid] = tsum;
__syncthreads();
// Tree reduce to 2x warpSize / TileDim elements per column.
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize && shift >= TileDim;
shift >>= 1) {
if (tid < shift) {
ssum[tid] += ssum[tid + shift];
}
__syncthreads();
}
// Warp reduce to 1 element per column.
// Threads implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift >= TileDim; shift >>= 1) {
ssum[tid] += ssum[tid + shift];
}
}
// output TileDim sums per thread block
if (tid < TileDim) {
if (beta != Real(0)) {
v[blockIdx.y * stride_v + j] = alpha * ssum[tid]
+ beta * v[blockIdx.y * stride_v + j];
} else {
v[blockIdx.y * stride_v + j] = alpha * ssum[tid];
}
}
}
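// Note on _add_diag_mat_mat_MTN: when gridDim.y > 1, each y-block writes its
// partial column sums to a separate row of 'v' (offset blockIdx.y * stride_v),
// so 'v' then acts as a (gridDim.y x dim_N.cols) workspace that the caller is
// assumed to sum down to a single row afterwards; with gridDim.y == 1 the
// final result lands directly in v.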
// v = alpha * diag(M * N) + beta * v
template<int TileDim, typename Real>
__global__
static void _add_diag_mat_mat_MN(const Real alpha, const Real* M,
const int stride_M, const Real* N,
const MatrixDim dim_N, const Real beta,
Real* v) {
// Reuse shared mem and make indexing easier. "+1" to avoid bank conflict
__shared__ union {
Real trans[TileDim][TileDim + 1];
Real sum[CU1DBLOCK];
} smem;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int i_m = blockIdx.x * TileDim + threadIdx.y;
const int j_n = blockIdx.x * TileDim + threadIdx.x;
int i_n = threadIdx.y;
int j_m = threadIdx.x;
// Loop along the matrix column.
// Reduce to CU1DBLOCK / TileDim elements per column.
Real tsum = Real(0);
for (int block_i_n = 0; block_i_n < dim_N.rows; block_i_n += TileDim) {
// Load, transpose and store M to shared mem.
if (j_m < dim_N.rows) {
# pragma unroll
for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) {
if (i_m + i < dim_N.cols) {
smem.trans[threadIdx.x][threadIdx.y + i] = M[(i_m + i) * stride_M
+ j_m];
}
}
}
__syncthreads();
// Load N, sum up the product.
if (j_n < dim_N.cols) {
# pragma unroll
for (int i = 0; i < TileDim; i += CU1DBLOCK / TileDim) {
if (i_n + i < dim_N.rows) {
tsum += N[(i_n + i) * dim_N.stride + j_n]
* smem.trans[threadIdx.y + i][threadIdx.x];
}
}
}
__syncthreads();
i_n += TileDim;
j_m += TileDim;
}
smem.sum[tid] = tsum;
__syncthreads();
// Tree reduce to 2x warpSize / TileDim elements per column.
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize && shift >= TileDim;
shift >>= 1) {
if (tid < shift) {
smem.sum[tid] += smem.sum[tid + shift];
}
__syncthreads();
}
// Warp reduce to 1 element per column.
// Threads implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift >= TileDim; shift >>= 1) {
smem.sum[tid] += smem.sum[tid + shift];
}
}
// output TileDim sums per thread block
if (tid < TileDim && j_n < dim_N.cols) {
v[j_n] = alpha * smem.sum[tid] + beta * v[j_n];
}
}
template<typename Real>
__global__
static void _add_vec_vec(Real alpha, Real* v, const Real* x, const Real* y,
Real beta, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
// if (blockIdx.y > 0) return;
if (i < dim)
v[i] = alpha * x[i] * y[i] + beta * v[i];
}
template<typename Real>
__global__
static void _copy_col_from_mat_df(double* v, int col, const Real* mat,
MatrixDim dmat, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = col + i * dmat.stride;
// if (blockIdx.y > 0) return;
if (i < dim)
v[i] = (double) mat[index];
}
template<typename Real>
__global__
static void _copy_col_from_mat_fd(float* v, int col, const Real* mat,
MatrixDim dmat, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = col + i * dmat.stride;
// if (blockIdx.y > 0) return;
if (i < dim)
v[i] = (float) mat[index];
}
template<typename Real>
__global__
static void _vec_apply_exp(Real* v, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
// if (blockIdx.y > 0) return;
if (i < dim) {
v[i] = exp(v[i]);
}
}
template<typename Real>
__global__
static void _vec_apply_log(Real* v, Real* flag, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
// if (blockIdx.y > 0) return;
if (i < dim) {
if (v[i] < 0) {
*flag = 1;
return;
}
v[i] = log(v[i]);
}
}
template<typename Real>
__global__
static void _cuda_comp_obj_deriv(MatrixElement<Real> *x, int s, const Real* z,
MatrixDim d, Real* z2, MatrixDim d2, Real* t) {
int i = threadIdx.x;
__shared__ Real tot_objf[CU1DBLOCK];
__shared__ Real tot_weight[CU1DBLOCK];
Real tmp_weight_sum = 0;
Real tmp_tot_objf = 0;
  int size = s / CU1DBLOCK; // minimum number of elements each thread processes
  int threshold = s - size * CU1DBLOCK; // threads with index < threshold process one extra element
int loop_start;
int loop_end;
if (i < threshold) {
loop_start = i * (size + 1);
loop_end = (i + 1) * (size + 1);
} else {
loop_start = threshold + i * size;
loop_end = threshold + (i + 1) * size;
}
for (int j = loop_start; j < loop_end; j++) {
//* ((int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )) );
int m = (x + j)->row;
//*(int*) ((size_t)x + j * (2 * sizeof(int) + sizeof(Real) )+ sizeof(int));
int label = (x + j)->column;
// *(Real*) ((size_t)x + j*(2*sizeof(int) + sizeof(Real)) + 2*sizeof(int));
Real weight = (x + j)->weight;
tmp_weight_sum += weight;
Real this_prob = *(z + m * d.stride + label);
tmp_tot_objf += weight * log(this_prob);
    // Note: this unsynchronized "+=" on global memory can race if the same
    // (row, label) pair occurs more than once among the elements of x.
*(z2 + m * d2.stride + label) += weight / this_prob;
}
tot_objf[i] = tmp_tot_objf;
tot_weight[i] = tmp_weight_sum;
__syncthreads();
*t = _sum_reduce(tot_objf);
__syncthreads();
*(t + 1) = _sum_reduce(tot_weight);
return;
}
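// On exit, t[0] holds the summed objective (sum of weight * log(prob)) and
// t[1] the summed weight; the kernel is assumed to be launched with a single
// block of CU1DBLOCK threads, since the work split above divides the s
// elements over exactly CU1DBLOCK thread indices.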
template<typename Real>
__global__
static void _cuda_vector_copy_elements(Real *data, int dim,
const Real *src_mat, int mat_stride,
bool transpose,
const MatrixIndexT_cuda* elements) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= dim)
return;
int j = elements[i];
int mat_index;
if (transpose)
mat_index = i + j * mat_stride;
else
mat_index = j + i * mat_stride;
data[i] = src_mat[mat_index];
}
template<typename Real>
__global__
static void _cuda_matrix_add_elements(Real *data, MatrixDim dim, Real alpha,
MatrixElement<Real>* x,
int num_elements) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= num_elements)
return;
data[x[i].row * dim.stride + x[i].column] += alpha * x[i].weight;
}
template<typename Real>
__global__
static void _cuda_matrix_add_indexed_values(MatrixDim dim, Real alpha,
const Int32Pair* indices,
const Real* x, int s, Real* data) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i >= s)
return;
int data_i = indices[i].first * dim.stride + indices[i].second;
data[data_i] += alpha * x[i];
}
template<typename Real>
__global__
static void _cuda_matrix_add_to_elements(Real alpha,
Real* mat, MatrixDim dim,
const MatrixIndexT_cuda* elements) {
int row = blockIdx.x * blockDim.x + threadIdx.x;
if (row < dim.rows) {
int col = elements[row];
if (col >= 0) {
int index = col + row * dim.stride;
mat[index] += alpha;
}
}
}
template<typename Real>
__global__
static void _matrix_lookup(const Real *data, MatrixDim dim,
const Int32Pair *indices, int indices_size,
Real *output) {
int ind = blockIdx.x * blockDim.x + threadIdx.x;
if (ind >= indices_size)
return;
int data_ind = indices[ind].first * dim.stride + indices[ind].second;
output[ind] = data[data_ind];
}
template<typename Real>
__global__
static void _equal_element_mask(const Real *mat1, const Real *mat2, Real *mask,
MatrixDim mat1_dim, int mat2_stride,
int mask_stride) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // col
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // row
int32_cuda index_mat1 = i + j * mat1_dim.stride;
int32_cuda index_mat2 = i + j * mat2_stride;
int32_cuda index_mask = i + j * mask_stride;
if (i < mat1_dim.cols && j < mat1_dim.rows)
mask[index_mask] = (mat1[index_mat1] == mat2[index_mat2] ? 1.0 : 0.0);
}
enum EnumTransformReduce {
SUMAB, SUM, MAX, MIN, LINFNORM, L2NORM, L1NORM, L0NORM, LPNORM
};
template<EnumTransformReduce TransReduceType, typename Real>
struct TransReduceOp {
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return Real(0);
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return Real(0);
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return Real(0);
}
};
template<typename Real>
struct TransReduceOp<SUMAB, Real> {
const Real alpha_;
const Real beta_;
TransReduceOp(const Real& a, const Real& b) :
alpha_(a), beta_(b) {
}
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return x;
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return a + b;
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
if (beta_ == Real(0)) {
return alpha_ * x;
} else {
return alpha_ * x + beta_ * output;
}
}
};
template<typename Real>
struct TransReduceOp<SUM, Real> {
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return x;
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return a + b;
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return x;
}
};
template<typename Real>
struct TransReduceOp<MAX, Real> {
__forceinline__
__device__ Real InitValue() const {
return sizeof(Real) == sizeof(float) ? -CUDART_INF_F : -CUDART_INF;
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return x;
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return fmax(a, b);
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return x;
}
};
template<typename Real>
struct TransReduceOp<MIN, Real> {
__forceinline__
__device__ Real InitValue() const {
return sizeof(Real) == sizeof(float) ? CUDART_INF_F : CUDART_INF;
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return x;
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return min(a, b);
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return x;
}
};
template<typename Real>
struct TransReduceOp<LINFNORM, Real> {
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return abs(x);
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return fmax(a, b);
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return x;
}
};
template<typename Real>
struct TransReduceOp<L2NORM, Real> {
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return x * x;
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return a + b;
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return sqrt(x);
}
};
template<typename Real>
struct TransReduceOp<L1NORM, Real> {
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return abs(x);
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return a + b;
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return x;
}
};
template<typename Real>
struct TransReduceOp<L0NORM, Real> {
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return Real(x == Real(0) ? 0 : 1);
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return a + b;
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return x;
}
};
template<typename Real>
struct TransReduceOp<LPNORM, Real> {
const Real power_;
TransReduceOp(const Real& p) :
power_(p) {
}
__forceinline__
__device__ Real InitValue() const {
return Real(0);
}
__forceinline__
__device__ Real Transform(const Real& x) const {
return pow(abs(x), power_);
}
__forceinline__
__device__ Real Reduce(const Real& a, const Real& b) const {
return a + b;
}
__forceinline__
__device__ Real PostReduce(const Real& x, const Real& output) const {
return pow(x, Real(1) / power_);
}
};
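// Hedged usage sketch for the TransReduceOp functors (illustrative only; the
// single-block launch is an assumption): e.g. the L2 norm of a vector can be
// obtained by pairing the L2NORM op with the generic reduction kernel below:
//   _vec_transform_reduce<<<1, CU1DBLOCK>>>(v, result, dim, 1,
//                                           TransReduceOp<L2NORM, Real>());
// Transform() squares each element, Reduce() adds up the partial sums, and
// PostReduce() takes the final square root.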
// Vector reduce.
template<EnumTransformReduce TransReduceType, typename Real>
__global__
static void _vec_transform_reduce(
const Real* v, Real* result, const int dim, const int inc,
const TransReduceOp<TransReduceType, Real> op) {
__shared__ Real sdata[CU1DBLOCK];
Real tdata = op.InitValue();
const int tid = threadIdx.x;
const int vec_len = dim * inc;
const int grid_stride = gridDim.x * blockDim.x * inc;
int i = (blockIdx.x * blockDim.x + tid) * inc;
// Grid reduce. Loop over the whole vector v.
for (; i < vec_len; i += grid_stride) {
tdata = op.Reduce(tdata, op.Transform(v[i]));
}
sdata[tid] = tdata;
__syncthreads();
// Tree reduce
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]);
}
__syncthreads();
}
// Reduce last warp. Threads implicitly synchronized within a warp.
if (tid < warpSize) {
for (int shift = warpSize; shift > 0; shift >>= 1) {
sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]);
}
}
// Output to vector result.
if (tid == 0)
result[blockIdx.x] = op.PostReduce(sdata[0], result[blockIdx.x]);
}
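// Note on _vec_transform_reduce: with gridDim.x > 1 each thread block writes
// op.PostReduce of its own partial result to result[blockIdx.x]; combining
// those per-block values into a single scalar is assumed to be the caller's
// job (for ops with a non-trivial PostReduce this generally means launching
// a single block, or keeping the final PostReduce for a separate pass).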
// Reduce a matrix 'mat' to a column vector 'result'
template<EnumTransformReduce TransReduceType, typename Real>
__global__
static void _transform_reduce_mat_cols(
Real *result, const Real *mat, const MatrixDim d,
const TransReduceOp<TransReduceType, Real> op) {
__shared__ Real sdata[CU1DBLOCK];
const int tid = threadIdx.x;
const int i = blockIdx.x;
const int row_start = i * d.stride;
Real tdata = op.InitValue();
for (int j = tid; j < d.cols; j += CU1DBLOCK) {
tdata = op.Reduce(tdata, op.Transform(mat[row_start + j]));
}
sdata[tid] = tdata;
__syncthreads();
// Tree reduce
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift)
sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]);
__syncthreads();
}
// Reduce last warp. Threads implicitly synchronized within a warp.
if (tid < warpSize) {
for (int shift = warpSize; shift > 0; shift >>= 1)
sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]);
}
// Output to vector result.
if (tid == 0) {
result[i] = op.PostReduce(sdata[0], result[i]);
}
}
template<EnumTransformReduce TransReduceType, typename Real>
__global__
static void _group_transform_reduce(
Real *y, const Real *x, const MatrixDim d, const int src_stride,
const int group_size, const TransReduceOp<TransReduceType, Real> op) {
__shared__ Real sreduction[CU1DBLOCK];
const int i = blockIdx.x;
const int x_start = i * src_stride;
const int y_start = i * d.stride;
const int threads_per_group = blockDim.x;
// Reduce n groups per thread block
const int n = blockDim.y;
const int len = group_size * n;
// linear thread id
const int tid = threadIdx.y * threads_per_group + threadIdx.x;
int j = threadIdx.y * group_size + threadIdx.x; // col-id of *x
int group_id = threadIdx.y; // col-id of *y
int group_end = x_start + (group_id + 1) * group_size;
while (group_id < d.cols) {
// reduce to threads_per_group elements per group
int x_idx = x_start + j;
Real treduction = op.Transform(x[x_idx]);
x_idx += threads_per_group;
while (x_idx < group_end) {
treduction = op.Reduce(treduction, op.Transform(x[x_idx]));
x_idx += threads_per_group;
}
sreduction[tid] = treduction;
if (threads_per_group > warpSize) {
__syncthreads();
}
// tree-reduce to 2x warpSize elements per group
# pragma unroll
for (int shift = threads_per_group / 2; shift > warpSize; shift >>= 1) {
if (threadIdx.x < shift) {
sreduction[tid] = op.Reduce(sreduction[tid], sreduction[tid + shift]);
}
__syncthreads();
}
// Warp-reduce to 1 element per group.
// Threads implicitly synchronized within the warp.
const int warp_reduce_size =
threads_per_group / 2 < warpSize ? threads_per_group / 2 : warpSize;
if (threadIdx.x < warp_reduce_size) {
# pragma unroll
for (int shift = warp_reduce_size; shift > 0; shift >>= 1) {
sreduction[tid] = op.Reduce(sreduction[tid], sreduction[tid + shift]);
}
}
// Store the result.
if (threadIdx.x == 0) {
y[y_start + group_id] = op.PostReduce(sreduction[tid],
y[y_start + group_id]);
}
j += len;
group_end += len;
group_id += n;
}
}
template<typename Real>
__global__
static void _vec_apply_floor(Real *v, Real floor_val, float *count, int dim) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dim) {
if (v[i] < floor_val) {
v[i] = floor_val;
count[i] = 1;
} else {
count[i] = 0;
}
}
}
template<typename Real>
__global__
static void _vec_apply_ceiling(Real *v, Real ceiling_val, float *count,
int dim) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dim) {
if (v[i] > ceiling_val) {
v[i] = ceiling_val;
count[i] = 1;
} else {
count[i] = 0;
}
}
}
template<typename Real>
__global__
static void _apply_pow(Real* mat, Real power, MatrixDim d) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
int index = i + j * d.stride;
if (i < d.cols && j < d.rows) {
if (power == 1.0)
return;
if (power == 2.0) {
mat[index] = mat[index] * mat[index];
} else if (power == 0.5) {
if (!(mat[index] >= 0.0))
return;
mat[index] = sqrt(mat[index]);
} else {
mat[index] = pow(mat[index], power);
}
}
}
template<typename Real>
__global__
static void _apply_pow_abs(Real* mat, Real power, bool include_sign,
MatrixDim d) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
int index = i + j * d.stride;
if (i < d.cols && j < d.rows) {
if (include_sign == true && mat[index] < 0) {
      if (power == 1.0)
        mat[index] = -std::abs(mat[index]);
      else if (power == 2.0) {
mat[index] = -mat[index] * mat[index];
} else if (power == 0.5) {
mat[index] = -sqrt(std::abs(mat[index]));
} else {
mat[index] = -pow(std::abs(mat[index]), power);
}
} else {
      if (power == 1.0)
        mat[index] = std::abs(mat[index]);
      else if (power == 2.0) {
mat[index] = mat[index] * mat[index];
} else if (power == 0.5) {
mat[index] = sqrt(std::abs(mat[index]));
} else if (power < 0.0 && mat[index] == 0.0) {
mat[index] = 0.0;
} else {
mat[index] = pow(std::abs(mat[index]), power);
}
}
}
}
template<typename Real>
__global__
static void _apply_heaviside(Real* mat, MatrixDim d) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
int index = i + j * d.stride;
if (i < d.cols && j < d.rows)
mat[index] = (mat[index] > 0.0 ? 1.0 : 0.0);
}
template<typename Real>
__global__
static void _apply_floor(Real* mat, Real floor_val, MatrixDim d) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
int index = i + j * d.stride;
if (i < d.cols && j < d.rows) {
mat[index] = max(mat[index], floor_val);
}
}
template<typename Real>
__global__
static void _copy_cols(Real* dst, const Real *src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dst_dim.cols && j < dst_dim.rows) {
int index = reorder[i], dst_index = j * dst_dim.stride + i;
if (index >= 0) {
int src_index = j * src_stride + reorder[i];
Real val = src[src_index];
dst[dst_index] = val;
} else {
dst[dst_index] = 0.0;
}
}
}
template<typename Real>
__global__
static void _add_cols(Real* dst, const Real *src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dst_dim.cols && j < dst_dim.rows) {
int index = reorder[i], dst_index = j * dst_dim.stride + i;
if (index >= 0) {
int src_index = j * src_stride + index;
Real val = src[src_index];
dst[dst_index] += val;
}
}
}
template<typename Real>
__global__
static void _copy_rows(Real* dst, const Real *src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dst_dim.cols && j < dst_dim.rows) {
int index = reorder[j], dst_index = j * dst_dim.stride + i;
if (index >= 0) {
int src_index = reorder[j] * src_stride + i;
Real val = src[src_index];
dst[dst_index] = val;
} else {
dst[dst_index] = 0;
}
}
}
template<typename Real>
__global__
static void _copy_rows(Real* dst, const Real * const *src, MatrixDim dst_dim) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dst_dim.cols && j < dst_dim.rows) {
int dst_index = j * dst_dim.stride + i;
const Real *pointer = src[j];
if (pointer != NULL) {
dst[dst_index] = pointer[i];
} else {
dst[dst_index] = 0;
}
}
}
template<typename Real>
__global__
static void _copy_to_rows(Real* const * dst, const Real *src,
MatrixDim src_dim) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < src_dim.cols && j < src_dim.rows) {
Real *pointer = dst[j];
if (pointer != NULL) {
pointer[i] = src[j * src_dim.stride + i];
}
}
}
template<typename Real>
__global__
static void _add_rows(Real alpha, Real* dst, const Real *src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dst_dim.cols && j < dst_dim.rows) {
int dst_index = j * dst_dim.stride + i;
if (reorder[j] >= 0) {
int src_index = reorder[j] * src_stride + i;
dst[dst_index] += alpha * src[src_index];
}
}
}
template<typename Real>
__global__
static void _mul_rows(Real* dst, const Real *src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dst_dim.cols && j < dst_dim.rows) {
int dst_index = j * dst_dim.stride + i;
if (reorder[j] >= 0) {
int src_index = reorder[j] * src_stride + i;
dst[dst_index] *= src[src_index];
}
}
}
template<typename Real>
__global__
static void _add_rows(Real alpha, Real* dst, const Real * const *src,
MatrixDim dst_dim) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dst_dim.cols && j < dst_dim.rows) {
int dst_index = j * dst_dim.stride + i;
if (src[j] != NULL) {
dst[dst_index] += alpha * src[j][i];
}
}
}
template<typename Real>
__global__
static void _add_to_rows(Real alpha, Real* dst, const Real *src,
const MatrixIndexT_cuda* reorder, MatrixDim src_dim,
int dst_stride) {
int c = blockIdx.x * blockDim.x + threadIdx.x; // col index
int r = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (c < src_dim.cols && r < src_dim.rows) {
int src_index = r * src_dim.stride + c;
if (reorder[r] >= 0) {
int dst_index = reorder[r] * dst_stride + c;
dst[dst_index] += alpha * src[src_index];
}
}
}
template<typename Real>
__global__
static void _add_to_rows(Real alpha, Real* const * dst, const Real *src,
MatrixDim src_dim) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // col index
int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < src_dim.cols && j < src_dim.rows) {
if (dst[j] != NULL) {
dst[j][i] += alpha * src[j * src_dim.stride + i];
}
}
}
template<typename Real>
__global__
static void _apply_ceiling(Real* mat, Real ceiling_val, MatrixDim d) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int index = i + j * d.stride;
if (i < d.cols && j < d.rows) {
mat[index] = min(mat[index], ceiling_val);
}
}
template<typename Real>
__global__
static void _invert_elements(Real* data, MatrixDim d) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int index = i + j * d.stride;
if (i < d.cols && j < d.rows)
data[index] = 1.0 / data[index];
}
// matrix-wise, do data = alpha * A * B^T + beta * data,
// where B is a block matrix.
template<typename Real>
__global__
static void _add_mat_blockmat_trans(Real *data, MatrixDim dim,
const Real *A_data, int A_num_rows,
int A_num_cols, int A_row_stride,
int A_col_stride,
const CuBlockMatrixData *B_cu_data,
int B_num_blocks, Real alpha, Real beta) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data"
int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B.
if (i >= A_num_rows || j >= B_num_blocks)
return;
const CuBlockMatrixData &cu_data = B_cu_data[j];
// BT means B transposed.
int BT_row_start = cu_data.col_offset, BT_col_start = cu_data.row_offset,
BT_num_rows = cu_data.matrix_dim.cols, BT_num_cols =
cu_data.matrix_dim.rows, BT_col_stride = cu_data.matrix_dim.stride;
// Cast from void;
const Real *B_data = static_cast<Real*>(cu_data.matrix_data);
// we avoided a bunch of hassle by doing this (relates to Ansi-C requirement).
for (int k = 0; k < BT_num_cols; k++) {
const Real *this_BT_col = B_data + k * BT_col_stride;
const Real *this_A_row = A_data + i * A_row_stride
+ BT_row_start * A_col_stride;
// this_A_row points to the element A[i][BT_row_start], it's really just
// part of this row of A.
Real sum = 0.0;
for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B.
sum += this_BT_col[l] * this_A_row[l * A_col_stride];
int index = i * dim.stride + (k + BT_col_start);
data[index] = alpha * sum + beta * data[index];
}
}
template<typename Real>
__global__
static void _add_mat_blockmat(Real *data, MatrixDim dim, const Real *A_data,
int A_num_rows, int A_num_cols, int A_row_stride,
int A_col_stride,
const CuBlockMatrixData *B_cu_data,
int B_num_blocks, Real alpha, Real beta) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data"
int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B.
if (i >= A_num_rows || j >= B_num_blocks)
return;
const CuBlockMatrixData &block_data = B_cu_data[j];
int B_row_start = block_data.row_offset, B_col_start = block_data.col_offset,
B_num_rows = block_data.matrix_dim.rows, B_num_cols =
block_data.matrix_dim.cols, B_row_stride =
block_data.matrix_dim.stride;
// Cast from void;
const Real *B_data = static_cast<Real*>(block_data.matrix_data);
// we avoided a bunch of hassle by doing this (relates to Ansi-C requirement).
for (int k = 0; k < B_num_cols; k++) {
const Real *this_B_col = B_data + k;
const Real *this_A_row = A_data + i * A_row_stride
+ B_row_start * A_col_stride;
// this_A_row points to the element A[i][B_row_start], it's really just
// part of this row of A.
Real sum = 0.0;
for (int l = 0; l < B_num_rows; l++) // l indexes rows of B.
sum += this_B_col[l * B_row_stride] * this_A_row[l * A_col_stride];
int index = i * dim.stride + (k + B_col_start);
data[index] = alpha * sum + beta * data[index];
}
}
// For a block matrix B, does B = alpha * C * D + beta * B.
// the (x,y,z) indices are the block index, then the row
// and column indices within the block. Note: transposition of C and D
// is handled by swapping the (num_rows,num_cols) and (row_stride,col_stride),
// so it's invisible to this code. The num-cols and num-rows of C and D
// are only provided to the extent that they are not already determined
// by other quantities.
template<typename Real>
__global__
static void _block_add_mat_mat(CuBlockMatrixData *B_cu_data, int num_blocks,
const Real *C_data, int C_num_cols,
int C_row_stride, int C_col_stride,
const Real *D_data, int D_row_stride,
int D_col_stride, Real alpha, Real beta) {
int b = blockIdx.x * blockDim.x + threadIdx.x; // block-index into B.
int i = blockIdx.y * blockDim.y + threadIdx.y; // row-index into b'th block
int j = blockIdx.z * blockDim.z + threadIdx.z; // col-index into b'th block
if (b >= num_blocks)
return;
const CuBlockMatrixData &block_data = B_cu_data[b];
if (i >= block_data.matrix_dim.rows || j >= block_data.matrix_dim.cols)
return; // we're outside the dimensions of the b'th block.
// B_elem is the element of B we're writing to.
Real *B_elem = reinterpret_cast<Real*>(block_data.matrix_data)
+ i * block_data.matrix_dim.stride + j;
Real B_val = *B_elem;
// B_row and B_col are the (row, col) index into the full matrix B.
int B_row = block_data.row_offset + i, B_col = block_data.col_offset + j;
const Real *C_row_data = C_data + C_row_stride * B_row, *D_col_data = D_data
+ D_col_stride * B_col;
Real sum = 0.0;
for (int k = 0; k < C_num_cols; k++) {
sum += C_row_data[k * C_col_stride] * D_col_data[k * D_row_stride];
}
*B_elem = alpha * sum + beta * B_val;
}
template<typename Real>
__global__
static void _blockadd_mat_blockmat_trans(Real *data, MatrixDim dim,
const Real *A_data, int A_num_rows,
int A_num_cols, int A_row_stride,
int A_col_stride,
const CuBlockMatrixData *B_cu_data,
int B_num_blocks, Real alpha,
Real beta) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index into "data"
int j = blockIdx.y * blockDim.y + threadIdx.y; // block-index into B.
if (i >= A_num_rows || j >= B_num_blocks)
return;
const CuBlockMatrixData &cu_data = B_cu_data[j];
// BT means B transposed.
int BT_row_start = cu_data.col_offset, BT_col_start = cu_data.row_offset,
BT_num_rows = cu_data.matrix_dim.cols, BT_num_cols =
cu_data.matrix_dim.rows, BT_col_stride = cu_data.matrix_dim.stride;
// Cast from void;
const Real *B_data = static_cast<Real*>(cu_data.matrix_data);
// we avoided a bunch of hassle by doing this (relates to Ansi-C requirement).
for (int k = 0; k < BT_num_cols; k++) {
const Real *this_BT_col = B_data + k * BT_col_stride;
const Real *this_A_row = A_data + i * A_row_stride
+ BT_row_start * A_col_stride;
// this_A_row points to the element A[i][BT_row_start], it's really just
// part of this row of A.
Real sum = 0.0;
for (int l = 0; l < BT_num_rows; l++) // l indexes rows of B.
sum += this_BT_col[l] * this_A_row[l * A_col_stride];
int index = i * dim.stride + (k + BT_col_start);
data[index] = alpha * sum + beta * data[index];
}
}
template<typename Real>
__global__
static void _sum_column_ranges(Real *data, MatrixDim dim, const Real *src_data,
MatrixDim src_dim, const Int32Pair *indices) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (row >= dim.rows || col >= dim.cols)
return;
int dst_index = row * dim.stride + col, src_start_index = row * src_dim.stride
+ indices[col].first, src_end_index = row * src_dim.stride
+ indices[col].second;
Real sum = 0.0;
for (int index = src_start_index; index < src_end_index; index++)
sum += src_data[index];
data[dst_index] = sum;
}
template<typename Real>
__global__
static void _add_row_ranges(Real *data, MatrixDim dim, const Real *src_data,
MatrixDim src_dim, const Int32Pair *indexes) {
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (row >= dim.rows || col >= dim.cols)
return;
int dst_index = row * dim.stride + col;
int src_index_start = indexes[row].first, src_index_end = indexes[row].second;
for (int row_index = src_index_start; row_index < src_index_end; row_index++)
data[dst_index] += src_data[row_index * src_dim.stride + col];
}
template<typename Real>
__global__
static void _soft_hinge(Real*y, const Real*x, MatrixDim d, int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride, src_index = i + j * src_stride;
// compute the function y[index] = log(1 + exp(x[index]))
if (i < d.cols && j < d.rows) {
Real val = x[src_index], result;
if (val >= 10.0)
result = val; // function approaches y=x as x gets large
else
result = log1p(exp(val));
y[dst_index] = result;
}
}
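/*
  Illustrative sketch (not part of the library): the cutoff at 10.0 above is a
  numerical shortcut.  For large x, log(1 + exp(x)) = x + log1p(exp(-x)) ~= x + exp(-x),
  so at x = 10 the two differ by only about 4.5e-5 and the gap shrinks
  exponentially beyond that, while computing exp(x) directly would eventually
  overflow.  A hypothetical host-side reference of the same scalar map:

    float SoftHingeRef(float x) {
      // returning x for x >= 10 is accurate to ~4.5e-5 and avoids exp() overflow
      return x >= 10.0f ? x : log1pf(expf(x));
    }
*/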
template<typename Real>
__global__
static void _group_pnorm(Real *y, const Real *x, MatrixDim d, int src_stride,
int group_size, Real power) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
if (j < d.rows && i < d.cols) {
int dst_index = i + j * d.stride;
Real tmp = 0;
int src_begin_index = i * group_size + j * src_stride;
int src_end_index = src_begin_index + group_size;
for (int src_index = src_begin_index; src_index < src_end_index;
src_index++) {
tmp += pow(std::abs(x[src_index]), power);
}
tmp = pow(tmp, Real(1.0 / power));
if (!isnan(tmp)) {
y[dst_index] = tmp;
} else {
Real max_value = x[src_begin_index], min_value = max_value;
for (int src_index = src_begin_index + 1; src_index < src_end_index;
src_index++) {
if (x[src_index] > max_value)
max_value = x[src_index];
if (x[src_index] < min_value)
min_value = x[src_index];
}
tmp = 0.0;
      // let max_abs_value be the largest absolute value in the group.
Real max_abs_value = (max_value > -min_value ? max_value : -min_value);
if (max_abs_value == 0) {
y[dst_index] = 0.0;
} else {
for (int src_index = src_begin_index; src_index < src_end_index;
src_index++) {
Real x_scaled = x[src_index] / max_abs_value;
tmp += pow(std::abs(x_scaled), Real(power));
}
y[dst_index] = pow(tmp, Real(1.0 / power)) * max_abs_value;
}
}
}
}
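/*
  Illustrative sketch (not part of the library): the fallback branch above uses
  the identity  ||x||_p = m * ||x / m||_p  with m = max_i |x_i|, which keeps the
  intermediate pow() values near 1 (the kernel falls back to it whenever the
  direct computation produces NaN).  For example, with power = 2 and a group
  x = {3e20, 4e20} in single precision, summing x_i^2 directly overflows
  (3e20^2 = 9e40 > FLT_MAX), while the rescaled form gives
      tmp = 0.75^2 + 1.0^2 = 1.5625,   result = sqrt(1.5625) * 4e20 = 5e20,
  which is the correct 2-norm.
*/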
/*
* cu::
*/
template<typename Real>
__global__
static void _sigmoid(Real*y, const Real*x, MatrixDim d, int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride, src_index = i + j * src_stride;
if (i < d.cols && j < d.rows) {
Real res = 1.0 / (1.0 + exp(-x[src_index]));
y[dst_index] = res;
}
}
template<typename Real>
__global__
static void _diff_sigmoid(Real*eout, const Real*e, const Real*y, MatrixDim d,
int e_stride, int y_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride;
int e_index = i + j * e_stride;
int y_index = i + j * y_stride;
if (i < d.cols && j < d.rows)
eout[dst_index] = y[y_index] * (1.0 - y[y_index]) * e[e_index];
}
template<typename Real>
__global__
static void _tanh(Real*y, const Real*x, MatrixDim d, int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride, src_index = i + j * src_stride;
if (i < d.cols && j < d.rows) {
Real exp_2x = exp(2.0 * x[src_index]);
Real res;
if (isinf(exp_2x)) {
res = 1.0;
} else {
res = (exp_2x - 1.0) / (exp_2x + 1.0);
}
y[dst_index] = res;
}
}
template<typename Real>
__global__
static void _diff_tanh(Real*eout, const Real*e, const Real*y, MatrixDim d,
int e_stride, int y_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride;
int e_index = i + j * e_stride;
int y_index = i + j * y_stride;
if (i < d.cols && j < d.rows)
eout[dst_index] = (1.0 - y[y_index] * y[y_index]) * e[e_index];
}
/*
This function copies x to y while bounding the elements
away from zero using the scalar function:
y = x if x <= -epsilon or x >= +epsilon
+epsilon if 0 <= x < epsilon
-epsilon if -epsilon < x < 0.
where:
x is the source matrix, of dimension and stride given by d
epsilon > 0
y is the destination matrix, with the num-rows and num-cols
given by d, but stride given by y_stride.
*/
template<typename Real>
__global__
static void _ensure_nonzero(const Real *x, MatrixDim d, Real epsilon,
int y_stride, Real *y) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int x_index = i + j * d.stride,
y_index = i + j * y_stride;
if (i < d.cols && j < d.rows) {
Real src = x[x_index], dst;
if (src <= -epsilon || src >= epsilon)
dst = src;
else if (src >= 0)
dst = epsilon;
else
dst = -epsilon;
    __syncthreads(); // This lets all threads reach the write below together, so
                     // the store is coalesced, which should improve speed.
y[y_index] = dst;
}
}
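/*
  Illustrative sketch (not part of the library): with epsilon = 1e-6 the map
  above behaves as follows on a few sample inputs,
      x = { -0.5, -1e-8, 0.0, 2e-9, 0.3 }
      y = { -0.5, -1e-6, 1e-6, 1e-6, 0.3 },
  i.e. values already at least epsilon in magnitude pass through unchanged,
  while values closer to zero are pushed out to +/-epsilon, keeping their sign
  (exact zero maps to +epsilon).
*/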
template<typename Real>
__global__
static void _parametric_relu(Real* y, const Real* x, MatrixDim d, int src_stride,
const Real* a, const Real* b) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride,
src_index = i + j * src_stride;
if (i < d.cols && j < d.rows) {
Real res = (x[src_index] > 0.0) ? a[i] * x[src_index] : b[i] * x[src_index];
y[dst_index] = res;
}
}
template<typename Real>
__global__
static void _diff_parametric_relu(Real* eout, const Real* e, const Real* y,
MatrixDim d, int e_stride, int y_stride,
const Real* a, const Real* b) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride;
int e_index = i + j * e_stride;
int y_index = i + j * y_stride;
if (i < d.cols && j < d.rows )
eout[dst_index] = (y[y_index] > 0.0 ? a[i] * e[e_index] : b[i] * e[e_index]);
}
template<typename Real>
__global__
static void _heaviside(Real*y, const Real*x, MatrixDim d, int src_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dst_index = i + j * d.stride, src_index = i + j * src_stride;
if (i < d.cols && j < d.rows) {
Real res = (x[src_index] > 0.0 ? 1.0 : 0.0);
y[dst_index] = res;
}
}
template<typename Real>
__global__
static void _softmax_reduce(Real*y, const Real*x, MatrixDim d, int src_stride) {
__shared__ Real smem[CU1DBLOCK];
const int i = blockIdx.x;
const int x_start = i * src_stride;
const int y_start = i * d.stride;
const int tid = threadIdx.x;
// find max element of the row
// reduce to CU1DBLOCK elements per row.
Real tmax = sizeof(Real) == sizeof(float) ? -CUDART_INF_F : -CUDART_INF;
for (int j = tid; j < d.cols; j += CU1DBLOCK) {
tmax = fmax(tmax, x[x_start + j]);
}
smem[tid] = tmax;
__syncthreads();
// reduce to 2x warpSize elements per row
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
smem[tid] = fmax(smem[tid], smem[tid + shift]);
}
__syncthreads();
}
// reduce to 1 element per row
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
smem[tid] = fmax(smem[tid], smem[tid + shift]);
}
}
// broadcast max to all threads
__syncthreads();
Real max = smem[0];
// sum_j(exp(x(i,j)-max))
// reduce to CU1DBLOCK elements per row.
Real tsum = Real(0);
for (int j = tid; j < d.cols; j += CU1DBLOCK) {
tsum += exp(x[x_start + j] - max);
}
smem[tid] = tsum;
__syncthreads();
// reduce to 2x warpSize elements per row
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
__syncthreads();
}
// reduce to 1 element per row
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
smem[tid] += smem[tid + shift];
}
}
// broadcast sum to all threads
__syncthreads();
Real inv_sum = Real(1) / smem[0];
// normalize the row
for (int j = tid; j < d.cols; j += CU1DBLOCK) {
y[y_start + j] = exp(x[x_start + j] - max) * inv_sum;
}
}
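/*
  Illustrative sketch (not part of the library): subtracting the row maximum
  before exponentiating is the standard trick for a numerically stable softmax.
  It does not change the result, because
      exp(x_j - max) / sum_k exp(x_k - max) = exp(x_j) / sum_k exp(x_k),
  but it guarantees every exponent is <= 0, so exp() cannot overflow.  E.g. for
  a row {1000, 1001} in single precision, exp(1000) overflows to inf, whereas
  the shifted form gives exp(-1)/(exp(-1)+exp(0)) ~= 0.269 and 0.731.
*/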
// The output is y_i = scale * x_i,
// and we want the RMS value of the y_i to equal target_rms,
// so y^t y = D * target_rms^2 (if y is one row of the input).
// We need scale = 1.0 / sqrt(x^t x / (D * target_rms^2)).
// There is also flooring involved, to avoid division-by-zero
// problems. It's important for the backprop that the floor's
// square root is exactly representable as a float.
// If add_log_stddev is true, log(max(epsilon, sqrt(x^t x / D)))
// is appended as an extra dimension of the output.
//
// A 1D grid is used; each 1D block of CU1DBLOCK (256) threads works on one row
// of the data matrix. Strided memory access is used if the row is longer than
// CU1DBLOCK.
template<typename Real>
__global__
static void _normalize_per_row(Real *y, int y_stride, const Real *x,
MatrixDim x_d, Real target_rms,
bool add_log_stddev) {
const int i = blockIdx.x;
const int tid = threadIdx.x;
const Real* x_row = x + i * x_d.stride;
__shared__ Real ssum[CU1DBLOCK];
// Reduce x_j^2 to CU1DBLOCK elements per row
Real tsum = Real(0);
for (int j = tid; j < x_d.cols; j += CU1DBLOCK) {
tsum += x_row[j] * x_row[j];
}
ssum[tid] = tsum;
__syncthreads();
// Tree reduce to 2x warpSize elements per row
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift)
ssum[tid] += ssum[tid + shift];
__syncthreads();
}
// Reduce last warp to 1 element per row.
// Threads implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
ssum[tid] += ssum[tid + shift];
}
}
const Real kSquaredNormFloor = 1.3552527156068805425e-20; // 2^-66
if (tid == 0) {
ssum[0] = sqrt(
fmax(ssum[0] / (target_rms * target_rms * x_d.cols), kSquaredNormFloor));
}
// Broadcast floored stddev to all threads.
__syncthreads();
const Real stddev_div_target_rms = ssum[0];
const Real scale = Real(1) / stddev_div_target_rms;
// Store normalized input to output
Real* y_row = y + i * y_stride;
for (int j = tid; j < x_d.cols; j += CU1DBLOCK) {
y_row[j] = x_row[j] * scale;
}
if (tid == 0 && add_log_stddev) {
y_row[x_d.cols] = log(stddev_div_target_rms * target_rms);
}
}
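/*
  Illustrative sketch (not part of the library): a worked example of the scale
  computed above.  Take one row x = {1, 2, 2} (so D = 3) and target_rms = 1.0:
      x^t x = 9,   stddev_div_target_rms = sqrt(9 / (1.0^2 * 3)) = sqrt(3),
      scale = 1 / sqrt(3) ~= 0.577,
  so y = {0.577, 1.155, 1.155}, whose RMS value sqrt((y^t y) / D) is exactly 1.0.
  If add_log_stddev were true, the extra output element would be
  log(sqrt(3) * 1.0) ~= 0.549.
*/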
template<typename Real>
__global__
static void _diff_normalize_per_row(Real *id, int id_stride, const Real *iv,
MatrixDim iv_dim, const Real* od,
int od_stride, Real target_rms,
bool add_log_stddev) {
const Real kSquaredNormFloor = 1.3552527156068805425e-20; // 2^-66
const Real kInvNormFloor = 8589934592.0;
const int tid = threadIdx.x;
const int i = blockIdx.x;
const Real* iv_row = iv + i * iv_dim.stride;
const Real* od_row = od + i * od_stride;
// reduce to CU1DBLOCK elements per row
Real dot_products = Real(0);
Real in_norm = Real(0);
for (int j = tid; j < iv_dim.cols; j += CU1DBLOCK) {
const Real iv_ij = iv_row[j];
dot_products += iv_ij * od_row[j];
in_norm += iv_ij * iv_ij;
}
__shared__ Real sprod[CU1DBLOCK];
__shared__ Real snorm[CU1DBLOCK];
sprod[tid] = dot_products;
snorm[tid] = in_norm;
__syncthreads();
// reduce to 2x warpSize elements per row
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
sprod[tid] += sprod[tid + shift];
snorm[tid] += snorm[tid + shift];
}
__syncthreads();
}
// reduce to 1 element per row
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
sprod[tid] += sprod[tid + shift];
snorm[tid] += snorm[tid + shift];
}
}
// broadcast the sum results
__syncthreads();
dot_products = sprod[0];
in_norm = snorm[0];
Real log_stddev_deriv;
if (add_log_stddev) {
log_stddev_deriv = Real(1) / max(in_norm, iv_dim.cols * kSquaredNormFloor)
* od_row[iv_dim.cols];
}
const Real inv_d_scaled = Real(1) / (iv_dim.cols * target_rms * target_rms);
in_norm = Real(1) / sqrt(max(in_norm * inv_d_scaled, kSquaredNormFloor));
const Real f = in_norm == kInvNormFloor ? Real(0) : in_norm;
dot_products *= f * f * f * inv_d_scaled;
for (int j = tid; j < iv_dim.cols; j += CU1DBLOCK) {
const Real iv_ij = iv_row[j];
Real id_ij = id[i * id_stride + j];
if (add_log_stddev) {
id_ij += log_stddev_deriv * iv_ij;
}
if (id != od) {
id_ij += in_norm * od_row[j];
} else {
id_ij *= in_norm;
}
id_ij -= dot_products * iv_ij;
id[i * id_stride + j] = id_ij;
}
}
// Per-row log-softmax operation on 'x', with writing to 'y'.
// note, x and y may point to the same memory. This is equivalent to setting
// matrix y to matrix x and then, for each row of y, subtracting the offset that
// will make exp(y.row[j]) sum to 1 for each row j.
//
// It expects to be called with CU1DBLOCK threads per block.
// The number of blocks [i.e. the gridDim] equals y_dim.rows,
// so one block of threads processes each row. x and y are
// expected to have the same dimension, but possibly different row strides.
template<typename Real>
__global__
static void _log_softmax_reduce(Real* y, const Real* x, MatrixDim y_dim,
int x_stride) {
__shared__ Real smem[CU1DBLOCK];
const int i = blockIdx.x;
const int x_start = i * x_stride;
const int y_start = i * y_dim.stride;
const int tid = threadIdx.x;
// find max element of the row
// reduce to CU1DBLOCK elements per row.
Real tmax = -1e20;
for (int j = tid; j < y_dim.cols; j += CU1DBLOCK) {
tmax = fmax(tmax, x[x_start + j]);
}
smem[tid] = tmax;
__syncthreads();
// reduce to 2x warpSize elements per row
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
smem[tid] = fmax(smem[tid], smem[tid + shift]);
}
__syncthreads();
}
// reduce to 1 element per row
if (tid < warpSize) {
for (int shift = warpSize; shift > 0; shift >>= 1) {
smem[tid] = fmax(smem[tid], smem[tid + shift]);
}
}
// broadcast max to all threads
__syncthreads();
Real max = smem[0];
// sum_j(exp(x(i,j)-max))
// reduce to CU1DBLOCK elements per row.
Real tsum = Real(0);
for (int j = tid; j < y_dim.cols; j += CU1DBLOCK) {
tsum += exp(x[x_start + j] - max);
}
smem[tid] = tsum;
__syncthreads();
// reduce to 2x warpSize elements per row
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
__syncthreads();
}
// reduce to 1 element per row
if (tid < warpSize) {
for (int shift = warpSize; shift > 0; shift >>= 1) {
smem[tid] += smem[tid + shift];
}
}
// broadcast sum to all threads
__syncthreads();
Real log_sum = log(smem[0]);
// normalize the row
for (int j = tid; j < y_dim.cols; j += CU1DBLOCK) {
y[y_start + j] = x[x_start + j] - max - log_sum;
}
}
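/*
  Illustrative sketch (not part of the library): for one row the kernel above
  computes y_j = x_j - max_k x_k - log(sum_k exp(x_k - max_k x_k)), which equals
  log(softmax(x)_j).  E.g. for the row {1, 2, 3}:
      max = 3,   sum = exp(-2) + exp(-1) + exp(0) ~= 1.5032,   log_sum ~= 0.4076,
      y ~= {-2.4076, -1.4076, -0.4076},
  and exp(y) sums to 1 as expected.
*/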
template<typename Real>
__global__
static void _splice(Real* y, const Real* x, const int32_cuda* off,
MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d_out.stride;
if (i < d_out.cols && j < d_out.rows) {
int32_cuda src_col = i % d_in.cols;
int32_cuda src_row = j + off[i / d_in.cols];
if (src_row < 0)
src_row = 0;
if (src_row >= d_in.rows)
src_row = d_in.rows - 1;
y[index] = x[src_col + src_row * d_in.stride];
}
}
template<typename Real>
__global__
static void _take_mean(const Real* x, Real* y, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index1 = i + j * d_in.stride;
int32_cuda index2 = j + i * d_in.stride;
if (i <= j && j < d_in.rows) {
int32_cuda index_sp = (j * (j + 1) / 2) + i;
y[index_sp] = 0.5 * (x[index1] + x[index2]);
}
}
template<typename Real>
__global__
static void _take_lower(const Real* x, Real* y, MatrixDim d_in) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // row-index
int j = blockIdx.y * blockDim.y + threadIdx.y; // col-index
if (j > i || i >= d_in.rows)
return;
int index = i * d_in.stride + j;
Real val = x[index];
int index_sp = (i * (i + 1) / 2) + j;
y[index_sp] = val;
}
template<typename Real>
__global__
static void _take_upper(const Real* x, Real* y, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x; // row-index
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y; // col-index
if (j < i || j >= d_in.rows)
return;
int32_cuda index = i * d_in.stride + j;
int32_cuda index_sp = (j * (j + 1) / 2) + i;
y[index_sp] = x[index];
}
template<typename Real>
__global__
static void _vec_copy_diag_from_packed(Real* y, const Real* x, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda index = ((i + 1) * (i + 2) / 2) - 1;
if (i < dim) {
y[i] = x[index];
}
}
template<typename Real>
__global__
static void _copy_from_sp(const Real* x, Real* y, MatrixDim dim) {
int i = blockIdx.x * blockDim.x + threadIdx.x; // column index
  int j = blockIdx.y * blockDim.y + threadIdx.y; // row index
if (i < dim.cols && j < dim.rows) {
int dst_index = i + j * dim.stride, src_index;
if (j <= i) { // no transpose
src_index = (i * (i + 1) / 2) + j;
} else { // transpose.
src_index = (j * (j + 1) / 2) + i;
}
y[dst_index] = x[src_index];
}
}
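/*
  Illustrative sketch (not part of the library): the kernels above use the
  packed (lower-triangular, row-major) layout, where element (r, c) with c <= r
  is stored at
      packed_index(r, c) = r * (r + 1) / 2 + c.
  For a 3x3 symmetric matrix the packed vector therefore holds
      { (0,0), (1,0), (1,1), (2,0), (2,1), (2,2) }
  at packed indices 0..5.  _copy_from_sp maps both (i,j) and (j,i) to the same
  packed element, which is what makes the expanded matrix symmetric, and the
  index ((i+1)*(i+2)/2 - 1) in _vec_copy_diag_from_packed is just
  packed_index(i, i).
*/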
template<typename Real>
__global__
static void _copy(Real* y, const Real* x, const int32_cuda* copy_from,
MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d_out.stride;
if (i < d_out.cols && j < d_out.rows) {
int32_cuda src_col = copy_from[i];
if (src_col >= 0 && src_col < d_in.cols) {
y[index] = x[src_col + j * d_in.stride];
} else {
      // src_col out of range: write +inf (1.0 / 0.0) so the bad index is obvious.
      y[index] = 1.0 / 0.0;
}
}
}
template<typename Real>
__global__
static void _one(Real* x, int dim) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < dim) {
x[i] = 1.0;
}
}
template<typename Real>
__global__
static void _randomize(Real* y, const Real* x, const int32_cuda* copy_from,
MatrixDim d_out, MatrixDim d_in) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d_out.stride;
if (i < d_out.cols && j < d_out.rows) {
int32_cuda src_row = copy_from[j];
y[index] = x[i + src_row * d_in.stride];
}
}
template<typename Real>
__global__
static void _regularize_l1(Real* wei, Real* grad, Real l1, Real lr, MatrixDim d,
int stride_grad) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
int32_cuda index = i + j * d.stride, grad_index = i + j * stride_grad;
if (i < d.cols && j < d.rows) {
if (wei[index] == 0.0)
return; //skip L1 if zero weight!
Real l1_signed = l1;
if (wei[index] < 0.0) //flip sign
l1_signed = -l1;
Real before = wei[index];
//simulate update
Real after = wei[index] - lr * grad[grad_index] - l1_signed;
if ((after > 0.0) ^ (before > 0.0)) { //sign changed?
wei[index] = 0.0;
grad[grad_index] = 0.0;
} else {
wei[index] -= l1_signed;
}
}
}
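/*
  Illustrative sketch (not part of the library): the sign-change test above is
  L1 regularization with clipping at zero, so the penalty cannot push a weight
  through zero.  E.g. with l1 = 0.01, lr = 0.1, weight w = 0.005 and gradient
  g = 0.0:
      simulated update: 0.005 - 0.1 * 0.0 - 0.01 = -0.005,
  the sign would flip, so the weight and its gradient are both set to 0.
  A weight w = 0.5 under the same settings is simply shrunk to 0.49.
*/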
template<typename Real>
__global__
static void _find_row_max_id(const Real* mat, Real* vec_val, int32_cuda* vec_id,
MatrixDim d) {
const int32_cuda i = blockIdx.x;
const int32_cuda base = i * d.stride;
const int32_cuda tid = threadIdx.x;
__shared__ Real smax[CU1DBLOCK];
__shared__ int32_cuda sidx[CU1DBLOCK];
Real tmax = -1e20;
int32_cuda tidx = -1;
// Loop over blocks for coalesced memory access.
for (int32_cuda j = tid; j < d.cols; j += CU1DBLOCK) {
const Real val = mat[base + j];
if (val > tmax) {
tmax = val;
tidx = j;
}
}
smax[tid] = tmax;
sidx[tid] = tidx;
// Parallel reduce
#pragma unroll
for (int32_cuda num_working_threads = CU1DBLOCK / 2;
num_working_threads >= warpSize; num_working_threads >>= 1) {
__syncthreads();
if (tid < num_working_threads) {
if (smax[tid + num_working_threads] > smax[tid]) {
smax[tid] = smax[tid + num_working_threads];
sidx[tid] = sidx[tid + num_working_threads];
}
}
}
// Warp reduce without __syncthreads()
  // (note: threads synchronize implicitly within a warp on the multiprocessor)
if (tid < warpSize / 2) {
#pragma unroll
for (int32_cuda num_working_threads = warpSize / 2; num_working_threads > 0;
num_working_threads >>= 1) {
if (smax[tid + num_working_threads] > smax[tid]) {
smax[tid] = smax[tid + num_working_threads];
sidx[tid] = sidx[tid + num_working_threads];
}
}
}
if (tid == 0) {
if (vec_val) {
vec_val[i] = smax[0];
}
vec_id[i] = sidx[0];
}
}
template<typename Real>
__global__
static void _diff_xent(const int32_cuda* vec_tgt, Real* mat_net_out,
Real* vec_log_post, MatrixDim d) {
int32_cuda i = blockIdx.x * blockDim.x + threadIdx.x;
int32_cuda j = blockIdx.y * blockDim.y + threadIdx.y;
if (i > 0)
return;
if (j < d.rows) {
int32_cuda index = vec_tgt[j] + j * d.stride;
Real value = mat_net_out[index];
if (value < 1e-20)
value = 1e-20;
vec_log_post[j] = log(value);
mat_net_out[index] -= 1.0;
}
}
template<typename Real>
__global__
static void _diff_softmax(Real* x, const MatrixDim dim, const Real* value,
const int value_stride, const Real* diff,
const int diff_stride) {
__shared__ Real ssum[CU1DBLOCK];
const int tid = threadIdx.x;
const int i = blockIdx.x;
const int value_start = i * value_stride;
const int diff_start = i * diff_stride;
const int x_start = i * dim.stride;
// Loop along the matrix row. Reduce to CU1DBLOCK elements per row.
Real tsum = Real(0);
for (int j = tid; j < dim.cols; j += CU1DBLOCK) {
tsum += value[value_start + j] * diff[diff_start + j];
}
ssum[tid] = tsum;
__syncthreads();
// Tree reduce to 2x warpSize elements.
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
ssum[tid] += ssum[tid + shift];
}
__syncthreads();
}
// Warp reduce to 1 element. Threads implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
ssum[tid] += ssum[tid + shift];
}
}
// Broadcast result to all threads
__syncthreads();
const Real pe = ssum[0];
// Apply element-wise x = value * (diff - pe)
for (int j = tid; j < dim.cols; j += CU1DBLOCK) {
x[x_start + j] = value[value_start + j] * (diff[diff_start + j] - pe);
}
}
// Differentiate backward through the log softmax function.
// "out_value" is the log softmax output. Does, for each row i,
//   in_deriv(i) = out_deriv(i) - sum(out_deriv(i)) .* exp(out_value(i)),
// where X(i) denotes the i'th row of matrix X (a row-vector) and the sum is
// taken over the elements of that row.
// CUDA thread layout: 1 thread block (CU1DBLOCK == 256 threads) per matrix-row.
template<typename Real>
__global__
static void _diff_log_softmax(const MatrixDim in_deriv_dim,
const Real* out_value, const int out_value_stride,
const Real* out_deriv, const int out_deriv_stride,
Real* in_deriv) {
__shared__ Real ssum[CU1DBLOCK];
const int tid = threadIdx.x;
const int i = blockIdx.x;
const int out_value_start = i * out_value_stride;
const int out_deriv_start = i * out_deriv_stride;
const int in_deriv_start = i * in_deriv_dim.stride;
// Loop along the matrix row. Reduce to CU1DBLOCK elements per row.
Real tsum = Real(0);
for (int j = tid; j < in_deriv_dim.cols; j += CU1DBLOCK) {
tsum += out_deriv[out_deriv_start + j];
}
ssum[tid] = tsum;
__syncthreads();
// Tree reduce to 2x warpSize elements.
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
ssum[tid] += ssum[tid + shift];
}
__syncthreads();
}
// Warp reduce to 1 element. Threads implicitly synchronized within a warp.
if (tid < warpSize) {
# pragma unroll
for (int shift = warpSize; shift > 0; shift >>= 1) {
ssum[tid] += ssum[tid + shift];
}
}
// Broadcast result to all threads
__syncthreads();
const Real sum_e = ssum[0];
// Apply element-wise x = out_deriv - exp(value) * sum_e
for (int j = tid; j < in_deriv_dim.cols; j += CU1DBLOCK) {
in_deriv[in_deriv_start + j] = out_deriv[out_deriv_start + j]
- exp(out_value[out_value_start + j]) * sum_e;
}
}
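/*
  Illustrative sketch (not part of the library): the formula implemented above
  follows from y_j = x_j - log(sum_k exp(x_k)) (per row), whose Jacobian gives
      in_deriv_j = out_deriv_j - softmax(x)_j * sum_k out_deriv_k
                 = out_deriv_j - exp(out_value_j) * sum_k out_deriv_k,
  since out_value is the log-softmax.  This is why only the row-sum of
  out_deriv needs to be reduced before the element-wise pass.
*/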
/**
this function computes the core part of the LSTM nonlinearity.
@param [in] in A matrix, of dimension num_rows by 5*cell_dim
(i.e. its num-cols must be a multiple of 5).
The column-space is interpreted as 5
consecutive blocks, each of dimension cell_dim,
which we name:
(i_part, f_part, c_part, o_part, c_{t-1}).
If 'have_dropout_mask' is nonzero, each row of
'in' will have 3 extra elements, interpreted
as dropout masks/scales for i_t, f_t and o_t.
@param [in] params A matrix, of dimension 3 by cell_dim,
with rows containing the 3 diagonal parameter matrices
used in LSTMs, namely
w_{ic}, w_{fc} and w_{oc}.
@param [out] out A matrix, of dimension num_rows by 2*cell_dim.
The quantities c_t and m_t respectively are put there
(in two blocks of column-dimension cell_dim),
according to the following equations:
i_t = Sigmoid(i_part + w_{ic}*c_{t-1})
f_t = Sigmoid(f_part + w_{fc}*c_{t-1})
c_t = f_t*c_{t-1} + i_t * Tanh(c_part)
o_t = Sigmoid(o_part + w_{oc}*c_t)
m_t = o_t * Tanh(c_t)
   We use a 1D thread block with CU1DBLOCK threads.
   It works best when cell_dim is a multiple of CU1DBLOCK.
   We use a 1D grid; each block works on one row of the in and out matrices.
*/
template<typename Real>
__global__
static void _lstm_nonlinearity(const Real* in, const int in_stride,
const Real* params, const int params_stride,
const int out_stride, const int cell_dim,
const int have_dropout_mask, const int num_rows,
Real* out) {
const int tid = threadIdx.x;
const int i = blockIdx.x;
const Real* i_part = in + i * in_stride;
const Real* f_part = in + i * in_stride + cell_dim;
const Real* c_part = in + i * in_stride + cell_dim * 2;
const Real* o_part = in + i * in_stride + cell_dim * 3;
const Real* c_tm1 = in + i * in_stride + cell_dim * 4;
const Real* w_ic = params;
const Real* w_fc = params + params_stride;
const Real* w_oc = params + params_stride * 2;
Real* c_t = out + i * out_stride;
Real* m_t = out + i * out_stride + cell_dim;
Real i_scale = (have_dropout_mask ? in[i * in_stride + cell_dim * 5] : 1),
f_scale = (have_dropout_mask ? in[i * in_stride + cell_dim * 5 + 1] : 1),
o_scale = (have_dropout_mask ? in[i * in_stride + cell_dim * 5 + 2] : 1);
for (int j = tid; j < cell_dim; j += CU1DBLOCK) {
Real c_tm1_j = c_tm1[j];
Real i_t_j = Real(1) / (Real(1) + exp(-i_part[j] - w_ic[j] * c_tm1_j));
Real f_t_j = Real(1) / (Real(1) + exp(-f_part[j] - w_fc[j] * c_tm1_j));
Real c_t_j = f_t_j * f_scale * c_tm1_j + i_t_j * i_scale * tanh(c_part[j]);
Real o_t_j = Real(1) / (Real(1) + exp(-o_part[j] - w_oc[j] * c_t_j));
c_t[j] = c_t_j;
m_t[j] = o_t_j * o_scale * tanh(c_t_j);
}
}
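/*
  Illustrative sketch (not part of the library): a hypothetical single-row CPU
  reference of the same equations, useful for checking the kernel.  The pointer
  layout mirrors the column blocks described above; dropout scales are omitted
  (i.e. have_dropout_mask == 0), and exp()/tanh() come from <cmath>.

    template<typename Real>
    void LstmNonlinearityRowRef(const Real* in, const Real* params,
                                int cell_dim, int params_stride, Real* out) {
      const Real *i_part = in, *f_part = in + cell_dim,
                 *c_part = in + 2 * cell_dim, *o_part = in + 3 * cell_dim,
                 *c_tm1 = in + 4 * cell_dim;
      const Real *w_ic = params, *w_fc = params + params_stride,
                 *w_oc = params + 2 * params_stride;
      for (int j = 0; j < cell_dim; j++) {
        Real i_t = 1 / (1 + exp(-i_part[j] - w_ic[j] * c_tm1[j]));
        Real f_t = 1 / (1 + exp(-f_part[j] - w_fc[j] * c_tm1[j]));
        Real c_t = f_t * c_tm1[j] + i_t * tanh(c_part[j]);
        Real o_t = 1 / (1 + exp(-o_part[j] - w_oc[j] * c_t));
        out[j] = c_t;                         // first output block: c_t
        out[j + cell_dim] = o_t * tanh(c_t);  // second output block: m_t
      }
    }
*/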
/**
This function does the 'backward' pass corresponding to the function
ComputeLstmNonlinearity. It's a little more complicated than you might
expect because of the 'self-repair' mechanism that we use to prevent the
sigmoid and tanh nonlinearities oversaturating, and because of the
average-activation and average-derivative stats that we store for these
nonlinearites (these stats are used both to control the self-repair
mechanism, and for diagnostic purposes).
Because the forward pass computes various intermediate values that are not
output, this function actually has to do the same computations as the
forward pass before it actually does the backprop.
In the following description, `C` is for `cell_dim`, `N` is for `num_rows`.
@param [in] input The same as in ComputeLstmNonlinearity().
A matrix, of dimension N by 5C (i.e. its num-cols must be
a multiple of 5). The column-space is interpreted as 5
consecutive blocks, each of dimension C, which we name:
(i_part, f_part, c_part, o_part, c_{t-1}).
If 'have_dropout_mask' is nonzero, each row of
'in' will have 3 extra elements, interpreted
as dropout masks/scales for i_t, f_t and o_t.
@param [in] params The same as in ComputeLstmNonlinearity().
A matrix, of dimension 3 by C, with rows containing the
three diagonal parameter matrices used in LSTMs, namely
w_{ic}, w_{fc} and w_{oc}.
@param [in] output_deriv
A matrix, of dimension N by 2C, containing the derivative
of the objective function we're backpropagating,
w.r.t. the quantities c_t and m_t (in two blocks of
column-dimension C).
@param [in] deriv_sum_in
This is used in the self-repair code to identify
oversaturated nonlinearities.
It is a matrix, of dimension 5 by C, corresponding to
the totals of the derivatives of the 5 sigmoid and tanh
                    nonlinearities, in the order they appear in the equations
                    in the documentation of ComputeLstmNonlinearity(),
                    i.e. in the equations for (i_t, f_t, c_t, o_t, m_t).
This will be divided by 'count_in' to get the average
derivative value so far, for each of the nonlinearities.
@param [in] self_repair_config
A vector of dimension 10, containing the configuration of
the self-repair to be used for the 5 nonlinearities.
The first 5 elements are the self_repair_lower_threshold
values (typically 0.05 for sigmoid and 0.2 for tanh),
and the next 5 elements are the corresponding
self-repair-scales (typically 10^-5).
@param [in] count_in The data-count that corresponds to the stats in
'deriv_sum_in' at entry to the function.
This function should tolerate the count being zero
(in that case, it is free to do the self-repair or not,
as this should only happen on the 1st minibatch of each
training job).
@param [out] input_deriv
May be NULL; if not, this function writes, to this
location, the backpropagated derivative of the objective
function w.r.t. the 'input' matrix. This matrix should
have the same dimension as 'input' i.e. N by 5C. In
addition to the regular backpropagated derivative, the
output will include small values relating to 'self-repair'.
@param [out] params_deriv
May be NULL; if not, this is where this function *writes*
[not adds] the backpropagated derivative of the objective
function w.r.t. 'params'; it should have the same dimension
as 'params' (3 by C). (This matrix will then be processed
by the natural gradient code and added to the appropriate
copy of the parameter matrix, outside this function).
@param [out] value_sum_out
Must be NULL if params_deriv is NULL; if not, a matrix of
dimension 5 by C. This function *adds* to this location
the total value of each of the sigmoid/tanh nonlinearities
that it computes (this is for diagnostic purposes).
@param [out] deriv_sum_out
Must be NULL if params_deriv is NULL; if not, a matrix of
dimension 5 by C; this function *adds* to this location the
total of the derivative of each of the sigmoid/tanh
nonlinearities that it computes (this is for diagnostic
purposes and to control the self-repair). This function
should tolerate the case when 'deriv_sum_out' points to the
same data as 'deriv_sum_in'.
@param [out] self_repair_sum_out
Must be NULL if params_deriv is NULL; if not, a matrix of
dimension 5 by C; this function *writes* to this location
the sum of the number of times the self-repair code was
activated (integer values 0 <= k <= N). This will be
processed outside this function into self-repair stats for
diagnostics.
// Use 2D block (8x32 threads) as we need to compute column sum.
// Use 1D grid to cover the data matrix `cell_dim`.
*/
template<typename Real>
__global__
static void _diff_lstm_nonlinearity(const int cell_dim, const int have_dropout_mask,
const int num_rows,
const Real* input, const int input_stride,
const Real* params, const int params_stride,
const Real* output_deriv,
const int output_deriv_stride,
const double* deriv_sum_in,
const int deriv_sum_in_stride,
const Real* self_repair_config,
double count, Real* input_deriv,
const int input_deriv_stride,
Real* params_deriv,
const int params_deriv_stride,
double* value_sum_out,
const int value_sum_out_stride,
double* deriv_sum_out,
const int deriv_sum_out_stride,
Real* self_repair_sum_out,
const int self_repair_sum_out_stride) {
__shared__ Real smem[CU1DBLOCK];
const int j = blockIdx.x * blockDim.x + threadIdx.x;
const int tid = threadIdx.y * blockDim.x + threadIdx.x;
const int grid_stride = gridDim.y * blockDim.y;
const int i0 = blockIdx.y * blockDim.y + threadIdx.y;
Real w_ic_deriv_sum = 0;
Real w_fc_deriv_sum = 0;
Real w_oc_deriv_sum = 0;
Real i_t_value_sum = 0, i_t_deriv_sum = 0;
Real f_t_value_sum = 0, f_t_deriv_sum = 0;
Real c_part_value_sum = 0, c_part_deriv_sum = 0;
Real o_t_value_sum = 0, o_t_deriv_sum = 0;
Real c_t_value_sum = 0, c_t_deriv_sum = 0;
bool update_sr[5];
if (j < cell_dim) {
const Real w_ic = params[j];
const Real w_fc = params[params_stride + j];
const Real w_oc = params[2 * params_stride + j];
const Real* sr_config = self_repair_config;
# pragma unroll
for (int i = 0; i < 5; i++) {
update_sr[i] =
deriv_sum_in[i * deriv_sum_in_stride + j] < sr_config[i] * count;
}
const Real i_t_self_repair = (update_sr[0] ? sr_config[5] : 0);
const Real f_t_self_repair = (update_sr[1] ? sr_config[6] : 0);
const Real c_part_self_repair = (update_sr[2] ? sr_config[7] : 0);
const Real o_t_self_repair = (update_sr[3] ? sr_config[8] : 0);
const Real c_t_self_repair = (update_sr[4] ? sr_config[9] : 0);
for (int i = i0; i < num_rows; i += grid_stride) {
const Real i_part = input[i * input_stride + j];
const Real f_part = input[i * input_stride + j + cell_dim];
const Real c_part = input[i * input_stride + j + 2 * cell_dim];
const Real o_part = input[i * input_stride + j + 3 * cell_dim];
const Real c_prev = input[i * input_stride + j + 4 * cell_dim];
      const Real i_scale = (have_dropout_mask ?
                            input[i * input_stride + cell_dim * 5] : 1),
                 f_scale = (have_dropout_mask ?
                            input[i * input_stride + cell_dim * 5 + 1] : 1),
                 o_scale = (have_dropout_mask ?
                            input[i * input_stride + cell_dim * 5 + 2] : 1);
const Real i_t = Real(1) / (1 + exp(-i_part - w_ic * c_prev));
const Real f_t = Real(1) / (1 + exp(-f_part - w_fc * c_prev));
const Real tanh_c_part = tanh(c_part);
const Real c_t = f_t * f_scale * c_prev + i_t * i_scale * tanh_c_part;
const Real o_t = 1 / (1 + exp(-o_part - w_oc * c_t));
const Real tanh_c_t = tanh(c_t);
const Real i_t_deriv = i_t * (1 - i_t);
const Real f_t_deriv = f_t * (1 - f_t);
const Real c_part_deriv = 1 - tanh_c_part * tanh_c_part;
const Real o_t_deriv = o_t * (1 - o_t);
const Real c_t_deriv = 1 - tanh_c_t * tanh_c_t;
if (params_deriv) {
i_t_value_sum += i_t;
f_t_value_sum += f_t;
c_part_value_sum += tanh_c_part;
o_t_value_sum += o_t;
c_t_value_sum += tanh_c_t;
i_t_deriv_sum += i_t_deriv;
f_t_deriv_sum += f_t_deriv;
c_part_deriv_sum += c_part_deriv;
o_t_deriv_sum += o_t_deriv;
c_t_deriv_sum += c_t_deriv;
}
const Real dc_t_out = output_deriv[i * output_deriv_stride + j];
const Real dm_t = output_deriv[i * output_deriv_stride + j + cell_dim];
const Real dtanh_c_t = o_t * o_scale * dm_t;
const Real do_t = o_scale * tanh_c_t * dm_t;
const Real do_t_input = (o_t_deriv * do_t
- (2 * o_t - 1) * o_t_self_repair);
const Real dc_t = (c_t_deriv * dtanh_c_t + dc_t_out + do_t_input * w_oc)
- tanh_c_t * c_t_self_repair;
const Real dtanh_c_part = i_t * i_scale * dc_t;
const Real df_t = dc_t * f_scale * c_prev;
const Real df_t_input = (df_t * f_t_deriv
- (2 * f_t - 1) * f_t_self_repair);
const Real di_t = dc_t * i_scale * tanh_c_part;
const Real di_t_input = (di_t * i_t_deriv
- (2 * i_t - 1) * i_t_self_repair);
if (params_deriv) {
w_ic_deriv_sum += c_prev * di_t_input;
w_fc_deriv_sum += c_prev * df_t_input;
w_oc_deriv_sum += c_t * do_t_input;
}
const Real dc_prev = w_ic * di_t_input + w_fc * df_t_input + f_t * f_scale * dc_t;
const Real do_part = do_t_input;
const Real dc_part = (c_part_deriv * dtanh_c_part
- tanh_c_part * c_part_self_repair);
const Real df_part = df_t_input;
const Real di_part = di_t_input;
if (input_deriv) {
input_deriv[i * input_deriv_stride + j] = di_part;
input_deriv[i * input_deriv_stride + j + cell_dim] = df_part;
input_deriv[i * input_deriv_stride + j + cell_dim * 2] = dc_part;
input_deriv[i * input_deriv_stride + j + cell_dim * 3] = do_part;
input_deriv[i * input_deriv_stride + j + cell_dim * 4] = dc_prev;
}
}
}
if (params_deriv) {
// compute params_deriv
smem[tid] = w_ic_deriv_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
params_deriv[j] = smem[tid];
}
__syncthreads();
smem[tid] = w_fc_deriv_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
params_deriv[params_deriv_stride + j] = smem[tid];
}
__syncthreads();
smem[tid] = w_oc_deriv_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
params_deriv[2 * params_deriv_stride + j] = smem[tid];
}
// compute value_sum_out
__syncthreads();
smem[tid] = i_t_value_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
value_sum_out[j] += smem[tid];
}
__syncthreads();
smem[tid] = f_t_value_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
value_sum_out[value_sum_out_stride + j] += smem[tid];
}
__syncthreads();
smem[tid] = c_part_value_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
value_sum_out[2 * value_sum_out_stride + j] += smem[tid];
}
__syncthreads();
smem[tid] = o_t_value_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
value_sum_out[3 * value_sum_out_stride + j] += smem[tid];
}
__syncthreads();
smem[tid] = c_t_value_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
value_sum_out[4 * value_sum_out_stride + j] += smem[tid];
}
// need to update self_repair_sum_out before deriv_sum_out, because
// deriv_sum_out and deriv_sum_in might point to the same memory.
if (i0 < 5 && j < cell_dim) {
self_repair_sum_out[i0 * self_repair_sum_out_stride + j] =
update_sr[i0] ? num_rows : 0;
}
// compute derive_sum_out
__syncthreads();
smem[tid] = i_t_deriv_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
deriv_sum_out[j] += smem[tid];
}
__syncthreads();
smem[tid] = f_t_deriv_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
deriv_sum_out[deriv_sum_out_stride + j] += smem[tid];
}
__syncthreads();
smem[tid] = c_part_deriv_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
deriv_sum_out[2 * deriv_sum_out_stride + j] += smem[tid];
}
__syncthreads();
smem[tid] = o_t_deriv_sum;
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
deriv_sum_out[3 * deriv_sum_out_stride + j] += smem[tid];
}
__syncthreads();
smem[tid] = c_t_deriv_sum;
__syncthreads();
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift >= warpSize; shift >>= 1) {
__syncthreads();
if (tid < shift) {
smem[tid] += smem[tid + shift];
}
}
if (tid < warpSize && j < cell_dim) {
deriv_sum_out[4 * deriv_sum_out_stride + j] += smem[tid];
}
}
}
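/*
  Illustrative sketch (not part of the library): the self-repair gating above
  compares the running average derivative of each nonlinearity against its
  configured lower threshold.  For the input gate, for instance, with
  self_repair_config[0] = 0.05 (the typical sigmoid threshold mentioned above),
  deriv_sum_in[0][j] = 3.0 and count = 100, the average derivative is
  0.03 < 0.05, so update_sr[0] is true; a small correction term, proportional
  to sr_config[5] (typically 1e-5) and to (2*i_t - 1), is then folded into the
  backpropagated derivative for that unit, whose documented purpose is to pull
  the gate back from saturation.
*/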
__global__
static void _cuda_compress_uint8_sign(const float *src, MatrixDim dim,
unsigned char *dest, int dest_stride) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dest_index = i + j * dest_stride,
src_index = i + j * dim.stride;
if (i < dim.cols && j < dim.rows) {
float f = src[src_index];
dest[dest_index] = (f > 0.0 ? (unsigned char)1 : (unsigned char)0);
}
}
// The following inline templated functions are a workaround for the
// fact that (I believe) std::numeric_limits is not available in CUDA
// device code; they allow us to access the minimum and maximum values of
// certain integer types from templated code.
template <typename I> __device__ static inline int minimum_integer_value();
template <typename I> __device__ static inline int maximum_integer_value();
template<> __device__ int maximum_integer_value<int8_t>() { return 127; }
template<> __device__ int minimum_integer_value<int8_t>() { return -128; }
template<> __device__ int maximum_integer_value<uint8_t>() { return 255; }
template<> __device__ int minimum_integer_value<uint8_t>() { return 0; }
template<> __device__ int maximum_integer_value<int16_t>() { return 32767; }
template<> __device__ int minimum_integer_value<int16_t>() { return -32768; }
template<> __device__ int maximum_integer_value<uint16_t>() { return 65535; }
template<> __device__ int minimum_integer_value<uint16_t>() { return 0; }
template <typename I>
__global__
static void _cuda_compress_bounds_check(const float *src, MatrixDim dim,
I *dest, int dest_stride, float inv_scale) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dest_index = i + j * dest_stride,
src_index = i + j * dim.stride;
const int min_value = minimum_integer_value<I>(),
max_value = maximum_integer_value<I>();
int compressed_value;
int ok = (i < dim.cols && j < dim.rows);
if (ok) {
float f = src[src_index];
// note: I'm not sure what __float2int_rn does if input is outside of
// integer range, but it doesn't matter much as in the situations where this
// type of compression would make sense, the input should be well inside the
// range of 'int', and if it fails, we've probably already catastrophically
// diverged.
int i = __float2int_rn(f * inv_scale);
if (i < min_value) compressed_value = min_value;
else if (i > max_value) compressed_value = max_value;
else compressed_value = i;
}
__syncthreads();
if (ok) {
dest[dest_index] = compressed_value;
}
}
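/*
  Illustrative sketch (not part of the library): the compression above is
  round-to-nearest followed by clamping to the integer type's range.  E.g. for
  I = int8_t and inv_scale = 100.0f:
      src  = { 0.004, -0.006, 1.27, 9.99 }
      dest = { 0,     -1,     127,  127  },
  where 9.99 * 100 = 999 is clamped to the int8_t maximum of 127.  The
  _cuda_uncompress kernel below reverses this, up to the quantization error,
  by multiplying by scale (here 0.01).
*/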
template <typename I>
__global__
static void _cuda_compress_no_bounds_check(const float *src, MatrixDim dim,
I *dest, int dest_stride,
float inv_scale) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int dest_index = i + j * dest_stride,
src_index = i + j * dim.stride;
if (i < dim.cols && j < dim.rows) {
float f = src[src_index];
int i = __float2int_rn(f * inv_scale);
I s = i;
dest[dest_index] = s;
}
}
template <typename I>
__global__
static void _cuda_uncompress(float *dest, MatrixDim dim,
const I *src, int src_stride,
float scale) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int j = blockIdx.y * blockDim.y + threadIdx.y;
int src_index = i + j * src_stride,
dest_index = i + j * dim.stride;
if (i < dim.cols && j < dim.rows) {
I s = src[src_index];
dest[dest_index] = float(s * scale);
}
}
/***********************************************************************
* ANSI-C wrappers of CUDA kernels
*/
/*
* "int32"
*/
void cuda_int32_set_const(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value,
MatrixDim d) {
_set_const<<<Gr,Bl>>>(mat,value,d);
}
void cuda_int32_add(dim3 Gr, dim3 Bl, int32_cuda* mat, int32_cuda value,
MatrixDim d) {
_add<<<Gr,Bl>>>(mat,value,d);
}
void cuda_int32_sequence(dim3 Gr, dim3 Bl, int32_cuda* data, int length,
int32_cuda base) {
_sequence<<<Gr, Bl>>>(data, length, base);
}
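/*
  Illustrative sketch (not part of the library): these plain (non-templated)
  wrappers are what the rest of the code calls to launch the kernels.  A
  hypothetical caller of one of the element-wise matrix wrappers might set up
  the launch roughly like this (the 16x16 block size here is just for
  illustration; the real calling code picks its own):

    MatrixDim d = ...;   // rows, cols, stride of the target matrix
    float *data = ...;   // device pointer to the matrix data
    dim3 Bl(16, 16);     // 16 x 16 = 256 threads per block
    dim3 Gr((d.cols + Bl.x - 1) / Bl.x, (d.rows + Bl.y - 1) / Bl.y);
    cudaF_set_const(Gr, Bl, data, 0.0f, d);

  i.e. one thread per matrix element, with the grid rounded up to cover the
  whole matrix; the kernels do their own bounds checks against MatrixDim.
*/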
/*
* "float"
*/
/*
* CuMatrix
*/
void cudaF_copy_upp_low(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) {
  _copy_upp_low<<<Gr,Bl>>>(A,dimA);
}
void cudaF_copy_low_upp(dim3 Gr, dim3 Bl, float* A, MatrixDim dimA) {
  _copy_low_upp<<<Gr,Bl>>>(A,dimA);
}
void cudaF_add_diag_vec_mat(dim3 Gr, dim3 Bl, float alpha, float *mat,
MatrixDim mat_dim, const float *vec,
const float *mat2, int mat2_row_stride,
int mat2_col_stride, float beta) {
_add_diag_vec_mat<<<Gr,Bl>>>(alpha, mat, mat_dim, vec, mat2, mat2_row_stride,
mat2_col_stride, beta);
}
void cudaF_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const float* B,
MatrixDim dmat) {
_copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat);
}
void cudaFD_copy_from_tp_trans(dim3 Gr, dim3 Bl, float* A, const double* B,
MatrixDim dmat) {
_copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat);
}
void cudaF_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const float* B,
MatrixDim dmat) {
_copy_from_tp<<<Gr,Bl>>>(A,B,dmat);
}
void cudaFD_copy_from_tp(dim3 Gr, dim3 Bl, float* A, const double* B,
MatrixDim dmat) {
_copy_from_tp<<<Gr,Bl>>>(A,B,dmat);
}
void cudaF_apply_exp(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
_apply_exp<<<Gr,Bl>>>(mat,d);
}
void cudaF_apply_exp_limited(dim3 Gr, dim3 Bl, float* mat, MatrixDim d,
float lower_limit, float upper_limit) {
_apply_exp_limited<<<Gr,Bl>>>(mat, d, lower_limit, upper_limit);
}
void cudaF_apply_pow(dim3 Gr, dim3 Bl, float* mat, float power, MatrixDim d) {
_apply_pow<<<Gr,Bl>>>(mat, power, d);
}
void cudaF_apply_pow_abs(dim3 Gr, dim3 Bl, float* mat, float power,
bool include_sign, MatrixDim d) {
_apply_pow_abs<<<Gr,Bl>>>(mat, power, include_sign, d);
}
void cudaF_apply_heaviside(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
_apply_heaviside<<<Gr,Bl>>>(mat, d);
}
void cudaF_copy_cols(dim3 Gr, dim3 Bl, float* dst, const float* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
_copy_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride);
}
void cudaF_add_cols(dim3 Gr, dim3 Bl, float* dst, const float* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
_add_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride);
}
void cudaF_copy_rows(dim3 Gr, dim3 Bl, float* dst, const float* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
_copy_rows<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride);
}
void cudaF_copy_rows_direct(dim3 Gr, dim3 Bl, float* dst,
const float* const * src, MatrixDim dst_dim) {
_copy_rows<<<Gr,Bl>>>(dst, src, dst_dim);
}
void cudaF_copy_to_rows_direct(dim3 Gr, dim3 Bl, float* const * dst,
const float* src, MatrixDim src_dim) {
_copy_to_rows<<<Gr,Bl>>>(dst, src, src_dim);
}
void cudaF_add_rows(dim3 Gr, dim3 Bl, float alpha, float* dst, const float* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
_add_rows<<<Gr,Bl>>>(alpha, dst, src, reorder, dst_dim, src_stride);
}
void cudaF_mul_rows(dim3 Gr, dim3 Bl, float* dst, const float* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
_mul_rows<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride);
}
void cudaF_add_rows_direct(dim3 Gr, dim3 Bl, float alpha, float* dst,
const float* const * src, MatrixDim dst_dim) {
_add_rows<<<Gr,Bl>>>(alpha, dst, src, dst_dim);
}
void cudaF_add_to_rows(dim3 Gr, dim3 Bl, float alpha,
float* dst, const float* src, const MatrixIndexT_cuda* reorder,
MatrixDim src_dim, int dst_stride) {
_add_to_rows<<<Gr,Bl>>>(alpha, dst, src, reorder, src_dim, dst_stride);
}
void cudaF_add_to_rows_direct(dim3 Gr, dim3 Bl, float alpha, float* const * dst,
const float* src, MatrixDim src_dim) {
_add_to_rows<<<Gr,Bl>>>(alpha, dst, src, src_dim);
}
void cudaF_apply_floor(dim3 Gr, dim3 Bl, float* mat, float floor_val,
MatrixDim d) {
_apply_floor<<<Gr,Bl>>>(mat, floor_val, d);
}
void cudaF_apply_ceiling(dim3 Gr, dim3 Bl, float* mat, float ceiling_val,
MatrixDim d) {
_apply_ceiling<<<Gr,Bl>>>(mat, ceiling_val, d);
}
void cudaF_set_diag(int Gr, int Bl, float* mat, float value, MatrixDim d) {
_set_diag<<<Gr,Bl>>>(mat,value,d);
}
void cudaF_set_diag_packed(int Gr, int Bl, float* mat, float value, int dim) {
_set_diag_packed<<<Gr,Bl>>>(mat,value,dim);
}
void cudaF_add_diag_packed(int Gr, int Bl, float* mat, float value, int dim) {
_add_diag_packed<<<Gr,Bl>>>(mat,value,dim);
}
void cudaF_set_const(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
_set_const<<<Gr,Bl>>>(mat,value,d);
}
void cudaF_set_zero_above_diag(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
_set_zero_above_diag<<<Gr,Bl>>>(mat, d);
}
void cudaF_add(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
_add<<<Gr,Bl>>>(mat,value,d);
}
void cudaF_scale_diag_packed(int Gr, int Bl, float* mat, float value, int dim) {
_scale_diag_packed<<<Gr,Bl>>>(mat,value,dim);
}
void cudaF_scale(dim3 Gr, dim3 Bl, float* mat, float value, MatrixDim d) {
_scale<<<Gr,Bl>>>(mat,value,d);
}
void cudaF_apply_log(dim3 Gr, dim3 Bl, float* mat, MatrixDim d) {
_apply_log<<<Gr,Bl>>>(mat,d);
}
void cudaF_mul_elements(dim3 Gr, dim3 Bl, float* mat, const float* A,
MatrixDim dst_d, int src_stride) {
_mul_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride);
}
void cudaF_div_elements(dim3 Gr, dim3 Bl, float* mat, const float* A,
MatrixDim dst_d, int src_stride) {
_div_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride);
}
void cudaF_max(dim3 Gr, dim3 Bl, float* mat, const float* A, MatrixDim dst_d,
int src_stride) {
_max<<<Gr,Bl>>>(mat,A,dst_d,src_stride);
}
void cudaF_min(dim3 Gr, dim3 Bl, float* mat, const float* other,
MatrixDim mat_d, int other_stride) {
_min<<<Gr,Bl>>>(mat,other,mat_d,other_stride);
}
void cudaF_mul_cols_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale,
MatrixDim d) {
_mul_cols_vec<<<Gr,Bl>>>(mat,scale,d);
}
void cudaF_mul_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* scale,
MatrixDim d) {
_mul_rows_vec<<<Gr,Bl>>>(mat,scale,d);
}
void cudaF_mul_rows_group_mat(dim3 Gr, dim3 Bl, float *y, const float *x,
MatrixDim d, int src_stride, int group_size) {
_mul_rows_group_mat<<<Gr,Bl>>>(y, x, d, src_stride, group_size);
}
void cudaF_diff_group_pnorm(dim3 Gr, dim3 Bl, float *id, const float *iv,
const float *ov, const float* od, MatrixDim id_dim,
int iv_stride, int ov_stride, int od_stride,
int group_size, float power) {
_diff_group_pnorm<<<Gr, Bl>>>(id, iv, ov, od, id_dim, iv_stride, ov_stride,
od_stride, group_size, power);
}
void cudaF_calc_group_max_deriv(dim3 Gr, dim3 Bl, float *y, const float *x1,
const float *x2, MatrixDim y_dim, int x1_stride,
int x2_stride, int group_size) {
_calc_group_max_deriv<<<Gr,Bl>>>(y, x1, x2, y_dim, x1_stride, x2_stride,
group_size);
}
void cudaF_div_rows_vec(dim3 Gr, dim3 Bl, float* mat, const float* vec_div,
MatrixDim d) {
_div_rows_vec<<<Gr,Bl>>>(mat, vec_div, d);
}
void cudaF_add_mat(dim3 Gr, dim3 Bl, float alpha, const float* src, float* dst,
MatrixDim d, int src_stride, int A_trans) {
if (A_trans) {
_add_mat_trans<<<Gr,Bl>>>(alpha,src,dst,d,src_stride);
} else {
_add_mat<<<Gr,Bl>>>(alpha,src,dst,d,src_stride);
}
}
void cudaF_add_mat_blocks(dim3 Gr, dim3 Bl, float alpha, const float* src,
int32_cuda num_row_blocks, int32_cuda num_col_blocks,
float* dst, MatrixDim d, int src_stride,
int A_trans) {
if (A_trans) {
_add_mat_blocks_trans<<<Gr,Bl>>>(alpha, src, num_row_blocks, num_col_blocks,
dst, d, src_stride);
} else {
_add_mat_blocks<<<Gr,Bl>>>(alpha, src, num_row_blocks, num_col_blocks, dst,
d, src_stride);
}
}
void cudaF_add_mat_repeated(dim3 Gr, dim3 Bl, float alpha, const float* src,
MatrixDim src_dim, float *dst, MatrixDim dst_dim) {
_add_mat_repeated<<<Gr,Bl>>>(alpha, src, src_dim, dst, dst_dim);
}
void cudaF_set_mat_mat_div_mat(dim3 Gr, dim3 Bl, const float *A, const float *B,
const float *C, float *dst, MatrixDim d,
int stride_a, int stride_b, int stride_c) {
_set_mat_mat_div_mat<<<Gr,Bl>>>(A,B,C,dst,d, stride_a, stride_b, stride_c);
}
void cudaF_sy_add_tr2(dim3 Gr, dim3 Bl, float alpha, float beta, const float* T,
MatrixDim tdim, float *S, MatrixDim sdim) {
_sy_add_tr2<<<Gr,Bl>>>(alpha, beta, T, tdim, S, sdim);
}
void cudaF_add_vec_to_cols(dim3 Gr, dim3 Bl, float alpha, const float* col,
float beta, float* dst, MatrixDim d) {
_add_vec_to_cols<<<Gr,Bl>>>(alpha,col,beta,dst,d);
}
void cudaF_add_vec_to_rows(dim3 Gr, dim3 Bl, float alpha, const float* row,
float beta, float* dst, MatrixDim d) {
_add_vec_to_rows<<<Gr,Bl>>>(alpha,row,beta,dst,d);
}
void cudaF_add_mat_diag_vec(dim3 Gr, dim3 Bl, float alpha, float *mat,
MatrixDim mat_dim, const float *mat2,
int mat2_row_stride, int mat2_col_stride,
const float *vec, float beta) {
_add_mat_diag_vec<<<Gr,Bl>>>(alpha, mat, mat_dim, mat2, mat2_row_stride,
mat2_col_stride, vec, beta);
}
void cudaF_add_mat_mat_elements(dim3 Gr, dim3 Bl, float *data,
const float *srcA_data, const float *srcB_data,
MatrixDim dim, int srcA_stride, int srcB_stride,
float alpha, float beta) {
_add_mat_mat_elements<<<Gr, Bl>>>(data, srcA_data, srcB_data, dim,
srcA_stride, srcB_stride, alpha, beta);
}
// CURRENTLY UNUSED...
void cudaF_apply_mask(dim3 Gr, dim3 Bl, float* mat, const char* mask,
MatrixDim dmat, MatrixDim dmask) {
_apply_mask<<<Gr,Bl>>>(mat,mask,dmat,dmask);
}
/*
* CuVector
*/
void cudaF_max_mat_cols(int Gr, int Bl, float* result, const float* mat,
const MatrixDim d) {
_transform_reduce_mat_cols<<<Gr,Bl>>>(result,mat,d,
TransReduceOp<MAX,float>());
}
void cudaF_min_mat_cols(int Gr, int Bl, float* result, const float* mat,
const MatrixDim d) {
_transform_reduce_mat_cols<<<Gr,Bl>>>(result,mat,d,
TransReduceOp<MIN,float>());
}
void cudaF_sum_mat_cols(int Gr, int Bl, float* result, const float* mat,
const MatrixDim d) {
_transform_reduce_mat_cols<<<Gr,Bl>>>(result,mat,d,
TransReduceOp<SUM,float>());
}
void cudaF_add_col_sum_mat(int Gr, int Bl, float* result, const float* mat,
const MatrixDim d, const float alpha,
const float beta) {
_transform_reduce_mat_cols<<<Gr, Bl>>>(result, mat, d,
TransReduceOp<SUMAB, float>(alpha, beta));
}
void cudaF_replace_value(int Gr, int Bl, float *v, int dim, float orig,
float changed) {
_replace_value<<<Gr,Bl>>>(v, dim, orig, changed);
}
void cudaF_set_bias_params(int Gr, int Bl, float* v, const float* a,
float param_1, float param_2, float param_3,
int* flag, int dim) {
_set_bias_params<<<Gr,Bl>>>(v,a,param_1,param_2,param_3,flag,dim);
}
void cublas_copy_kaldi_fd(int Gr, int Bl, int n, const float* x, int incx,
double* y, int incy) {
_cublas_copy_kaldi<<<Gr,Bl>>>(n, x, incx, y, incy);
}
void cublas_copy_kaldi_df(int Gr, int Bl, int n, const double* x, int incx,
float* y, int incy) {
_cublas_copy_kaldi<<<Gr,Bl>>>(n, x, incx, y, incy);
}
void cudaF_vec_mul_elements(int Gr, int Bl, float* v, const float* a, int dim) {
_vec_mul_elements<<<Gr,Bl>>>(v, a, dim);
}
void cudaF_vec_min(int Gr, int Bl, const float* v, float* value, int dim,
int inc) {
_vec_transform_reduce<<<Gr,Bl>>>(v, value, dim, inc,
TransReduceOp<MIN, float>());
}
void cudaF_vec_max(int Gr, int Bl, const float* v, float* value, int dim,
int inc) {
_vec_transform_reduce<<<Gr,Bl>>>(v, value, dim, inc,
TransReduceOp<MAX, float>());
}
void cudaF_trace_mat_mat_trans(dim3 Gr, dim3 Bl, const float* A, const float* B,
MatrixDim dA, int B_stride, float* value) {
_trace_mat_mat_trans<<<Gr,Bl>>>(A,B,dA,B_stride,value);
}
void cudaF_trace_mat_mat(dim3 Gr, dim3 Bl, const float* A, const float* B,
MatrixDim dA, int B_stride, float* value) {
_trace_mat_mat<32> <<<Gr,Bl>>>(A,B,dA,B_stride,value);
}
void cudaF_add_diag_mat_mat_MNT(int Gr, int Bl, const float alpha,
const float* M, const MatrixDim dim_M,
const float* N, const int stride_N,
const float beta, float* v) {
_add_diag_mat_mat_MNT<<<Gr,Bl>>>(alpha,M,dim_M,N,stride_N,beta,v);
}
void cudaF_add_diag_mat_mat_MTN(dim3 Gr, dim3 Bl, const float alpha,
const float* M, const int stride_M,
const float* N, const MatrixDim dim_N,
const float beta, float* v,
const int stride_v) {
if (Bl.x == 16) {
_add_diag_mat_mat_MTN<16> <<<Gr, Bl>>>(alpha, M, stride_M, N, dim_N, beta,
v, stride_v);
} else if (Bl.x == 32) {
_add_diag_mat_mat_MTN<32> <<<Gr, Bl>>>(alpha, M, stride_M, N, dim_N, beta,
v, stride_v);
}
}
void cudaF_add_diag_mat_mat_MN(dim3 Gr, dim3 Bl, const float alpha,
const float* M, const int stride_M,
const float* N, const MatrixDim dim_N,
const float beta, float* v) {
if (Bl.x == 16) {
_add_diag_mat_mat_MN<16> <<<Gr,Bl>>>(alpha,M,stride_M,N,dim_N,beta,v);
} else if (Bl.x==32) {
_add_diag_mat_mat_MN<32><<<Gr,Bl>>>(alpha,M,stride_M,N,dim_N,beta,v);
}
}
void cudaF_add_vec_vec(int Gr, int Bl, float alpha, float* v, const float* x,
const float* y, float beta, int dim) {
_add_vec_vec<<<Gr,Bl>>>(alpha,v,x,y,beta,dim);
}
void cudaF_vec_sum(int Gr, int Bl, float* v, float* value, int dim, int inc) {
_vec_transform_reduce<<<Gr,Bl>>>(v, value, dim, inc,
TransReduceOp<SUM, float>());
}
void cudaF_matrix_add_elements(dim3 Gr, dim3 Bl, float *data, MatrixDim dim,
float alpha, MatrixElement<float>* x,
int num_elements) {
_cuda_matrix_add_elements<<<Gr, Bl>>>(data, dim, alpha, x, num_elements);
}
void cudaF_matrix_add_indexed_values(dim3 Gr, dim3 Bl, MatrixDim dim,
float alpha, const Int32Pair* indices,
const float* x, int s, float* data) {
_cuda_matrix_add_indexed_values<<<Gr, Bl>>>(dim, alpha, indices, x, s, data);
}
void cudaF_matrix_add_to_elements(dim3 Gr, dim3 Bl, float alpha,
float* mat, MatrixDim dim,
const MatrixIndexT_cuda* elements) {
_cuda_matrix_add_to_elements<<<Gr, Bl>>>(alpha, mat, dim, elements);
}
void cudaF_vector_copy_elements(dim3 Gr, dim3 Bl, float *data, int dim,
const float *src_mat, int mat_stride,
bool transpose,
const MatrixIndexT_cuda* elements) {
_cuda_vector_copy_elements<<<Gr, Bl>>>(data, dim, src_mat, mat_stride,
transpose, elements);
}
void cudaF_comp_obj_deriv(dim3 Gr, dim3 Bl, MatrixElement<float>* x, int s,
const float* z, MatrixDim d, float* z2, MatrixDim d2,
float* t) {
_cuda_comp_obj_deriv<<<Gr,Bl>>>(x,s,z,d,z2,d2,t);
}
void cudaD_comp_obj_deriv(dim3 Gr, dim3 Bl, MatrixElement<double>* x, int s,
const double* z, MatrixDim d, double* z2,
MatrixDim d2, double* t) {
_cuda_comp_obj_deriv<<<Gr,Bl>>>(x,s,z,d,z2,d2,t);
}
void cudaF_vec_copy_diag_from_packed(int Gr, int Bl, float *dst,
const float *src, int dim) {
_vec_copy_diag_from_packed<<<Gr,Bl>>>(dst,src,dim);
}
void cudaF_vec_apply_floor(int Gr, int Bl, float* v, float floor_val,
float *count, int dim) {
_vec_apply_floor<<<Gr,Bl>>>(v,floor_val,count,dim);
}
void cudaF_vec_apply_ceiling(int Gr, int Bl, float* v, float ceiling_val,
float *count, int dim) {
_vec_apply_ceiling<<<Gr,Bl>>>(v, ceiling_val,count,dim);
}
void cudaF_vec_apply_exp(int Gr, int Bl, float* v, int dim) {
_vec_apply_exp<<<Gr,Bl>>>(v,dim);
}
void cudaF_vec_apply_log(int Gr, int Bl, float* v, float* flag, int dim) {
_vec_apply_log<<<Gr,Bl>>>(v,flag,dim);
}
void cudaF_invert_elements(dim3 Gr, dim3 Bl, float* data, MatrixDim d) {
_invert_elements<<<Gr,Bl>>>(data, d);
}
void cudaF_add_mat_blockmat(dim3 Gr, dim3 Bl, float *data, MatrixDim d,
const float *Adata, int A_num_rows, int A_num_cols,
int A_row_stride, int A_col_stride,
const CuBlockMatrixData *B_cu_data,
int B_num_blocks, float alpha, float beta,
int B_trans) {
if (B_trans) {
_add_mat_blockmat_trans<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols,
A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta);
} else {
_add_mat_blockmat<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols,
A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta);
}
}
void cudaF_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data,
int num_blocks, const float *C_data,
int C_num_cols, int C_row_stride, int C_col_stride,
const float *D_data, int D_row_stride,
int D_col_stride, float alpha, float beta) {
_block_add_mat_mat<<<Gr,Bl>>>(B_cu_data, num_blocks, C_data, C_num_cols,
C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride, alpha,
beta);
}
/*
* cu::
*/
void cudaF_soft_hinge(dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d,
int src_stride) {
_soft_hinge<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaF_group_pnorm(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d,
int src_stride, int group_size, float power) {
_group_pnorm<<<Gr,Bl>>>(y, x, d, src_stride, group_size, power);
}
void cudaF_group_spec_pnorm(dim3 Gr, dim3 Bl, float* y, const float* x,
MatrixDim d, int src_stride, int group_size,
float power) {
if (power == float(0)) {
_group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size,
TransReduceOp<L0NORM, float>());
} else if (power == float(1)) {
_group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size,
TransReduceOp<L1NORM, float>());
} else if (power == float(2)) {
_group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size,
TransReduceOp<L2NORM, float>());
} else if (power == std::numeric_limits<float>::infinity()) {
_group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size,
TransReduceOp<LINFNORM, float>());
} else {
_group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size,
TransReduceOp<LPNORM, float>(power));
}
}
void cudaF_group_max(dim3 Gr, dim3 Bl, float *y, const float *x, MatrixDim d,
int src_stride, int group_size) {
_group_transform_reduce<<<Gr,Bl>>>(y, x, d, src_stride, group_size,
TransReduceOp<MAX, float>());
}
void cudaF_sigmoid(dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d,
int src_stride) {
_sigmoid<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaF_diff_sigmoid(dim3 Gr, dim3 Bl, float* eout, const float* e,
const float* y, MatrixDim d, int e_stride,
int y_stride) {
_diff_sigmoid<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride);
}
void cudaF_tanh(dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d,
int src_stride) {
_tanh<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaF_diff_tanh(dim3 Gr, dim3 Bl, float* eout, const float* e,
const float* y, MatrixDim d, int e_stride, int y_stride) {
_diff_tanh<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride);
}
void cudaF_ensure_nonzero(dim3 Gr, dim3 Bl, const float *x, MatrixDim d,
float epsilon, int y_stride, float *y) {
_ensure_nonzero<<<Gr,Bl>>>(x, d, epsilon, y_stride, y);
}
void cudaF_parametric_relu(dim3 Gr, dim3 Bl, float* y, const float* x,
MatrixDim d, int src_stride,
const float* a, const float* b) {
_parametric_relu<<<Gr,Bl>>>(y, x, d, src_stride, a, b);
}
void cudaF_diff_parametric_relu(dim3 Gr, dim3 Bl, float* eout, const float* e,
const float* y, MatrixDim d, int e_stride,
int y_stride, const float* a, const float* b) {
_diff_parametric_relu<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride, a, b);
}
void cudaF_heaviside(dim3 Gr, dim3 Bl, float* y, const float* x, MatrixDim d,
int src_stride) {
_heaviside<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaF_softmax_reduce(size_t Gr, size_t Bl, float* y, const float* x,
MatrixDim d, int src_stride) {
_softmax_reduce<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaF_log_softmax_reduce(size_t Gr, size_t Bl, float* y, const float* x,
MatrixDim y_dim, int x_stride) {
_log_softmax_reduce<<<Gr,Bl>>>(y, x, y_dim, x_stride);
}
void cudaF_splice(dim3 Gr, dim3 Bl, float* y, const float* x,
const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
_splice<<<Gr,Bl>>>(y,x,off,d_out,d_in);
}
void cudaF_normalize_per_row(size_t Gr, size_t Bl, float *y, int y_stride,
const float *x, MatrixDim x_d, float target_rms,
bool add_log_stddev) {
_normalize_per_row<<<Gr, Bl>>>(y, y_stride, x, x_d, target_rms, add_log_stddev);
}
void cudaF_one(int Gr, int Bl, float* x, int dim) {
_one<<<Gr,Bl>>>(x,dim);
}
void cudaF_take_mean(dim3 Gr, dim3 Bl, const float* x, float* y,
MatrixDim d_in) {
_take_mean<<<Gr,Bl>>>(x,y,d_in);
}
void cudaF_take_lower(dim3 Gr, dim3 Bl, const float* x, float* y,
MatrixDim d_in) {
_take_lower<<<Gr,Bl>>>(x,y,d_in);
}
void cudaF_take_upper(dim3 Gr, dim3 Bl, const float* x, float* y,
MatrixDim d_in) {
_take_upper<<<Gr,Bl>>>(x,y,d_in);
}
void cudaF_copy_from_sp(dim3 Gr, dim3 Bl, const float* x, float* y,
MatrixDim dim) {
_copy_from_sp<<<Gr,Bl>>>(x, y, dim);
}
void cudaF_copy(dim3 Gr, dim3 Bl, float* y, const float* x,
const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
_copy<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in);
}
void cudaF_randomize(dim3 Gr, dim3 Bl, float* y, const float* x,
const int32_cuda* copy_from, MatrixDim d_out,
MatrixDim d_in) {
_randomize<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in);
}
void cudaF_regularize_l1(dim3 Gr, dim3 Bl, float* wei, float* grad, float l1,
float lr, MatrixDim d, int stride_grad) {
_regularize_l1<<<Gr,Bl>>>(wei,grad,l1,lr,d,stride_grad);
}
void cudaF_find_row_max_id(dim3 Gr, dim3 Bl, const float* mat, float* vec_val,
int32_cuda* vec_id, MatrixDim d) {
_find_row_max_id<<<Gr,Bl>>>(mat, vec_val, vec_id, d);
}
void cudaF_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt,
float* mat_net_out, float* vec_log_post, MatrixDim d) {
_diff_xent<<<Gr,Bl>>>(vec_tgt,mat_net_out,vec_log_post,d);
}
void cudaF_diff_softmax(dim3 Gr, dim3 Bl, float* x, const MatrixDim dim,
const float* value, const int value_stride,
const float* diff, const int diff_stride) {
_diff_softmax<<<Gr, Bl>>>(x, dim, value, value_stride, diff, diff_stride);
}
void cudaF_copy_rows_from_vec(dim3 Gr, dim3 Bl, float *mat_out, MatrixDim d_out,
const float *v_in) {
_copy_rows_from_vec<<<Gr,Bl>>>(mat_out, d_out, v_in);
}
void cudaF_diff_log_softmax(dim3 Gr, dim3 Bl, const MatrixDim in_deriv_dim,
const float* out_value, const int out_value_stride,
const float* out_deriv, const int out_deriv_stride,
float* in_deriv) {
_diff_log_softmax<<<Gr, Bl>>>(in_deriv_dim, out_value, out_value_stride,
out_deriv, out_deriv_stride, in_deriv);
}
void cudaF_copy_col_from_mat_df(int Gr, int Bl, double* v, int col,
const float* mat, MatrixDim dmat, int dim) {
_copy_col_from_mat_df<<<Gr,Bl>>>(v,col,mat,dmat,dim);
}
void cudaF_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col,
const float* mat, MatrixDim dmat, int dim) {
_copy_col_from_mat_fd<<<Gr,Bl>>>(v,col,mat,dmat,dim);
}
void cudaF_sum_column_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim,
const float *src_data, MatrixDim src_dim,
const Int32Pair *indices) {
_sum_column_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indices);
}
void cudaF_add_row_ranges(dim3 Gr, dim3 Bl, float *data, MatrixDim dim,
const float *src_data, MatrixDim src_dim,
const Int32Pair *indexes) {
_add_row_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indexes);
}
void cudaF_matrix_lookup(dim3 Gr, dim3 Bl, const float *data, MatrixDim dim,
const Int32Pair *indices, int indices_size,
float *output) {
_matrix_lookup<<<Gr,Bl>>>(data, dim, indices, indices_size, output);
}
void cudaF_equal_element_mask(dim3 Gr, dim3 Bl, const float *mat1,
const float *mat2, float *mask,
MatrixDim mat1_dim, int mat2_stride,
int mask_stride) {
_equal_element_mask<<<Gr,Bl>>>(mat1, mat2, mask, mat1_dim, mat2_stride,
mask_stride);
}
/*
* "double"
*/
/*
* CuMatrix
*/
void cudaD_copy_upp_low(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) {
  _copy_upp_low<<<Gr,Bl>>>(A,dimA);
}
void cudaD_copy_low_upp(dim3 Gr, dim3 Bl, double* A, MatrixDim dimA) {
  _copy_low_upp<<<Gr,Bl>>>(A,dimA);
}
void cudaD_add_diag_vec_mat(dim3 Gr, dim3 Bl, double alpha, double *mat,
MatrixDim mat_dim, const double *vec,
const double *mat2, int mat2_row_stride,
int mat2_col_stride, double beta) {
_add_diag_vec_mat<<<Gr,Bl>>>(alpha, mat, mat_dim, vec, mat2, mat2_row_stride,
mat2_col_stride, beta);
}
void cudaD_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const double* B,
MatrixDim dmat) {
_copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat);
}
void cudaDF_copy_from_tp_trans(dim3 Gr, dim3 Bl, double* A, const float* B,
MatrixDim dmat) {
_copy_from_tp_trans<<<Gr,Bl>>>(A,B,dmat);
}
void cudaD_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const double* B,
MatrixDim dmat) {
_copy_from_tp<<<Gr,Bl>>>(A,B,dmat);
}
void cudaDF_copy_from_tp(dim3 Gr, dim3 Bl, double* A, const float* B,
MatrixDim dmat) {
_copy_from_tp<<<Gr,Bl>>>(A,B,dmat);
}
void cudaD_apply_exp(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
_apply_exp<<<Gr,Bl>>>(mat,d);
}
void cudaD_apply_exp_limited(dim3 Gr, dim3 Bl, double* mat, MatrixDim d,
double lower_limit, double upper_limit) {
_apply_exp_limited<<<Gr,Bl>>>(mat, d, lower_limit, upper_limit);
}
void cudaD_apply_pow(dim3 Gr, dim3 Bl, double* mat, double power, MatrixDim d) {
_apply_pow<<<Gr,Bl>>>(mat, power, d);
}
void cudaD_apply_pow_abs(dim3 Gr, dim3 Bl, double* mat, double power,
bool include_sign, MatrixDim d) {
_apply_pow_abs<<<Gr,Bl>>>(mat, power, include_sign, d);
}
void cudaD_apply_heaviside(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
_apply_heaviside<<<Gr,Bl>>>(mat, d);
}
void cudaD_copy_cols(dim3 Gr, dim3 Bl, double* dst, const double* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
_copy_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride);
}
void cudaD_add_cols(dim3 Gr, dim3 Bl, double* dst, const double* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
_add_cols<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride);
}
void cudaD_copy_rows(dim3 Gr, dim3 Bl, double* dst, const double* src,
const MatrixIndexT_cuda* reorder, MatrixDim dst_dim,
int src_stride) {
_copy_rows<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride);
}
void cudaD_copy_rows_direct(dim3 Gr, dim3 Bl, double* dst,
const double* const * src, MatrixDim dst_dim) {
_copy_rows<<<Gr,Bl>>>(dst, src, dst_dim);
}
void cudaD_copy_to_rows_direct(dim3 Gr, dim3 Bl, double* const * dst,
const double* src, MatrixDim src_dim) {
_copy_to_rows<<<Gr,Bl>>>(dst, src, src_dim);
}
void cudaD_add_rows(dim3 Gr, dim3 Bl, double alpha, double* dst,
const double* src, const MatrixIndexT_cuda* reorder,
MatrixDim dst_dim, int src_stride) {
_add_rows<<<Gr,Bl>>>(alpha, dst, src, reorder, dst_dim, src_stride);
}
void cudaD_mul_rows(dim3 Gr, dim3 Bl, double* dst,
const double* src, const MatrixIndexT_cuda* reorder,
MatrixDim dst_dim, int src_stride) {
_mul_rows<<<Gr,Bl>>>(dst, src, reorder, dst_dim, src_stride);
}
void cudaD_add_rows_direct(dim3 Gr, dim3 Bl, double alpha, double* dst,
const double* const * src, MatrixDim dst_dim) {
_add_rows<<<Gr,Bl>>>(alpha, dst, src, dst_dim);
}
void cudaD_add_to_rows(dim3 Gr, dim3 Bl, double alpha,
double* dst, const double* src, const MatrixIndexT_cuda* reorder,
MatrixDim src_dim, int dst_stride) {
_add_to_rows<<<Gr,Bl>>>(alpha, dst, src, reorder, src_dim, dst_stride);
}
void cudaD_add_to_rows_direct(dim3 Gr, dim3 Bl, double alpha,
double* const * dst, const double* src,
MatrixDim src_dim) {
_add_to_rows<<<Gr,Bl>>>(alpha, dst, src, src_dim);
}
void cudaD_apply_floor(dim3 Gr, dim3 Bl, double* mat, double floor_val,
MatrixDim d) {
_apply_floor<<<Gr,Bl>>>(mat, floor_val, d);
}
void cudaD_apply_ceiling(dim3 Gr, dim3 Bl, double* mat, double ceiling_val,
MatrixDim d) {
_apply_ceiling<<<Gr,Bl>>>(mat, ceiling_val, d);
}
void cudaD_set_diag(int Gr, int Bl, double* mat, double value, MatrixDim d) {
_set_diag<<<Gr,Bl>>>(mat,value,d);
}
void cudaD_set_diag_packed(int Gr, int Bl, double* mat, double value, int dim) {
_set_diag_packed<<<Gr,Bl>>>(mat,value,dim);
}
void cudaD_add_diag_packed(int Gr, int Bl, double* mat, double value, int dim) {
_add_diag_packed<<<Gr,Bl>>>(mat,value,dim);
}
void cudaD_set_const(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
_set_const<<<Gr,Bl>>>(mat,value,d);
}
void cudaD_set_zero_above_diag(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
_set_zero_above_diag<<<Gr,Bl>>>(mat, d);
}
void cudaD_add(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
_add<<<Gr,Bl>>>(mat,value,d);
}
void cudaD_scale_diag_packed(int Gr, int Bl, double* mat, double value,
int dim) {
_scale_diag_packed<<<Gr,Bl>>>(mat,value,dim);
}
void cudaD_scale(dim3 Gr, dim3 Bl, double* mat, double value, MatrixDim d) {
_scale<<<Gr,Bl>>>(mat,value,d);
}
void cudaD_apply_log(dim3 Gr, dim3 Bl, double* mat, MatrixDim d) {
_apply_log<<<Gr,Bl>>>(mat,d);
}
void cudaD_mul_elements(dim3 Gr, dim3 Bl, double* mat, const double* A,
MatrixDim dst_d, int src_stride) {
_mul_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride);
}
void cudaD_div_elements(dim3 Gr, dim3 Bl, double* mat, const double* A,
MatrixDim dst_d, int src_stride) {
_div_elements<<<Gr,Bl>>>(mat,A,dst_d,src_stride);
}
void cudaD_max(dim3 Gr, dim3 Bl, double* mat, const double* A, MatrixDim dst_d,
int src_stride) {
_max<<<Gr,Bl>>>(mat,A,dst_d,src_stride);
}
void cudaD_min(dim3 Gr, dim3 Bl, double* mat, const double* other, MatrixDim mat_d,
int other_stride) {
_min<<<Gr,Bl>>>(mat,other,mat_d,other_stride);
}
void cudaD_mul_cols_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale,
MatrixDim d) {
_mul_cols_vec<<<Gr,Bl>>>(mat,scale,d);
}
void cudaD_mul_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* scale,
MatrixDim d) {
_mul_rows_vec<<<Gr,Bl>>>(mat,scale,d);
}
void cudaD_mul_rows_group_mat(dim3 Gr, dim3 Bl, double* y, const double* x,
MatrixDim d, int src_stride, int group_size) {
_mul_rows_group_mat<<<Gr,Bl>>>(y, x, d, src_stride, group_size);
}
void cudaD_diff_group_pnorm(dim3 Gr, dim3 Bl, double *id, const double *iv,
const double *ov, const double* od,
MatrixDim id_dim, int iv_stride, int ov_stride,
int od_stride, int group_size, double power) {
_diff_group_pnorm<<<Gr, Bl>>>(id, iv, ov, od, id_dim, iv_stride, ov_stride,
od_stride, group_size, power);
}
void cudaD_calc_group_max_deriv(dim3 Gr, dim3 Bl, double*y, const double* x1,
const double* x2, MatrixDim y_dim,
int x1_stride, int x2_stride, int group_size) {
_calc_group_max_deriv<<<Gr,Bl>>>(y, x1, x2, y_dim, x1_stride, x2_stride,
group_size);
}
void cudaD_div_rows_vec(dim3 Gr, dim3 Bl, double* mat, const double* vec_div,
MatrixDim d) {
_div_rows_vec<<<Gr,Bl>>>(mat, vec_div, d);
}
void cudaD_add_mat(dim3 Gr, dim3 Bl, double alpha, const double* src,
double* dst, MatrixDim d, int src_stride, int A_trans) {
if (A_trans) {
_add_mat_trans<<<Gr,Bl>>>(alpha,src,dst,d,src_stride);
} else {
_add_mat<<<Gr,Bl>>>(alpha,src,dst,d,src_stride);
}
}
void cudaD_add_mat_blocks(dim3 Gr, dim3 Bl, double alpha, const double* src,
int32_cuda num_row_blocks, int32_cuda num_col_blocks,
double* dst, MatrixDim d, int src_stride,
int A_trans) {
if (A_trans) {
_add_mat_blocks_trans<<<Gr,Bl>>>(alpha, src, num_row_blocks, num_col_blocks,
dst, d, src_stride);
} else {
_add_mat_blocks<<<Gr,Bl>>>(alpha, src, num_row_blocks, num_col_blocks, dst,
d, src_stride);
}
}
void cudaD_add_mat_repeated(dim3 Gr, dim3 Bl, double alpha, const double* src,
MatrixDim src_dim, double *dst, MatrixDim dst_dim) {
_add_mat_repeated<<<Gr,Bl>>>(alpha, src, src_dim, dst, dst_dim);
}
void cudaD_set_mat_mat_div_mat(dim3 Gr, dim3 Bl, const double *A,
const double *B, const double *C, double *dst,
MatrixDim d, int stride_a, int stride_b,
int stride_c) {
_set_mat_mat_div_mat<<<Gr,Bl>>>(A,B,C,dst,d,stride_a,stride_b,stride_c);
}
void cudaD_sy_add_tr2(dim3 Gr, dim3 Bl, double alpha, double beta,
const double* T, MatrixDim tdim, double *S,
MatrixDim sdim) {
_sy_add_tr2<<<Gr,Bl>>>(alpha, beta, T, tdim, S, sdim);
}
void cudaD_add_vec_to_cols(dim3 Gr, dim3 Bl, double alpha, const double* col,
double beta, double* dst, MatrixDim d) {
_add_vec_to_cols<<<Gr,Bl>>>(alpha,col,beta,dst,d);
}
void cudaD_add_vec_to_rows(dim3 Gr, dim3 Bl, double alpha, const double* row,
double beta, double* dst, MatrixDim d) {
_add_vec_to_rows<<<Gr,Bl>>>(alpha,row,beta,dst,d);
}
void cudaD_add_mat_diag_vec(dim3 Gr, dim3 Bl, double alpha, double *mat,
MatrixDim mat_dim, const double *mat2,
int mat2_row_stride, int mat2_col_stride,
const double *vec, double beta) {
_add_mat_diag_vec<<<Gr,Bl>>>(alpha, mat, mat_dim, mat2, mat2_row_stride,
mat2_col_stride, vec, beta);
}
void cudaD_add_mat_mat_elements(dim3 Gr, dim3 Bl, double *data,
const double *srcA_data,
const double *srcB_data, MatrixDim dim,
int srcA_stride, int srcB_stride, double alpha,
double beta) {
_add_mat_mat_elements<<<Gr, Bl>>>(data, srcA_data, srcB_data, dim,
srcA_stride, srcB_stride, alpha, beta);
}
// CURRENTLY UNUSED...
void cudaD_apply_mask(dim3 Gr, dim3 Bl, double* mat, const char* mask,
MatrixDim dmat, MatrixDim dmask) {
_apply_mask<<<Gr,Bl>>>(mat,mask,dmat,dmask);
}
/*
* CuVector
*/
void cudaD_max_mat_cols(int Gr, int Bl, double* result, const double* mat,
const MatrixDim d) {
_transform_reduce_mat_cols<<<Gr,Bl>>>(result,mat,d,
TransReduceOp<MAX,double>());
}
void cudaD_min_mat_cols(int Gr, int Bl, double* result, const double* mat,
const MatrixDim d) {
_transform_reduce_mat_cols<<<Gr,Bl>>>(result,mat,d,
TransReduceOp<MIN,double>());
}
void cudaD_sum_mat_cols(int Gr, int Bl, double* result, const double* mat,
const MatrixDim d) {
_transform_reduce_mat_cols<<<Gr,Bl>>>(result,mat,d,
TransReduceOp<SUM,double>());
}
void cudaD_add_col_sum_mat(int Gr, int Bl, double* result, const double* mat,
const MatrixDim d, const double alpha,
const double beta) {
_transform_reduce_mat_cols<<<Gr, Bl>>>(result, mat, d,
TransReduceOp<SUMAB, double>(alpha, beta));
}
void cudaD_replace_value(int Gr, int Bl, double *v, int dim, double orig,
double changed) {
_replace_value<<<Gr,Bl>>>(v, dim, orig, changed);
}
void cudaD_set_bias_params(int Gr, int Bl, double* v, const double* a,
double param_1, double param_2, double param_3,
int* flag, int dim) {
_set_bias_params<<<Gr,Bl>>>(v,a,param_1,param_2,param_3,flag,dim);
}
void cudaD_vec_mul_elements(int Gr, int Bl, double* v, const double* a,
int dim) {
_vec_mul_elements<<<Gr,Bl>>>(v, a, dim);
}
void cudaD_vec_min(int Gr, int Bl, const double* v, double* value, int dim,
int inc) {
_vec_transform_reduce<<<Gr,Bl>>>(v, value, dim, inc,
TransReduceOp<MIN, double>());
}
void cudaD_vec_max(int Gr, int Bl, const double* v, double* value, int dim,
int inc) {
_vec_transform_reduce<<<Gr,Bl>>>(v, value, dim, inc,
TransReduceOp<MAX, double>());
}
void cudaD_trace_mat_mat_trans(dim3 Gr, dim3 Bl, const double* A,
const double* B, MatrixDim dA, int B_stride,
double* value) {
_trace_mat_mat_trans<<<Gr,Bl>>>(A,B,dA,B_stride,value);
}
void cudaD_trace_mat_mat(dim3 Gr, dim3 Bl, const double* A, const double* B,
MatrixDim dA, int B_stride, double* value) {
_trace_mat_mat<32> <<<Gr,Bl>>>(A,B,dA,B_stride,value);
}
void cudaD_add_diag_mat_mat_MNT(int Gr, int Bl, const double alpha,
const double* M, const MatrixDim dim_M,
const double* N, const int stride_N,
const double beta, double* v) {
_add_diag_mat_mat_MNT<<<Gr,Bl>>>(alpha,M,dim_M,N,stride_N,beta,v);
}
void cudaD_add_diag_mat_mat_MTN(dim3 Gr, dim3 Bl, const double alpha,
const double* M, const int stride_M,
const double* N, const MatrixDim dim_N,
const double beta, double* v,
const int stride_v) {
if (Bl.x == 16) {
_add_diag_mat_mat_MTN<16> <<<Gr, Bl>>>(alpha, M, stride_M, N, dim_N, beta,
v, stride_v);
} else if (Bl.x == 32) {
_add_diag_mat_mat_MTN<32> <<<Gr, Bl>>>(alpha, M, stride_M, N, dim_N, beta,
v, stride_v);
}
}
void cudaD_add_diag_mat_mat_MN(dim3 Gr, dim3 Bl, const double alpha,
const double* M, const int stride_M,
const double* N, const MatrixDim dim_N,
const double beta, double* v) {
if (Bl.x == 16) {
_add_diag_mat_mat_MN<16> <<<Gr,Bl>>>(alpha,M,stride_M,N,dim_N,beta,v);
} else if (Bl.x==32) {
_add_diag_mat_mat_MN<32><<<Gr,Bl>>>(alpha,M,stride_M,N,dim_N,beta,v);
}
}
void cudaD_add_vec_vec(int Gr, int Bl, double alpha, double* v, const double* x,
const double* y, double beta, int dim) {
_add_vec_vec<<<Gr,Bl>>>(alpha,v,x,y,beta,dim);
}
void cudaD_copy_col_from_mat_df(int Gr, int Bl, double* v, int col,
const double* mat, MatrixDim dmat, int dim) {
_copy_col_from_mat_df<<<Gr,Bl>>>(v,col,mat,dmat,dim);
}
void cudaD_copy_col_from_mat_fd(int Gr, int Bl, float* v, int col,
const double* mat, MatrixDim dmat, int dim) {
_copy_col_from_mat_fd<<<Gr,Bl>>>(v,col,mat,dmat,dim);
}
void cudaD_vec_sum(int Gr, int Bl, double* v, double* value, int dim, int inc) {
_vec_transform_reduce<<<Gr,Bl>>>(v,value,dim,inc,
TransReduceOp<SUM, double>());
}
void cudaD_matrix_add_elements(dim3 Gr, dim3 Bl, double *data, MatrixDim dim,
double alpha, MatrixElement<double>* x,
int num_elements) {
_cuda_matrix_add_elements<<<Gr, Bl>>>(data, dim, alpha, x, num_elements);
}
void cudaD_vector_copy_elements(dim3 Gr, dim3 Bl, double *data, int dim,
const double *src_mat, int mat_stride,
bool transpose,
const MatrixIndexT_cuda* elements) {
_cuda_vector_copy_elements<<<Gr, Bl>>>(data, dim, src_mat, mat_stride,
transpose, elements);
}
void cudaD_matrix_add_indexed_values(dim3 Gr, dim3 Bl, MatrixDim dim,
double alpha, const Int32Pair* indices,
const double* x, int s, double* data) {
_cuda_matrix_add_indexed_values<<<Gr, Bl>>>(dim, alpha, indices, x, s, data);
}
void cudaD_matrix_add_to_elements(dim3 Gr, dim3 Bl, double alpha,
double* mat, MatrixDim dim,
const MatrixIndexT_cuda* elements) {
_cuda_matrix_add_to_elements<<<Gr, Bl>>>(alpha, mat, dim, elements);
}
void cudaD_vec_copy_diag_from_packed(int Gr, int Bl, double *dst,
const double *src, int dim) {
_vec_copy_diag_from_packed<<<Gr,Bl>>>(dst,src,dim);
}
void cudaD_vec_apply_floor(int Gr, int Bl, double* v, double floor_val,
float *count, int dim) {
_vec_apply_floor<<<Gr,Bl>>>(v,floor_val,count,dim);
}
void cudaD_vec_apply_ceiling(int Gr, int Bl, double* v, double ceiling_val,
float *count, int dim) {
_vec_apply_ceiling<<<Gr,Bl>>>(v,ceiling_val,count,dim);
}
void cudaD_vec_apply_exp(int Gr, int Bl, double* v, int dim) {
_vec_apply_exp<<<Gr,Bl>>>(v,dim);
}
void cudaD_vec_apply_log(int Gr, int Bl, double* v, double* flag, int dim) {
_vec_apply_log<<<Gr,Bl>>>(v,flag,dim);
}
void cudaD_invert_elements(dim3 Gr, dim3 Bl, double* data, MatrixDim d) {
_invert_elements<<<Gr,Bl>>>(data, d);
}
void cudaD_add_mat_blockmat(dim3 Gr, dim3 Bl, double *data, MatrixDim d,
const double *Adata, int A_num_rows, int A_num_cols,
int A_row_stride, int A_col_stride,
const CuBlockMatrixData *B_cu_data,
int B_num_blocks, double alpha, double beta,
int B_trans) {
if (B_trans) {
_add_mat_blockmat_trans<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols,
A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta);
} else {
_add_mat_blockmat<<<Gr,Bl>>>(data, d, Adata, A_num_rows, A_num_cols,
A_row_stride, A_col_stride, B_cu_data, B_num_blocks, alpha, beta);
}
}
void cudaD_block_add_mat_mat(dim3 Gr, dim3 Bl, CuBlockMatrixData *B_cu_data,
int num_blocks, const double *C_data,
int C_num_cols, int C_row_stride, int C_col_stride,
const double *D_data, int D_row_stride,
int D_col_stride, double alpha, double beta) {
_block_add_mat_mat<<<Gr,Bl>>>(B_cu_data, num_blocks, C_data, C_num_cols,
C_row_stride, C_col_stride, D_data, D_row_stride, D_col_stride,
alpha, beta);
}
/*
* cu::
*/
void cudaD_soft_hinge(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d,
int src_stride) {
_soft_hinge<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaD_group_pnorm(dim3 Gr, dim3 Bl, double* y, const double* x,
MatrixDim d, int src_stride, int group_size,
double power) {
_group_pnorm<<<Gr,Bl>>>(y, x, d, src_stride, group_size, power);
}
void cudaD_group_spec_pnorm(dim3 Gr, dim3 Bl, double* y, const double* x,
MatrixDim d, int src_stride, int group_size,
double power) {
if (power == double(0)) {
_group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size,
TransReduceOp<L0NORM, double>());
} else if (power == double(1)) {
_group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size,
TransReduceOp<L1NORM, double>());
} else if (power == double(2)) {
_group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size,
TransReduceOp<L2NORM, double>());
} else if (power == std::numeric_limits<double>::infinity()) {
_group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size,
TransReduceOp<LINFNORM, double>());
} else {
_group_transform_reduce<<<Gr, Bl>>>(y, x, d, src_stride, group_size,
TransReduceOp<LPNORM, double>(power));
}
}
void cudaD_group_max(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d,
int src_stride, int group_size) {
_group_transform_reduce<<<Gr,Bl>>>(y, x, d, src_stride, group_size,
TransReduceOp<MAX, double>());
}
void cudaD_sigmoid(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d,
int src_stride) {
_sigmoid<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaD_diff_sigmoid(dim3 Gr, dim3 Bl, double* eout, const double* e,
const double* y, MatrixDim d, int e_stride,
int y_stride) {
_diff_sigmoid<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride);
}
void cudaD_tanh(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d,
int src_stride) {
_tanh<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaD_diff_tanh(dim3 Gr, dim3 Bl, double* eout, const double* e,
const double* y, MatrixDim d, int e_stride, int y_stride) {
_diff_tanh<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride);
}
void cudaD_ensure_nonzero(dim3 Gr, dim3 Bl, const double *x, MatrixDim d,
double epsilon, int y_stride, double *y) {
_ensure_nonzero<<<Gr,Bl>>>(x, d, epsilon, y_stride, y);
}
void cudaD_parametric_relu(dim3 Gr, dim3 Bl, double* y, const double* x,
MatrixDim d, int src_stride,
const double* a, const double* b) {
_parametric_relu<<<Gr,Bl>>>(y, x, d, src_stride, a, b);
}
void cudaD_diff_parametric_relu(dim3 Gr, dim3 Bl, double* eout, const double* e,
const double* y, MatrixDim d, int e_stride,
int y_stride, const double* a, const double* b) {
_diff_parametric_relu<<<Gr,Bl>>>(eout, e, y, d, e_stride, y_stride, a, b);
}
void cudaD_heaviside(dim3 Gr, dim3 Bl, double* y, const double* x, MatrixDim d,
int src_stride) {
_heaviside<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaD_softmax_reduce(size_t Gr, size_t Bl, double* y, const double* x,
MatrixDim d, int src_stride) {
_softmax_reduce<<<Gr,Bl>>>(y, x, d, src_stride);
}
void cudaD_log_softmax_reduce(size_t Gr, size_t Bl, double* y, const double* x,
MatrixDim y_dim, int x_stride) {
_log_softmax_reduce<<<Gr,Bl>>>(y, x, y_dim, x_stride);
}
void cudaD_normalize_per_row(size_t Gr, size_t Bl, double *y, int y_stride,
const double *x, MatrixDim x_d, double target_rms,
bool add_log_stddev) {
_normalize_per_row<<<Gr, Bl>>>(y, y_stride, x, x_d, target_rms, add_log_stddev);
}
void cudaD_splice(dim3 Gr, dim3 Bl, double* y, const double* x,
const int32_cuda* off, MatrixDim d_out, MatrixDim d_in) {
_splice<<<Gr,Bl>>>(y,x,off,d_out,d_in);
}
void cudaD_one(int Gr, int Bl, double* x, int dim) {
_one<<<Gr,Bl>>>(x,dim);
}
void cudaD_take_mean(dim3 Gr, dim3 Bl, const double* x, double* y,
MatrixDim d_in) {
_take_mean<<<Gr,Bl>>>(x,y,d_in);
}
void cudaD_take_lower(dim3 Gr, dim3 Bl, const double* x, double* y,
MatrixDim d_in) {
_take_lower<<<Gr,Bl>>>(x,y,d_in);
}
void cudaD_take_upper(dim3 Gr, dim3 Bl, const double* x, double* y,
MatrixDim d_in) {
_take_upper<<<Gr,Bl>>>(x,y,d_in);
}
void cudaD_copy_from_sp(dim3 Gr, dim3 Bl, const double* x, double* y,
MatrixDim d_out) {
_copy_from_sp<<<Gr,Bl>>>(x,y,d_out);
}
void cudaD_copy(dim3 Gr, dim3 Bl, double* y, const double* x,
const int32_cuda* copy_from, MatrixDim d_out, MatrixDim d_in) {
_copy<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in);
}
void cudaD_randomize(dim3 Gr, dim3 Bl, double* y, const double* x,
const int32_cuda* copy_from, MatrixDim d_out,
MatrixDim d_in) {
_randomize<<<Gr,Bl>>>(y,x,copy_from,d_out,d_in);
}
void cudaD_regularize_l1(dim3 Gr, dim3 Bl, double* wei, double* grad, double l1,
double lr, MatrixDim d, int stride_grad) {
_regularize_l1<<<Gr,Bl>>>(wei,grad,l1,lr,d,stride_grad);
}
void cudaD_find_row_max_id(dim3 Gr, dim3 Bl, const double* mat, double* vec_val,
int32_cuda* vec_id, MatrixDim d) {
_find_row_max_id<<<Gr,Bl>>>(mat, vec_val, vec_id, d);
}
void cudaD_diff_xent(dim3 Gr, dim3 Bl, const int32_cuda* vec_tgt,
double* mat_net_out, double* vec_log_post, MatrixDim d) {
_diff_xent<<<Gr,Bl>>>(vec_tgt,mat_net_out,vec_log_post,d);
}
void cudaD_diff_softmax(dim3 Gr, dim3 Bl, double* x, const MatrixDim dim,
const double* value, const int value_stride,
const double* diff, const int diff_stride) {
_diff_softmax<<<Gr, Bl>>>(x, dim, value, value_stride, diff, diff_stride);
}
void cudaD_diff_log_softmax(dim3 Gr, dim3 Bl, const MatrixDim in_deriv_dim,
const double* out_value, const int out_value_stride,
const double* out_deriv, const int out_deriv_stride,
double* in_deriv) {
_diff_log_softmax<<<Gr, Bl>>>(in_deriv_dim, out_value, out_value_stride,
out_deriv, out_deriv_stride, in_deriv);
}
void cudaD_copy_rows_from_vec(dim3 Gr, dim3 Bl, double *mat_out,
MatrixDim d_out, const double *v_in) {
_copy_rows_from_vec<<<Gr,Bl>>>(mat_out, d_out, v_in);
}
void cudaD_sum_column_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim,
const double *src_data, MatrixDim src_dim,
const Int32Pair *indices) {
_sum_column_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indices);
}
void cudaD_add_row_ranges(dim3 Gr, dim3 Bl, double *data, MatrixDim dim,
const double *src_data, MatrixDim src_dim,
const Int32Pair *indexes) {
_add_row_ranges<<<Gr,Bl>>>(data, dim, src_data, src_dim, indexes);
}
void cudaD_matrix_lookup(dim3 Gr, dim3 Bl, const double *data, MatrixDim dim,
const Int32Pair *indices, int indices_size,
double *output) {
_matrix_lookup<<<Gr,Bl>>>(data, dim, indices, indices_size, output);
}
void cudaD_equal_element_mask(dim3 Gr, dim3 Bl, const double *mat1,
const double *mat2, double *mask,
MatrixDim mat1_dim, int mat2_stride,
int mask_stride) {
_equal_element_mask<<<Gr,Bl>>>(mat1, mat2, mask, mat1_dim, mat2_stride,
mask_stride);
}
// Some conversion kernels for which it's more convenient
// to not name them F or D.
void cuda_copy_from_mat_df(dim3 Gr, dim3 Bl, double* mat_out,
const float* mat_in, MatrixDim d_out,
MatrixDim d_in) {
_copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_ff(dim3 Gr, dim3 Bl, float* mat_out,
const float* mat_in, MatrixDim d_out,
MatrixDim d_in) {
_copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_fd(dim3 Gr, dim3 Bl, float *mat_out,
const double* mat_in, MatrixDim d_out,
MatrixDim d_in) {
_copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_dd(dim3 Gr, dim3 Bl, double *mat_out,
const double* mat_in, MatrixDim d_out,
MatrixDim d_in) {
_copy_from_mat<<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_df_trans(dim3 Gr, dim3 Bl, double* mat_out,
const float* mat_in, MatrixDim d_out,
MatrixDim d_in) {
_copy_from_mat_trans<32> <<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_ff_trans(dim3 Gr, dim3 Bl, float* mat_out,
const float* mat_in, MatrixDim d_out,
MatrixDim d_in) {
_copy_from_mat_trans<32> <<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_fd_trans(dim3 Gr, dim3 Bl, float *mat_out,
const double* mat_in, MatrixDim d_out,
MatrixDim d_in) {
_copy_from_mat_trans<32> <<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_mat_dd_trans(dim3 Gr, dim3 Bl, double *mat_out,
const double* mat_in, MatrixDim d_out,
MatrixDim d_in) {
_copy_from_mat_trans<32> <<<Gr,Bl>>>(mat_out,mat_in,d_out,d_in);
}
void cuda_copy_from_smat_ff(dim3 Gr, dim3 Bl, float* mat, MatrixDim mat_dim,
const int* smat_row_ptr, const int* smat_col_idx,
const float* smat_val) {
_copy_from_smat<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cuda_copy_from_smat_fd(dim3 Gr, dim3 Bl, float* mat, MatrixDim mat_dim,
const int* smat_row_ptr, const int* smat_col_idx,
const double* smat_val) {
_copy_from_smat<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cuda_copy_from_smat_df(dim3 Gr, dim3 Bl, double* mat, MatrixDim mat_dim,
const int* smat_row_ptr, const int* smat_col_idx,
const float* smat_val) {
_copy_from_smat<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cuda_copy_from_smat_dd(dim3 Gr, dim3 Bl, double* mat, MatrixDim mat_dim,
const int* smat_row_ptr, const int* smat_col_idx,
const double* smat_val) {
_copy_from_smat<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cuda_copy_from_smat_ff_trans(dim3 Gr, dim3 Bl, float* mat,
MatrixDim mat_dim, const int* smat_row_ptr,
const int* smat_col_idx,
const float* smat_val) {
_copy_from_smat_trans<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cuda_copy_from_smat_fd_trans(dim3 Gr, dim3 Bl, float* mat,
MatrixDim mat_dim, const int* smat_row_ptr,
const int* smat_col_idx,
const double* smat_val) {
_copy_from_smat_trans<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cuda_copy_from_smat_df_trans(dim3 Gr, dim3 Bl, double* mat,
MatrixDim mat_dim, const int* smat_row_ptr,
const int* smat_col_idx,
const float* smat_val) {
_copy_from_smat_trans<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cuda_copy_from_smat_dd_trans(dim3 Gr, dim3 Bl, double* mat,
MatrixDim mat_dim, const int* smat_row_ptr,
const int* smat_col_idx,
const double* smat_val) {
_copy_from_smat_trans<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val);
}
void cudaF_trace_mat_smat(dim3 Gr, dim3 Bl, const float* mat, MatrixDim mat_dim,
const int* smat_row_ptr, const int* smat_col_idx,
const float* smat_val, float* trace_vec) {
_trace_mat_smat<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val, trace_vec);
}
void cudaF_trace_mat_smat_trans(dim3 Gr, dim3 Bl, const float* mat,
MatrixDim mat_dim, const int* smat_row_ptr,
const int* smat_col_idx, const float* smat_val,
float* trace_vec) {
_trace_mat_smat_trans<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val, trace_vec);
}
void cudaD_trace_mat_smat(dim3 Gr, dim3 Bl, const double* mat,
MatrixDim mat_dim, const int* smat_row_ptr,
const int* smat_col_idx, const double* smat_val,
double* trace_vec) {
_trace_mat_smat<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val, trace_vec);
}
void cudaD_trace_mat_smat_trans(dim3 Gr, dim3 Bl, const double* mat,
MatrixDim mat_dim, const int* smat_row_ptr,
const int* smat_col_idx, const double* smat_val,
double* trace_vec) {
_trace_mat_smat_trans<<<Gr, Bl>>>(mat, mat_dim, smat_row_ptr, smat_col_idx,
smat_val, trace_vec);
}
void cudaD_lstm_nonlinearity(dim3 Gr, dim3 Bl, const double* in,
const int in_stride, const double* params,
const int params_stride, const int out_stride,
const int cell_dim, const int have_dropout_mask,
const int num_rows, double* out) {
_lstm_nonlinearity<<<Gr, Bl>>>(
in, in_stride, params, params_stride,
out_stride, cell_dim, have_dropout_mask, num_rows, out);
}
void cudaF_lstm_nonlinearity(dim3 Gr, dim3 Bl, const float* in,
const int in_stride, const float* params,
const int params_stride, const int out_stride,
const int cell_dim, const int have_dropout_mask,
const int num_rows, float* out) {
_lstm_nonlinearity<<<Gr, Bl>>>(
in, in_stride, params, params_stride,
out_stride, cell_dim, have_dropout_mask, num_rows, out);
}
void cudaD_diff_lstm_nonlinearity(dim3 Gr, dim3 Bl, const int cell_dim,
const int have_dropout_mask,
const int num_rows, const double* input,
const int input_stride, const double* params,
const int params_stride,
const double* output_deriv,
const int output_deriv_stride,
const double* deriv_sum_in,
const int deriv_sum_in_stride,
const double* self_repair_config,
double count, double* input_deriv,
const int input_deriv_stride,
double* params_deriv,
const int params_deriv_stride,
double* value_sum_out,
const int value_sum_out_stride,
double* deriv_sum_out,
const int deriv_sum_out_stride,
double* self_repair_sum_out,
const int self_repair_sum_out_stride) {
_diff_lstm_nonlinearity<<<Gr, Bl>>>(
cell_dim, have_dropout_mask, num_rows, input,
input_stride, params, params_stride, output_deriv, output_deriv_stride,
deriv_sum_in, deriv_sum_in_stride, self_repair_config, count, input_deriv,
input_deriv_stride, params_deriv, params_deriv_stride, value_sum_out,
value_sum_out_stride, deriv_sum_out, deriv_sum_out_stride,
self_repair_sum_out, self_repair_sum_out_stride);
}
void cudaF_diff_lstm_nonlinearity(dim3 Gr, dim3 Bl, const int cell_dim,
const int have_dropout_mask,
const int num_rows, const float* input,
const int input_stride, const float* params,
const int params_stride,
const float* output_deriv,
const int output_deriv_stride,
const double* deriv_sum_in,
const int deriv_sum_in_stride,
const float* self_repair_config, double count,
float* input_deriv,
const int input_deriv_stride,
float* params_deriv,
const int params_deriv_stride,
double* value_sum_out,
const int value_sum_out_stride,
double* deriv_sum_out,
const int deriv_sum_out_stride,
float* self_repair_sum_out,
const int self_repair_sum_out_stride) {
_diff_lstm_nonlinearity<<<Gr, Bl>>>(
cell_dim, have_dropout_mask, num_rows, input,
input_stride, params, params_stride, output_deriv, output_deriv_stride,
deriv_sum_in, deriv_sum_in_stride, self_repair_config, count, input_deriv,
input_deriv_stride, params_deriv, params_deriv_stride, value_sum_out,
value_sum_out_stride, deriv_sum_out, deriv_sum_out_stride,
self_repair_sum_out, self_repair_sum_out_stride);
}
void cudaD_copy_cols_from_vec(dim3 Gr, dim3 Bl, double *mat_out,
MatrixDim d_out, const double *v_in) {
_copy_cols_from_vec<<<Gr, Bl>>>(mat_out, d_out, v_in);
}
void cudaF_copy_cols_from_vec(dim3 Gr, dim3 Bl, float *mat_out, MatrixDim d_out,
const float *v_in) {
_copy_cols_from_vec<<<Gr, Bl>>>(mat_out, d_out, v_in);
}
void cudaF_diff_normalize_per_row(size_t Gr, size_t Bl, float *id,
int id_stride, const float *iv,
MatrixDim iv_dim, const float* od,
int od_stride, float target_rms,
bool add_log_stddev) {
_diff_normalize_per_row<<<Gr, Bl>>>(id, id_stride, iv, iv_dim, od, od_stride,
target_rms, add_log_stddev);
}
void cudaD_diff_normalize_per_row(size_t Gr, size_t Bl, double *id,
int id_stride, const double *iv,
MatrixDim iv_dim, const double* od,
int od_stride, double target_rms,
bool add_log_stddev) {
_diff_normalize_per_row<<<Gr, Bl>>>(id, id_stride, iv, iv_dim, od, od_stride,
target_rms, add_log_stddev);
}
void cudaD_select_rows(dim3 Gr, dim3 Bl, const int* out_row_ptr,
int* out_col_idx, double* out_val,
const int* row_indexes, const int num_selected_rows,
const int* in_row_ptr, const int* in_col_idx,
const double* in_val) {
_select_rows<<<Gr, Bl>>>(out_row_ptr, out_col_idx, out_val, row_indexes,
num_selected_rows, in_row_ptr, in_col_idx, in_val);
}
void cudaF_select_rows(dim3 Gr, dim3 Bl, const int* out_row_ptr,
int* out_col_idx, float* out_val, const int* row_indexes,
const int num_selected_rows, const int* in_row_ptr,
const int* in_col_idx, const float* in_val) {
_select_rows<<<Gr, Bl>>>(out_row_ptr, out_col_idx, out_val, row_indexes,
num_selected_rows, in_row_ptr, in_col_idx, in_val);
}
void cudaD_add_smat(dim3 Gr, dim3 Bl, double* mat, MatrixDim mat_dim,
double alpha, const int* smat_row_ptr,
const int* smat_col_idx, const double* smat_val) {
_add_smat<<<Gr, Bl>>>(mat, mat_dim, alpha, smat_row_ptr, smat_col_idx,
smat_val);
}
void cudaF_add_smat(dim3 Gr, dim3 Bl, float* mat, MatrixDim mat_dim,
float alpha, const int* smat_row_ptr,
const int* smat_col_idx, const float* smat_val) {
_add_smat<<<Gr, Bl>>>(mat, mat_dim, alpha, smat_row_ptr, smat_col_idx,
smat_val);
}
void cudaD_add_smat_trans(dim3 Gr, dim3 Bl, double* mat, MatrixDim mat_dim,
double alpha, const int* smat_row_ptr,
const int* smat_col_idx, const double* smat_val) {
_add_smat_trans<<<Gr, Bl>>>(mat, mat_dim, alpha, smat_row_ptr, smat_col_idx,
smat_val);
}
void cudaF_add_smat_trans(dim3 Gr, dim3 Bl, float* mat, MatrixDim mat_dim,
float alpha, const int* smat_row_ptr,
const int* smat_col_idx, const float* smat_val) {
_add_smat_trans<<<Gr, Bl>>>(mat, mat_dim, alpha, smat_row_ptr, smat_col_idx,
smat_val);
}
void cudaD_apply_exp_special(dim3 Gr, dim3 Bl, double* out, MatrixDim out_dim,
const double* in, int in_stride) {
_apply_exp_special<<<Gr, Bl>>>(out, out_dim, in, in_stride);
}
void cudaF_apply_exp_special(dim3 Gr, dim3 Bl, float* out, MatrixDim out_dim,
const float* in, int in_stride) {
_apply_exp_special<<<Gr, Bl>>>(out, out_dim, in, in_stride);
}
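// Compression wrappers: the scaled integer variants below dispatch to a
// bounds-checked or unchecked kernel depending on the caller's bounds_check
// flag; the matching uncompress wrappers scale the stored integers back to
// float.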
void cuda_compress_uint8_sign(dim3 Gr, dim3 Bl, const float *src, MatrixDim dim,
unsigned char *dest, int dest_stride) {
_cuda_compress_uint8_sign<<<Gr, Bl>>>(src, dim, dest, dest_stride);
}
void cuda_compress_int16(dim3 Gr, dim3 Bl, const float *src,
MatrixDim dim, int16_t *dest,
int dest_stride, float inv_scale,
bool bounds_check) {
if (bounds_check) {
_cuda_compress_bounds_check<<<Gr, Bl>>>(src, dim, dest, dest_stride, inv_scale);
} else {
_cuda_compress_no_bounds_check<<<Gr, Bl>>>(src, dim, dest, dest_stride, inv_scale);
}
}
void cuda_compress_uint16(dim3 Gr, dim3 Bl, const float *src,
MatrixDim dim, uint16_t *dest,
int dest_stride, float inv_scale,
bool bounds_check) {
if (bounds_check) {
_cuda_compress_bounds_check<<<Gr, Bl>>>(src, dim, dest, dest_stride, inv_scale);
} else {
_cuda_compress_no_bounds_check<<<Gr, Bl>>>(src, dim, dest, dest_stride, inv_scale);
}
}
void cuda_compress_int8(dim3 Gr, dim3 Bl, const float *src,
MatrixDim dim, int8_t *dest,
int dest_stride, float inv_scale,
bool bounds_check) {
if (bounds_check) {
_cuda_compress_bounds_check<<<Gr, Bl>>>(src, dim, dest, dest_stride, inv_scale);
} else {
_cuda_compress_no_bounds_check<<<Gr, Bl>>>(src, dim, dest, dest_stride, inv_scale);
}
}
void cuda_compress_uint8(dim3 Gr, dim3 Bl, const float *src,
MatrixDim dim, uint8_t *dest,
int dest_stride, float inv_scale,
bool bounds_check) {
if (bounds_check) {
_cuda_compress_bounds_check<<<Gr, Bl>>>(src, dim, dest, dest_stride, inv_scale);
} else {
_cuda_compress_no_bounds_check<<<Gr, Bl>>>(src, dim, dest, dest_stride, inv_scale);
}
}
void cuda_uncompress_uint8(dim3 Gr, dim3 Bl, float *dest,
MatrixDim dim, const uint8_t *src,
int src_stride, float scale) {
_cuda_uncompress<<<Gr, Bl>>>(dest, dim, src, src_stride, scale);
}
void cuda_uncompress_int8(dim3 Gr, dim3 Bl, float *dest,
MatrixDim dim, const int8_t *src,
int src_stride, float scale) {
_cuda_uncompress<<<Gr, Bl>>>(dest, dim, src, src_stride, scale);
}
void cuda_uncompress_uint16(dim3 Gr, dim3 Bl, float *dest,
MatrixDim dim, const uint16_t *src,
int src_stride, float scale) {
_cuda_uncompress<<<Gr, Bl>>>(dest, dim, src, src_stride, scale);
}
void cuda_uncompress_int16(dim3 Gr, dim3 Bl, float *dest,
MatrixDim dim, const int16_t *src,
int src_stride, float scale) {
_cuda_uncompress<<<Gr, Bl>>>(dest, dim, src, src_stride, scale);
}
|
23d031ed7e029d74c74fddd0478ba95266761d8e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void matrixAdd_B_Kernel(float* A, float* B, float* C, size_t pitch, int width){
//compute indexes
int row = blockIdx.x * blockDim.x + threadIdx.x;
int rowWidthWithPad = pitch/sizeof(float);
if(row < width){
for (int col = 0; col < width; ++col) {
if(col < width)
C[row * rowWidthWithPad + col] = A[row * rowWidthWithPad + col] + B[row * rowWidthWithPad + col];
}
}
}
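// --- Illustrative host-side driver (not part of the original kernel file) ---
// A minimal sketch of how this pitched, one-thread-per-row kernel could be
// driven; the function name matrixAdd_B_Host, the square width x width shape
// and threadsPerBlock are assumptions, and error checking is omitted.
void matrixAdd_B_Host(const float* hA, const float* hB, float* hC, int width){
size_t pitch = 0;
float *dA, *dB, *dC;
// All three allocations share one pitch because they have the same width.
hipMallocPitch((void**)&dA, &pitch, width * sizeof(float), width);
hipMallocPitch((void**)&dB, &pitch, width * sizeof(float), width);
hipMallocPitch((void**)&dC, &pitch, width * sizeof(float), width);
hipMemcpy2D(dA, pitch, hA, width * sizeof(float), width * sizeof(float), width, hipMemcpyHostToDevice);
hipMemcpy2D(dB, pitch, hB, width * sizeof(float), width * sizeof(float), width, hipMemcpyHostToDevice);
// One thread per row; the kernel itself loops over the columns.
int threadsPerBlock = 256;
int blocks = (width + threadsPerBlock - 1) / threadsPerBlock;
hipLaunchKernelGGL(matrixAdd_B_Kernel, dim3(blocks), dim3(threadsPerBlock), 0, 0, dA, dB, dC, pitch, width);
hipMemcpy2D(hC, width * sizeof(float), dC, pitch, width * sizeof(float), width, hipMemcpyDeviceToHost);
hipFree(dA); hipFree(dB); hipFree(dC);
}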
|
23d031ed7e029d74c74fddd0478ba95266761d8e.cu
|
#include "includes.h"
__global__ void matrixAdd_B_Kernel(float* A, float* B, float* C, size_t pitch, int width){
//compute indexes
int row = blockIdx.x * blockDim.x + threadIdx.x;
int rowWidthWithPad = pitch/sizeof(float);
if(row < width){
for (int col = 0; col < width; ++col) {
if(col < width)
C[row * rowWidthWithPad + col] = A[row * rowWidthWithPad + col] + B[row * rowWidthWithPad + col];
}
}
}
|
06f09c948dd96690c0494c734bb56a406121919d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#define CUDA_ERROR_EXIT(str) do{\
hipError_t err = hipGetLastError();\
if( err != hipSuccess){\
printf("Cuda Error: '%s' for %s\n", hipGetErrorString(err), str);\
exit(-1);\
}\
}while(0);
#define TDIFF(start, end) ((end.tv_sec - start.tv_sec) * 1000000UL + (end.tv_usec - start.tv_usec))
#define USAGE_EXIT(s) do\
{\
printf("Usage: %s <# of elements> <random seed> \n%s\n", argv[0], s);\
exit(-1);\
}while(0);
__global__ void xor_piece(int *arr, int *step, int num)
{
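// Tree reduction: on each pass, thread i XORs element i*step with the element
// step/2 positions ahead; the host doubles the step between passes, so the
// running XOR of all elements accumulates into arr[0].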
int i = (blockDim.x * blockIdx.x + threadIdx.x);
if (((float) num) / i < *step)
return;
i *= *step;
if ((i >= num) || ((i + (*step) / 2) >= num))
return;
arr[i] ^= arr[i + (*step) / 2];
}
__global__ void double_step(int* step)
{
*step *= 2;
}
int main(int argc, char **argv)
{
struct timeval start, end, t_start, t_end;
int i;
int *host_mem;
int *gpu_mem;
int *host_step;
int *gpu_step;
int *answer;
unsigned long num; /*Number of elements, read from argv*/
int blocks, seed;
if(argc != 3)
USAGE_EXIT("Not enough parameters");
num = atoi(argv[1]); /*Update after checking*/
if(num <= 0)
USAGE_EXIT("Invalid number of elements");
seed = atoi(argv[2]); /*Update after checking*/
if(seed <= 0)
USAGE_EXIT("Invalid number of elements");
/* Allocate host (CPU) memory and initialize*/
host_mem = (int*)malloc(num * sizeof(int));
srand(seed);
for(i=0; i<num; ++i){
host_mem[i] = random();
}
answer = (int*)malloc(sizeof(int));
host_step = (int*)malloc(sizeof(int));
*host_step = 2;
gettimeofday(&t_start, NULL);
/* Allocate GPU memory and copy from CPU --> GPU*/
hipMalloc(&gpu_mem, num * sizeof(int));
CUDA_ERROR_EXIT("hipMalloc");
hipMalloc(&gpu_step, sizeof(int));
CUDA_ERROR_EXIT("hipMalloc");
hipMemcpy(gpu_mem, host_mem, num * sizeof(int) , hipMemcpyHostToDevice);
CUDA_ERROR_EXIT("hipMemcpy");
hipMemcpy(gpu_step, host_step, sizeof(int) , hipMemcpyHostToDevice);
CUDA_ERROR_EXIT("hipMemcpy");
gettimeofday(&start, NULL);
blocks = num / 2048;
if(num % 2048)
++blocks;
while((*host_step / 2) <= num)
{
hipLaunchKernelGGL(( xor_piece), dim3(blocks), dim3(1024), 0, 0, gpu_mem, gpu_step, num);
CUDA_ERROR_EXIT("kernel invocation");
hipLaunchKernelGGL(( double_step), dim3(1), dim3(1), 0, 0, gpu_step);
CUDA_ERROR_EXIT("kernel invocation");
*host_step *= 2;
}
gettimeofday(&end, NULL);
/* Copy back result*/
hipMemcpy(answer, gpu_mem, sizeof(int) , hipMemcpyDeviceToHost);
CUDA_ERROR_EXIT("memcpy");
gettimeofday(&t_end, NULL);
printf("Total time = %ld microsecs. Processsing = %ld microsecs\n", TDIFF(t_start, t_end), TDIFF(start, end));
hipFree(gpu_mem);
/*Print the answer*/
printf("Result = %d\n", *answer);
/**answer = 0;
for (i = 0; i < num; i++)
*answer ^= host_mem[i];
printf("Actual answer = %d\n", *answer);*/
free(host_mem);
free(host_step);
free(answer);
}
|
06f09c948dd96690c0494c734bb56a406121919d.cu
|
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#define CUDA_ERROR_EXIT(str) do{\
cudaError err = cudaGetLastError();\
if( err != cudaSuccess){\
printf("Cuda Error: '%s' for %s\n", cudaGetErrorString(err), str);\
exit(-1);\
}\
}while(0);
#define TDIFF(start, end) ((end.tv_sec - start.tv_sec) * 1000000UL + (end.tv_usec - start.tv_usec))
#define USAGE_EXIT(s) do\
{\
printf("Usage: %s <# of elements> <random seed> \n%s\n", argv[0], s);\
exit(-1);\
}while(0);
__global__ void xor_piece(int *arr, int *step, int num)
{
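// Tree reduction: on each pass, thread i XORs element i*step with the element
// step/2 positions ahead; the host doubles the step between passes, so the
// running XOR of all elements accumulates into arr[0].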
int i = (blockDim.x * blockIdx.x + threadIdx.x);
if (((float) num) / i < *step)
return;
i *= *step;
if ((i >= num) || ((i + (*step) / 2) >= num))
return;
arr[i] ^= arr[i + (*step) / 2];
}
__global__ void double_step(int* step)
{
*step *= 2;
}
int main(int argc, char **argv)
{
struct timeval start, end, t_start, t_end;
int i;
int *host_mem;
int *gpu_mem;
int *host_step;
int *gpu_step;
int *answer;
unsigned long num; /*Number of elements, read from argv*/
int blocks, seed;
if(argc != 3)
USAGE_EXIT("Not enough parameters");
num = atoi(argv[1]); /*Update after checking*/
if(num <= 0)
USAGE_EXIT("Invalid number of elements");
seed = atoi(argv[2]); /*Update after checking*/
if(seed <= 0)
USAGE_EXIT("Invalid number of elements");
/* Allocate host (CPU) memory and initialize*/
host_mem = (int*)malloc(num * sizeof(int));
srand(seed);
for(i=0; i<num; ++i){
host_mem[i] = random();
}
answer = (int*)malloc(sizeof(int));
host_step = (int*)malloc(sizeof(int));
*host_step = 2;
gettimeofday(&t_start, NULL);
/* Allocate GPU memory and copy from CPU --> GPU*/
cudaMalloc(&gpu_mem, num * sizeof(int));
CUDA_ERROR_EXIT("cudaMalloc");
cudaMalloc(&gpu_step, sizeof(int));
CUDA_ERROR_EXIT("cudaMalloc");
cudaMemcpy(gpu_mem, host_mem, num * sizeof(int) , cudaMemcpyHostToDevice);
CUDA_ERROR_EXIT("cudaMemcpy");
cudaMemcpy(gpu_step, host_step, sizeof(int) , cudaMemcpyHostToDevice);
CUDA_ERROR_EXIT("cudaMemcpy");
gettimeofday(&start, NULL);
blocks = num / 2048;
if(num % 2048)
++blocks;
while((*host_step / 2) <= num)
{
xor_piece<<<blocks, 1024>>>(gpu_mem, gpu_step, num);
CUDA_ERROR_EXIT("kernel invocation");
double_step<<<1, 1>>>(gpu_step);
CUDA_ERROR_EXIT("kernel invocation");
*host_step *= 2;
}
gettimeofday(&end, NULL);
/* Copy back result*/
cudaMemcpy(answer, gpu_mem, sizeof(int) , cudaMemcpyDeviceToHost);
CUDA_ERROR_EXIT("memcpy");
gettimeofday(&t_end, NULL);
printf("Total time = %ld microsecs. Processsing = %ld microsecs\n", TDIFF(t_start, t_end), TDIFF(start, end));
cudaFree(gpu_mem);
/*Print the answer*/
printf("Result = %d\n", *answer);
/**answer = 0;
for (i = 0; i < num; i++)
*answer ^= host_mem[i];
printf("Actual answer = %d\n", *answer);*/
free(host_mem);
free(host_step);
free(answer);
}
|
316d22b8e328cfd53aa07562a6064bd05eef6069.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <vector>
#include <iostream>
#include <fstream>
#include <string>
#define BLOCK_SIZE 64
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
template< typename T >
void check(T result, char const *const func, const char *const file, int const line)
{
if (result)
{
fprintf(stderr, "CUDA error at %s:%d code=%d \"%s\" \n",
file, line, static_cast<unsigned int>(result), func);
hipDeviceReset();
// Make sure we call CUDA Device Reset before exiting
exit(EXIT_FAILURE);
}
}
#define checkCudaErrors(val) check ( (val), #val, __FILE__, __LINE__ )
__global__ void CalculateDrawdownSeq(float *prices, float currTp, float *CalculateDrawdown, int *duration, int N)
{
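	// Brute-force scan: each thread treats bar absTx as an entry point and
	// walks forward until the price has risen by currTp, recording the worst
	// drawdown seen along the way and the number of bars taken (-1 if the
	// target is never reached).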
int absTx = blockIdx.x*blockDim.x + threadIdx.x;
//printf("Result writing for %d", absTx);
if (absTx >= N)
return;
float threshold = prices[absTx] + currTp;
float openPrice = prices[absTx];
float drawdown = 0;
int step;
for (step = absTx; step < N ; step++)
{
if (openPrice - prices[step] > drawdown)
drawdown = openPrice - prices[step];
if (prices[step] >= threshold)
break;
}
CalculateDrawdown[absTx] = drawdown;
if (step < N)
duration[absTx] = step - absTx;
else
duration[absTx] = -1;
}
__global__ void CalculateDrawdownTile(float *prices, float currTp, float *CalculateDrawdown, int *duration, int N)
{
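	// Tiled variant: the block stages BLOCK_SIZE prices at a time into shared
	// memory and every thread scans the staged tiles from its own bar until
	// its take-profit threshold is hit; calculatedCount lets the block stop
	// fetching further tiles once all of its threads have finished.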
int absTx = blockIdx.x*blockDim.x + threadIdx.x;
int tx = threadIdx.x;
float maxDrawdown = 0;
float open = prices[absTx];
float threshold = open + currTp;
__shared__ float my_tile[BLOCK_SIZE];
__shared__ int calculatedCount;
calculatedCount = 0;
int stride = 0;
bool isPointCalculated = false;
__syncthreads();
for (; calculatedCount < BLOCK_SIZE; stride++)
{
int strideTile = stride *BLOCK_SIZE;
if (absTx + strideTile < N)
my_tile[tx] = prices[absTx + strideTile];
else
my_tile[tx] = 0;
__syncthreads();
if (!isPointCalculated)
{
int temp = strideTile + blockIdx.x * blockDim.x;
for (int i = stride == 0 ? tx : 0; i < BLOCK_SIZE; i++)
{
if (temp+ i >= N)
{
isPointCalculated = true;
atomicAdd(&calculatedCount, 1);
CalculateDrawdown[absTx] = maxDrawdown;
duration[absTx] = -1;
break;
}
if (maxDrawdown < open - my_tile[i])
maxDrawdown = open - my_tile[i];
if (my_tile[i] >= threshold)
{
duration[absTx] = strideTile + i - tx;
CalculateDrawdown[absTx] = maxDrawdown;
isPointCalculated = true;
atomicAdd(&calculatedCount, 1);
break;
}
}
}
__syncthreads();
}
}
void CudaCalculateAll(std::ofstream *outputFile, std::vector<float> &prices, std::vector<std::string> &vectorDates, float startTp, float endTp, float stepTp, bool tileMode=false)
{
int barCount = prices.size();
float *d_P;
checkCudaErrors(hipMalloc((void**)&d_P, barCount*sizeof(float)));
checkCudaErrors(hipMemcpy(d_P, &prices[0], barCount*sizeof(float), hipMemcpyHostToDevice));
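	// Sweep the take-profit range: each pass launches one kernel over all
	// bars and copies the per-bar drawdown/duration back for CSV output.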
for (float currTp = startTp; currTp <= endTp; currTp += stepTp)
{
std::vector<float> vectorDrawdown(barCount);
std::vector<int> vectorDuration(barCount);
float *d_Drawdown;
int *d_Duration;
checkCudaErrors(hipMalloc((void**)&d_Drawdown, barCount*sizeof(float)));
checkCudaErrors(hipMalloc((void**)&d_Duration, barCount*sizeof(int)));
dim3 dimGrid((barCount-1)/BLOCK_SIZE + 1, 1, 1);
dim3 dimBlock(BLOCK_SIZE, 1, 1);
if ( tileMode)
hipLaunchKernelGGL(( CalculateDrawdownTile), dim3(dimGrid), dim3(dimBlock), 0, 0, d_P, currTp, d_Drawdown, d_Duration, barCount);
else
			hipLaunchKernelGGL(( CalculateDrawdownSeq), dim3(dimGrid), dim3(dimBlock), 0, 0, d_P, currTp, d_Drawdown, d_Duration, barCount);
checkCudaErrors(hipDeviceSynchronize());
checkCudaErrors(hipMemcpy(&vectorDrawdown[0], d_Drawdown, barCount*sizeof(float), hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(&vectorDuration[0], d_Duration, barCount*sizeof(int), hipMemcpyDeviceToHost));
if (outputFile != NULL)
for (int i = 0; i < barCount; i++)
*outputFile << vectorDates[i] << ", " << currTp << ", " << vectorDrawdown[i] << ", " << vectorDuration[i] << "\n";
}
}
|
316d22b8e328cfd53aa07562a6064bd05eef6069.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <vector>
#include <iostream>
#include <fstream>
#include <string>
#define BLOCK_SIZE 64
// This will output the proper CUDA error strings in the event that a CUDA host call returns an error
template< typename T >
void check(T result, char const *const func, const char *const file, int const line)
{
if (result)
{
fprintf(stderr, "CUDA error at %s:%d code=%d \"%s\" \n",
file, line, static_cast<unsigned int>(result), func);
cudaDeviceReset();
// Make sure we call CUDA Device Reset before exiting
exit(EXIT_FAILURE);
}
}
#define checkCudaErrors(val) check ( (val), #val, __FILE__, __LINE__ )
__global__ void CalculateDrawdownSeq(float *prices, float currTp, float *CalculateDrawdown, int *duration, int N)
{
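	// Brute-force scan: each thread treats bar absTx as an entry point and
	// walks forward until the price has risen by currTp, recording the worst
	// drawdown seen along the way and the number of bars taken (-1 if the
	// target is never reached).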
int absTx = blockIdx.x*blockDim.x + threadIdx.x;
//printf("Result writing for %d", absTx);
if (absTx >= N)
return;
float threshold = prices[absTx] + currTp;
float openPrice = prices[absTx];
float drawdown = 0;
int step;
for (step = absTx; step < N ; step++)
{
if (openPrice - prices[step] > drawdown)
drawdown = openPrice - prices[step];
if (prices[step] >= threshold)
break;
}
CalculateDrawdown[absTx] = drawdown;
if (step < N)
duration[absTx] = step - absTx;
else
duration[absTx] = -1;
}
__global__ void CalculateDrawdownTile(float *prices, float currTp, float *CalculateDrawdown, int *duration, int N)
{
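	// Tiled variant: the block stages BLOCK_SIZE prices at a time into shared
	// memory and every thread scans the staged tiles from its own bar until
	// its take-profit threshold is hit; calculatedCount lets the block stop
	// fetching further tiles once all of its threads have finished.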
int absTx = blockIdx.x*blockDim.x + threadIdx.x;
int tx = threadIdx.x;
float maxDrawdown = 0;
float open = prices[absTx];
float threshold = open + currTp;
__shared__ float my_tile[BLOCK_SIZE];
__shared__ int calculatedCount;
calculatedCount = 0;
int stride = 0;
bool isPointCalculated = false;
__syncthreads();
for (; calculatedCount < BLOCK_SIZE; stride++)
{
int strideTile = stride *BLOCK_SIZE;
if (absTx + strideTile < N)
my_tile[tx] = prices[absTx + strideTile];
else
my_tile[tx] = 0;
__syncthreads();
if (!isPointCalculated)
{
int temp = strideTile + blockIdx.x * blockDim.x;
for (int i = stride == 0 ? tx : 0; i < BLOCK_SIZE; i++)
{
if (temp+ i >= N)
{
isPointCalculated = true;
atomicAdd(&calculatedCount, 1);
CalculateDrawdown[absTx] = maxDrawdown;
duration[absTx] = -1;
break;
}
if (maxDrawdown < open - my_tile[i])
maxDrawdown = open - my_tile[i];
if (my_tile[i] >= threshold)
{
duration[absTx] = strideTile + i - tx;
CalculateDrawdown[absTx] = maxDrawdown;
isPointCalculated = true;
atomicAdd(&calculatedCount, 1);
break;
}
}
}
__syncthreads();
}
}
void CudaCalculateAll(std::ofstream *outputFile, std::vector<float> &prices, std::vector<std::string> &vectorDates, float startTp, float endTp, float stepTp, bool tileMode=false)
{
int barCount = prices.size();
float *d_P;
checkCudaErrors(cudaMalloc((void**)&d_P, barCount*sizeof(float)));
checkCudaErrors(cudaMemcpy(d_P, &prices[0], barCount*sizeof(float), cudaMemcpyHostToDevice));
for (float currTp = startTp; currTp <= endTp; currTp += stepTp)
{
std::vector<float> vectorDrawdown(barCount);
std::vector<int> vectorDuration(barCount);
float *d_Drawdown;
int *d_Duration;
checkCudaErrors(cudaMalloc((void**)&d_Drawdown, barCount*sizeof(float)));
checkCudaErrors(cudaMalloc((void**)&d_Duration, barCount*sizeof(int)));
dim3 dimGrid((barCount-1)/BLOCK_SIZE + 1, 1, 1);
dim3 dimBlock(BLOCK_SIZE, 1, 1);
if ( tileMode)
CalculateDrawdownTile<<<dimGrid, dimBlock>>>(d_P, currTp, d_Drawdown, d_Duration, barCount);
else
CalculateDrawdownSeq<<<dimGrid, dimBlock>>>(d_P, currTp, d_Drawdown, d_Duration, barCount);
checkCudaErrors(cudaDeviceSynchronize());
checkCudaErrors(cudaMemcpy(&vectorDrawdown[0], d_Drawdown, barCount*sizeof(float), cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(&vectorDuration[0], d_Duration, barCount*sizeof(int), cudaMemcpyDeviceToHost));
if (outputFile != NULL)
for (int i = 0; i < barCount; i++)
*outputFile << vectorDates[i] << ", " << currTp << ", " << vectorDrawdown[i] << ", " << vectorDuration[i] << "\n";
}
}
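// Usage sketch (added for illustration, not part of the original file): a minimal driver showing how
// CudaCalculateAll might be called. The synthetic price series, output file name and take-profit range
// below are assumptions; a real caller would supply its own bars and dates. Kept commented out so the
// translation unit still builds without a second main().
/*
#include <cmath>
int main()
{
const int barCount = 256; // a multiple of BLOCK_SIZE keeps the tiled kernel fully in range
std::vector<float> prices(barCount);
std::vector<std::string> dates(barCount);
for (int i = 0; i < barCount; i++)
{
prices[i] = 100.0f + 5.0f * sinf(i * 0.1f); // synthetic oscillating closes
dates[i] = "bar_" + std::to_string(i);
}
std::ofstream out("drawdown.csv");
// Sweeps take-profit targets from 1 to 5 price units in steps of 1, using the tiled kernel.
CudaCalculateAll(&out, prices, dates, 1.0f, 5.0f, 1.0f, true);
return 0;
}
*/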
|
23fcebda6e337a8eced6cf4f46b8d77f9b168998.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Math.cuh>
namespace at { namespace native {
void digamma_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "digamma_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_digamma(a);
});
});
}
void trigamma_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "trigamma_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_trigamma(a);
});
});
}
void polygamma_kernel_cuda(TensorIteratorBase& iter, int64_t n) {
if (n == 0) {
digamma_kernel_cuda(iter);
} else if (n == 1) {
trigamma_kernel_cuda(iter);
} else {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "polygamma_cuda", [&]() {
gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_polygamma(int(n), a);
});
});
}
}
void lgamma_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "lgamma_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::lgamma(a);
});
});
}
REGISTER_DISPATCH(digamma_stub, &digamma_kernel_cuda);
REGISTER_DISPATCH(polygamma_stub, &polygamma_kernel_cuda);
REGISTER_DISPATCH(lgamma_stub, &lgamma_kernel_cuda);
}} // namespace at::native
|
23fcebda6e337a8eced6cf4f46b8d77f9b168998.cu
|
#include <limits>
#include <ATen/native/UnaryOps.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/AccumulateType.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Math.cuh>
namespace at { namespace native {
void digamma_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "digamma_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_digamma(a);
});
});
}
void trigamma_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "trigamma_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_trigamma(a);
});
});
}
void polygamma_kernel_cuda(TensorIteratorBase& iter, int64_t n) {
if (n == 0) {
digamma_kernel_cuda(iter);
} else if (n == 1) {
trigamma_kernel_cuda(iter);
} else {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "polygamma_cuda", [&]() {
gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_polygamma(int(n), a);
});
});
}
}
void lgamma_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "lgamma_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::lgamma(a);
});
});
}
REGISTER_DISPATCH(digamma_stub, &digamma_kernel_cuda);
REGISTER_DISPATCH(polygamma_stub, &polygamma_kernel_cuda);
REGISTER_DISPATCH(lgamma_stub, &lgamma_kernel_cuda);
}} // namespace at::native
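// Note added for context: through the dispatch stubs registered above, these kernels back the CUDA paths
// of torch.digamma, torch.polygamma and torch.lgamma; e.g. torch.polygamma(2, x) on a CUDA tensor reaches
// polygamma_kernel_cuda, which itself falls back to the digamma/trigamma kernels for n = 0 and n = 1.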
|
e6d33dc15daa1d0130488587494cd0f1a45f5bbf.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* ising_cuda_v2.cu
*
* Created on: Jan 01, 2019
* Author: Charalampos Eleftheriadis
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define N 512
#define threadsNum 64
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// Kernel Function.
__global__ void spin(int *G, double *w, int *newG, int n) {
// Calculates the first Atomic Spin index. Note: n/blockDim.x=sqrt(gridDim.x).
int index = (blockIdx.x/(n/blockDim.x))*n*blockDim.x + (blockIdx.x%(n/blockDim.x))*blockDim.x + threadIdx.x;
// Checks for out of bounds indexing and if so quits.
if (index >= n*n)
return;
double weightSum;
for (int i=index; i<index+blockDim.x*n; i+=n) {
weightSum = 0;
// Calculates weight contribution for each neighboring Atomic Spin and sums it.
weightSum += w[0] * G[((i/n - 2 + n)%n) * n + (i - 2 + n)%n];
weightSum += w[1] * G[((i/n - 2 + n)%n) * n + (i - 1 + n)%n];
weightSum += w[2] * G[((i/n - 2 + n)%n) * n + (i)%n];
weightSum += w[3] * G[((i/n - 2 + n)%n) * n + (i + 1 + n)%n];
weightSum += w[4] * G[((i/n - 2 + n)%n) * n + (i + 2 + n)%n];
weightSum += w[5] * G[((i/n - 1 + n)%n) * n + (i - 2 + n)%n];
weightSum += w[6] * G[((i/n - 1 + n)%n) * n + (i - 1 + n)%n];
weightSum += w[7] * G[((i/n - 1 + n)%n) * n + (i)%n];
weightSum += w[8] * G[((i/n - 1 + n)%n) * n + (i + 1 + n)%n];
weightSum += w[9] * G[((i/n - 1 + n)%n) * n + (i + 2 + n)%n];
weightSum += w[10] * G[((i/n + n)%n) * n + (i - 2 + n)%n];
weightSum += w[11] * G[((i/n + n)%n) * n + (i - 1 + n)%n];
// w[12] is not contributing anything. It's the current Atomic Spin.
weightSum += w[13] * G[((i/n + n)%n) * n + (i + 1 + n)%n];
weightSum += w[14] * G[((i/n + n)%n) * n + (i + 2 + n)%n];
weightSum += w[15] * G[((i/n + 1 + n)%n) * n + (i - 2 + n)%n];
weightSum += w[16] * G[((i/n + 1 + n)%n) * n + (i - 1 + n)%n];
weightSum += w[17] * G[((i/n + 1 + n)%n) * n + (i)%n];
weightSum += w[18] * G[((i/n + 1 + n)%n) * n + (i + 1 + n)%n];
weightSum += w[19] * G[((i/n + 1 + n)%n) * n + (i + 2 + n)%n];
weightSum += w[20] * G[((i/n + 2 + n)%n) * n + (i - 2 + n)%n];
weightSum += w[21] * G[((i/n + 2 + n)%n) * n + (i - 1 + n)%n];
weightSum += w[22] * G[((i/n + 2 + n)%n) * n + (i)%n];
weightSum += w[23] * G[((i/n + 2 + n)%n) * n + (i + 1 + n)%n];
weightSum += w[24] * G[((i/n + 2 + n)%n) * n + (i + 2 + n)%n];
//! Can it be done more efficiently?
if (weightSum > 0.0001)
newG[i] = 1;
else if (weightSum < -0.0001)
newG[i] = -1;
else
newG[i] = G[i];
}
}
// Kernel Function that checks whether the new Atomic Spins Matrix is the same as the old one.
__global__ void check(int *G, int *newG, int n, int *same) {
// Calculates Atomic Spin index.
int index = (blockIdx.x/(n/blockDim.x))*n*blockDim.x + (blockIdx.x%(n/blockDim.x))*blockDim.x + threadIdx.x;
// Checks for out of bounds indexing and if so quits.
if (index >= n*n)
return;
for (int i=index; i<index+blockDim.x*n; i+=n)
// Compares the element this iteration actually visits, not just the thread's first one.
if (G[i] != newG[i]) {
*same = 0;
break;
}
}
void ising(int *G, double *w, int k, int n) {
// Creates and transfers the Weight Matrix to GPU memory.
double *w_d;
int w_size = 25*sizeof(double);
gpuErrchk( hipMalloc((void **) &w_d, w_size) );
gpuErrchk( hipMemcpy(w_d, w, w_size, hipMemcpyHostToDevice) );
// Creates and transfers the Atomic Spins Matrix to GPU memory.
int *G_d;
int G_size = n*n*sizeof(int);
gpuErrchk( hipMalloc((void **) &G_d, G_size) );
gpuErrchk( hipMemcpy(G_d, G, G_size, hipMemcpyHostToDevice) );
// Creates the new Atomic Spins Matrix to GPU memory.
int *newG_d;
gpuErrchk( hipMalloc((void **) &newG_d, G_size) );
// Creates a flag indicating whether the new and old Atomic Spins Matrices are identical, and transfers it to GPU memory.
int same = 1;
int *same_d;
gpuErrchk( hipMalloc((void **) &same_d, sizeof(int)) );
gpuErrchk( hipMemcpy(same_d, &same, sizeof(int), hipMemcpyHostToDevice) );
// Creates a temporary pointer used for swapping the Atomic Spin Matrices between iterations.
int *temp_d;
// Iterates up to k times, stopping early once the lattice stops changing.
for (int i=0; i<k; i++) {
// Calls the kernel function balancing load to (n/threadsNum)^2 blocks with threadsNum threads each.
// Each thread calculates threadsNum spins.
//! User has to specify numbers fitting the data correctly (sqrt(gridDim) * blockDim = n).
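// E.g. with N = 512 and threadsNum = 64 this launches (512/64)^2 = 64 blocks of 64 threads,
// and each thread updates 64 spins, covering all 512*512 lattice sites.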
hipLaunchKernelGGL(( spin), dim3(n/threadsNum*n/threadsNum),dim3(threadsNum), 0, 0, G_d, w_d, newG_d, n);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
// Resets the comparison flag before every check; otherwise one mismatch would keep it at 0 for all later iterations.
same = 1;
gpuErrchk( hipMemcpy(same_d, &same, sizeof(int), hipMemcpyHostToDevice) );
hipLaunchKernelGGL(( check), dim3(n/threadsNum*n/threadsNum),dim3(threadsNum), 0, 0, G_d, newG_d, n, same_d);
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
gpuErrchk( hipMemcpy(&same, same_d, sizeof(int), hipMemcpyDeviceToHost) );
if (same)
break;
// Atomix Spin Matrices' pointers swapping.
temp_d = G_d;
G_d = newG_d;
newG_d = temp_d;
}
// Copies data from GPU to CPU memory.
gpuErrchk( hipMemcpy(G, G_d, G_size, hipMemcpyDeviceToHost) );
// Cleanup.
gpuErrchk( hipFree(w_d) );
gpuErrchk( hipFree(G_d) );
gpuErrchk( hipFree(newG_d) );
}
int main() {
// Weight Matrix.
double w[] = { 0.004, 0.016, 0.026, 0.016, 0.004,
0.016, 0.071, 0.117, 0.071, 0.016,
0.026, 0.117, 0.000, 0.117, 0.026,
0.016, 0.071, 0.117, 0.071, 0.016,
0.004, 0.016, 0.026, 0.016, 0.004 };
// Number of dimensions for the square Atomic Spins Matrix.
int n = N;
// Allocates memory for the Atomic Spins Matrix.
int *G = (int *)malloc(n*n * sizeof(int));
// Randomizes seed.
srand(time(NULL));
// Fills the Atomic Spins Matrix with "-1" and "1" values from a uniform distribution.
for (int i=0; i<n*n; i++)
G[i] = ((rand() % 2) * 2) - 1;
/*
// Reads configuration file.
size_t readStatus;
FILE *conf_init = fopen("conf-init.bin","rb");
int initG[n*n];
readStatus = fread(&initG, sizeof(int), n*n, conf_init);
if (readStatus != n*n)
printf("Could not read conf-init.bin file.\n");
fclose(conf_init);
// Fills the Atomic Spins Matrix with "-1" and "1" values from configuration file.
for (int i=0; i<n*n; i++)
G[i] = initG[i];
*/
ising(G, w, 10, n);
/*
// Reads configuration file for state after one iteration.
size_t readStatus1;
FILE *conf_1 = fopen("conf-1.bin","rb");
int G1[n*n];
readStatus1 = fread(&G1, sizeof(int), n*n, conf_1);
if (readStatus1 != n*n)
printf("Could not read conf-1.bin file.\n");
fclose(conf_1);
// Checks for errors.
int errorsNum = 0;
for (int i=0; i<n; i++)
for (int j=0; j<n; j++)
if (G[i*n+j] != G1[i*n+j])
errorsNum++;
if (errorsNum == 0)
printf("Correct Results!\n");
else
printf("Wrong Results. Number of errors: %d\n", errorsNum);
// Checks the results.
for (int i=0; i<n; i++) {
for (int j=0; j<n; j++) {
if (G[i*n+j] == G1[i*n+j])
printf("=");
else
printf("!");
}
printf("\n");
}
printf("\n\n");
*/
return 0;
}
|
e6d33dc15daa1d0130488587494cd0f1a45f5bbf.cu
|
/*
* ising_cuda_v2.cu
*
* Created on: Jan 01, 2019
* Author: Charalampos Eleftheriadis
*/
#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#define N 512
#define threadsNum 64
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
// Kernel Function.
__global__ void spin(int *G, double *w, int *newG, int n) {
// Calculates the first Atomic Spin index. Note: n/blockDim.x=sqrt(gridDim.x).
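// Worked example: with n = 512 and blockDim.x = 64, block b covers the 64x64 tile whose top-left element
// is at row (b/8)*64, column (b%8)*64; thread tid owns column (b%8)*64 + tid, and the row loop below walks
// its 64 consecutive rows.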
int index = (blockIdx.x/(n/blockDim.x))*n*blockDim.x + (blockIdx.x%(n/blockDim.x))*blockDim.x + threadIdx.x;
// Checks for out of bounds indexing and if so quits.
if (index >= n*n)
return;
double weightSum;
for (int i=index; i<index+blockDim.x*n; i+=n) {
weightSum = 0;
// Calculates weight contribution for each neighboring Atomic Spin and sums it.
weightSum += w[0] * G[((i/n - 2 + n)%n) * n + (i - 2 + n)%n];
weightSum += w[1] * G[((i/n - 2 + n)%n) * n + (i - 1 + n)%n];
weightSum += w[2] * G[((i/n - 2 + n)%n) * n + (i)%n];
weightSum += w[3] * G[((i/n - 2 + n)%n) * n + (i + 1 + n)%n];
weightSum += w[4] * G[((i/n - 2 + n)%n) * n + (i + 2 + n)%n];
weightSum += w[5] * G[((i/n - 1 + n)%n) * n + (i - 2 + n)%n];
weightSum += w[6] * G[((i/n - 1 + n)%n) * n + (i - 1 + n)%n];
weightSum += w[7] * G[((i/n - 1 + n)%n) * n + (i)%n];
weightSum += w[8] * G[((i/n - 1 + n)%n) * n + (i + 1 + n)%n];
weightSum += w[9] * G[((i/n - 1 + n)%n) * n + (i + 2 + n)%n];
weightSum += w[10] * G[((i/n + n)%n) * n + (i - 2 + n)%n];
weightSum += w[11] * G[((i/n + n)%n) * n + (i - 1 + n)%n];
// w[12] is not contributing anything. It's the current Atomic Spin.
weightSum += w[13] * G[((i/n + n)%n) * n + (i + 1 + n)%n];
weightSum += w[14] * G[((i/n + n)%n) * n + (i + 2 + n)%n];
weightSum += w[15] * G[((i/n + 1 + n)%n) * n + (i - 2 + n)%n];
weightSum += w[16] * G[((i/n + 1 + n)%n) * n + (i - 1 + n)%n];
weightSum += w[17] * G[((i/n + 1 + n)%n) * n + (i)%n];
weightSum += w[18] * G[((i/n + 1 + n)%n) * n + (i + 1 + n)%n];
weightSum += w[19] * G[((i/n + 1 + n)%n) * n + (i + 2 + n)%n];
weightSum += w[20] * G[((i/n + 2 + n)%n) * n + (i - 2 + n)%n];
weightSum += w[21] * G[((i/n + 2 + n)%n) * n + (i - 1 + n)%n];
weightSum += w[22] * G[((i/n + 2 + n)%n) * n + (i)%n];
weightSum += w[23] * G[((i/n + 2 + n)%n) * n + (i + 1 + n)%n];
weightSum += w[24] * G[((i/n + 2 + n)%n) * n + (i + 2 + n)%n];
//! Can it be done more efficiently?
if (weightSum > 0.0001)
newG[i] = 1;
else if (weightSum < -0.0001)
newG[i] = -1;
else
newG[i] = G[i];
}
}
// Kernel Function that checks whether the new Atomic Spins Matrix is the same as the old one.
__global__ void check(int *G, int *newG, int n, int *same) {
// Calculates Atomic Spin index.
int index = (blockIdx.x/(n/blockDim.x))*n*blockDim.x + (blockIdx.x%(n/blockDim.x))*blockDim.x + threadIdx.x;
// Checks for out of bounds indexing and if so quits.
if (index >= n*n)
return;
for (int i=index; i<index+blockDim.x*n; i+=n)
// Compares the element this iteration actually visits, not just the thread's first one.
if (G[i] != newG[i]) {
*same = 0;
break;
}
}
void ising(int *G, double *w, int k, int n) {
// Creates and transfers the Weight Matrix to GPU memory.
double *w_d;
int w_size = 25*sizeof(double);
gpuErrchk( cudaMalloc((void **) &w_d, w_size) );
gpuErrchk( cudaMemcpy(w_d, w, w_size, cudaMemcpyHostToDevice) );
// Creates and transfers the Atomic Spins Matrix to GPU memory.
int *G_d;
int G_size = n*n*sizeof(int);
gpuErrchk( cudaMalloc((void **) &G_d, G_size) );
gpuErrchk( cudaMemcpy(G_d, G, G_size, cudaMemcpyHostToDevice) );
// Creates the new Atomic Spins Matrix to GPU memory.
int *newG_d;
gpuErrchk( cudaMalloc((void **) &newG_d, G_size) );
// Creates a flag indicating whether the new and old Atomic Spins Matrices are identical, and transfers it to GPU memory.
int same = 1;
int *same_d;
gpuErrchk( cudaMalloc((void **) &same_d, sizeof(int)) );
gpuErrchk( cudaMemcpy(same_d, &same, sizeof(int), cudaMemcpyHostToDevice) );
// Creates a temporary pointer used for swapping the Atomic Spin Matrices between iterations.
int *temp_d;
// Iterates up to k times, stopping early once the lattice stops changing.
for (int i=0; i<k; i++) {
// Calls the kernel function balancing load to (n/threadsNum)^2 blocks with threadsNum threads each.
// Each thread calculates threadsNum spins.
//! User has to specify numbers fitting the data correctly (sqrt(gridDim) * blockDim = n).
spin<<<n/threadsNum*n/threadsNum,threadsNum>>>(G_d, w_d, newG_d, n);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
// Resets the comparison flag before every check; otherwise one mismatch would keep it at 0 for all later iterations.
same = 1;
gpuErrchk( cudaMemcpy(same_d, &same, sizeof(int), cudaMemcpyHostToDevice) );
check<<<n/threadsNum*n/threadsNum,threadsNum>>>(G_d, newG_d, n, same_d);
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
gpuErrchk( cudaMemcpy(&same, same_d, sizeof(int), cudaMemcpyDeviceToHost) );
if (same)
break;
// Atomic Spin Matrices' pointer swapping.
temp_d = G_d;
G_d = newG_d;
newG_d = temp_d;
}
// Copies data from GPU to CPU memory.
gpuErrchk( cudaMemcpy(G, G_d, G_size, cudaMemcpyDeviceToHost) );
// Cleanup.
gpuErrchk( cudaFree(w_d) );
gpuErrchk( cudaFree(G_d) );
gpuErrchk( cudaFree(newG_d) );
}
int main() {
// Weight Matrix.
double w[] = { 0.004, 0.016, 0.026, 0.016, 0.004,
0.016, 0.071, 0.117, 0.071, 0.016,
0.026, 0.117, 0.000, 0.117, 0.026,
0.016, 0.071, 0.117, 0.071, 0.016,
0.004, 0.016, 0.026, 0.016, 0.004 };
// Number of dimensions for the square Atomic Spins Matrix.
int n = N;
// Allocates memory for the Atomic Spins Matrix.
int *G = (int *)malloc(n*n * sizeof(int));
// Randomizes seed.
srand(time(NULL));
// Fills the Atomic Spins Matrix with "-1" and "1" values from a uniform distribution.
for (int i=0; i<n*n; i++)
G[i] = ((rand() % 2) * 2) - 1;
/*
// Reads configuration file.
size_t readStatus;
FILE *conf_init = fopen("conf-init.bin","rb");
int initG[n*n];
readStatus = fread(&initG, sizeof(int), n*n, conf_init);
if (readStatus != n*n)
printf("Could not read conf-init.bin file.\n");
fclose(conf_init);
// Fills the Atomic Spins Matrix with "-1" and "1" values from configuration file.
for (int i=0; i<n*n; i++)
G[i] = initG[i];
*/
ising(G, w, 10, n);
/*
// Reads configuration file for state after one iteration.
size_t readStatus1;
FILE *conf_1 = fopen("conf-1.bin","rb");
int G1[n*n];
readStatus1 = fread(&G1, sizeof(int), n*n, conf_1);
if (readStatus1 != n*n)
printf("Could not read conf-1.bin file.\n");
fclose(conf_1);
// Checks for errors.
int errorsNum = 0;
for (int i=0; i<n; i++)
for (int j=0; j<n; j++)
if (G[i*n+j] != G1[i*n+j])
errorsNum++;
if (errorsNum == 0)
printf("Correct Results!\n");
else
printf("Wrong Results. Number of errors: %d\n", errorsNum);
// Checks the results.
for (int i=0; i<n; i++) {
for (int j=0; j<n; j++) {
if (G[i*n+j] == G1[i*n+j])
printf("=");
else
printf("!");
}
printf("\n");
}
printf("\n\n");
*/
return 0;
}
|
04f3ec48bde3266b78f7b06889f5138790d4c92d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "j3d27pt-32x32-4-128_kernel.hu"
__device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; }
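// Structure note (added): kernel0_4 below appears to be AN5D-generated code for a 3-D 27-point stencil with
// four time steps fused per launch. Each thread streams along the c1 dimension, keeping one rotating set of
// registers per fused step and a double-buffered shared-memory tile for the current c2/c3 plane.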
__global__ void kernel0_4(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 24;
const AN5D_TYPE __side3Len = 24;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4) && __local_c3 >= (__halo3 * 4) && __local_c3 < __side3LenOl - (__halo3 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((1.500f * (__REGREF(__a, 0, 0))) + (0.500f * (__SBREF(__a_sb, -1, -1)))) + (0.700f * (__SBREF(__a_sb, -1, 0)))) + (0.900f * (__SBREF(__a_sb, -1, 1)))) + (1.200f * (__SBREF(__a_sb, 0, -1)))) + (1.201f * (__SBREF(__a_sb, 0, 1)))) + (0.901f * (__SBREF(__a_sb, 1, -1)))) + (0.701f * (__SBREF(__a_sb, 1, 0)))) + (0.501f * (__SBREF(__a_sb, 1, 1)))))))))))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((1.510f * (__REGREF(__a, 0, 0)))) + (0.510f * (__SBREF(__a_sb, -1, -1)))) + (0.710f * (__SBREF(__a_sb, -1, 0)))) + (0.910f * (__SBREF(__a_sb, -1, 1)))) + (1.210f * (__SBREF(__a_sb, 0, -1)))) + (1.211f * (__SBREF(__a_sb, 0, 1)))) + (0.911f * (__SBREF(__a_sb, 1, -1)))) + (0.711f * (__SBREF(__a_sb, 1, 0)))) + (0.511f * (__SBREF(__a_sb, 1, 1))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((1.520f * (__REGREF(__a, 0, 0)))) + (0.520f * (__SBREF(__a_sb, -1, -1)))) + (0.720f * (__SBREF(__a_sb, -1, 0)))) + (0.920f * (__SBREF(__a_sb, -1, 1)))) + (1.220f * (__SBREF(__a_sb, 0, -1)))) + (1.221f * (__SBREF(__a_sb, 0, 1)))) + (0.921f * (__SBREF(__a_sb, 1, -1)))) + (0.721f * (__SBREF(__a_sb, 1, 0)))) + (0.521f * (__SBREF(__a_sb, 1, 1)))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC4(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2);
__STORE(1, __reg_4_1);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0);
__STORE(2, __reg_4_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1);
__STORE(3, __reg_4_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2);
__STORE(4, __reg_4_1);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2);
__STORE(4, __reg_4_1);
}
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0);
__STORE(__h - 4, __reg_4_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2);
__STORE(__h - 4, __reg_4_1);
__h++;
}
if (0) {}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0);
__STORE(__h - 4, __reg_4_2);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1);
__STORE(__h - 3, __reg_4_0);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2);
__STORE(__h - 2, __reg_4_1);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_2, __reg_3_0);
__STORE(__h - 1, __reg_4_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0);
__STORE(__h - 4, __reg_4_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1);
__STORE(__h - 3, __reg_4_0);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2);
__STORE(__h - 2, __reg_4_1);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0);
__STORE(__h - 1, __reg_4_2);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_0, __reg_3_1);
__STORE(__h + 0, __reg_4_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0);
__STORE(__h - 4, __reg_4_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1);
__STORE(__h - 3, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2);
__STORE(__h - 2, __reg_4_1);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0);
__STORE(__h - 1, __reg_4_2);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1);
__STORE(__h + 0, __reg_4_0);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_1, __reg_3_2);
__STORE(__h + 1, __reg_4_1);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0);
__STORE(__h - 4, __reg_4_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2);
__STORE(__h - 4, __reg_4_1);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0);
__STORE(__h - 4, __reg_4_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2);
__STORE(__h - 4, __reg_4_1);
__h++;
}
}
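// kernel0_3 below (and kernel0_2 / kernel0_1 further down) follow the same streaming scheme with 3, 2 and 1
// fused time steps respectively; the host side presumably selects among them to cover timestep counts that
// are not a multiple of 4.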
__global__ void kernel0_3(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((1.500f * (__REGREF(__a, 0, 0))) + (0.500f * (__SBREF(__a_sb, -1, -1)))) + (0.700f * (__SBREF(__a_sb, -1, 0)))) + (0.900f * (__SBREF(__a_sb, -1, 1)))) + (1.200f * (__SBREF(__a_sb, 0, -1)))) + (1.201f * (__SBREF(__a_sb, 0, 1)))) + (0.901f * (__SBREF(__a_sb, 1, -1)))) + (0.701f * (__SBREF(__a_sb, 1, 0)))) + (0.501f * (__SBREF(__a_sb, 1, 1)))))))))))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((1.510f * (__REGREF(__a, 0, 0)))) + (0.510f * (__SBREF(__a_sb, -1, -1)))) + (0.710f * (__SBREF(__a_sb, -1, 0)))) + (0.910f * (__SBREF(__a_sb, -1, 1)))) + (1.210f * (__SBREF(__a_sb, 0, -1)))) + (1.211f * (__SBREF(__a_sb, 0, 1)))) + (0.911f * (__SBREF(__a_sb, 1, -1)))) + (0.711f * (__SBREF(__a_sb, 1, 0)))) + (0.511f * (__SBREF(__a_sb, 1, 1))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((1.520f * (__REGREF(__a, 0, 0)))) + (0.520f * (__SBREF(__a_sb, -1, -1)))) + (0.720f * (__SBREF(__a_sb, -1, 0)))) + (0.920f * (__SBREF(__a_sb, -1, 1)))) + (1.220f * (__SBREF(__a_sb, 0, -1)))) + (1.221f * (__SBREF(__a_sb, 0, 1)))) + (0.921f * (__SBREF(__a_sb, 1, -1)))) + (0.721f * (__SBREF(__a_sb, 1, 0)))) + (0.521f * (__SBREF(__a_sb, 1, 1)))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(1, __reg_3_1);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(2, __reg_3_2);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(3, __reg_3_0);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(3, __reg_3_0);
__DB_SWITCH(); __syncthreads();
}
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 3, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 3, __reg_3_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 2, __reg_3_2);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1);
__STORE(__h - 1, __reg_3_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 2, __reg_3_2);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 1, __reg_3_0);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2);
__STORE(__h + 0, __reg_3_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 2, __reg_3_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 1, __reg_3_0);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h + 0, __reg_3_1);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0);
__STORE(__h + 1, __reg_3_2);
}
}
else
{
for (__h = 7; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 3, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 3, __reg_3_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 3, __reg_3_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 3, __reg_3_0);
__h++;
}
}
__global__ void kernel0_2(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((1.500f * (__REGREF(__a, 0, 0))) + (0.500f * (__SBREF(__a_sb, -1, -1)))) + (0.700f * (__SBREF(__a_sb, -1, 0)))) + (0.900f * (__SBREF(__a_sb, -1, 1)))) + (1.200f * (__SBREF(__a_sb, 0, -1)))) + (1.201f * (__SBREF(__a_sb, 0, 1)))) + (0.901f * (__SBREF(__a_sb, 1, -1)))) + (0.701f * (__SBREF(__a_sb, 1, 0)))) + (0.501f * (__SBREF(__a_sb, 1, 1)))))))))))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((1.510f * (__REGREF(__a, 0, 0)))) + (0.510f * (__SBREF(__a_sb, -1, -1)))) + (0.710f * (__SBREF(__a_sb, -1, 0)))) + (0.910f * (__SBREF(__a_sb, -1, 1)))) + (1.210f * (__SBREF(__a_sb, 0, -1)))) + (1.211f * (__SBREF(__a_sb, 0, 1)))) + (0.911f * (__SBREF(__a_sb, 1, -1)))) + (0.711f * (__SBREF(__a_sb, 1, 0)))) + (0.511f * (__SBREF(__a_sb, 1, 1))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((1.520f * (__REGREF(__a, 0, 0)))) + (0.520f * (__SBREF(__a_sb, -1, -1)))) + (0.720f * (__SBREF(__a_sb, -1, 0)))) + (0.920f * (__SBREF(__a_sb, -1, 1)))) + (1.220f * (__SBREF(__a_sb, 0, -1)))) + (1.221f * (__SBREF(__a_sb, 0, 1)))) + (0.921f * (__SBREF(__a_sb, 1, -1)))) + (0.721f * (__SBREF(__a_sb, 1, 0)))) + (0.521f * (__SBREF(__a_sb, 1, 1)))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(1, __reg_2_1);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(2, __reg_2_2);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(2, __reg_2_2);
__DB_SWITCH(); __syncthreads();
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 2, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h - 2, __reg_2_2);
__h++;
}
if (0) {}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2);
__STORE(__h - 1, __reg_2_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 1, __reg_2_1);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0);
__STORE(__h + 0, __reg_2_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 1, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h + 0, __reg_2_2);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1);
__STORE(__h + 1, __reg_2_0);
}
}
else
{
for (__h = 5; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 2, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h - 2, __reg_2_2);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 2, __reg_2_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h - 2, __reg_2_2);
__h++;
}
}
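/* kernel0_1: single-time-step variant (__side0Len = 1) of the stencil computed by
   the larger kernels in this file; judging by the 3x3 neighbourhood accumulated
   over three consecutive c1 planes, this is a 3-D 27-point Jacobi-style update.
   Each 1024-thread block (presumably 32x32) stores a 30x30 interior region per
   plane and streams up to __side1Len = 128 planes along c1. */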
__global__ void kernel0_1(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((1.500f * (__REGREF(__a, 0, 0))) + (0.500f * (__SBREF(__a_sb, -1, -1)))) + (0.700f * (__SBREF(__a_sb, -1, 0)))) + (0.900f * (__SBREF(__a_sb, -1, 1)))) + (1.200f * (__SBREF(__a_sb, 0, -1)))) + (1.201f * (__SBREF(__a_sb, 0, 1)))) + (0.901f * (__SBREF(__a_sb, 1, -1)))) + (0.701f * (__SBREF(__a_sb, 1, 0)))) + (0.501f * (__SBREF(__a_sb, 1, 1)))))))))))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((1.510f * (__REGREF(__a, 0, 0)))) + (0.510f * (__SBREF(__a_sb, -1, -1)))) + (0.710f * (__SBREF(__a_sb, -1, 0)))) + (0.910f * (__SBREF(__a_sb, -1, 1)))) + (1.210f * (__SBREF(__a_sb, 0, -1)))) + (1.211f * (__SBREF(__a_sb, 0, 1)))) + (0.911f * (__SBREF(__a_sb, 1, -1)))) + (0.711f * (__SBREF(__a_sb, 1, 0)))) + (0.511f * (__SBREF(__a_sb, 1, 1))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((1.520f * (__REGREF(__a, 0, 0)))) + (0.520f * (__SBREF(__a_sb, -1, -1)))) + (0.720f * (__SBREF(__a_sb, -1, 0)))) + (0.920f * (__SBREF(__a_sb, -1, 1)))) + (1.220f * (__SBREF(__a_sb, 0, -1)))) + (1.221f * (__SBREF(__a_sb, 0, 1)))) + (0.921f * (__SBREF(__a_sb, 1, -1)))) + (0.721f * (__SBREF(__a_sb, 1, 0)))) + (0.521f * (__SBREF(__a_sb, 1, 1)))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(1, __reg_1_1);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(1, __reg_1_1);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 1, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 1, __reg_1_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
}
}
else
{
for (__h = 3; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 1, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 1, __reg_1_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 1, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 1, __reg_1_1);
__h++;
}
}
04f3ec48bde3266b78f7b06889f5138790d4c92d.cu
#include "j3d27pt-32x32-4-128_kernel.hu"
__device__ float __sbref_wrap(float *sb, size_t index) { return sb[index]; }
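/* kernel0_4: degree-4 temporally blocked variant (__side0Len = 4) of the 3-D
   27-point stencil for the j3d27pt-32x32-4-128 configuration (see the included
   kernel header). Four consecutive time steps are fused per sweep through the
   pipelined __reg_1_* .. __reg_4_* register stages, so each 32x32 overlapped
   tile stores only a 24x24 interior region per plane while streaming
   __side1Len = 128 planes along c1. */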
__global__ void kernel0_4(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 4;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 24;
const AN5D_TYPE __side3Len = 24;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
float __reg_4_0;
float __reg_4_1;
float __reg_4_2;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3);
const AN5D_TYPE __writeValid4 = __updateValid && __local_c2 >= (__halo2 * 4) && __local_c2 < __side2LenOl - (__halo2 * 4) && __local_c3 >= (__halo3 * 4) && __local_c3 < __side3LenOl - (__halo3 * 4);
const AN5D_TYPE __storeValid = __writeValid4;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((1.500f * (__REGREF(__a, 0, 0))) + (0.500f * (__SBREF(__a_sb, -1, -1)))) + (0.700f * (__SBREF(__a_sb, -1, 0)))) + (0.900f * (__SBREF(__a_sb, -1, 1)))) + (1.200f * (__SBREF(__a_sb, 0, -1)))) + (1.201f * (__SBREF(__a_sb, 0, 1)))) + (0.901f * (__SBREF(__a_sb, 1, -1)))) + (0.701f * (__SBREF(__a_sb, 1, 0)))) + (0.501f * (__SBREF(__a_sb, 1, 1)))))))))))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((1.510f * (__REGREF(__a, 0, 0)))) + (0.510f * (__SBREF(__a_sb, -1, -1)))) + (0.710f * (__SBREF(__a_sb, -1, 0)))) + (0.910f * (__SBREF(__a_sb, -1, 1)))) + (1.210f * (__SBREF(__a_sb, 0, -1)))) + (1.211f * (__SBREF(__a_sb, 0, 1)))) + (0.911f * (__SBREF(__a_sb, 1, -1)))) + (0.711f * (__SBREF(__a_sb, 1, 0)))) + (0.511f * (__SBREF(__a_sb, 1, 1))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((1.520f * (__REGREF(__a, 0, 0)))) + (0.520f * (__SBREF(__a_sb, -1, -1)))) + (0.720f * (__SBREF(__a_sb, -1, 0)))) + (0.920f * (__SBREF(__a_sb, -1, 1)))) + (1.220f * (__SBREF(__a_sb, 0, -1)))) + (1.221f * (__SBREF(__a_sb, 0, 1)))) + (0.921f * (__SBREF(__a_sb, 1, -1)))) + (0.721f * (__SBREF(__a_sb, 1, 0)))) + (0.521f * (__SBREF(__a_sb, 1, 1)))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC4(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid4) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2);
__STORE(1, __reg_4_1);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0);
__STORE(2, __reg_4_2);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1);
__STORE(3, __reg_4_0);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2);
__STORE(4, __reg_4_1);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0);
__LOAD(__reg_0, 7);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1);
__LOAD(__reg_0, 8);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2);
__STORE(4, __reg_4_1);
}
__a_sb = __a_sb_double + __blockSize * 0;
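/* Steady state: stream the remaining c1 planes, three per loop iteration to
   match the 3-way register rotation. The last c1 block (__c1Id == __side1Num - 1)
   drains the pipeline with the special-cased epilogues below; interior blocks
   instead run until __side1LenOl planes have been consumed and return early
   once the overlapped region is exhausted. */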
if (__c1Id == __side1Num - 1)
{
for (__h = 9; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0);
__STORE(__h - 4, __reg_4_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2);
__STORE(__h - 4, __reg_4_1);
__h++;
}
if (0) {}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0);
__STORE(__h - 4, __reg_4_2);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1);
__STORE(__h - 3, __reg_4_0);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2);
__STORE(__h - 2, __reg_4_1);
__reg_3_0 = __reg_2_0;
__CALC4(__reg_4_1, __reg_4_1, __reg_4_2, __reg_3_0);
__STORE(__h - 1, __reg_4_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0);
__STORE(__h - 4, __reg_4_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1);
__STORE(__h - 3, __reg_4_0);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2);
__STORE(__h - 2, __reg_4_1);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0);
__STORE(__h - 1, __reg_4_2);
__reg_3_1 = __reg_2_1;
__CALC4(__reg_4_2, __reg_4_2, __reg_4_0, __reg_3_1);
__STORE(__h + 0, __reg_4_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0);
__STORE(__h - 4, __reg_4_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1);
__STORE(__h - 3, __reg_4_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2);
__STORE(__h - 2, __reg_4_1);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0);
__STORE(__h - 1, __reg_4_2);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1);
__STORE(__h + 0, __reg_4_0);
__reg_3_2 = __reg_2_2;
__CALC4(__reg_4_0, __reg_4_0, __reg_4_1, __reg_3_2);
__STORE(__h + 1, __reg_4_1);
}
}
else
{
for (__h = 9; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0);
__STORE(__h - 4, __reg_4_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2);
__STORE(__h - 4, __reg_4_1);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__CALC4(__reg_4_1, __reg_4_0, __reg_4_2, __reg_3_0);
__STORE(__h - 4, __reg_4_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__CALC4(__reg_4_2, __reg_4_1, __reg_4_0, __reg_3_1);
__STORE(__h - 4, __reg_4_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__CALC4(__reg_4_0, __reg_4_2, __reg_4_1, __reg_3_2);
__STORE(__h - 4, __reg_4_1);
__h++;
}
}
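/* kernel0_3: same stencil with three fused time steps (__side0Len = 3); the
   32x32 overlapped tile stores a 26x26 interior region per plane. */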
__global__ void kernel0_3(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 3;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 26;
const AN5D_TYPE __side3Len = 26;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
float __reg_3_0;
float __reg_3_1;
float __reg_3_2;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __writeValid3 = __updateValid && __local_c2 >= (__halo2 * 3) && __local_c2 < __side2LenOl - (__halo2 * 3) && __local_c3 >= (__halo3 * 3) && __local_c3 < __side3LenOl - (__halo3 * 3);
const AN5D_TYPE __storeValid = __writeValid3;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((1.500f * (__REGREF(__a, 0, 0))) + (0.500f * (__SBREF(__a_sb, -1, -1)))) + (0.700f * (__SBREF(__a_sb, -1, 0)))) + (0.900f * (__SBREF(__a_sb, -1, 1)))) + (1.200f * (__SBREF(__a_sb, 0, -1)))) + (1.201f * (__SBREF(__a_sb, 0, 1)))) + (0.901f * (__SBREF(__a_sb, 1, -1)))) + (0.701f * (__SBREF(__a_sb, 1, 0)))) + (0.501f * (__SBREF(__a_sb, 1, 1)))))))))))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((1.510f * (__REGREF(__a, 0, 0)))) + (0.510f * (__SBREF(__a_sb, -1, -1)))) + (0.710f * (__SBREF(__a_sb, -1, 0)))) + (0.910f * (__SBREF(__a_sb, -1, 1)))) + (1.210f * (__SBREF(__a_sb, 0, -1)))) + (1.211f * (__SBREF(__a_sb, 0, 1)))) + (0.911f * (__SBREF(__a_sb, 1, -1)))) + (0.711f * (__SBREF(__a_sb, 1, 0)))) + (0.511f * (__SBREF(__a_sb, 1, 1))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((1.520f * (__REGREF(__a, 0, 0)))) + (0.520f * (__SBREF(__a_sb, -1, -1)))) + (0.720f * (__SBREF(__a_sb, -1, 0)))) + (0.920f * (__SBREF(__a_sb, -1, 1)))) + (1.220f * (__SBREF(__a_sb, 0, -1)))) + (1.221f * (__SBREF(__a_sb, 0, 1)))) + (0.921f * (__SBREF(__a_sb, 1, -1)))) + (0.721f * (__SBREF(__a_sb, 1, 0)))) + (0.521f * (__SBREF(__a_sb, 1, 1)))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC3(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid3) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(1, __reg_3_1);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(2, __reg_3_2);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(3, __reg_3_0);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__LOAD(__reg_0, 5);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__LOAD(__reg_0, 6);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(3, __reg_3_0);
__DB_SWITCH(); __syncthreads();
}
__a_sb = __a_sb_double + __blockSize * 0;
if (__c1Id == __side1Num - 1)
{
for (__h = 7; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 3, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 3, __reg_3_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 2, __reg_3_2);
__reg_2_1 = __reg_1_1;
__CALC3(__reg_3_2, __reg_3_2, __reg_3_0, __reg_2_1);
__STORE(__h - 1, __reg_3_0);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 2, __reg_3_2);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 1, __reg_3_0);
__reg_2_2 = __reg_1_2;
__CALC3(__reg_3_0, __reg_3_0, __reg_3_1, __reg_2_2);
__STORE(__h + 0, __reg_3_1);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 2, __reg_3_2);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 1, __reg_3_0);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h + 0, __reg_3_1);
__reg_2_0 = __reg_1_0;
__CALC3(__reg_3_1, __reg_3_1, __reg_3_2, __reg_2_0);
__STORE(__h + 1, __reg_3_2);
}
}
else
{
for (__h = 7; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 3, __reg_3_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 3, __reg_3_0);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__CALC3(__reg_3_0, __reg_3_2, __reg_3_1, __reg_2_2);
__STORE(__h - 3, __reg_3_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__CALC3(__reg_3_1, __reg_3_0, __reg_3_2, __reg_2_0);
__STORE(__h - 3, __reg_3_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__CALC3(__reg_3_2, __reg_3_1, __reg_3_0, __reg_2_1);
__STORE(__h - 3, __reg_3_0);
__h++;
}
}
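/* kernel0_2: two fused time steps (__side0Len = 2); 28x28 stored interior
   region per plane within the 32x32 overlapped tile. */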
__global__ void kernel0_2(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 2;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 28;
const AN5D_TYPE __side3Len = 28;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
float __reg_2_0;
float __reg_2_1;
float __reg_2_2;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __writeValid2 = __updateValid && __local_c2 >= (__halo2 * 2) && __local_c2 < __side2LenOl - (__halo2 * 2) && __local_c3 >= (__halo3 * 2) && __local_c3 < __side3LenOl - (__halo3 * 2);
const AN5D_TYPE __storeValid = __writeValid2;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((1.500f * (__REGREF(__a, 0, 0))) + (0.500f * (__SBREF(__a_sb, -1, -1)))) + (0.700f * (__SBREF(__a_sb, -1, 0)))) + (0.900f * (__SBREF(__a_sb, -1, 1)))) + (1.200f * (__SBREF(__a_sb, 0, -1)))) + (1.201f * (__SBREF(__a_sb, 0, 1)))) + (0.901f * (__SBREF(__a_sb, 1, -1)))) + (0.701f * (__SBREF(__a_sb, 1, 0)))) + (0.501f * (__SBREF(__a_sb, 1, 1)))))))))))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((1.510f * (__REGREF(__a, 0, 0)))) + (0.510f * (__SBREF(__a_sb, -1, -1)))) + (0.710f * (__SBREF(__a_sb, -1, 0)))) + (0.910f * (__SBREF(__a_sb, -1, 1)))) + (1.210f * (__SBREF(__a_sb, 0, -1)))) + (1.211f * (__SBREF(__a_sb, 0, 1)))) + (0.911f * (__SBREF(__a_sb, 1, -1)))) + (0.711f * (__SBREF(__a_sb, 1, 0)))) + (0.511f * (__SBREF(__a_sb, 1, 1))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((1.520f * (__REGREF(__a, 0, 0)))) + (0.520f * (__SBREF(__a_sb, -1, -1)))) + (0.720f * (__SBREF(__a_sb, -1, 0)))) + (0.920f * (__SBREF(__a_sb, -1, 1)))) + (1.220f * (__SBREF(__a_sb, 0, -1)))) + (1.221f * (__SBREF(__a_sb, 0, 1)))) + (0.921f * (__SBREF(__a_sb, 1, -1)))) + (0.721f * (__SBREF(__a_sb, 1, 0)))) + (0.521f * (__SBREF(__a_sb, 1, 1)))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __CALC2(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid2) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(1, __reg_2_1);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(2, __reg_2_2);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__LOAD(__reg_0, 3);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__LOAD(__reg_0, 4);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(2, __reg_2_2);
__DB_SWITCH(); __syncthreads();
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 5; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 2, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h - 2, __reg_2_2);
__h++;
}
if (0) {}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__reg_1_2 = __reg_0;
__CALC2(__reg_2_0, __reg_2_0, __reg_2_1, __reg_1_2);
__STORE(__h - 1, __reg_2_1);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 1, __reg_2_1);
__reg_1_0 = __reg_0;
__CALC2(__reg_2_1, __reg_2_1, __reg_2_2, __reg_1_0);
__STORE(__h + 0, __reg_2_2);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 1, __reg_2_1);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h + 0, __reg_2_2);
__reg_1_1 = __reg_0;
__CALC2(__reg_2_2, __reg_2_2, __reg_2_0, __reg_1_1);
__STORE(__h + 1, __reg_2_0);
}
}
else
{
for (__h = 5; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 2, __reg_2_1);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h - 2, __reg_2_2);
__h++;
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__CALC2(__reg_2_2, __reg_2_1, __reg_2_0, __reg_1_1);
__STORE(__h - 2, __reg_2_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__CALC2(__reg_2_0, __reg_2_2, __reg_2_1, __reg_1_2);
__STORE(__h - 2, __reg_2_1);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__CALC2(__reg_2_1, __reg_2_0, __reg_2_2, __reg_1_0);
__STORE(__h - 2, __reg_2_2);
__h++;
}
}
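/* kernel0_1: single time step (__side0Len = 1); 30x30 stored interior region per
   plane. The host side presumably dispatches among kernel0_4 .. kernel0_1
   according to how many time steps remain in the current batch. */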
__global__ void kernel0_1(float *A, int dimsize, int timestep, int c0)
{
#ifndef AN5D_TYPE
#define AN5D_TYPE unsigned
#endif
const AN5D_TYPE __c0Len = (timestep - 0);
const AN5D_TYPE __c0Pad = (0);
#define __c0 c0
const AN5D_TYPE __c1Len = (dimsize - 1 - 1);
const AN5D_TYPE __c1Pad = (1);
#define __c1 c1
const AN5D_TYPE __c2Len = (dimsize - 1 - 1);
const AN5D_TYPE __c2Pad = (1);
#define __c2 c2
const AN5D_TYPE __c3Len = (dimsize - 1 - 1);
const AN5D_TYPE __c3Pad = (1);
#define __c3 c3
const AN5D_TYPE __halo1 = 1;
const AN5D_TYPE __halo2 = 1;
const AN5D_TYPE __halo3 = 1;
const AN5D_TYPE __side0Len = 1;
const AN5D_TYPE __side1Len = 128;
const AN5D_TYPE __side2Len = 30;
const AN5D_TYPE __side3Len = 30;
const AN5D_TYPE __OlLen1 = (__halo1 * __side0Len);
const AN5D_TYPE __OlLen2 = (__halo2 * __side0Len);
const AN5D_TYPE __OlLen3 = (__halo3 * __side0Len);
const AN5D_TYPE __side1LenOl = (__side1Len + 2 * __OlLen1);
const AN5D_TYPE __side2LenOl = (__side2Len + 2 * __OlLen2);
const AN5D_TYPE __side3LenOl = (__side3Len + 2 * __OlLen3);
const AN5D_TYPE __blockSize = 1 * __side2LenOl * __side3LenOl;
const AN5D_TYPE __side1Num = (__c1Len + __side1Len - 1) / __side1Len;
const AN5D_TYPE __side2Num = (__c2Len + __side2Len - 1) / __side2Len;
const AN5D_TYPE __side3Num = (__c3Len + __side3Len - 1) / __side3Len;
const AN5D_TYPE __tid = threadIdx.y * blockDim.x + threadIdx.x;
const AN5D_TYPE __local_c2 = __tid / __side3LenOl;
const AN5D_TYPE __local_c3 = __tid % __side3LenOl;
const AN5D_TYPE __c1Id = blockIdx.x / __side2Num / __side3Num;
const AN5D_TYPE __c2 = (blockIdx.x / __side3Num % __side2Num) * __side2Len + __local_c2 + __c2Pad - __OlLen2;
const AN5D_TYPE __c3 = (blockIdx.x % __side3Num) * __side3Len + __local_c3 + __c3Pad - __OlLen3;
float __reg_0;
float __reg_1_0;
float __reg_1_1;
float __reg_1_2;
__shared__ float __a_sb_double[__blockSize * 2];
float *__a_sb = __a_sb_double;
const AN5D_TYPE __loadValid = 1 && __c2 >= __c2Pad - __halo2 && __c2 < __c2Pad + __c2Len + __halo2 && __c3 >= __c3Pad - __halo3 && __c3 < __c3Pad + __c3Len + __halo3;
const AN5D_TYPE __updateValid = 1 && __c2 >= __c2Pad && __c2 < __c2Pad + __c2Len && __c3 >= __c3Pad && __c3 < __c3Pad + __c3Len;
const AN5D_TYPE __writeValid1 = __updateValid && __local_c2 >= (__halo2 * 1) && __local_c2 < __side2LenOl - (__halo2 * 1) && __local_c3 >= (__halo3 * 1) && __local_c3 < __side3LenOl - (__halo3 * 1);
const AN5D_TYPE __storeValid = __writeValid1;
AN5D_TYPE __c1;
AN5D_TYPE __h;
const AN5D_TYPE __c1Pad2 = __c1Pad + __side1Len * __c1Id;
#define __LOAD(reg, h) do { if (__loadValid) { __c1 = __c1Pad2 - __halo1 + h; reg = A[(((__c0 % 2) * dimsize + __c1) * dimsize + __c2) * dimsize + __c3]; }} while (0)
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_0_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((((((((((1.500f * (__REGREF(__a, 0, 0))) + (0.500f * (__SBREF(__a_sb, -1, -1)))) + (0.700f * (__SBREF(__a_sb, -1, 0)))) + (0.900f * (__SBREF(__a_sb, -1, 1)))) + (1.200f * (__SBREF(__a_sb, 0, -1)))) + (1.201f * (__SBREF(__a_sb, 0, 1)))) + (0.901f * (__SBREF(__a_sb, 1, -1)))) + (0.701f * (__SBREF(__a_sb, 1, 0)))) + (0.501f * (__SBREF(__a_sb, 1, 1)))))))))))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_0(out, a) do { __CALCEXPR_0_wrap(out, a); } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_1_wrap(__rn0, __a) do { __rn0 = ((((((((((((((((((((1.510f * (__REGREF(__a, 0, 0)))) + (0.510f * (__SBREF(__a_sb, -1, -1)))) + (0.710f * (__SBREF(__a_sb, -1, 0)))) + (0.910f * (__SBREF(__a_sb, -1, 1)))) + (1.210f * (__SBREF(__a_sb, 0, -1)))) + (1.211f * (__SBREF(__a_sb, 0, 1)))) + (0.911f * (__SBREF(__a_sb, 1, -1)))) + (0.711f * (__SBREF(__a_sb, 1, 0)))) + (0.511f * (__SBREF(__a_sb, 1, 1))))))))))))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_1(out, a) do { float etmp; __CALCEXPR_1_wrap(etmp, a); out += etmp; } while (0);
#define __DEST (A[((((c0 + 1) % 2) * dimsize + c1) * dimsize + c2) * dimsize + c3])
#define __REGREF(reg, i2, i3) reg
#define __SBREF(sb, i2, i3) __sbref_wrap(sb, (int)__tid + i2 * (int)__side3LenOl + i3)
#define __CALCEXPR_2_wrap(__rn0, __a) do { __rn0 = (((((((((((1.520f * (__REGREF(__a, 0, 0)))) + (0.520f * (__SBREF(__a_sb, -1, -1)))) + (0.720f * (__SBREF(__a_sb, -1, 0)))) + (0.920f * (__SBREF(__a_sb, -1, 1)))) + (1.220f * (__SBREF(__a_sb, 0, -1)))) + (1.221f * (__SBREF(__a_sb, 0, 1)))) + (0.921f * (__SBREF(__a_sb, 1, -1)))) + (0.721f * (__SBREF(__a_sb, 1, 0)))) + (0.521f * (__SBREF(__a_sb, 1, 1)))) / 159); } while (0)
#define __DB_SWITCH() do { __a_sb = &__a_sb_double[(__a_sb == __a_sb_double) ? __blockSize : 0]; } while (0)
#define __CALCSETUP(a) do { __DB_SWITCH(); __a_sb[__tid] = a; __syncthreads(); } while (0)
#define __CALCEXPR_2(out, a) do { float etmp; __CALCEXPR_2_wrap(etmp, a); out += etmp; } while (0);
#define __CALCEXPR(out0, out1, out2, reg) do { __CALCEXPR_0(out0, reg); __CALCEXPR_1(out1, reg); __CALCEXPR_2(out2, reg); } while (0);
#define __CALC1(out0, out1, out2, reg) do { __CALCSETUP(reg); if (__writeValid1) { __CALCEXPR(out0, out1, out2, reg); } else out1 = reg; } while (0)
#define __STORE(h, out) do { if (__storeValid) { __c1 = __c1Pad2 - __halo1 + h; __DEST = out; }} while (0)
if (__c1Id == 0)
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(1, __reg_1_1);
}
else
{
__LOAD(__reg_0, 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__LOAD(__reg_0, 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__LOAD(__reg_0, 2);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(1, __reg_1_1);
}
__a_sb = __a_sb_double + __blockSize * 1;
if (__c1Id == __side1Num - 1)
{
for (__h = 3; __h <= __c1Len - __side1Len * __c1Id + __halo1 * 2 - 4;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 1, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 1, __reg_1_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (0) {}
else if (__h + 1 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_1, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
}
else if (__h + 2 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_2, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
}
else if (__h + 3 == __c1Len - __side1Len * __c1Id + __halo1 * 2)
{
__LOAD(__reg_0, __h + 0);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__LOAD(__reg_0, __h + 1);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h + 0, __reg_1_0);
__LOAD(__reg_0, __h + 2);
__CALC1(__reg_1_0, __reg_1_0, __reg_1_1, __reg_0);
__STORE(__h + 1, __reg_1_1);
}
}
else
{
for (__h = 3; __h <= __side1LenOl - 3;)
{
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 1, __reg_1_0);
__h++;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 1, __reg_1_1);
__h++;
__DB_SWITCH(); __syncthreads();
}
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_1, __reg_1_0, __reg_1_2, __reg_0);
__STORE(__h - 1, __reg_1_2);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_2, __reg_1_1, __reg_1_0, __reg_0);
__STORE(__h - 1, __reg_1_0);
__h++;
if (__h == __side1LenOl) return;
__LOAD(__reg_0, __h);
__CALC1(__reg_1_0, __reg_1_2, __reg_1_1, __reg_0);
__STORE(__h - 1, __reg_1_1);
__h++;
}
}
|
24abbfab4fe8b678d69545709527fd2bf1be352a.hip
|
// !!! This is a file automatically generated by hipify!!!
/***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
Please check examples 07, 08 and 17 for the basics of dense tensor op gemm kernels. NVIDIA Ampere
architecture also supports structured sparse tensor op for tf32, fp16, int8 and int4.
Sparse GEMM kernels need to take an additional E matrix which stores the metadata. The format of
the metadata is different for every data type. CUTLASS templates can automatically infer it based on
input A and B. Check the code below.
Moreover, matrix E needs to be preprocessed so that ldmatrix can be used to load it into the
registers efficiently.
*/
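// A rough sketch of the E-matrix flow in this example (see run() below):
//   1. tensor_e is filled with random sparse metadata on the host
//      (TensorFillRandomSparseMeta with kMetaSizeInBits).
//   2. reorder_meta() rewrites it into tensor_e_reordered, the layout expected
//      when the kernel loads metadata through ldmatrix.
//   3. The reordered copy is synced to the device and passed to the CUTLASS
//      kernel, while the original tensor_e stays on the host to uncompress A
//      for the reference GEMM.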
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm_sparse.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/host_reorder.h"
#include "cutlass/util/host_uncompress.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output matrices and computation between
// elements in input matrices.
using ElementAccumulator = int32_t; // <- data type of accumulator
using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations
using ElementInputA = cutlass::int4b_t; // <- data type of elements in input matrix A
using ElementInputB = cutlass::int4b_t; // <- data type of elements in input matrix B
using ElementOutput = int32_t; // <- data type of elements in output matrix D
// The code section below describes matrix layout of input and output matrices. Row Major for
// Matrix A, Column Major for Matrix B and Row Major for Matrix C
using LayoutInputA = cutlass::layout::RowMajor;
using LayoutInputB = cutlass::layout::ColumnMajor;
using LayoutOutput = cutlass::layout::RowMajor;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
cutlass::gemm::GemmShape<128, 128, 256>; // <- threadblock tile M = 128, N = 128, K = 256
// This code section describes tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 256>; // <- warp tile M = 64, N = 64, K = 256
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 128>; // <- MMA Op tile M = 16, N = 8, K = 128
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ??
// This code section describes the epilogue part of the kernel
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // <- data type of output matrix
128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized
// memory access. For a byte, it's 16
// elements. This becomes the vector width of
// math instructions in the epilogue too
ElementAccumulator, // <- data type of accumulator
ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function
// Number of pipelines you want to use
constexpr int NumStages = 3;
using Gemm = cutlass::gemm::device::SparseGemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ShapeMMAThreadBlock,
ShapeMMAWarp,
ShapeMMAOp,
EpilogueOp,
SwizzleThreadBlock,
NumStages>;
// Data type and layout of meta data matrix E can be inferred from template Gemm.
using ElementInputE = typename Gemm::ElementE;
using LayoutInputE = typename Gemm::LayoutE;
// The property below is defined in include/cutlass/arch/sp_mma_sm80.h
// 50% Sparsity on Ampere
constexpr int kSparse = Gemm::kSparse;
// How many elements of A are covered per ElementE
constexpr int kElementsPerElementE = Gemm::kElementsPerElementE;
// The size of individual meta data
constexpr int kMetaSizeInBits = Gemm::kMetaSizeInBits;
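// For the 512 x 512 x 1024 problem used below this works out roughly as
// follows (assuming kSparse == 2, matching the 50% sparsity noted above):
// the compressed A tensor is allocated as M x (K / kSparse) = 512 x 512,
// and the metadata tensor E as M x (K / kSparse / kElementsPerElementE),
// since each ElementE covers kElementsPerElementE elements of A.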
int run() {
const int length_m = 512;
const int length_n = 512;
const int length_k = 1024;
// Create a tuple of problem size for matrix multiplication
cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k);
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(
cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse)); // <- Create matrix A with dimensions M x (K / 2)
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a_uncompressed(
problem_size.mk()); // <- Create uncompressed matrix A with dimensions M x K for reference computing
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(
problem_size.kn()); // <- Create matrix B with dimensions K x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(
problem_size.mn()); // <- Create matrix C with dimensions M x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// CUTLASS kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// reference kernel
// Create matrix E with dimensions M x (K / 2 / kElementsPerElementE). This one is used by reference computing.
cutlass::HostTensor<ElementInputE, LayoutInputE> tensor_e(
cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE));
// Same size as the above. The above one needs to be reordered and stored in this one.
cutlass::HostTensor<ElementInputE, LayoutInputE> tensor_e_reordered(
cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE));
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(1),
ElementInputA(-1),
0); // <- Fill matrix A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(1),
ElementInputB(-1),
0); // <- Fill matrix B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(1),
ElementOutput(-1),
0); // <- Fill matrix C on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomSparseMeta(
tensor_e.host_view(),
1,
kMetaSizeInBits); // <- Fill matrix E on host with uniform-distribution random meta data
cutlass::reference::host::TensorFill(
tensor_d.host_view()); // <- fill matrix D on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros
// Reorder the meta data matrix so that we can use ldmatrix to load them to tensor core
// instructions.
cutlass::reorder_meta(tensor_e_reordered.host_ref(), tensor_e.host_ref(),
{problem_size.m(), problem_size.n(),
problem_size.k() / kSparse / kElementsPerElementE});
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_e_reordered.sync_device();
tensor_ref_d.sync_device();
// Initialize alpha and beta for dot product computation
ElementComputeEpilogue alpha = ElementComputeEpilogue(1);
ElementComputeEpilogue beta = ElementComputeEpilogue(0);
// Split K dimension into 1 partitions
int split_k_slices = 1;
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication
tensor_a.device_ref(), // <- reference to matrix A on device
tensor_b.device_ref(), // <- reference to matrix B on device
tensor_c.device_ref(), // <- reference to matrix C on device
tensor_d.device_ref(), // <- reference to matrix D on device
                                   tensor_e_reordered.device_ref(),  // <- reference to matrix E on device
{alpha, beta}, // <- tuple of alpha and beta
split_k_slices}; // <- k-dimension split factor
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = Gemm::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Instantiate CUTLASS kernel depending on templates
Gemm gemm_op;
// Check the problem size is supported or not
cutlass::Status status = gemm_op.can_implement(arguments);
CUTLASS_CHECK(status);
// Initialize CUTLASS kernel with arguments and workspace pointer
status = gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// Launch initialized CUTLASS kernel
status = gemm_op();
CUTLASS_CHECK(status);
// uncompress tensor_a based on meta data tensor_e. We need it for reference computing.
cutlass::uncompress(tensor_a_uncompressed.host_ref(), tensor_a.host_ref(),
tensor_e.host_ref(), problem_size.m(), problem_size.k());
// Create instantiation for host reference gemm kernel
cutlass::reference::host::Gemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementComputeEpilogue,
typename Gemm::Operator>
gemm_host;
// Launch host reference gemm kernel
gemm_host(problem_size,
alpha,
tensor_a_uncompressed.host_ref(),
tensor_b.host_ref(),
beta,
tensor_c.host_ref(),
tensor_ref_d.host_ref());
  // Copy output data from CUTLASS kernel to host for comparison
tensor_d.sync_host();
// Check if output from CUTLASS kernel and reference kernel are equal or not
bool passed = cutlass::reference::host::TensorEquals(
tensor_d.host_view(),
tensor_ref_d.host_view());
std::cout << (passed ? "Passed" : "Failed") << std::endl;
return (passed ? 0 : -1);
}
int main() {
bool notSupported = false;
// Ampere Sparse Tensor Core operations exposed with mma.sync and ldmatrix are first available
// in CUDA 11.1.
//
// CUTLASS must be compiled with CUDA 11.1 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 1))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.1 Toolkit or later." << std::endl;
notSupported = true;
}
hipDeviceProp_t props;
hipError_t error = hipGetDeviceProperties(&props, 0);
if (error != hipSuccess) {
std::cerr << "hipGetDeviceProperties() returned an error: " << hipGetErrorString(error) << std::endl;
return -1;
}
if (!((props.major * 10 + props.minor) >= 80)) {
std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
    // Returning zero so this test passes on older toolkits. Its action is a no-op.
return 0;
}
return run();
}
|
24abbfab4fe8b678d69545709527fd2bf1be352a.cu
|
/***************************************************************************************************
* Copyright (c) 2017-2021, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without modification, are permitted
* provided that the following conditions are met:
* * Redistributions of source code must retain the above copyright notice, this list of
* conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright notice, this list of
* conditions and the following disclaimer in the documentation and/or other materials
* provided with the distribution.
* * Neither the name of the NVIDIA CORPORATION nor the names of its contributors may be used
* to endorse or promote products derived from this software without specific prior written
* permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
* FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NVIDIA CORPORATION BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
* OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
**************************************************************************************************/
/**
Please check examples 07, 08 and 17 for the basics of dense tensor op gemm kernels. NVIDIA Ampere
architecture also supports structured sparse tensor op for tf32, fp16, int8 and int4.
Sparse GEMM kernels need to take an additional E matrix which stores the metadata. The format of
the metadata is different for every data type. CUTLASS templates can automatically infer it based on
input A and B. Check the code below.
Moreover, matrix E needs to be preprocessed so that ldmatrix can be used to load it into the
registers efficiently.
*/
#include <iostream>
#include "cutlass/cutlass.h"
#include "cutlass/gemm/device/gemm_sparse.h"
#include "cutlass/util/host_tensor.h"
#include "cutlass/util/reference/host/gemm.h"
#include "cutlass/util/host_reorder.h"
#include "cutlass/util/host_uncompress.h"
#include "cutlass/util/reference/host/tensor_compare.h"
#include "cutlass/util/reference/host/tensor_copy.h"
#include "cutlass/util/reference/host/tensor_fill.h"
#include "cutlass/util/tensor_view_io.h"
#include "helper.h"
// The code section below describes datatype for input, output matrices and computation between
// elements in input matrices.
using ElementAccumulator = int32_t; // <- data type of accumulator
using ElementComputeEpilogue = ElementAccumulator; // <- data type of epilogue operations
using ElementInputA = cutlass::int4b_t; // <- data type of elements in input matrix A
using ElementInputB = cutlass::int4b_t; // <- data type of elements in input matrix B
using ElementOutput = int32_t; // <- data type of elements in output matrix D
// The code section below describes matrix layout of input and output matrices. Row Major for
// Matrix A, Column Major for Matrix B and Row Major for Matrix C
using LayoutInputA = cutlass::layout::RowMajor;
using LayoutInputB = cutlass::layout::ColumnMajor;
using LayoutOutput = cutlass::layout::RowMajor;
// This code section describes whether you want to use tensor cores or regular SIMT cores on GPU SM
using MMAOp = cutlass::arch::OpClassTensorOp;
// This code section describes CUDA SM architecture number
using SmArch = cutlass::arch::Sm80;
// This code section describes the tile size a thread block will compute
using ShapeMMAThreadBlock =
cutlass::gemm::GemmShape<128, 128, 256>; // <- threadblock tile M = 128, N = 128, K = 256
// This code section describes tile size a warp will compute
using ShapeMMAWarp = cutlass::gemm::GemmShape<64, 64, 256>; // <- warp tile M = 64, N = 64, K = 256
// This code section describes the size of MMA op
using ShapeMMAOp = cutlass::gemm::GemmShape<16, 8, 128>; // <- MMA Op tile M = 16, N = 8, K = 128
// This code section describes how threadblocks are scheduled on GPU
using SwizzleThreadBlock = cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>; // <- ??
// This code section describes the epilogue part of the kernel
using EpilogueOp = cutlass::epilogue::thread::LinearCombination<
ElementOutput, // <- data type of output matrix
128 / cutlass::sizeof_bits<ElementOutput>::value, // <- the number of elements per vectorized
// memory access. For a byte, it's 16
// elements. This becomes the vector width of
// math instructions in the epilogue too
ElementAccumulator, // <- data type of accumulator
ElementComputeEpilogue>; // <- data type for alpha/beta in linear combination function
// Number of pipelines you want to use
constexpr int NumStages = 3;
using Gemm = cutlass::gemm::device::SparseGemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementAccumulator,
MMAOp,
SmArch,
ShapeMMAThreadBlock,
ShapeMMAWarp,
ShapeMMAOp,
EpilogueOp,
SwizzleThreadBlock,
NumStages>;
// Data type and layout of meta data matrix E can be inferred from template Gemm.
using ElementInputE = typename Gemm::ElementE;
using LayoutInputE = typename Gemm::LayoutE;
// The property below is defined in include/cutlass/arch/sp_mma_sm80.h
// 50% Sparsity on Ampere
constexpr int kSparse = Gemm::kSparse;
// How many elements of A are covered per ElementE
constexpr int kElementsPerElementE = Gemm::kElementsPerElementE;
// The size of individual meta data
constexpr int kMetaSizeInBits = Gemm::kMetaSizeInBits;
int run() {
const int length_m = 512;
const int length_n = 512;
const int length_k = 1024;
// Create a tuple of problem size for matrix multiplication
cutlass::gemm::GemmCoord problem_size(length_m, length_n, length_k);
// Initialize tensors using CUTLASS helper functions
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a(
cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse)); // <- Create matrix A with dimensions M x (K / 2)
cutlass::HostTensor<ElementInputA, LayoutInputA> tensor_a_uncompressed(
problem_size.mk()); // <- Create uncompressed matrix A with dimensions M x K for reference computing
cutlass::HostTensor<ElementInputB, LayoutInputB> tensor_b(
problem_size.kn()); // <- Create matrix B with dimensions K x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_c(
problem_size.mn()); // <- Create matrix C with dimensions M x N
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// CUTLASS kernel
cutlass::HostTensor<ElementOutput, LayoutOutput> tensor_ref_d(
problem_size.mn()); // <- Create matrix D with dimensions M x N used to store output from
// reference kernel
// Create matrix E with dimensions M x (K / 2 / kElementsPerElementE). This one is used by reference computing.
cutlass::HostTensor<ElementInputE, LayoutInputE> tensor_e(
cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE));
// Same size as the above. The above one needs to be reordered and stored in this one.
cutlass::HostTensor<ElementInputE, LayoutInputE> tensor_e_reordered(
cutlass::make_Coord(problem_size.m(), problem_size.k() / kSparse / kElementsPerElementE));
// Fill input and output matrices on host using CUTLASS helper functions
cutlass::reference::host::TensorFillRandomUniform(
tensor_a.host_view(),
1,
ElementInputA(1),
ElementInputA(-1),
0); // <- Fill matrix A on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_b.host_view(),
1,
ElementInputB(1),
ElementInputB(-1),
0); // <- Fill matrix B on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomUniform(
tensor_c.host_view(),
1,
ElementOutput(1),
ElementOutput(-1),
0); // <- Fill matrix C on host with uniform-distribution random data
cutlass::reference::host::TensorFillRandomSparseMeta(
tensor_e.host_view(),
1,
kMetaSizeInBits); // <- Fill matrix E on host with uniform-distribution random meta data
cutlass::reference::host::TensorFill(
tensor_d.host_view()); // <- fill matrix D on host with zeros
cutlass::reference::host::TensorFill(
tensor_ref_d.host_view()); // <- fill matrix D for reference on host with zeros
// Reorder the meta data matrix so that we can use ldmatrix to load them to tensor core
// instructions.
cutlass::reorder_meta(tensor_e_reordered.host_ref(), tensor_e.host_ref(),
{problem_size.m(), problem_size.n(),
problem_size.k() / kSparse / kElementsPerElementE});
// Copy data from host to GPU
tensor_a.sync_device();
tensor_b.sync_device();
tensor_c.sync_device();
tensor_d.sync_device();
tensor_e_reordered.sync_device();
tensor_ref_d.sync_device();
// Initialize alpha and beta for dot product computation
ElementComputeEpilogue alpha = ElementComputeEpilogue(1);
ElementComputeEpilogue beta = ElementComputeEpilogue(0);
// Split K dimension into 1 partitions
int split_k_slices = 1;
// Create a tuple of gemm kernel arguments. This is later passed as arguments to launch
// instantiated CUTLASS kernel
typename Gemm::Arguments arguments{problem_size, // <- problem size of matrix multiplication
tensor_a.device_ref(), // <- reference to matrix A on device
tensor_b.device_ref(), // <- reference to matrix B on device
tensor_c.device_ref(), // <- reference to matrix C on device
tensor_d.device_ref(), // <- reference to matrix D on device
                                   tensor_e_reordered.device_ref(),  // <- reference to matrix E on device
{alpha, beta}, // <- tuple of alpha and beta
split_k_slices}; // <- k-dimension split factor
// Using the arguments, query for extra workspace required for matrix multiplication computation
size_t workspace_size = Gemm::get_workspace_size(arguments);
// Allocate workspace memory
cutlass::device_memory::allocation<uint8_t> workspace(workspace_size);
// Instantiate CUTLASS kernel depending on templates
Gemm gemm_op;
// Check the problem size is supported or not
cutlass::Status status = gemm_op.can_implement(arguments);
CUTLASS_CHECK(status);
// Initialize CUTLASS kernel with arguments and workspace pointer
status = gemm_op.initialize(arguments, workspace.get());
CUTLASS_CHECK(status);
// Launch initialized CUTLASS kernel
status = gemm_op();
CUTLASS_CHECK(status);
// uncompress tensor_a based on meta data tensor_e. We need it for reference computing.
cutlass::uncompress(tensor_a_uncompressed.host_ref(), tensor_a.host_ref(),
tensor_e.host_ref(), problem_size.m(), problem_size.k());
// Create instantiation for host reference gemm kernel
cutlass::reference::host::Gemm<ElementInputA,
LayoutInputA,
ElementInputB,
LayoutInputB,
ElementOutput,
LayoutOutput,
ElementComputeEpilogue,
ElementComputeEpilogue,
typename Gemm::Operator>
gemm_host;
// Launch host reference gemm kernel
gemm_host(problem_size,
alpha,
tensor_a_uncompressed.host_ref(),
tensor_b.host_ref(),
beta,
tensor_c.host_ref(),
tensor_ref_d.host_ref());
  // Copy output data from CUTLASS kernel to host for comparison
tensor_d.sync_host();
// Check if output from CUTLASS kernel and reference kernel are equal or not
bool passed = cutlass::reference::host::TensorEquals(
tensor_d.host_view(),
tensor_ref_d.host_view());
std::cout << (passed ? "Passed" : "Failed") << std::endl;
return (passed ? 0 : -1);
}
int main() {
bool notSupported = false;
// Ampere Sparse Tensor Core operations exposed with mma.sync and ldmatrix are first available
// in CUDA 11.1.
//
// CUTLASS must be compiled with CUDA 11.1 Toolkit to run these examples.
if (!(__CUDACC_VER_MAJOR__ > 11 || (__CUDACC_VER_MAJOR__ == 11 && __CUDACC_VER_MINOR__ >= 1))) {
std::cerr << "Ampere Tensor Core operations must be compiled with CUDA 11.1 Toolkit or later." << std::endl;
notSupported = true;
}
cudaDeviceProp props;
cudaError_t error = cudaGetDeviceProperties(&props, 0);
if (error != cudaSuccess) {
std::cerr << "cudaGetDeviceProperties() returned an error: " << cudaGetErrorString(error) << std::endl;
return -1;
}
if (!((props.major * 10 + props.minor) >= 80)) {
std::cerr << "Ampere Tensor Core operations must be run on a machine with compute capability at least 80."
<< std::endl;
notSupported = true;
}
if (notSupported) {
    // Returning zero so this test passes on older toolkits. Its action is a no-op.
return 0;
}
return run();
}
|
d72e13eddeba5d1d56bf2e33f33ad964083a23fc.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <type_traits>
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/math/bert_encoder_functor.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/math_cuda_utils.h"
namespace paddle {
namespace operators {
namespace math {
// NOTE(chenfeiyu): explicitly use operator+ for float2
// since float2 is not in namespace phi::funcs, ADL won't help
using phi::funcs::operator+;
template <typename T>
__device__ __forceinline__ T local_rsqrt(T num) {
return rsqrt(static_cast<float>(num));
}
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
__device__ __forceinline__ half local_rsqrt(half num) { return hrsqrt(num); }
#endif
template <typename T, int TPB>
__device__ inline void LayerNormSmall(T val,
const phi::funcs::kvp<T> &thread_data,
const int ld,
const int idx,
const T *bias,
const T *scale,
T *output,
T eps) {
using BlockReduce = hipcub::BlockReduce<phi::funcs::kvp<T>, TPB>;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ T mu; // mean
__shared__ T rsigma; // 1 / std.dev.
const auto sum_kv = BlockReduce(temp_storage).Reduce(thread_data, hipcub::Sum());
if (threadIdx.x == 0) {
mu = sum_kv.key;
rsigma = local_rsqrt(sum_kv.value - mu * mu + eps);
}
__syncthreads();
if (threadIdx.x < ld) {
const T g(scale[threadIdx.x]);
const T b(bias[threadIdx.x]);
output[idx] = g * (val - mu) * rsigma + b;
}
}
template <typename T, int TPB>
__device__ inline void LayerNorm(const phi::funcs::kvp<T> &thread_data,
const int ld,
const int offset,
const T *bias,
const T *scale,
T *output,
T eps) {
using BlockReduce = hipcub::BlockReduce<phi::funcs::kvp<T>, TPB>;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ T mu; // mean
__shared__ T rsigma; // 1 / std.dev.
const auto sum_kv = BlockReduce(temp_storage).Reduce(thread_data, hipcub::Sum());
if (threadIdx.x == 0) {
mu = sum_kv.key;
rsigma = local_rsqrt(sum_kv.value - mu * mu + eps);
}
__syncthreads();
for (int i = threadIdx.x; i < ld; i += TPB) {
const int idx = offset + i;
const T val = output[idx];
const T g(scale[i]);
const T b(bias[i]);
output[idx] = g * (val - mu) * rsigma + b;
}
}
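// A rough note on the reduction above: callers accumulate thread_data as a
// (sum(x) / ld, sum(x * x) / ld) pair, so after the block reduce the key is
// the mean mu and the value is E[x^2]; the variance is then recovered as
// E[x^2] - mu * mu before taking rsqrt. See EmbEltwiseLayernormKernel below
// for how the pair is built per element.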
template <typename T, typename T2, int TPB>
__device__ inline void LayerNorm2(const phi::funcs::kvp<T> &thread_data,
const int ld,
const int offset,
const T2 *bias,
const T2 *scale,
T2 *output,
T eps) {
using BlockReduce = hipcub::BlockReduce<phi::funcs::kvp<T>, TPB>;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ T mu; // mean
__shared__ T rsigma; // 1 / std.dev.
const auto sum_kv = BlockReduce(temp_storage).Reduce(thread_data, hipcub::Sum());
if (threadIdx.x == 0) {
mu = sum_kv.key;
rsigma = local_rsqrt(sum_kv.value - mu * mu + eps);
}
__syncthreads();
for (int i = threadIdx.x; i < ld; i += TPB) {
const int idx = offset + i;
T2 val = output[idx];
const T2 g = scale[i];
const T2 b = bias[i];
val.x = T(g.x) * (val.x - mu) * rsigma + T(b.x);
val.y = T(g.y) * (val.y - mu) * rsigma + T(b.y);
output[idx] = val;
}
}
template <typename T, unsigned TPB>
__global__ void EmbEltwiseLayernormKernel(int hidden,
const int64_t *ids,
const T *scale,
const T *bias,
const int64_t *embs,
T *output,
T eps,
int input_num) {
hipcub::Sum pair_sum;
// blockIdx.x: position in the sequence
// blockIdx.y: batch
// gridDim.x: Seq
// gridDim.y: Batch
extern __shared__ int64_t array_id[];
const T rhidden = T(1.f) / T(hidden);
const int64_t seq_pos = blockIdx.y + blockIdx.x * gridDim.y;
if (threadIdx.x == 0) {
for (int i = 0; i < input_num; ++i) {
const int64_t *ids_p = reinterpret_cast<const int64_t *>(ids[i]);
array_id[i] = ids_p[seq_pos];
}
}
__syncthreads();
const int64_t out_offset = seq_pos * hidden;
phi::funcs::kvp<T> thread_data(0, 0);
#pragma unroll
for (int it = threadIdx.x; it < hidden; it += TPB) {
T val = 0;
for (int i = 0; i < input_num; ++i) {
val += reinterpret_cast<const T *>(embs[i])[array_id[i] * hidden + it];
}
output[out_offset + it] = val;
const T rhiddenval = rhidden * val;
thread_data =
pair_sum(thread_data, phi::funcs::kvp<T>(rhiddenval, rhiddenval * val));
}
LayerNorm<T, TPB>(thread_data, hidden, out_offset, bias, scale, output, eps);
}
// HIP defined __HIP_NO_HALF_CONVERSIONS__ in hip.cmake
#ifndef __HIPCC__ // @{ Half kernel: EmbEltwiseLayernormKernel
template <>
__global__ void EmbEltwiseLayernormKernel<half, 256>(int hidden,
const int64_t *ids,
const half *scale,
const half *bias,
const int64_t *embs,
half *output,
half eps,
int input_num) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
hipcub::Sum pair_sum;
// blockIdx.x: position in the sequence
// blockIdx.y: batch
// gridDim.x: Seq
// gridDim.y: Batch
extern __shared__ int64_t array_id[];
const half rhidden = half(1.f) / half(hidden);
const int64_t seq_pos = blockIdx.y + blockIdx.x * gridDim.y;
if (threadIdx.x == 0) {
for (int i = 0; i < input_num; ++i) {
const int64_t *ids_p = reinterpret_cast<const int64_t *>(ids[i]);
array_id[i] = ids_p[seq_pos];
}
}
__syncthreads();
const int64_t out_offset = seq_pos * hidden;
phi::funcs::kvp<half> thread_data(0, 0);
#pragma unroll
for (int it = threadIdx.x; it < hidden; it += 256) {
half val = 0;
for (int i = 0; i < input_num; ++i) {
val += reinterpret_cast<const half *>(embs[i])[array_id[i] * hidden + it];
}
output[out_offset + it] = val;
const half rhiddenval = rhidden * val;
thread_data = pair_sum(thread_data,
phi::funcs::kvp<half>(rhiddenval, rhiddenval * val));
}
LayerNorm<half, 256>(
thread_data, hidden, out_offset, bias, scale, output, eps);
#endif
}
#endif // @} End Half kernel: EmbEltwiseLayernormKernel
template <typename T>
void EmbEltwiseLayerNormFunctor<T>::operator()(int batch,
int seq_len,
int hidden,
const int64_t *ids,
const T *scale,
const T *bias,
const int64_t *embs,
T *output,
float eps,
int input_num,
gpuStream_t stream) {
const unsigned tpb = 256;
const dim3 grid(seq_len, batch, 1);
const dim3 block(tpb, 1, 1);
int shared_bytes = input_num * sizeof(int64_t);
hipLaunchKernelGGL(( EmbEltwiseLayernormKernel<T, tpb>), dim3(grid), dim3(block), shared_bytes, stream,
hidden, ids, scale, bias, embs, output, eps, input_num);
}
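// Launch shape sketch for the functor above: one block per (sequence position,
// batch) pair, i.e. grid = (seq_len, batch), tpb = 256 threads striding over
// the hidden dimension, and input_num * sizeof(int64_t) bytes of dynamic
// shared memory to stage the looked-up ids for that position.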
template class EmbEltwiseLayerNormFunctor<float>;
// device function 'operator()' is not supported until CUDA 10.0
// HIP defined __HIP_NO_HALF_CONVERSIONS__ in hip.cmake
#if defined(PADDLE_WITH_CUDA) && TORCH_HIP_VERSION >= 10000
template class EmbEltwiseLayerNormFunctor<half>;
#endif
template <typename T>
__global__ void SoftmaxKernelWithEltadd(T *qk_buf_,
const T *bias_qk_,
const int batch_size,
const int head_num,
const int seq_len,
const unsigned mask) {
int qk_offset = blockIdx.x * seq_len;
assert(blockDim.x % 32 == 0);
float tmp = threadIdx.x < seq_len
? static_cast<float>(qk_buf_[threadIdx.x + qk_offset] +
bias_qk_[threadIdx.x + qk_offset])
: -1e20f;
float max_val = phi::funcs::BlockReduceMax<float>(tmp, mask);
float qk_tmp = threadIdx.x < seq_len ? __expf(tmp - max_val) : 0.0f;
float sum_val = phi::funcs::BlockReduceSum<float>(qk_tmp, mask);
if (threadIdx.x < seq_len)
qk_buf_[threadIdx.x + qk_offset] = (T)(qk_tmp / sum_val);
}
// HIP defined __HIP_NO_HALF_CONVERSIONS__
#ifndef __HIPCC__ // @{ Half kernel: SoftmaxKernelWithEltadd
template <>
__global__ void SoftmaxKernelWithEltadd<half>(half *qk_buf_,
const half *bias_qk_,
const int batch_size,
const int head_num,
const int seq_len,
const unsigned mask) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
int qk_offset = blockIdx.x * seq_len;
assert(blockDim.x % 32 == 0);
float tmp = threadIdx.x < seq_len
? static_cast<float>(qk_buf_[threadIdx.x + qk_offset] +
bias_qk_[threadIdx.x + qk_offset])
: -1e20f;
float max_val = phi::funcs::BlockReduceMax<float>(tmp, mask);
float qk_tmp = threadIdx.x < seq_len ? __expf(tmp - max_val) : 0.0f;
float sum_val = phi::funcs::BlockReduceSum<float>(qk_tmp, mask);
if (threadIdx.x < seq_len)
qk_buf_[threadIdx.x + qk_offset] = (half)(qk_tmp / sum_val);
#endif
}
#endif // @} End Half kernel: SoftmaxKernelWithEltadd
template <typename T>
__global__ void SoftmaxKernelWithEltadd2(T *qk_buf_,
const T *bias_qk_,
const int batch_size,
const int head_num,
const int seq_len,
const unsigned mask) {
int qk_offset = blockIdx.x * seq_len;
int idx = threadIdx.x;
assert(blockDim.x % 32 == 0);
float2 tmp = idx < seq_len
? phi::funcs::ToFloat2<T>(qk_buf_[idx + qk_offset] +
bias_qk_[idx + qk_offset])
: make_float2(-1e20f, -1e20f);
float max_val = phi::funcs::BlockReduceMax<float>(max(tmp.x, tmp.y), mask);
float2 qk_tmp = idx < seq_len ? make_float2(__expf(tmp.x - max_val),
__expf(tmp.y - max_val))
: make_float2(0.f, 0.f);
float sum_val =
phi::funcs::BlockReduceSum<float>(qk_tmp.x + qk_tmp.y, mask) + 1e-6f;
if (idx < seq_len) {
qk_buf_[idx + qk_offset] =
phi::funcs::FloatsToPair<T>(qk_tmp.x / sum_val, qk_tmp.y / sum_val);
}
}
template <>
__global__ void SoftmaxKernelWithEltadd2<half2>(half2 *qk_buf_,
const half2 *bias_qk_,
const int batch_size,
const int head_num,
const int seq_len,
const unsigned mask) {
// operator "+" of half only suppotted after cuda version 10.0
// HIP defined __HIP_NO_HALF_CONVERSIONS__ in hip.cmake
#if defined(PADDLE_WITH_CUDA) && \
(CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__) && TORCH_HIP_VERSION >= 10000)
int qk_offset = blockIdx.x * seq_len;
int idx = threadIdx.x;
assert(blockDim.x % 32 == 0);
float2 tmp = idx < seq_len
? phi::funcs::ToFloat2<half2>(qk_buf_[idx + qk_offset] +
bias_qk_[idx + qk_offset])
: make_float2(-1e20f, -1e20f);
float max_val = phi::funcs::BlockReduceMax<float>(max(tmp.x, tmp.y), mask);
float2 qk_tmp = idx < seq_len ? make_float2(__expf(tmp.x - max_val),
__expf(tmp.y - max_val))
: make_float2(0.f, 0.f);
float sum_val =
phi::funcs::BlockReduceSum<float>(qk_tmp.x + qk_tmp.y, mask) + 1e-6f;
if (idx < seq_len) {
qk_buf_[idx + qk_offset] =
phi::funcs::FloatsToPair<half2>(qk_tmp.x / sum_val, qk_tmp.y / sum_val);
}
#endif
}
template <typename T>
__global__ void SoftmaxKernelWithEltaddForLarge(T *qk_buf,
const T *bias_qk,
const int batch_size,
const int head_num,
const int seq_len,
const unsigned mask) {
int qk_offset = blockIdx.x * seq_len;
assert(blockDim.x % 32 == 0);
T stride_max = -1e20f;
for (int i = 0; threadIdx.x + i < seq_len; i += blockDim.x) {
stride_max = qk_buf[threadIdx.x + i + qk_offset] +
bias_qk[threadIdx.x + i + qk_offset] >
stride_max
? qk_buf[threadIdx.x + i + qk_offset] +
bias_qk[threadIdx.x + i + qk_offset]
: stride_max;
}
T max_val = phi::funcs::BlockReduceMax<T>(stride_max, mask);
T stride_sum = 0.f;
for (int i = 0; threadIdx.x + i < seq_len; i += blockDim.x) {
stride_sum += __expf(qk_buf[threadIdx.x + i + qk_offset] +
bias_qk[threadIdx.x + i + qk_offset] - max_val);
}
T sum_val = phi::funcs::BlockReduceSum<T>(stride_sum, mask);
for (int i = 0; threadIdx.x + i < seq_len; i += blockDim.x) {
qk_buf[threadIdx.x + i + qk_offset] =
(T)(__expf(qk_buf[threadIdx.x + i + qk_offset] +
bias_qk[threadIdx.x + i + qk_offset] - max_val) /
sum_val);
}
}
// HIP defined __HIP_NO_HALF_CONVERSIONS__
#ifndef __HIPCC__ // @{ Half kernel: SoftmaxKernelWithEltadd
template <>
__global__ void SoftmaxKernelWithEltaddForLarge(half *qk_buf,
const half *bias_qk,
const int batch_size,
const int head_num,
const int seq_len,
const unsigned mask) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
int qk_offset = blockIdx.x * seq_len;
assert(blockDim.x % 32 == 0);
float stride_max = -1e20f;
for (int i = 0; threadIdx.x + i < seq_len; i += blockDim.x) {
float tmp = static_cast<float>(qk_buf[threadIdx.x + i + qk_offset] +
bias_qk[threadIdx.x + i + qk_offset]);
stride_max = tmp > stride_max ? tmp : stride_max;
}
float max_val = phi::funcs::BlockReduceMax<float>(stride_max, mask);
float stride_sum = 0.f;
for (int i = 0; threadIdx.x + i < seq_len; i += blockDim.x) {
float tmp = static_cast<float>(qk_buf[threadIdx.x + i + qk_offset] +
bias_qk[threadIdx.x + i + qk_offset]);
stride_sum += __expf(tmp - max_val);
}
float sum_val = phi::funcs::BlockReduceSum<float>(stride_sum, mask);
for (int i = 0; threadIdx.x + i < seq_len; i += blockDim.x) {
float tmp =
__expf(static_cast<float>(qk_buf[threadIdx.x + i + qk_offset] +
bias_qk[threadIdx.x + i + qk_offset]) -
max_val);
qk_buf[threadIdx.x + i + qk_offset] = (half)(tmp / sum_val);
}
#endif
}
#endif // @} End Half kernel: SoftmaxKernelWithEltadd
template <typename T>
__global__ void SoftmaxKernelWithEltaddForLarge2(T *qk_buf_,
const T *bias_qk_,
const int batch_size,
const int head_num,
const int seq_len,
const unsigned mask) {
int qk_offset = blockIdx.x * seq_len;
assert(blockDim.x % 32 == 0);
float2 stride_max = make_float2(-1e20f, -1e20f);
for (int i = 0; threadIdx.x + i < seq_len; i += blockDim.x) {
float2 cur = phi::funcs::ToFloat2<T>(qk_buf_[threadIdx.x + i + qk_offset] +
bias_qk_[threadIdx.x + i + qk_offset]);
stride_max.x = max(stride_max.x, cur.x);
stride_max.y = max(stride_max.y, cur.y);
}
float max_val =
phi::funcs::BlockReduceMax<float>(max(stride_max.x, stride_max.y), mask);
float2 stride_sum = make_float2(0.f, 0.f);
for (int i = 0; threadIdx.x + i < seq_len; i += blockDim.x) {
float2 cur = phi::funcs::ToFloat2<T>(qk_buf_[threadIdx.x + i + qk_offset] +
bias_qk_[threadIdx.x + i + qk_offset]);
stride_sum.x += __expf(cur.x - max_val);
stride_sum.y += __expf(cur.y - max_val);
}
float sum_val =
phi::funcs::BlockReduceSum<float>(stride_sum.x + stride_sum.y, mask) +
1e-6f;
for (int i = 0; threadIdx.x + i < seq_len; i += blockDim.x) {
float2 cur = phi::funcs::ToFloat2<T>(qk_buf_[threadIdx.x + i + qk_offset] +
bias_qk_[threadIdx.x + i + qk_offset]);
qk_buf_[threadIdx.x + i + qk_offset] = phi::funcs::FloatsToPair<T>(
__expf(cur.x - max_val) / sum_val, __expf(cur.y - max_val) / sum_val);
}
}
template <>
__global__ void SoftmaxKernelWithEltaddForLarge2(half2 *qk_buf_,
const half2 *bias_qk_,
const int batch_size,
const int head_num,
const int seq_len,
const unsigned mask) {
// operator "+" of half only suppotted after cuda version 10.0
// HIP defined __HIP_NO_HALF_CONVERSIONS__ in hip.cmake
#if defined(PADDLE_WITH_CUDA) && \
(CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__) && TORCH_HIP_VERSION >= 10000)
int qk_offset = blockIdx.x * seq_len;
assert(blockDim.x % 32 == 0);
float2 stride_max = make_float2(-1e20f, -1e20f);
for (int i = 0; threadIdx.x + i < seq_len; i += blockDim.x) {
float2 cur =
phi::funcs::ToFloat2<half2>(qk_buf_[threadIdx.x + i + qk_offset] +
bias_qk_[threadIdx.x + i + qk_offset]);
stride_max.x = max(stride_max.x, cur.x);
stride_max.y = max(stride_max.y, cur.y);
}
float max_val =
phi::funcs::BlockReduceMax<float>(max(stride_max.x, stride_max.y), mask);
float2 stride_sum = make_float2(0.f, 0.f);
for (int i = 0; threadIdx.x + i < seq_len; i += blockDim.x) {
float2 cur =
phi::funcs::ToFloat2<half2>(qk_buf_[threadIdx.x + i + qk_offset] +
bias_qk_[threadIdx.x + i + qk_offset]);
stride_sum.x += __expf(cur.x - max_val);
stride_sum.y += __expf(cur.y - max_val);
}
float sum_val =
phi::funcs::BlockReduceSum<float>(stride_sum.x + stride_sum.y, mask) +
1e-6f;
for (int i = 0; threadIdx.x + i < seq_len; i += blockDim.x) {
float2 cur =
phi::funcs::ToFloat2<half2>(qk_buf_[threadIdx.x + i + qk_offset] +
bias_qk_[threadIdx.x + i + qk_offset]);
qk_buf_[threadIdx.x + i + qk_offset] = phi::funcs::FloatsToPair<half2>(
__expf(cur.x - max_val) / sum_val, __expf(cur.y - max_val) / sum_val);
}
#endif
}
template <typename T>
inline __device__ T ldg(const T *val) {
return __ldg(val);
}
template <typename T>
inline __device__ T hexp2(T a) {
return h2exp(a);
}
template <typename T_IN, typename T_OUT>
inline __device__ T_OUT type2type2(T_IN a);
template <>
inline __device__ half2 type2type2(half a) {
return __half2half2(a);
}
template <typename T>
inline __device__ T float2type2(float a);
template <>
inline __device__ half2 float2type2(float a) {
return __float2half2_rn(a);
}
template <typename T>
inline __device__ T hmul2(T a, T b) {
return __hmul2(a, b);
}
template <typename T>
inline __device__ T hsub2(T a, T b) {
return __hsub2(a, b);
}
template <typename T>
inline __device__ T hadd2(T a, T b) {
return __hadd2(a, b);
}
template <typename T, int ITEMS_PER_THREAD, int NUM>
__global__ void softmax_kernel_with_mask(T *qk_buf_,
const T *attr_mask,
const int batch_size,
const int head_num,
const int seq_len) {
using T2 = half2;
T2 *qk_buf_half2 = reinterpret_cast<T2 *>(qk_buf_);
const T2 *attr_mask_half2 = (const T2 *)attr_mask;
for (int seq_id = blockIdx.x; seq_id < seq_len; seq_id += gridDim.x * NUM) {
T2 data[NUM][ITEMS_PER_THREAD];
int qk_offset[NUM];
__shared__ float s_sum[NUM], s_max[NUM];
float local_max[NUM];
#pragma unroll
for (int j = 0; j < NUM; j++) {
local_max[j] = -1e20f;
}
for (int i = 0;
blockDim.x * i + threadIdx.x < (seq_len / 2) && i < ITEMS_PER_THREAD;
i++) {
int mask_offset[NUM];
#pragma unroll
for (int j = 0; j < NUM; j++) {
qk_offset[j] = ((blockIdx.y * head_num + blockIdx.z) * seq_len +
seq_id + j * gridDim.x) *
(seq_len / 2) +
blockDim.x * i + threadIdx.x;
mask_offset[j] =
(blockIdx.y * seq_len + seq_id + j * gridDim.x) * (seq_len / 2) +
blockDim.x * i + threadIdx.x;
}
T2 mask_val[NUM];
#pragma unroll
for (int j = 0; j < NUM; j++) {
mask_val[j] = ldg(&attr_mask_half2[mask_offset[j]]);
}
T2 qk[NUM];
#pragma unroll
for (int j = 0; j < NUM; j++) {
qk[j] = qk_buf_half2[qk_offset[j]];
}
#pragma unroll
for (int j = 0; j < NUM; j++) {
mask_val[j] = hmul2<T2>(hsub2<T2>(float2type2<T2>(1.0f), mask_val[j]),
float2type2<T2>(-10000.0f));
}
#pragma unroll
for (int j = 0; j < NUM; j++) {
data[j][i] = hadd2<T2>(qk[j], mask_val[j]);
local_max[j] = fmax(local_max[j],
fmax(static_cast<float>(data[j][i].x),
static_cast<float>(data[j][i].y)));
}
}
if (blockDim.x <= 32) {
phi::funcs::WarpReduceMaxV2<float, NUM>(local_max);
} else {
phi::funcs::BlockReduceMaxV2<float, NUM>(local_max);
}
if (threadIdx.x == 0) {
#pragma unroll
for (int j = 0; j < NUM; j++) {
s_max[j] = local_max[j];
}
}
__syncthreads();
float local_sum[NUM];
#pragma unroll
for (int j = 0; j < NUM; j++) {
local_sum[j] = {0.f};
}
for (int i = 0;
blockDim.x * i + threadIdx.x < (seq_len / 2) && i < ITEMS_PER_THREAD;
i++) {
#pragma unroll
for (int j = 0; j < NUM; j++) {
data[j][i] =
hexp2<T2>(hsub2<T2>(data[j][i], float2type2<T2>(s_max[j])));
}
#pragma unroll
for (int j = 0; j < NUM; j++) {
local_sum[j] += static_cast<float>(data[j][i].x + data[j][i].y);
}
}
if (blockDim.x <= 32) {
phi::funcs::WarpReduceSumV2<float, NUM>(local_sum);
} else {
phi::funcs::BlockReduceSumV2<float, NUM>(local_sum);
}
if (threadIdx.x == 0) {
#pragma unroll
for (int j = 0; j < NUM; j++) {
s_sum[j] = __fdividef(1.0f, local_sum[j] + 1e-6f);
}
}
__syncthreads();
for (int i = 0;
blockDim.x * i + threadIdx.x < (seq_len / 2) && i < ITEMS_PER_THREAD;
i++) {
#pragma unroll
for (int j = 0; j < NUM; j++) {
qk_offset[j] = ((blockIdx.y * head_num + blockIdx.z) * seq_len +
seq_id + j * gridDim.x) *
(seq_len / 2) +
blockDim.x * i + threadIdx.x;
}
#pragma unroll
for (int j = 0; j < NUM; j++) {
qk_buf_half2[qk_offset[j]] =
hmul2<T2>(data[j][i], float2type2<T2>(s_sum[j]));
}
}
}
}
#define SOFTMAX_KERNEL_WITH_MASK(REPEAT_THREAD) \
do { \
block.x /= REPEAT_THREAD; \
grid.x /= 4; \
constexpr int NUM = 4; \
hipLaunchKernelGGL(( softmax_kernel_with_mask<half, REPEAT_THREAD, NUM>) \
, dim3(grid), dim3(block), 0, stream, reinterpret_cast<half *>(qk_buf_), \
(const half *)bias_qk, \
batch_size, \
head_num, \
seq_len); \
} while (0)
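// A rough note on the macro above: the call sites first size block.x to cover
// the seq_len / 2 half2 elements of a row; when that exceeds the 1024-thread
// block limit, REPEAT_THREAD shrinks block.x again and each thread instead
// loops over ITEMS_PER_THREAD (= REPEAT_THREAD) half2 elements. NUM = 4
// sequence rows are handled per block, which is why grid.x is divided by 4.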
template <typename T>
inline void MatMulWithHeadQK(const phi::GPUContext &context,
int head_num,
int seq_len,
int size_per_head,
int batch_size,
bool q_trans,
bool k_trans,
T *q_buf_,
T *k_buf_,
T *qk_buf_,
const T *bias_qk,
bool bias_is_mask,
T alpha,
T beta) {
CBLAS_TRANSPOSE transA = !q_trans ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = !k_trans ? CblasNoTrans : CblasTrans;
typedef typename CUDATypeTraits<T>::TYPE run_type;
auto blas = phi::funcs::GetBlas<phi::GPUContext, run_type>(context);
auto stream = context.stream();
blas.BatchedGEMM(transA,
transB,
seq_len,
seq_len,
size_per_head,
static_cast<run_type>(alpha),
reinterpret_cast<run_type *>(q_buf_),
reinterpret_cast<run_type *>(k_buf_),
static_cast<run_type>(beta),
reinterpret_cast<run_type *>(qk_buf_),
batch_size * head_num,
seq_len * size_per_head,
seq_len * size_per_head);
if (seq_len <= 1024) {
int grid = batch_size * head_num * seq_len;
int block = seq_len;
// Align block to 32, also limit seq_len to max block size.
if (seq_len % 2 == 0) {
block = (seq_len <= 64) ? 32 : ((seq_len + 63) / 64) * 32;
if (std::is_same<T, float>::value) {
hipLaunchKernelGGL(( SoftmaxKernelWithEltadd2<float2>), dim3(grid), dim3(block), 0, stream,
reinterpret_cast<float2 *>(qk_buf_),
reinterpret_cast<const float2 *>(bias_qk),
batch_size,
head_num,
seq_len / 2,
FINAL_MASK);
} else {
if (bias_is_mask) {
#if defined(__HIPCC__) || (defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 700)
PADDLE_ENFORCE_EQ(bias_is_mask,
false,
platform::errors::InvalidArgument(
"QK_bias is mask can't be supported on rocm or "
"cuda_arch<700"));
#else
dim3 grid(seq_len, batch_size, head_num);
dim3 block((seq_len / 2 + 31) / 32 * 32);
SOFTMAX_KERNEL_WITH_MASK(1);
#endif
} else {
hipLaunchKernelGGL(( SoftmaxKernelWithEltadd2<__half2>), dim3(grid), dim3(block), 0, stream,
reinterpret_cast<__half2 *>(qk_buf_),
reinterpret_cast<const __half2 *>(bias_qk),
batch_size,
head_num,
seq_len / 2,
FINAL_MASK);
}
}
} else {
block = (seq_len <= 32) ? 32 : ((seq_len + 31) / 32) * 32;
hipLaunchKernelGGL(( SoftmaxKernelWithEltadd<T>), dim3(grid), dim3(block), 0, stream,
qk_buf_, bias_qk, batch_size, head_num, seq_len, FINAL_MASK);
}
} else {
int grid = batch_size * head_num * seq_len;
int block = 512;
if (seq_len % 2 == 0) {
if (std::is_same<T, float>::value) {
hipLaunchKernelGGL(( SoftmaxKernelWithEltaddForLarge2<float2>), dim3(grid), dim3(block), 0, stream,
reinterpret_cast<float2 *>(qk_buf_),
reinterpret_cast<const float2 *>(bias_qk),
batch_size,
head_num,
seq_len / 2,
FINAL_MASK);
} else {
if (bias_is_mask) {
#if defined(__HIPCC__) || (defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 700)
PADDLE_ENFORCE_EQ(bias_is_mask,
false,
platform::errors::InvalidArgument(
"QK_bias is mask can't be supported on rocm or "
"cuda_arch<700"));
#else
dim3 grid(seq_len, batch_size, head_num);
dim3 block((seq_len / 2 + 31) / 32 * 32);
if (block.x > 0 && block.x <= 1024) {
SOFTMAX_KERNEL_WITH_MASK(1);
} else if (block.x <= 2048) {
SOFTMAX_KERNEL_WITH_MASK(2);
} else if (block.x <= 4096) {
SOFTMAX_KERNEL_WITH_MASK(4);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"Cannot support the length of attention > 8192."));
}
#endif
} else {
hipLaunchKernelGGL(( SoftmaxKernelWithEltaddForLarge2<__half2>), dim3(grid), dim3(block), 0, stream,
reinterpret_cast<__half2 *>(qk_buf_),
reinterpret_cast<const __half2 *>(bias_qk),
batch_size,
head_num,
seq_len / 2,
FINAL_MASK);
}
}
} else {
hipLaunchKernelGGL(( SoftmaxKernelWithEltaddForLarge<T>), dim3(grid), dim3(block), 0, stream,
qk_buf_, bias_qk, batch_size, head_num, seq_len, FINAL_MASK);
}
}
}
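// Dispatch summary for MatMulWithHeadQK above (a rough guide): after the
// batched Q*K^T GEMM, rows with seq_len <= 1024 use the one-block-per-row
// softmax kernels (the *2 variants when seq_len is even, handling two
// elements per thread via float2/half2); longer rows fall back to the
// *ForLarge variants, where a fixed 512-thread block strides over the row.
// The bias_is_mask path launches softmax_kernel_with_mask through the macro
// above; it is only reached for fp16 and is rejected on ROCm or cuda_arch < 700.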
template <typename T>
inline void MatMulWithHeadQKV(const phi::GPUContext &context,
int head_num,
int seq_len,
int size_per_head,
int batch_size,
bool qk_trans,
bool v_trans,
T *v_buf_,
const T *qk_buf_,
T *dst,
T alpha,
T beta) {
int m = batch_size * seq_len;
int k = head_num * size_per_head;
typedef typename CUDATypeTraits<T>::TYPE run_type;
auto blas = phi::funcs::GetBlas<phi::GPUContext, run_type>(context);
auto stream = context.stream();
CBLAS_TRANSPOSE transA = !qk_trans ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = !v_trans ? CblasNoTrans : CblasTrans;
blas.BatchedGEMM(transA,
transB,
seq_len,
size_per_head,
seq_len,
static_cast<run_type>(alpha),
reinterpret_cast<const run_type *>(qk_buf_),
reinterpret_cast<run_type *>(v_buf_),
static_cast<run_type>(beta),
reinterpret_cast<run_type *>(dst),
batch_size * head_num,
seq_len * seq_len,
seq_len * size_per_head);
}
template <typename T>
void MultiHeadGPUComputeFunctor<T>::operator()(const phi::GPUContext &dev_ctx,
int batch,
int seq_len,
int head_num,
int head_size,
T *qkptr,
const T *bias_qk_ptr,
bool bias_is_mask,
T *tptr,
T alpha,
T beta) {
auto stream = dev_ctx.stream();
const int tsize = batch * head_num * seq_len * head_size;
T *qptr = tptr;
T *kptr = qptr + tsize;
T *vptr = kptr + tsize;
// batch gemm stride, softmaxwithscale.
MatMulWithHeadQK<T>(dev_ctx,
head_num,
seq_len,
head_size,
batch,
false,
true,
qptr,
kptr,
qkptr,
bias_qk_ptr,
bias_is_mask,
alpha,
beta);
// batch gemm stride, transpose.
MatMulWithHeadQKV<T>(dev_ctx,
head_num,
seq_len,
head_size,
batch,
false,
false,
vptr,
qkptr,
tptr,
T(1.0),
beta);
}
template class MultiHeadGPUComputeFunctor<float>;
// device function 'operator()' is not supported until CUDA 10.0
// HIP defined __HIP_NO_HALF_CONVERSIONS__ in hip.cmake
#if defined(PADDLE_WITH_CUDA) && TORCH_HIP_VERSION >= 10000
template class MultiHeadGPUComputeFunctor<half>;
#endif
template <typename T, unsigned TPB>
__global__ void SkipLayerNormSmallKernel(int num,
int hidden,
const T *input1,
const T *input2,
T *output,
const T *scale,
const T *bias,
T eps) {
const T rld = T(1) / T(hidden);
const int offset = blockIdx.x * hidden;
hipcub::Sum pair_sum;
phi::funcs::kvp<T> thread_data(0, 0);
const int idx = offset + threadIdx.x;
T val = 0;
if (threadIdx.x < hidden) {
val = input1[idx] + input2[idx];
const T rldval = rld * val;
thread_data =
pair_sum(thread_data, phi::funcs::kvp<T>(rldval, rldval * val));
}
LayerNormSmall<T, TPB>(
val, thread_data, hidden, idx, bias, scale, output, eps);
}
// HIP defined __HIP_NO_HALF_CONVERSIONS__ in hip.cmake
#ifndef __HIPCC__ // @{ Half kernel: SkipLayerNormSmallKernel
template <>
__global__ void SkipLayerNormSmallKernel<half, 32>(int num,
int hidden,
const half *input1,
const half *input2,
half *output,
const half *scale,
const half *bias,
half eps) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
const half rld = half(1) / half(hidden);
const int offset = blockIdx.x * hidden;
hipcub::Sum pair_sum;
phi::funcs::kvp<half> thread_data(0, 0);
const int idx = offset + threadIdx.x;
half val = 0;
if (threadIdx.x < hidden) {
val = input1[idx] + input2[idx];
const half rldval = rld * val;
thread_data =
pair_sum(thread_data, phi::funcs::kvp<half>(rldval, rldval * val));
}
LayerNormSmall<half, 32>(
val, thread_data, hidden, idx, bias, scale, output, eps);
#endif
}
template <>
__global__ void SkipLayerNormSmallKernel<half, 128>(int num,
int hidden,
const half *input1,
const half *input2,
half *output,
const half *scale,
const half *bias,
half eps) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
const half rld = half(1) / half(hidden);
const int offset = blockIdx.x * hidden;
hipcub::Sum pair_sum;
phi::funcs::kvp<half> thread_data(0, 0);
const int idx = offset + threadIdx.x;
half val = 0;
if (threadIdx.x < hidden) {
val = input1[idx] + input2[idx];
const half rldval = rld * val;
thread_data =
pair_sum(thread_data, phi::funcs::kvp<half>(rldval, rldval * val));
}
LayerNormSmall<half, 128>(
val, thread_data, hidden, idx, bias, scale, output, eps);
#endif
}
template <>
__global__ void SkipLayerNormSmallKernel<half, 384>(int num,
int hidden,
const half *input1,
const half *input2,
half *output,
const half *scale,
const half *bias,
half eps) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
const half rld = half(1) / half(hidden);
const int offset = blockIdx.x * hidden;
hipcub::Sum pair_sum;
phi::funcs::kvp<half> thread_data(0, 0);
const int idx = offset + threadIdx.x;
half val = 0;
if (threadIdx.x < hidden) {
val = input1[idx] + input2[idx];
const half rldval = rld * val;
thread_data =
pair_sum(thread_data, phi::funcs::kvp<half>(rldval, rldval * val));
}
LayerNormSmall<half, 384>(
val, thread_data, hidden, idx, bias, scale, output, eps);
#endif
}
#endif // @} End Half kernel: SkipLayerNormSmallKernel
template <typename T, unsigned TPB>
__global__ void SkipLayerNormKernel(int num,
int hidden,
const T *input1,
const T *input2,
T *output,
const T *scale,
const T *bias,
T eps) {
const T rld = T(1) / T(hidden);
const int offset = blockIdx.x * hidden;
hipcub::Sum pair_sum;
phi::funcs::kvp<T> thread_data(0, 0);
for (int it = threadIdx.x; it < hidden; it += TPB) {
const int idx = offset + it;
const T val = input1[idx] + input2[idx];
const T rldval = rld * val;
thread_data =
pair_sum(thread_data, phi::funcs::kvp<T>(rldval, rldval * val));
output[idx] = val;
}
LayerNorm<T, TPB>(thread_data, hidden, offset, bias, scale, output, eps);
}
// HIP defined __HIP_NO_HALF_CONVERSIONS__ in hip.cmake
#ifndef __HIPCC__ // @{ Half kernel: SkipLayerNormKernel
template <>
__global__ void SkipLayerNormKernel<half, 256>(int num,
int hidden,
const half *input1,
const half *input2,
half *output,
const half *scale,
const half *bias,
half eps) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
const half rld = half(1) / half(hidden);
const int offset = blockIdx.x * hidden;
hipcub::Sum pair_sum;
phi::funcs::kvp<half> thread_data(0, 0);
for (int it = threadIdx.x; it < hidden; it += 256) {
const int idx = offset + it;
const half val = input1[idx] + input2[idx];
const half rldval = rld * val;
thread_data =
pair_sum(thread_data, phi::funcs::kvp<half>(rldval, rldval * val));
output[idx] = val;
}
LayerNorm<half, 256>(thread_data, hidden, offset, bias, scale, output, eps);
#endif
}
#endif // @} End Half kernel: SkipLayerNormKernel
template <typename T, typename T2, unsigned TPB>
__global__ void SkipLayerNormKernel2(int num,
int hidden,
const T2 *input1,
const T2 *input2,
T2 *output,
const T2 *scale,
const T2 *bias,
float eps) {
const T rld = T(0.5f / hidden); // because hidden is hidden/2
const int offset = blockIdx.x * hidden;
hipcub::Sum pair_sum;
phi::funcs::kvp<T> thread_data(0, 0);
for (int it = threadIdx.x; it < hidden; it += TPB) {
const int idx = offset + it;
const T2 val2 = input1[idx] + input2[idx];
thread_data = pair_sum(
thread_data,
phi::funcs::kvp<T>(rld * (val2.x + val2.y),
rld * val2.x * val2.x + rld * val2.y * val2.y));
output[idx] = val2;
}
LayerNorm2<T, T2, TPB>(thread_data, hidden, offset, bias, scale, output, eps);
}
// HIP defined __HIP_NO_HALF_CONVERSIONS__ in hip.cmake
#ifndef __HIPCC__ // @{ Half kernel: SkipLayerNormKernel2
template <>
__global__ void SkipLayerNormKernel2<half, half2, 256>(int num,
int hidden,
const half2 *input1,
const half2 *input2,
half2 *output,
const half2 *scale,
const half2 *bias,
float eps) {
  // operator "+" of half is only supported after CUDA version 10.0
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__) && TORCH_HIP_VERSION >= 10000
const half rld = half(0.5f / hidden); // because hidden is hidden/2
const int offset = blockIdx.x * hidden;
hipcub::Sum pair_sum;
phi::funcs::kvp<half> thread_data(0, 0);
for (int it = threadIdx.x; it < hidden; it += 256) {
const int idx = offset + it;
const half2 val2 = input1[idx] + input2[idx];
thread_data = pair_sum(
thread_data,
phi::funcs::kvp<half>(rld * (val2.x + val2.y),
rld * val2.x * val2.x + rld * val2.y * val2.y));
output[idx] = val2;
}
LayerNorm2<half, half2, 256>(
thread_data, hidden, offset, bias, scale, output, eps);
#endif
}
#endif // @} End Half kernel: SkipLayerNormKernel2
template <typename T>
void SkipLayerNormFunctor<T>::operator()(const int num,
const int hidden,
const T *input1,
const T *input2,
const T *scale,
const T *bias,
T *output,
float eps,
gpuStream_t stream) {
int block = num / hidden;
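  // One thread block per row: `num` is the total element count and `hidden`
  // the row width, so num / hidden rows are normalized independently.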
if (hidden <= 32) {
const int threads = 32;
hipLaunchKernelGGL(( SkipLayerNormSmallKernel<T, threads>), dim3(block), dim3(threads), 0, stream,
num, hidden, input1, input2, output, scale, bias, eps);
} else if (hidden <= 128) {
const int threads = 128;
hipLaunchKernelGGL(( SkipLayerNormSmallKernel<T, threads>), dim3(block), dim3(threads), 0, stream,
num, hidden, input1, input2, output, scale, bias, eps);
} else if (hidden == 384) {
const int threads = 384;
hipLaunchKernelGGL(( SkipLayerNormSmallKernel<T, threads>), dim3(block), dim3(threads), 0, stream,
num, hidden, input1, input2, output, scale, bias, eps);
} else {
const int threads = 256;
if (hidden % 2 == 0) {
if (std::is_same<T, float>::value) {
hipLaunchKernelGGL(( SkipLayerNormKernel2<float, float2, threads>)
, dim3(block), dim3(threads), 0, stream,
num,
hidden / 2,
reinterpret_cast<const float2 *>(input1),
reinterpret_cast<const float2 *>(input2),
reinterpret_cast<float2 *>(output),
reinterpret_cast<const float2 *>(scale),
reinterpret_cast<const float2 *>(bias),
eps);
// HIP defined __HIP_NO_HALF_CONVERSIONS__ in hip.cmake
#ifndef __HIPCC__
} else if (std::is_same<T, __half>::value) {
hipLaunchKernelGGL(( SkipLayerNormKernel2<__half, __half2, threads>)
, dim3(block), dim3(threads), 0, stream,
num,
hidden / 2,
reinterpret_cast<const __half2 *>(input1),
reinterpret_cast<const __half2 *>(input2),
reinterpret_cast<__half2 *>(output),
reinterpret_cast<const __half2 *>(scale),
reinterpret_cast<const __half2 *>(bias),
eps);
#endif
} else {
assert(false);
// should not be here
}
} else {
hipLaunchKernelGGL(( SkipLayerNormKernel<T, threads>), dim3(block), dim3(threads), 0, stream,
num, hidden, input1, input2, output, scale, bias, eps);
}
}
}
template class SkipLayerNormFunctor<float>;
// device function 'operator()' is not supported until CUDA 10.0
// HIP defined __HIP_NO_HALF_CONVERSIONS__ in hip.cmake
#if defined(PADDLE_WITH_CUDA) && TORCH_HIP_VERSION >= 10000
template class SkipLayerNormFunctor<half>;
#endif
} // namespace math
} // namespace operators
} // namespace paddle
|
d72e13eddeba5d1d56bf2e33f33ad964083a23fc.cu
|
/* Copyright (c) 2016 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <algorithm>
#include <type_traits>
#include "paddle/fluid/framework/tensor.h"
#include "paddle/fluid/framework/tensor_util.h"
#include "paddle/fluid/operators/math/bert_encoder_functor.h"
#include "paddle/fluid/platform/enforce.h"
#include "paddle/phi/kernels/funcs/blas/blas.h"
#include "paddle/phi/kernels/funcs/math_cuda_utils.h"
namespace paddle {
namespace operators {
namespace math {
// NOTE(chenfeiyu): explicitly use operator+ for float2
// since float2 is not in namespace phi::funcs, ADL won't help
using phi::funcs::operator+;
template <typename T>
__device__ __forceinline__ T local_rsqrt(T num) {
return rsqrt(static_cast<float>(num));
}
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
__device__ __forceinline__ half local_rsqrt(half num) { return hrsqrt(num); }
#endif
template <typename T, int TPB>
__device__ inline void LayerNormSmall(T val,
const phi::funcs::kvp<T> &thread_data,
const int ld,
const int idx,
const T *bias,
const T *scale,
T *output,
T eps) {
using BlockReduce = cub::BlockReduce<phi::funcs::kvp<T>, TPB>;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ T mu; // mean
__shared__ T rsigma; // 1 / std.dev.
const auto sum_kv = BlockReduce(temp_storage).Reduce(thread_data, cub::Sum());
if (threadIdx.x == 0) {
mu = sum_kv.key;
rsigma = local_rsqrt(sum_kv.value - mu * mu + eps);
}
__syncthreads();
if (threadIdx.x < ld) {
const T g(scale[threadIdx.x]);
const T b(bias[threadIdx.x]);
output[idx] = g * (val - mu) * rsigma + b;
}
}
template <typename T, int TPB>
__device__ inline void LayerNorm(const phi::funcs::kvp<T> &thread_data,
const int ld,
const int offset,
const T *bias,
const T *scale,
T *output,
T eps) {
using BlockReduce = cub::BlockReduce<phi::funcs::kvp<T>, TPB>;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ T mu; // mean
__shared__ T rsigma; // 1 / std.dev.
const auto sum_kv = BlockReduce(temp_storage).Reduce(thread_data, cub::Sum());
if (threadIdx.x == 0) {
mu = sum_kv.key;
rsigma = local_rsqrt(sum_kv.value - mu * mu + eps);
}
__syncthreads();
for (int i = threadIdx.x; i < ld; i += TPB) {
const int idx = offset + i;
const T val = output[idx];
const T g(scale[i]);
const T b(bias[i]);
output[idx] = g * (val - mu) * rsigma + b;
}
}
template <typename T, typename T2, int TPB>
__device__ inline void LayerNorm2(const phi::funcs::kvp<T> &thread_data,
const int ld,
const int offset,
const T2 *bias,
const T2 *scale,
T2 *output,
T eps) {
using BlockReduce = cub::BlockReduce<phi::funcs::kvp<T>, TPB>;
__shared__ typename BlockReduce::TempStorage temp_storage;
__shared__ T mu; // mean
__shared__ T rsigma; // 1 / std.dev.
const auto sum_kv = BlockReduce(temp_storage).Reduce(thread_data, cub::Sum());
if (threadIdx.x == 0) {
mu = sum_kv.key;
rsigma = local_rsqrt(sum_kv.value - mu * mu + eps);
}
__syncthreads();
for (int i = threadIdx.x; i < ld; i += TPB) {
const int idx = offset + i;
T2 val = output[idx];
const T2 g = scale[i];
const T2 b = bias[i];
val.x = T(g.x) * (val.x - mu) * rsigma + T(b.x);
val.y = T(g.y) * (val.y - mu) * rsigma + T(b.y);
output[idx] = val;
}
}
template <typename T, unsigned TPB>
__global__ void EmbEltwiseLayernormKernel(int hidden,
const int64_t *ids,
const T *scale,
const T *bias,
const int64_t *embs,
T *output,
T eps,
int input_num) {
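  // Note: `ids` and `embs` store device pointers encoded as int64_t values;
  // each entry is reinterpret_cast back to a typed pointer before it is read.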
cub::Sum pair_sum;
// blockIdx.x: position in the sequence
// blockIdx.y: batch
// gridDim.x: Seq
// gridDim.y: Batch
extern __shared__ int64_t array_id[];
const T rhidden = T(1.f) / T(hidden);
const int64_t seq_pos = blockIdx.y + blockIdx.x * gridDim.y;
if (threadIdx.x == 0) {
for (int i = 0; i < input_num; ++i) {
const int64_t *ids_p = reinterpret_cast<const int64_t *>(ids[i]);
array_id[i] = ids_p[seq_pos];
}
}
__syncthreads();
const int64_t out_offset = seq_pos * hidden;
phi::funcs::kvp<T> thread_data(0, 0);
#pragma unroll
for (int it = threadIdx.x; it < hidden; it += TPB) {
T val = 0;
for (int i = 0; i < input_num; ++i) {
val += reinterpret_cast<const T *>(embs[i])[array_id[i] * hidden + it];
}
output[out_offset + it] = val;
const T rhiddenval = rhidden * val;
thread_data =
pair_sum(thread_data, phi::funcs::kvp<T>(rhiddenval, rhiddenval * val));
}
LayerNorm<T, TPB>(thread_data, hidden, out_offset, bias, scale, output, eps);
}
// HIP defined __HIP_NO_HALF_CONVERSIONS__ in hip.cmake
#ifndef __HIPCC__ // @{ Half kernel: EmbEltwiseLayernormKernel
template <>
__global__ void EmbEltwiseLayernormKernel<half, 256>(int hidden,
const int64_t *ids,
const half *scale,
const half *bias,
const int64_t *embs,
half *output,
half eps,
int input_num) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
cub::Sum pair_sum;
// blockIdx.x: position in the sequence
// blockIdx.y: batch
// gridDim.x: Seq
// gridDim.y: Batch
extern __shared__ int64_t array_id[];
const half rhidden = half(1.f) / half(hidden);
const int64_t seq_pos = blockIdx.y + blockIdx.x * gridDim.y;
if (threadIdx.x == 0) {
for (int i = 0; i < input_num; ++i) {
const int64_t *ids_p = reinterpret_cast<const int64_t *>(ids[i]);
array_id[i] = ids_p[seq_pos];
}
}
__syncthreads();
const int64_t out_offset = seq_pos * hidden;
phi::funcs::kvp<half> thread_data(0, 0);
#pragma unroll
for (int it = threadIdx.x; it < hidden; it += 256) {
half val = 0;
for (int i = 0; i < input_num; ++i) {
val += reinterpret_cast<const half *>(embs[i])[array_id[i] * hidden + it];
}
output[out_offset + it] = val;
const half rhiddenval = rhidden * val;
thread_data = pair_sum(thread_data,
phi::funcs::kvp<half>(rhiddenval, rhiddenval * val));
}
LayerNorm<half, 256>(
thread_data, hidden, out_offset, bias, scale, output, eps);
#endif
}
#endif // @} End Half kernel: EmbEltwiseLayernormKernel
template <typename T>
void EmbEltwiseLayerNormFunctor<T>::operator()(int batch,
int seq_len,
int hidden,
const int64_t *ids,
const T *scale,
const T *bias,
const int64_t *embs,
T *output,
float eps,
int input_num,
gpuStream_t stream) {
const unsigned tpb = 256;
const dim3 grid(seq_len, batch, 1);
const dim3 block(tpb, 1, 1);
int shared_bytes = input_num * sizeof(int64_t);
EmbEltwiseLayernormKernel<T, tpb><<<grid, block, shared_bytes, stream>>>(
hidden, ids, scale, bias, embs, output, eps, input_num);
}
template class EmbEltwiseLayerNormFunctor<float>;
// device function 'operator()' is not supported until CUDA 10.0
// HIP defined __HIP_NO_HALF_CONVERSIONS__ in hip.cmake
#if defined(PADDLE_WITH_CUDA) && CUDA_VERSION >= 10000
template class EmbEltwiseLayerNormFunctor<half>;
#endif
template <typename T>
__global__ void SoftmaxKernelWithEltadd(T *qk_buf_,
const T *bias_qk_,
const int batch_size,
const int head_num,
const int seq_len,
const unsigned mask) {
int qk_offset = blockIdx.x * seq_len;
assert(blockDim.x % 32 == 0);
float tmp = threadIdx.x < seq_len
? static_cast<float>(qk_buf_[threadIdx.x + qk_offset] +
bias_qk_[threadIdx.x + qk_offset])
: -1e20f;
float max_val = phi::funcs::BlockReduceMax<float>(tmp, mask);
float qk_tmp = threadIdx.x < seq_len ? __expf(tmp - max_val) : 0.0f;
float sum_val = phi::funcs::BlockReduceSum<float>(qk_tmp, mask);
if (threadIdx.x < seq_len)
qk_buf_[threadIdx.x + qk_offset] = (T)(qk_tmp / sum_val);
}
// HIP defined __HIP_NO_HALF_CONVERSIONS__
#ifndef __HIPCC__ // @{ Half kernel: SoftmaxKernelWithEltadd
template <>
__global__ void SoftmaxKernelWithEltadd<half>(half *qk_buf_,
const half *bias_qk_,
const int batch_size,
const int head_num,
const int seq_len,
const unsigned mask) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
int qk_offset = blockIdx.x * seq_len;
assert(blockDim.x % 32 == 0);
float tmp = threadIdx.x < seq_len
? static_cast<float>(qk_buf_[threadIdx.x + qk_offset] +
bias_qk_[threadIdx.x + qk_offset])
: -1e20f;
float max_val = phi::funcs::BlockReduceMax<float>(tmp, mask);
float qk_tmp = threadIdx.x < seq_len ? __expf(tmp - max_val) : 0.0f;
float sum_val = phi::funcs::BlockReduceSum<float>(qk_tmp, mask);
if (threadIdx.x < seq_len)
qk_buf_[threadIdx.x + qk_offset] = (half)(qk_tmp / sum_val);
#endif
}
#endif // @} End Half kernel: SoftmaxKernelWithEltadd
template <typename T>
__global__ void SoftmaxKernelWithEltadd2(T *qk_buf_,
const T *bias_qk_,
const int batch_size,
const int head_num,
const int seq_len,
const unsigned mask) {
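  // Packed variant: every element of qk_buf_ / bias_qk_ holds two adjacent
  // logits (e.g. float2 or half2), so `seq_len` here is the packed row length
  // (half of the original sequence length at the call site).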
int qk_offset = blockIdx.x * seq_len;
int idx = threadIdx.x;
assert(blockDim.x % 32 == 0);
float2 tmp = idx < seq_len
? phi::funcs::ToFloat2<T>(qk_buf_[idx + qk_offset] +
bias_qk_[idx + qk_offset])
: make_float2(-1e20f, -1e20f);
float max_val = phi::funcs::BlockReduceMax<float>(max(tmp.x, tmp.y), mask);
float2 qk_tmp = idx < seq_len ? make_float2(__expf(tmp.x - max_val),
__expf(tmp.y - max_val))
: make_float2(0.f, 0.f);
float sum_val =
phi::funcs::BlockReduceSum<float>(qk_tmp.x + qk_tmp.y, mask) + 1e-6f;
if (idx < seq_len) {
qk_buf_[idx + qk_offset] =
phi::funcs::FloatsToPair<T>(qk_tmp.x / sum_val, qk_tmp.y / sum_val);
}
}
template <>
__global__ void SoftmaxKernelWithEltadd2<half2>(half2 *qk_buf_,
const half2 *bias_qk_,
const int batch_size,
const int head_num,
const int seq_len,
const unsigned mask) {
  // operator "+" of half is only supported after CUDA version 10.0
// HIP defined __HIP_NO_HALF_CONVERSIONS__ in hip.cmake
#if defined(PADDLE_WITH_CUDA) && \
(CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__) && CUDA_VERSION >= 10000)
int qk_offset = blockIdx.x * seq_len;
int idx = threadIdx.x;
assert(blockDim.x % 32 == 0);
float2 tmp = idx < seq_len
? phi::funcs::ToFloat2<half2>(qk_buf_[idx + qk_offset] +
bias_qk_[idx + qk_offset])
: make_float2(-1e20f, -1e20f);
float max_val = phi::funcs::BlockReduceMax<float>(max(tmp.x, tmp.y), mask);
float2 qk_tmp = idx < seq_len ? make_float2(__expf(tmp.x - max_val),
__expf(tmp.y - max_val))
: make_float2(0.f, 0.f);
float sum_val =
phi::funcs::BlockReduceSum<float>(qk_tmp.x + qk_tmp.y, mask) + 1e-6f;
if (idx < seq_len) {
qk_buf_[idx + qk_offset] =
phi::funcs::FloatsToPair<half2>(qk_tmp.x / sum_val, qk_tmp.y / sum_val);
}
#endif
}
template <typename T>
__global__ void SoftmaxKernelWithEltaddForLarge(T *qk_buf,
const T *bias_qk,
const int batch_size,
const int head_num,
const int seq_len,
const unsigned mask) {
int qk_offset = blockIdx.x * seq_len;
assert(blockDim.x % 32 == 0);
T stride_max = -1e20f;
for (int i = 0; threadIdx.x + i < seq_len; i += blockDim.x) {
stride_max = qk_buf[threadIdx.x + i + qk_offset] +
bias_qk[threadIdx.x + i + qk_offset] >
stride_max
? qk_buf[threadIdx.x + i + qk_offset] +
bias_qk[threadIdx.x + i + qk_offset]
: stride_max;
}
T max_val = phi::funcs::BlockReduceMax<T>(stride_max, mask);
T stride_sum = 0.f;
for (int i = 0; threadIdx.x + i < seq_len; i += blockDim.x) {
stride_sum += __expf(qk_buf[threadIdx.x + i + qk_offset] +
bias_qk[threadIdx.x + i + qk_offset] - max_val);
}
T sum_val = phi::funcs::BlockReduceSum<T>(stride_sum, mask);
for (int i = 0; threadIdx.x + i < seq_len; i += blockDim.x) {
qk_buf[threadIdx.x + i + qk_offset] =
(T)(__expf(qk_buf[threadIdx.x + i + qk_offset] +
bias_qk[threadIdx.x + i + qk_offset] - max_val) /
sum_val);
}
}
// HIP defined __HIP_NO_HALF_CONVERSIONS__
#ifndef __HIPCC__ // @{ Half kernel: SoftmaxKernelWithEltadd
template <>
__global__ void SoftmaxKernelWithEltaddForLarge(half *qk_buf,
const half *bias_qk,
const int batch_size,
const int head_num,
const int seq_len,
const unsigned mask) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
int qk_offset = blockIdx.x * seq_len;
assert(blockDim.x % 32 == 0);
float stride_max = -1e20f;
for (int i = 0; threadIdx.x + i < seq_len; i += blockDim.x) {
float tmp = static_cast<float>(qk_buf[threadIdx.x + i + qk_offset] +
bias_qk[threadIdx.x + i + qk_offset]);
stride_max = tmp > stride_max ? tmp : stride_max;
}
float max_val = phi::funcs::BlockReduceMax<float>(stride_max, mask);
float stride_sum = 0.f;
for (int i = 0; threadIdx.x + i < seq_len; i += blockDim.x) {
float tmp = static_cast<float>(qk_buf[threadIdx.x + i + qk_offset] +
bias_qk[threadIdx.x + i + qk_offset]);
stride_sum += __expf(tmp - max_val);
}
float sum_val = phi::funcs::BlockReduceSum<float>(stride_sum, mask);
for (int i = 0; threadIdx.x + i < seq_len; i += blockDim.x) {
float tmp =
__expf(static_cast<float>(qk_buf[threadIdx.x + i + qk_offset] +
bias_qk[threadIdx.x + i + qk_offset]) -
max_val);
qk_buf[threadIdx.x + i + qk_offset] = (half)(tmp / sum_val);
}
#endif
}
#endif // @} End Half kernel: SoftmaxKernelWithEltadd
template <typename T>
__global__ void SoftmaxKernelWithEltaddForLarge2(T *qk_buf_,
const T *bias_qk_,
const int batch_size,
const int head_num,
const int seq_len,
const unsigned mask) {
int qk_offset = blockIdx.x * seq_len;
assert(blockDim.x % 32 == 0);
float2 stride_max = make_float2(-1e20f, -1e20f);
for (int i = 0; threadIdx.x + i < seq_len; i += blockDim.x) {
float2 cur = phi::funcs::ToFloat2<T>(qk_buf_[threadIdx.x + i + qk_offset] +
bias_qk_[threadIdx.x + i + qk_offset]);
stride_max.x = max(stride_max.x, cur.x);
stride_max.y = max(stride_max.y, cur.y);
}
float max_val =
phi::funcs::BlockReduceMax<float>(max(stride_max.x, stride_max.y), mask);
float2 stride_sum = make_float2(0.f, 0.f);
for (int i = 0; threadIdx.x + i < seq_len; i += blockDim.x) {
float2 cur = phi::funcs::ToFloat2<T>(qk_buf_[threadIdx.x + i + qk_offset] +
bias_qk_[threadIdx.x + i + qk_offset]);
stride_sum.x += __expf(cur.x - max_val);
stride_sum.y += __expf(cur.y - max_val);
}
float sum_val =
phi::funcs::BlockReduceSum<float>(stride_sum.x + stride_sum.y, mask) +
1e-6f;
for (int i = 0; threadIdx.x + i < seq_len; i += blockDim.x) {
float2 cur = phi::funcs::ToFloat2<T>(qk_buf_[threadIdx.x + i + qk_offset] +
bias_qk_[threadIdx.x + i + qk_offset]);
qk_buf_[threadIdx.x + i + qk_offset] = phi::funcs::FloatsToPair<T>(
__expf(cur.x - max_val) / sum_val, __expf(cur.y - max_val) / sum_val);
}
}
template <>
__global__ void SoftmaxKernelWithEltaddForLarge2(half2 *qk_buf_,
const half2 *bias_qk_,
const int batch_size,
const int head_num,
const int seq_len,
const unsigned mask) {
  // operator "+" of half is only supported after CUDA version 10.0
// HIP defined __HIP_NO_HALF_CONVERSIONS__ in hip.cmake
#if defined(PADDLE_WITH_CUDA) && \
(CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__) && CUDA_VERSION >= 10000)
int qk_offset = blockIdx.x * seq_len;
assert(blockDim.x % 32 == 0);
float2 stride_max = make_float2(-1e20f, -1e20f);
for (int i = 0; threadIdx.x + i < seq_len; i += blockDim.x) {
float2 cur =
phi::funcs::ToFloat2<half2>(qk_buf_[threadIdx.x + i + qk_offset] +
bias_qk_[threadIdx.x + i + qk_offset]);
stride_max.x = max(stride_max.x, cur.x);
stride_max.y = max(stride_max.y, cur.y);
}
float max_val =
phi::funcs::BlockReduceMax<float>(max(stride_max.x, stride_max.y), mask);
float2 stride_sum = make_float2(0.f, 0.f);
for (int i = 0; threadIdx.x + i < seq_len; i += blockDim.x) {
float2 cur =
phi::funcs::ToFloat2<half2>(qk_buf_[threadIdx.x + i + qk_offset] +
bias_qk_[threadIdx.x + i + qk_offset]);
stride_sum.x += __expf(cur.x - max_val);
stride_sum.y += __expf(cur.y - max_val);
}
float sum_val =
phi::funcs::BlockReduceSum<float>(stride_sum.x + stride_sum.y, mask) +
1e-6f;
for (int i = 0; threadIdx.x + i < seq_len; i += blockDim.x) {
float2 cur =
phi::funcs::ToFloat2<half2>(qk_buf_[threadIdx.x + i + qk_offset] +
bias_qk_[threadIdx.x + i + qk_offset]);
qk_buf_[threadIdx.x + i + qk_offset] = phi::funcs::FloatsToPair<half2>(
__expf(cur.x - max_val) / sum_val, __expf(cur.y - max_val) / sum_val);
}
#endif
}
template <typename T>
inline __device__ T ldg(const T *val) {
return __ldg(val);
}
template <typename T>
inline __device__ T hexp2(T a) {
return h2exp(a);
}
template <typename T_IN, typename T_OUT>
inline __device__ T_OUT type2type2(T_IN a);
template <>
inline __device__ half2 type2type2(half a) {
return __half2half2(a);
}
template <typename T>
inline __device__ T float2type2(float a);
template <>
inline __device__ half2 float2type2(float a) {
return __float2half2_rn(a);
}
template <typename T>
inline __device__ T hmul2(T a, T b) {
return __hmul2(a, b);
}
template <typename T>
inline __device__ T hsub2(T a, T b) {
return __hsub2(a, b);
}
template <typename T>
inline __device__ T hadd2(T a, T b) {
return __hadd2(a, b);
}
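// Masked softmax over rows packed as half2 pairs: the attention mask is turned
// into an additive bias of -10000 for masked positions, and each thread block
// reduces NUM rows of seq_len / 2 packed elements per iteration.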
template <typename T, int ITEMS_PER_THREAD, int NUM>
__global__ void softmax_kernel_with_mask(T *qk_buf_,
const T *attr_mask,
const int batch_size,
const int head_num,
const int seq_len) {
using T2 = half2;
T2 *qk_buf_half2 = reinterpret_cast<T2 *>(qk_buf_);
const T2 *attr_mask_half2 = (const T2 *)attr_mask;
for (int seq_id = blockIdx.x; seq_id < seq_len; seq_id += gridDim.x * NUM) {
T2 data[NUM][ITEMS_PER_THREAD];
int qk_offset[NUM];
__shared__ float s_sum[NUM], s_max[NUM];
float local_max[NUM];
#pragma unroll
for (int j = 0; j < NUM; j++) {
local_max[j] = -1e20f;
}
for (int i = 0;
blockDim.x * i + threadIdx.x < (seq_len / 2) && i < ITEMS_PER_THREAD;
i++) {
int mask_offset[NUM];
#pragma unroll
for (int j = 0; j < NUM; j++) {
qk_offset[j] = ((blockIdx.y * head_num + blockIdx.z) * seq_len +
seq_id + j * gridDim.x) *
(seq_len / 2) +
blockDim.x * i + threadIdx.x;
mask_offset[j] =
(blockIdx.y * seq_len + seq_id + j * gridDim.x) * (seq_len / 2) +
blockDim.x * i + threadIdx.x;
}
T2 mask_val[NUM];
#pragma unroll
for (int j = 0; j < NUM; j++) {
mask_val[j] = ldg(&attr_mask_half2[mask_offset[j]]);
}
T2 qk[NUM];
#pragma unroll
for (int j = 0; j < NUM; j++) {
qk[j] = qk_buf_half2[qk_offset[j]];
}
#pragma unroll
for (int j = 0; j < NUM; j++) {
mask_val[j] = hmul2<T2>(hsub2<T2>(float2type2<T2>(1.0f), mask_val[j]),
float2type2<T2>(-10000.0f));
}
#pragma unroll
for (int j = 0; j < NUM; j++) {
data[j][i] = hadd2<T2>(qk[j], mask_val[j]);
local_max[j] = fmax(local_max[j],
fmax(static_cast<float>(data[j][i].x),
static_cast<float>(data[j][i].y)));
}
}
if (blockDim.x <= 32) {
phi::funcs::WarpReduceMaxV2<float, NUM>(local_max);
} else {
phi::funcs::BlockReduceMaxV2<float, NUM>(local_max);
}
if (threadIdx.x == 0) {
#pragma unroll
for (int j = 0; j < NUM; j++) {
s_max[j] = local_max[j];
}
}
__syncthreads();
float local_sum[NUM];
#pragma unroll
for (int j = 0; j < NUM; j++) {
local_sum[j] = {0.f};
}
for (int i = 0;
blockDim.x * i + threadIdx.x < (seq_len / 2) && i < ITEMS_PER_THREAD;
i++) {
#pragma unroll
for (int j = 0; j < NUM; j++) {
data[j][i] =
hexp2<T2>(hsub2<T2>(data[j][i], float2type2<T2>(s_max[j])));
}
#pragma unroll
for (int j = 0; j < NUM; j++) {
local_sum[j] += static_cast<float>(data[j][i].x + data[j][i].y);
}
}
if (blockDim.x <= 32) {
phi::funcs::WarpReduceSumV2<float, NUM>(local_sum);
} else {
phi::funcs::BlockReduceSumV2<float, NUM>(local_sum);
}
if (threadIdx.x == 0) {
#pragma unroll
for (int j = 0; j < NUM; j++) {
s_sum[j] = __fdividef(1.0f, local_sum[j] + 1e-6f);
}
}
__syncthreads();
for (int i = 0;
blockDim.x * i + threadIdx.x < (seq_len / 2) && i < ITEMS_PER_THREAD;
i++) {
#pragma unroll
for (int j = 0; j < NUM; j++) {
qk_offset[j] = ((blockIdx.y * head_num + blockIdx.z) * seq_len +
seq_id + j * gridDim.x) *
(seq_len / 2) +
blockDim.x * i + threadIdx.x;
}
#pragma unroll
for (int j = 0; j < NUM; j++) {
qk_buf_half2[qk_offset[j]] =
hmul2<T2>(data[j][i], float2type2<T2>(s_sum[j]));
}
}
}
}
#define SOFTMAX_KERNEL_WITH_MASK(REPEAT_THREAD) \
do { \
block.x /= REPEAT_THREAD; \
grid.x /= 4; \
constexpr int NUM = 4; \
softmax_kernel_with_mask<half, REPEAT_THREAD, NUM> \
<<<grid, block, 0, stream>>>(reinterpret_cast<half *>(qk_buf_), \
(const half *)bias_qk, \
batch_size, \
head_num, \
seq_len); \
} while (0)
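// The macro above shrinks the launch configuration before dispatch: block.x is
// divided by REPEAT_THREAD (each thread then covers up to REPEAT_THREAD packed
// elements per row) and grid.x by NUM (= 4 rows handled per block iteration).
// It assumes an even seq_len, since the buffers are processed as half2 pairs.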
template <typename T>
inline void MatMulWithHeadQK(const phi::GPUContext &context,
int head_num,
int seq_len,
int size_per_head,
int batch_size,
bool q_trans,
bool k_trans,
T *q_buf_,
T *k_buf_,
T *qk_buf_,
const T *bias_qk,
bool bias_is_mask,
T alpha,
T beta) {
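  // Strided batched GEMM of Q and K (transposed per q_trans / k_trans), then a
  // softmax that adds bias_qk (or converts it from a mask); the softmax kernel
  // is chosen by seq_len and by whether the row length is even.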
CBLAS_TRANSPOSE transA = !q_trans ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = !k_trans ? CblasNoTrans : CblasTrans;
typedef typename CUDATypeTraits<T>::TYPE run_type;
auto blas = phi::funcs::GetBlas<phi::GPUContext, run_type>(context);
auto stream = context.stream();
blas.BatchedGEMM(transA,
transB,
seq_len,
seq_len,
size_per_head,
static_cast<run_type>(alpha),
reinterpret_cast<run_type *>(q_buf_),
reinterpret_cast<run_type *>(k_buf_),
static_cast<run_type>(beta),
reinterpret_cast<run_type *>(qk_buf_),
batch_size * head_num,
seq_len * size_per_head,
seq_len * size_per_head);
if (seq_len <= 1024) {
int grid = batch_size * head_num * seq_len;
int block = seq_len;
// Align block to 32, also limit seq_len to max block size.
if (seq_len % 2 == 0) {
block = (seq_len <= 64) ? 32 : ((seq_len + 63) / 64) * 32;
if (std::is_same<T, float>::value) {
SoftmaxKernelWithEltadd2<float2><<<grid, block, 0, stream>>>(
reinterpret_cast<float2 *>(qk_buf_),
reinterpret_cast<const float2 *>(bias_qk),
batch_size,
head_num,
seq_len / 2,
FINAL_MASK);
} else {
if (bias_is_mask) {
#if defined(__HIPCC__) || (defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 700)
PADDLE_ENFORCE_EQ(bias_is_mask,
false,
                          platform::errors::InvalidArgument(
                              "QK bias used as a mask is not supported on "
                              "ROCm or on CUDA arch < 700"));
#else
dim3 grid(seq_len, batch_size, head_num);
dim3 block((seq_len / 2 + 31) / 32 * 32);
SOFTMAX_KERNEL_WITH_MASK(1);
#endif
} else {
SoftmaxKernelWithEltadd2<__half2><<<grid, block, 0, stream>>>(
reinterpret_cast<__half2 *>(qk_buf_),
reinterpret_cast<const __half2 *>(bias_qk),
batch_size,
head_num,
seq_len / 2,
FINAL_MASK);
}
}
} else {
block = (seq_len <= 32) ? 32 : ((seq_len + 31) / 32) * 32;
SoftmaxKernelWithEltadd<T><<<grid, block, 0, stream>>>(
qk_buf_, bias_qk, batch_size, head_num, seq_len, FINAL_MASK);
}
} else {
int grid = batch_size * head_num * seq_len;
int block = 512;
if (seq_len % 2 == 0) {
if (std::is_same<T, float>::value) {
SoftmaxKernelWithEltaddForLarge2<float2><<<grid, block, 0, stream>>>(
reinterpret_cast<float2 *>(qk_buf_),
reinterpret_cast<const float2 *>(bias_qk),
batch_size,
head_num,
seq_len / 2,
FINAL_MASK);
} else {
if (bias_is_mask) {
#if defined(__HIPCC__) || (defined(__CUDA_ARCH__) && __CUDA_ARCH__ < 700)
PADDLE_ENFORCE_EQ(bias_is_mask,
false,
                          platform::errors::InvalidArgument(
                              "QK bias used as a mask is not supported on "
                              "ROCm or on CUDA arch < 700"));
#else
dim3 grid(seq_len, batch_size, head_num);
dim3 block((seq_len / 2 + 31) / 32 * 32);
if (block.x > 0 && block.x <= 1024) {
SOFTMAX_KERNEL_WITH_MASK(1);
} else if (block.x <= 2048) {
SOFTMAX_KERNEL_WITH_MASK(2);
} else if (block.x <= 4096) {
SOFTMAX_KERNEL_WITH_MASK(4);
} else {
PADDLE_THROW(platform::errors::InvalidArgument(
"Cannot support the length of attention > 8192."));
}
#endif
} else {
SoftmaxKernelWithEltaddForLarge2<__half2><<<grid, block, 0, stream>>>(
reinterpret_cast<__half2 *>(qk_buf_),
reinterpret_cast<const __half2 *>(bias_qk),
batch_size,
head_num,
seq_len / 2,
FINAL_MASK);
}
}
} else {
SoftmaxKernelWithEltaddForLarge<T><<<grid, block, 0, stream>>>(
qk_buf_, bias_qk, batch_size, head_num, seq_len, FINAL_MASK);
}
}
}
template <typename T>
inline void MatMulWithHeadQKV(const phi::GPUContext &context,
int head_num,
int seq_len,
int size_per_head,
int batch_size,
bool qk_trans,
bool v_trans,
T *v_buf_,
const T *qk_buf_,
T *dst,
T alpha,
T beta) {
int m = batch_size * seq_len;
int k = head_num * size_per_head;
typedef typename CUDATypeTraits<T>::TYPE run_type;
auto blas = phi::funcs::GetBlas<phi::GPUContext, run_type>(context);
auto stream = context.stream();
CBLAS_TRANSPOSE transA = !qk_trans ? CblasNoTrans : CblasTrans;
CBLAS_TRANSPOSE transB = !v_trans ? CblasNoTrans : CblasTrans;
blas.BatchedGEMM(transA,
transB,
seq_len,
size_per_head,
seq_len,
static_cast<run_type>(alpha),
reinterpret_cast<const run_type *>(qk_buf_),
reinterpret_cast<run_type *>(v_buf_),
static_cast<run_type>(beta),
reinterpret_cast<run_type *>(dst),
batch_size * head_num,
seq_len * seq_len,
seq_len * size_per_head);
}
template <typename T>
void MultiHeadGPUComputeFunctor<T>::operator()(const phi::GPUContext &dev_ctx,
int batch,
int seq_len,
int head_num,
int head_size,
T *qkptr,
const T *bias_qk_ptr,
bool bias_is_mask,
T *tptr,
T alpha,
T beta) {
auto stream = dev_ctx.stream();
const int tsize = batch * head_num * seq_len * head_size;
T *qptr = tptr;
T *kptr = qptr + tsize;
T *vptr = kptr + tsize;
  // Strided batched GEMM (Q x K^T), followed by softmax with scale and bias.
MatMulWithHeadQK<T>(dev_ctx,
head_num,
seq_len,
head_size,
batch,
false,
true,
qptr,
kptr,
qkptr,
bias_qk_ptr,
bias_is_mask,
alpha,
beta);
  // Strided batched GEMM: multiply softmax(QK) by V.
MatMulWithHeadQKV<T>(dev_ctx,
head_num,
seq_len,
head_size,
batch,
false,
false,
vptr,
qkptr,
tptr,
T(1.0),
beta);
}
template class MultiHeadGPUComputeFunctor<float>;
// device function 'operator()' is not supported until CUDA 10.0
// HIP defined __HIP_NO_HALF_CONVERSIONS__ in hip.cmake
#if defined(PADDLE_WITH_CUDA) && CUDA_VERSION >= 10000
template class MultiHeadGPUComputeFunctor<half>;
#endif
template <typename T, unsigned TPB>
__global__ void SkipLayerNormSmallKernel(int num,
int hidden,
const T *input1,
const T *input2,
T *output,
const T *scale,
const T *bias,
T eps) {
const T rld = T(1) / T(hidden);
const int offset = blockIdx.x * hidden;
cub::Sum pair_sum;
phi::funcs::kvp<T> thread_data(0, 0);
const int idx = offset + threadIdx.x;
T val = 0;
if (threadIdx.x < hidden) {
val = input1[idx] + input2[idx];
const T rldval = rld * val;
thread_data =
pair_sum(thread_data, phi::funcs::kvp<T>(rldval, rldval * val));
}
LayerNormSmall<T, TPB>(
val, thread_data, hidden, idx, bias, scale, output, eps);
}
// HIP defined __HIP_NO_HALF_CONVERSIONS__ in hip.cmake
#ifndef __HIPCC__ // @{ Half kernel: SkipLayerNormSmallKernel
template <>
__global__ void SkipLayerNormSmallKernel<half, 32>(int num,
int hidden,
const half *input1,
const half *input2,
half *output,
const half *scale,
const half *bias,
half eps) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
const half rld = half(1) / half(hidden);
const int offset = blockIdx.x * hidden;
cub::Sum pair_sum;
phi::funcs::kvp<half> thread_data(0, 0);
const int idx = offset + threadIdx.x;
half val = 0;
if (threadIdx.x < hidden) {
val = input1[idx] + input2[idx];
const half rldval = rld * val;
thread_data =
pair_sum(thread_data, phi::funcs::kvp<half>(rldval, rldval * val));
}
LayerNormSmall<half, 32>(
val, thread_data, hidden, idx, bias, scale, output, eps);
#endif
}
template <>
__global__ void SkipLayerNormSmallKernel<half, 128>(int num,
int hidden,
const half *input1,
const half *input2,
half *output,
const half *scale,
const half *bias,
half eps) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
const half rld = half(1) / half(hidden);
const int offset = blockIdx.x * hidden;
cub::Sum pair_sum;
phi::funcs::kvp<half> thread_data(0, 0);
const int idx = offset + threadIdx.x;
half val = 0;
if (threadIdx.x < hidden) {
val = input1[idx] + input2[idx];
const half rldval = rld * val;
thread_data =
pair_sum(thread_data, phi::funcs::kvp<half>(rldval, rldval * val));
}
LayerNormSmall<half, 128>(
val, thread_data, hidden, idx, bias, scale, output, eps);
#endif
}
template <>
__global__ void SkipLayerNormSmallKernel<half, 384>(int num,
int hidden,
const half *input1,
const half *input2,
half *output,
const half *scale,
const half *bias,
half eps) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
const half rld = half(1) / half(hidden);
const int offset = blockIdx.x * hidden;
cub::Sum pair_sum;
phi::funcs::kvp<half> thread_data(0, 0);
const int idx = offset + threadIdx.x;
half val = 0;
if (threadIdx.x < hidden) {
val = input1[idx] + input2[idx];
const half rldval = rld * val;
thread_data =
pair_sum(thread_data, phi::funcs::kvp<half>(rldval, rldval * val));
}
LayerNormSmall<half, 384>(
val, thread_data, hidden, idx, bias, scale, output, eps);
#endif
}
#endif // @} End Half kernel: SkipLayerNormSmallKernel
template <typename T, unsigned TPB>
__global__ void SkipLayerNormKernel(int num,
int hidden,
const T *input1,
const T *input2,
T *output,
const T *scale,
const T *bias,
T eps) {
const T rld = T(1) / T(hidden);
const int offset = blockIdx.x * hidden;
cub::Sum pair_sum;
phi::funcs::kvp<T> thread_data(0, 0);
for (int it = threadIdx.x; it < hidden; it += TPB) {
const int idx = offset + it;
const T val = input1[idx] + input2[idx];
const T rldval = rld * val;
thread_data =
pair_sum(thread_data, phi::funcs::kvp<T>(rldval, rldval * val));
output[idx] = val;
}
LayerNorm<T, TPB>(thread_data, hidden, offset, bias, scale, output, eps);
}
// HIP defined __HIP_NO_HALF_CONVERSIONS__ in hip.cmake
#ifndef __HIPCC__ // @{ Half kernel: SkipLayerNormKernel
template <>
__global__ void SkipLayerNormKernel<half, 256>(int num,
int hidden,
const half *input1,
const half *input2,
half *output,
const half *scale,
const half *bias,
half eps) {
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__)
const half rld = half(1) / half(hidden);
const int offset = blockIdx.x * hidden;
cub::Sum pair_sum;
phi::funcs::kvp<half> thread_data(0, 0);
for (int it = threadIdx.x; it < hidden; it += 256) {
const int idx = offset + it;
const half val = input1[idx] + input2[idx];
const half rldval = rld * val;
thread_data =
pair_sum(thread_data, phi::funcs::kvp<half>(rldval, rldval * val));
output[idx] = val;
}
LayerNorm<half, 256>(thread_data, hidden, offset, bias, scale, output, eps);
#endif
}
#endif // @} End Half kernel: SkipLayerNormKernel
template <typename T, typename T2, unsigned TPB>
__global__ void SkipLayerNormKernel2(int num,
int hidden,
const T2 *input1,
const T2 *input2,
T2 *output,
const T2 *scale,
const T2 *bias,
float eps) {
const T rld = T(0.5f / hidden); // because hidden is hidden/2
const int offset = blockIdx.x * hidden;
cub::Sum pair_sum;
phi::funcs::kvp<T> thread_data(0, 0);
for (int it = threadIdx.x; it < hidden; it += TPB) {
const int idx = offset + it;
const T2 val2 = input1[idx] + input2[idx];
thread_data = pair_sum(
thread_data,
phi::funcs::kvp<T>(rld * (val2.x + val2.y),
rld * val2.x * val2.x + rld * val2.y * val2.y));
output[idx] = val2;
}
LayerNorm2<T, T2, TPB>(thread_data, hidden, offset, bias, scale, output, eps);
}
// HIP defined __HIP_NO_HALF_CONVERSIONS__ in hip.cmake
#ifndef __HIPCC__ // @{ Half kernel: SkipLayerNormKernel2
template <>
__global__ void SkipLayerNormKernel2<half, half2, 256>(int num,
int hidden,
const half2 *input1,
const half2 *input2,
half2 *output,
const half2 *scale,
const half2 *bias,
float eps) {
  // operator "+" of half is only supported after CUDA version 10.0
#if CUDA_ARCH_FP16_SUPPORTED(__CUDA_ARCH__) && CUDA_VERSION >= 10000
const half rld = half(0.5f / hidden); // because hidden is hidden/2
const int offset = blockIdx.x * hidden;
cub::Sum pair_sum;
phi::funcs::kvp<half> thread_data(0, 0);
for (int it = threadIdx.x; it < hidden; it += 256) {
const int idx = offset + it;
const half2 val2 = input1[idx] + input2[idx];
thread_data = pair_sum(
thread_data,
phi::funcs::kvp<half>(rld * (val2.x + val2.y),
rld * val2.x * val2.x + rld * val2.y * val2.y));
output[idx] = val2;
}
LayerNorm2<half, half2, 256>(
thread_data, hidden, offset, bias, scale, output, eps);
#endif
}
#endif // @} End Half kernel: SkipLayerNormKernel2
template <typename T>
void SkipLayerNormFunctor<T>::operator()(const int num,
const int hidden,
const T *input1,
const T *input2,
const T *scale,
const T *bias,
T *output,
float eps,
gpuStream_t stream) {
int block = num / hidden;
if (hidden <= 32) {
const int threads = 32;
SkipLayerNormSmallKernel<T, threads><<<block, threads, 0, stream>>>(
num, hidden, input1, input2, output, scale, bias, eps);
} else if (hidden <= 128) {
const int threads = 128;
SkipLayerNormSmallKernel<T, threads><<<block, threads, 0, stream>>>(
num, hidden, input1, input2, output, scale, bias, eps);
} else if (hidden == 384) {
const int threads = 384;
SkipLayerNormSmallKernel<T, threads><<<block, threads, 0, stream>>>(
num, hidden, input1, input2, output, scale, bias, eps);
} else {
const int threads = 256;
if (hidden % 2 == 0) {
if (std::is_same<T, float>::value) {
SkipLayerNormKernel2<float, float2, threads>
<<<block, threads, 0, stream>>>(
num,
hidden / 2,
reinterpret_cast<const float2 *>(input1),
reinterpret_cast<const float2 *>(input2),
reinterpret_cast<float2 *>(output),
reinterpret_cast<const float2 *>(scale),
reinterpret_cast<const float2 *>(bias),
eps);
// HIP defined __HIP_NO_HALF_CONVERSIONS__ in hip.cmake
#ifndef __HIPCC__
} else if (std::is_same<T, __half>::value) {
SkipLayerNormKernel2<__half, __half2, threads>
<<<block, threads, 0, stream>>>(
num,
hidden / 2,
reinterpret_cast<const __half2 *>(input1),
reinterpret_cast<const __half2 *>(input2),
reinterpret_cast<__half2 *>(output),
reinterpret_cast<const __half2 *>(scale),
reinterpret_cast<const __half2 *>(bias),
eps);
#endif
} else {
assert(false);
// should not be here
}
} else {
SkipLayerNormKernel<T, threads><<<block, threads, 0, stream>>>(
num, hidden, input1, input2, output, scale, bias, eps);
}
}
}
template class SkipLayerNormFunctor<float>;
// device function 'operator()' is not supported until CUDA 10.0
// HIP defined __HIP_NO_HALF_CONVERSIONS__ in hip.cmake
#if defined(PADDLE_WITH_CUDA) && CUDA_VERSION >= 10000
template class SkipLayerNormFunctor<half>;
#endif
} // namespace math
} // namespace operators
} // namespace paddle
|
b959811af616e7f1447fa05e254abf9697966cb9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int xdim0_write_kernel;
int xdim0_write_kernel_h = -1;
__constant__ int ydim0_write_kernel;
int ydim0_write_kernel_h = -1;
__constant__ int xdim1_write_kernel;
int xdim1_write_kernel_h = -1;
__constant__ int ydim1_write_kernel;
int ydim1_write_kernel_h = -1;
__constant__ int xdim2_write_kernel;
int xdim2_write_kernel_h = -1;
__constant__ int ydim2_write_kernel;
int ydim2_write_kernel_h = -1;
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC_MD0
#define OPS_ACC1(x, y, z) \
(x + xdim1_write_kernel * (y) + xdim1_write_kernel * ydim1_write_kernel * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_write_kernel * (y) + xdim2_write_kernel * ydim2_write_kernel * (z))
#define OPS_ACC_MD0(d, x, y, z) \
((x)*2 + (d) + (xdim0_write_kernel * (y)*2) + \
(xdim0_write_kernel * ydim0_write_kernel * (z)*2))
// user function
__device__
void
write_kernel_gpu(double *mult, double *single, int *digit, const int *idx) {
mult[OPS_ACC_MD0(0, 0, 0, 0)] = 1;
mult[OPS_ACC_MD0(1, 0, 0, 0)] = 2;
single[OPS_ACC1(0, 0, 0)] = 3;
digit[OPS_ACC2(0, 0, 0)] = idx[0] * 100 + idx[1] * 10 + idx[2];
}
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC_MD0
__global__ void ops_write_kernel(double *__restrict arg0,
double *__restrict arg1, int *__restrict arg2,
int arg_idx0, int arg_idx1, int arg_idx2,
int size0, int size1, int size2) {
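  // One thread per (x, y, z) grid point: arg_idx carries the global index of
  // the point, and the data pointers below are offset to that point.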
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
int arg_idx[3];
arg_idx[0] = arg_idx0 + idx_x;
arg_idx[1] = arg_idx1 + idx_y;
arg_idx[2] = arg_idx2 + idx_z;
arg0 += idx_x * 1 * 2 + idx_y * 1 * 2 * xdim0_write_kernel +
idx_z * 1 * 2 * xdim0_write_kernel * ydim0_write_kernel;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_write_kernel +
idx_z * 1 * 1 * xdim1_write_kernel * ydim1_write_kernel;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_write_kernel +
idx_z * 1 * 1 * xdim2_write_kernel * ydim2_write_kernel;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
write_kernel_gpu(arg0, arg1, arg2, arg_idx);
}
}
// host stub function
void ops_par_loop_write_kernel(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1,
ops_arg arg2, ops_arg arg3) {
// Timing
double t1, t2, c1, c2;
ops_arg args[4] = {arg0, arg1, arg2, arg3};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 4, range, 0))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(0, "write_kernel");
OPS_kernels[0].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int arg_idx[3];
#ifdef OPS_MPI
arg_idx[0] = sb->decomp_disp[0] + start[0];
arg_idx[1] = sb->decomp_disp[1] + start[1];
arg_idx[2] = sb->decomp_disp[2] + start[2];
#else
arg_idx[0] = start[0];
arg_idx[1] = start[1];
arg_idx[2] = start[2];
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
if (xdim0 != xdim0_write_kernel_h || ydim0 != ydim0_write_kernel_h ||
xdim1 != xdim1_write_kernel_h || ydim1 != ydim1_write_kernel_h ||
xdim2 != xdim2_write_kernel_h || ydim2 != ydim2_write_kernel_h) {
hipMemcpyToSymbol(xdim0_write_kernel, &xdim0, sizeof(int));
xdim0_write_kernel_h = xdim0;
hipMemcpyToSymbol(ydim0_write_kernel, &ydim0, sizeof(int));
ydim0_write_kernel_h = ydim0;
hipMemcpyToSymbol(xdim1_write_kernel, &xdim1, sizeof(int));
xdim1_write_kernel_h = xdim1;
hipMemcpyToSymbol(ydim1_write_kernel, &ydim1, sizeof(int));
ydim1_write_kernel_h = ydim1;
hipMemcpyToSymbol(xdim2_write_kernel, &xdim2, sizeof(int));
xdim2_write_kernel_h = xdim2;
hipMemcpyToSymbol(ydim2_write_kernel, &ydim2, sizeof(int));
ydim2_write_kernel_h = ydim2;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
char *p_a[4];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] -
args[2].dat->base[1] - d_m[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] -
d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args, 4, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[0].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
hipLaunchKernelGGL(( ops_write_kernel), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1],
(int *)p_a[2], arg_idx[0], arg_idx[1],
arg_idx[2], x_size, y_size, z_size);
if (OPS_diags > 1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[0].time += t1 - t2;
}
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
ops_set_halo_dirtybit3(&args[2], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[0].mpi_time += t2 - t1;
OPS_kernels[0].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[0].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[0].transfer += ops_compute_transfer(dim, start, end, &arg2);
}
}
|
b959811af616e7f1447fa05e254abf9697966cb9.cu
|
//
// auto-generated by ops.py
//
__constant__ int xdim0_write_kernel;
int xdim0_write_kernel_h = -1;
__constant__ int ydim0_write_kernel;
int ydim0_write_kernel_h = -1;
__constant__ int xdim1_write_kernel;
int xdim1_write_kernel_h = -1;
__constant__ int ydim1_write_kernel;
int ydim1_write_kernel_h = -1;
__constant__ int xdim2_write_kernel;
int xdim2_write_kernel_h = -1;
__constant__ int ydim2_write_kernel;
int ydim2_write_kernel_h = -1;
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC_MD0
#define OPS_ACC1(x, y, z) \
(x + xdim1_write_kernel * (y) + xdim1_write_kernel * ydim1_write_kernel * (z))
#define OPS_ACC2(x, y, z) \
(x + xdim2_write_kernel * (y) + xdim2_write_kernel * ydim2_write_kernel * (z))
#define OPS_ACC_MD0(d, x, y, z) \
((x)*2 + (d) + (xdim0_write_kernel * (y)*2) + \
(xdim0_write_kernel * ydim0_write_kernel * (z)*2))
// user function
__device__
void
write_kernel_gpu(double *mult, double *single, int *digit, const int *idx) {
mult[OPS_ACC_MD0(0, 0, 0, 0)] = 1;
mult[OPS_ACC_MD0(1, 0, 0, 0)] = 2;
single[OPS_ACC1(0, 0, 0)] = 3;
digit[OPS_ACC2(0, 0, 0)] = idx[0] * 100 + idx[1] * 10 + idx[2];
}
#undef OPS_ACC1
#undef OPS_ACC2
#undef OPS_ACC_MD0
__global__ void ops_write_kernel(double *__restrict arg0,
double *__restrict arg1, int *__restrict arg2,
int arg_idx0, int arg_idx1, int arg_idx2,
int size0, int size1, int size2) {
int idx_z = blockDim.z * blockIdx.z + threadIdx.z;
int idx_y = blockDim.y * blockIdx.y + threadIdx.y;
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
int arg_idx[3];
arg_idx[0] = arg_idx0 + idx_x;
arg_idx[1] = arg_idx1 + idx_y;
arg_idx[2] = arg_idx2 + idx_z;
arg0 += idx_x * 1 * 2 + idx_y * 1 * 2 * xdim0_write_kernel +
idx_z * 1 * 2 * xdim0_write_kernel * ydim0_write_kernel;
arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_write_kernel +
idx_z * 1 * 1 * xdim1_write_kernel * ydim1_write_kernel;
arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_write_kernel +
idx_z * 1 * 1 * xdim2_write_kernel * ydim2_write_kernel;
if (idx_x < size0 && idx_y < size1 && idx_z < size2) {
write_kernel_gpu(arg0, arg1, arg2, arg_idx);
}
}
// host stub function
void ops_par_loop_write_kernel(char const *name, ops_block block, int dim,
int *range, ops_arg arg0, ops_arg arg1,
ops_arg arg2, ops_arg arg3) {
// Timing
double t1, t2, c1, c2;
ops_arg args[4] = {arg0, arg1, arg2, arg3};
#ifdef CHECKPOINTING
if (!ops_checkpointing_before(args, 4, range, 0))
return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(0, "write_kernel");
OPS_kernels[0].count++;
ops_timers_core(&c1, &t1);
}
// compute locally allocated range for the sub-block
int start[3];
int end[3];
#ifdef OPS_MPI
sub_block_list sb = OPS_sub_block_list[block->index];
if (!sb->owned)
return;
for (int n = 0; n < 3; n++) {
start[n] = sb->decomp_disp[n];
end[n] = sb->decomp_disp[n] + sb->decomp_size[n];
if (start[n] >= range[2 * n]) {
start[n] = 0;
} else {
start[n] = range[2 * n] - start[n];
}
if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0)
start[n] = range[2 * n];
if (end[n] >= range[2 * n + 1]) {
end[n] = range[2 * n + 1] - sb->decomp_disp[n];
} else {
end[n] = sb->decomp_size[n];
}
if (sb->id_p[n] == MPI_PROC_NULL &&
(range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n]))
end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]);
}
#else
for (int n = 0; n < 3; n++) {
start[n] = range[2 * n];
end[n] = range[2 * n + 1];
}
#endif
int x_size = MAX(0, end[0] - start[0]);
int y_size = MAX(0, end[1] - start[1]);
int z_size = MAX(0, end[2] - start[2]);
int arg_idx[3];
#ifdef OPS_MPI
arg_idx[0] = sb->decomp_disp[0] + start[0];
arg_idx[1] = sb->decomp_disp[1] + start[1];
arg_idx[2] = sb->decomp_disp[2] + start[2];
#else
arg_idx[0] = start[0];
arg_idx[1] = start[1];
arg_idx[2] = start[2];
#endif
int xdim0 = args[0].dat->size[0];
int ydim0 = args[0].dat->size[1];
int xdim1 = args[1].dat->size[0];
int ydim1 = args[1].dat->size[1];
int xdim2 = args[2].dat->size[0];
int ydim2 = args[2].dat->size[1];
if (xdim0 != xdim0_write_kernel_h || ydim0 != ydim0_write_kernel_h ||
xdim1 != xdim1_write_kernel_h || ydim1 != ydim1_write_kernel_h ||
xdim2 != xdim2_write_kernel_h || ydim2 != ydim2_write_kernel_h) {
cudaMemcpyToSymbol(xdim0_write_kernel, &xdim0, sizeof(int));
xdim0_write_kernel_h = xdim0;
cudaMemcpyToSymbol(ydim0_write_kernel, &ydim0, sizeof(int));
ydim0_write_kernel_h = ydim0;
cudaMemcpyToSymbol(xdim1_write_kernel, &xdim1, sizeof(int));
xdim1_write_kernel_h = xdim1;
cudaMemcpyToSymbol(ydim1_write_kernel, &ydim1, sizeof(int));
ydim1_write_kernel_h = ydim1;
cudaMemcpyToSymbol(xdim2_write_kernel, &xdim2, sizeof(int));
xdim2_write_kernel_h = xdim2;
cudaMemcpyToSymbol(ydim2_write_kernel, &ydim2, sizeof(int));
ydim2_write_kernel_h = ydim2;
}
dim3 grid((x_size - 1) / OPS_block_size_x + 1,
(y_size - 1) / OPS_block_size_y + 1, z_size);
dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1);
int dat0 = args[0].dat->elem_size;
int dat1 = args[1].dat->elem_size;
int dat2 = args[2].dat->elem_size;
char *p_a[4];
// set up initial pointers
int d_m[OPS_MAX_DIM];
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[0].dat->d_m[d];
#endif
int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] -
args[0].dat->base[0] - d_m[0]);
base0 = base0 +
dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] -
args[0].dat->base[1] - d_m[1]);
base0 = base0 +
dat0 * args[0].dat->size[0] * args[0].dat->size[1] *
(start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] -
d_m[2]);
p_a[0] = (char *)args[0].data_d + base0;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[1].dat->d_m[d];
#endif
int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] -
args[1].dat->base[0] - d_m[0]);
base1 = base1 +
dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] -
args[1].dat->base[1] - d_m[1]);
base1 = base1 +
dat1 * args[1].dat->size[0] * args[1].dat->size[1] *
(start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] -
d_m[2]);
p_a[1] = (char *)args[1].data_d + base1;
#ifdef OPS_MPI
for (int d = 0; d < dim; d++)
d_m[d] =
args[2].dat->d_m[d] + OPS_sub_dat_list[args[2].dat->index]->d_im[d];
#else
for (int d = 0; d < dim; d++)
d_m[d] = args[2].dat->d_m[d];
#endif
int base2 = dat2 * 1 * (start[0] * args[2].stencil->stride[0] -
args[2].dat->base[0] - d_m[0]);
base2 = base2 +
dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1] -
args[2].dat->base[1] - d_m[1]);
base2 = base2 +
dat2 * args[2].dat->size[0] * args[2].dat->size[1] *
(start[2] * args[2].stencil->stride[2] - args[2].dat->base[2] -
d_m[2]);
p_a[2] = (char *)args[2].data_d + base2;
ops_H_D_exchanges_device(args, 4);
ops_halo_exchanges(args, 4, range);
if (OPS_diags > 1) {
ops_timers_core(&c2, &t2);
OPS_kernels[0].mpi_time += t2 - t1;
}
// call kernel wrapper function, passing in pointers to data
ops_write_kernel<<<grid, tblock>>>((double *)p_a[0], (double *)p_a[1],
(int *)p_a[2], arg_idx[0], arg_idx[1],
arg_idx[2], x_size, y_size, z_size);
if (OPS_diags > 1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1, &t1);
OPS_kernels[0].time += t1 - t2;
}
ops_set_dirtybit_device(args, 4);
ops_set_halo_dirtybit3(&args[0], range);
ops_set_halo_dirtybit3(&args[1], range);
ops_set_halo_dirtybit3(&args[2], range);
if (OPS_diags > 1) {
// Update kernel record
ops_timers_core(&c2, &t2);
OPS_kernels[0].mpi_time += t2 - t1;
OPS_kernels[0].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[0].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[0].transfer += ops_compute_transfer(dim, start, end, &arg2);
}
}
|
a9ee8adcb63ee926d7efa13503d2c0e1628481c2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@precisions normal z -> s d c
@author Ichitaro Yamazaki
*/
#include "common_magma.h"
#define NB 64
#define A(i,j) (A[(i) + (j)*lda])
#define W(i,j) (W[(i) + (j)*ldw])
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
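// Note: these kernels appear to apply the inverse of a 2-by-2 diagonal pivot
// block taken from W (as in Bunch-Kaufman style LDL^H updates) to m rows of W,
// writing the two scaled columns into A.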
__global__ void
zlascl_2x2_full(int m, const magmaDoubleComplex* W, int ldw, magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
magmaDoubleComplex D21 = W( 1, 0 );
magmaDoubleComplex D11 = MAGMA_Z_DIV( W( 1, 1 ), D21 );
magmaDoubleComplex D22 = MAGMA_Z_DIV( W( 0, 0 ), MAGMA_Z_CNJG( D21 ) );
double T = 1.0 / ( MAGMA_Z_REAL( D11*D22 ) - 1.0 );
D21 = MAGMA_Z_DIV( MAGMA_Z_MAKE(T,0.0), D21 );
if (ind < m) {
A( ind, 0 ) = MAGMA_Z_CNJG( D21 )*( D11*W( 2+ind, 0 )-W( 2+ind, 1 ) );
A( ind, 1 ) = D21*( D22*W( 2+ind, 1 )-W( 2+ind, 0 ) );
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
zlascl_2x2_lower(int m, const magmaDoubleComplex* W, int ldw, magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
magmaDoubleComplex D21 = W( 1, 0 );
magmaDoubleComplex D11 = MAGMA_Z_DIV( W( 1, 1 ), D21 );
magmaDoubleComplex D22 = MAGMA_Z_DIV( W( 0, 0 ), MAGMA_Z_CNJG( D21 ) );
double T = 1.0 / ( MAGMA_Z_REAL( D11*D22 ) - 1.0 );
D21 = MAGMA_Z_DIV( MAGMA_Z_MAKE(T,0.0), D21 );
if (ind < m) {
A( ind, 0 ) = MAGMA_Z_CNJG( D21 )*( D11*W( 2+ind, 0 )-W( 2+ind, 1 ) );
A( ind, 1 ) = D21*( D22*W( 2+ind, 1 )-W( 2+ind, 0 ) );
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
zlascl_2x2_upper(int m, const magmaDoubleComplex *W, int ldw, magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
magmaDoubleComplex D21 = W( m, 1 );
magmaDoubleComplex D11 = MAGMA_Z_DIV( W( m+1, 1 ), MAGMA_Z_CNJG( D21 ) );
magmaDoubleComplex D22 = MAGMA_Z_DIV( W( m, 0 ), D21 );
double T = 1.0 / ( MAGMA_Z_REAL( D11*D22 ) - 1.0 );
D21 = MAGMA_Z_DIV( MAGMA_Z_MAKE(T,0.0), D21 );
if (ind < m) {
A( ind, 0 ) = D21*( D11*W( ind, 0 )-W( ind, 1 ) );
A( ind, 1 ) = MAGMA_Z_CNJG( D21 )*( D22*W( ind, 1 )-W( ind, 0 ) );
}
}
/**
Purpose
-------
    ZLASCL_2x2 multiplies the M by 2 block of rows stored in dW by the inverse
    of the 2-by-2 pivot block (also stored in dW) and writes the result to dA.
    TYPE specifies whether dW uses the lower or the upper layout, i.e. where
    the pivot block and the rows to be scaled are located.
    Arguments
    ---------
    \param[in]
    type    magma_type_t
            TYPE indicates the storage layout of dW.
            = MagmaLower:  lower layout: pivot block in rows 0:1, rows to scale in rows 2:m+1.
            = MagmaUpper:  upper layout: pivot block in rows m:m+1, rows to scale in rows 0:m-1.
            = MagmaFull:   currently handled the same as MagmaUpper.
            Other formats that LAPACK supports, MAGMA does not currently support.
    \param[in]
    m       INTEGER
            The number of rows of the matrix dA.  M >= 0.
    \param[in]
    dW      COMPLEX*16 array, dimension (LDDW,2)
            Holds the 2-by-2 pivot block and the rows to be scaled.
    \param[in]
    lddw    INTEGER
            The leading dimension of the array dW.
    \param[in,out]
    dA      COMPLEX*16 array, dimension (LDDA,2)
            On exit, the scaled rows.
\param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
\param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlascl_2x2_q(
magma_type_t type, magma_int_t m,
const magmaDoubleComplex *dW, magma_int_t lddw,
magmaDoubleComplex *dA, magma_int_t ldda,
magma_int_t *info, magma_queue_t queue )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
*info = -1;
else if ( m < 0 )
*info = -2;
else if ( ldda < max(1,m) )
*info = -4;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 grid( (m + NB - 1)/NB );
dim3 threads( NB );
if (type == MagmaLower) {
hipLaunchKernelGGL(( zlascl_2x2_lower) , dim3(grid), dim3(threads), 0, queue , m, dW, lddw, dA, ldda);
}
else {
hipLaunchKernelGGL(( zlascl_2x2_upper) , dim3(grid), dim3(threads), 0, queue , m, dW, lddw, dA, ldda);
}
}
/**
    @see magmablas_zlascl_2x2_q
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlascl_2x2(
magma_type_t type, magma_int_t m,
magmaDoubleComplex *dW, magma_int_t lddw,
magmaDoubleComplex *dA, magma_int_t ldda,
magma_int_t *info )
{
magmablas_zlascl_2x2_q( type, m, dW, lddw, dA, ldda, info, magma_stream );
}
|
a9ee8adcb63ee926d7efa13503d2c0e1628481c2.cu
|
/*
-- MAGMA (version 1.6.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date November 2014
@precisions normal z -> s d c
@author Ichitaro Yamazaki
*/
#include "common_magma.h"
#define NB 64
#define A(i,j) (A[(i) + (j)*lda])
#define W(i,j) (W[(i) + (j)*ldw])
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right.
__global__ void
zlascl_2x2_full(int m, const magmaDoubleComplex* W, int ldw, magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
magmaDoubleComplex D21 = W( 1, 0 );
magmaDoubleComplex D11 = MAGMA_Z_DIV( W( 1, 1 ), D21 );
magmaDoubleComplex D22 = MAGMA_Z_DIV( W( 0, 0 ), MAGMA_Z_CNJG( D21 ) );
double T = 1.0 / ( MAGMA_Z_REAL( D11*D22 ) - 1.0 );
D21 = MAGMA_Z_DIV( MAGMA_Z_MAKE(T,0.0), D21 );
if (ind < m) {
A( ind, 0 ) = MAGMA_Z_CNJG( D21 )*( D11*W( 2+ind, 0 )-W( 2+ind, 1 ) );
A( ind, 1 ) = D21*( D22*W( 2+ind, 1 )-W( 2+ind, 0 ) );
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from left edge and moving right to diagonal.
__global__ void
zlascl_2x2_lower(int m, const magmaDoubleComplex* W, int ldw, magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
magmaDoubleComplex D21 = W( 1, 0 );
magmaDoubleComplex D11 = MAGMA_Z_DIV( W( 1, 1 ), D21 );
magmaDoubleComplex D22 = MAGMA_Z_DIV( W( 0, 0 ), MAGMA_Z_CNJG( D21 ) );
double T = 1.0 / ( MAGMA_Z_REAL( D11*D22 ) - 1.0 );
D21 = MAGMA_Z_DIV( MAGMA_Z_MAKE(T,0.0), D21 );
if (ind < m) {
A( ind, 0 ) = MAGMA_Z_CNJG( D21 )*( D11*W( 2+ind, 0 )-W( 2+ind, 1 ) );
A( ind, 1 ) = D21*( D22*W( 2+ind, 1 )-W( 2+ind, 0 ) );
}
}
// each thread block does one NB x n block row of A.
// each thread does one row, starting from right edge and moving left to diagonal.
__global__ void
zlascl_2x2_upper(int m, const magmaDoubleComplex *W, int ldw, magmaDoubleComplex* A, int lda)
{
int ind = blockIdx.x * NB + threadIdx.x;
magmaDoubleComplex D21 = W( m, 1 );
magmaDoubleComplex D11 = MAGMA_Z_DIV( W( m+1, 1 ), MAGMA_Z_CNJG( D21 ) );
magmaDoubleComplex D22 = MAGMA_Z_DIV( W( m, 0 ), D21 );
double T = 1.0 / ( MAGMA_Z_REAL( D11*D22 ) - 1.0 );
D21 = MAGMA_Z_DIV( MAGMA_Z_MAKE(T,0.0), D21 );
if (ind < m) {
A( ind, 0 ) = D21*( D11*W( ind, 0 )-W( ind, 1 ) );
A( ind, 1 ) = MAGMA_Z_CNJG( D21 )*( D22*W( ind, 1 )-W( ind, 0 ) );
}
}
/**
Purpose
-------
    ZLASCL_2x2 multiplies the M by 2 block of rows stored in dW by the inverse
    of the 2-by-2 pivot block (also stored in dW) and writes the result to dA.
    TYPE specifies whether dW uses the lower or the upper layout, i.e. where
    the pivot block and the rows to be scaled are located.
    Arguments
    ---------
    \param[in]
    type    magma_type_t
            TYPE indicates the storage layout of dW.
            = MagmaLower:  lower layout: pivot block in rows 0:1, rows to scale in rows 2:m+1.
            = MagmaUpper:  upper layout: pivot block in rows m:m+1, rows to scale in rows 0:m-1.
            = MagmaFull:   currently handled the same as MagmaUpper.
            Other formats that LAPACK supports, MAGMA does not currently support.
    \param[in]
    m       INTEGER
            The number of rows of the matrix dA.  M >= 0.
    \param[in]
    dW      COMPLEX*16 array, dimension (LDDW,2)
            Holds the 2-by-2 pivot block and the rows to be scaled.
    \param[in]
    lddw    INTEGER
            The leading dimension of the array dW.
    \param[in,out]
    dA      COMPLEX*16 array, dimension (LDDA,2)
            On exit, the scaled rows.
\param[in]
ldda INTEGER
The leading dimension of the array A. LDDA >= max(1,M).
\param[out]
info INTEGER
- = 0: successful exit
- < 0: if INFO = -i, the i-th argument had an illegal value.
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlascl_2x2_q(
magma_type_t type, magma_int_t m,
const magmaDoubleComplex *dW, magma_int_t lddw,
magmaDoubleComplex *dA, magma_int_t ldda,
magma_int_t *info, magma_queue_t queue )
{
*info = 0;
if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull )
*info = -1;
else if ( m < 0 )
*info = -2;
else if ( ldda < max(1,m) )
*info = -4;
if (*info != 0) {
magma_xerbla( __func__, -(*info) );
return; //info;
}
dim3 grid( (m + NB - 1)/NB );
dim3 threads( NB );
if (type == MagmaLower) {
zlascl_2x2_lower <<< grid, threads, 0, queue >>> (m, dW, lddw, dA, ldda);
}
else {
zlascl_2x2_upper <<< grid, threads, 0, queue >>> (m, dW, lddw, dA, ldda);
}
}
/**
    @see magmablas_zlascl_2x2_q
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlascl_2x2(
magma_type_t type, magma_int_t m,
magmaDoubleComplex *dW, magma_int_t lddw,
magmaDoubleComplex *dA, magma_int_t ldda,
magma_int_t *info )
{
magmablas_zlascl_2x2_q( type, m, dW, lddw, dA, ldda, info, magma_stream );
}
|
e30a27210950a983c8f002bbd43897d58afd6207.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "softmax_activationnloss.h"
using namespace global;
namespace layer {
SoftmaxAnL::SoftmaxAnL(Layer* _prev, float* _label, int _class_num, int _batch) : Layer()
{
prev = _prev;
prev->next = this;
label = _label; // gpu data
class_num = _class_num;
batch = _batch;
	data_size = batch; // output size
param_size = 0;
param_bias_size = 0;
callCudnn(cudnnCreateTensorDescriptor(&t_data));
callCudnn(cudnnSetTensor4dDescriptor(
t_data,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
batch,
class_num,
1,
1));
	callCuda(hipMalloc(&tmp_data, sizeof(float) * prev->data_size)); // stores the softmax output probabilities a
callCuda(hipMalloc(&data, sizeof(float) * 1)); // Loss
callCuda(hipMalloc(&diff, sizeof(float) * prev->data_size)); // diff
	callCuda(hipMalloc(&predict_label, sizeof(float) * data_size)); // used to store the predicted labels
}
SoftmaxAnL::~SoftmaxAnL()
{
callCudnn(cudnnDestroyTensorDescriptor(t_data));
callCuda(hipFree(tmp_data));
callCuda(hipFree(data));
callCuda(hipFree(diff));
callCuda(hipFree(predict_label));
}
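// One thread per batch element: take the argmax of the softmax output as the
// predicted label and atomically accumulate -log(a[true label]) into the scalar loss.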
__global__ void corssEntropyLoss(float *softmax_output_a, float *label, int class_num, int batch, float *predict_label, float *loss)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < batch)
{
int label_value = 0;
float max = -1;
for (int i = 0; i < class_num; i++) {
if (softmax_output_a[idx * class_num + i] > max) {
max = softmax_output_a[idx * class_num + i];
label_value = i;
}
}
predict_label[idx] = (float)label_value;
atomicAdd(loss, -log(softmax_output_a[idx * class_num + (int)label[idx]]));
}
}
// Derivative of the cross-entropy loss with respect to the (un-normalized) softmax input: diff = f(z_l) - 1, so first copy the softmax output probabilities (a) into diff, then subtract 1 at the true-label position.
__global__ void softmaxDiff(const float *label, int class_num, int batch, float *diff)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < batch)
{
const int label_value = static_cast<int>(label[idx]);
			diff[idx * class_num + label_value] -= 1.0f; // derivative with respect to z_i
}
}
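// Worked example (one batch element, class_num = 3, true label c = 1):
//   softmax output a = (0.2, 0.7, 0.1); diff starts as a copy of a,
//   then diff[c] -= 1  =>  diff = (0.2, -0.3, 0.1),
// i.e. dL/dz_j = a_j - [j == c] for the cross-entropy loss on softmax inputs.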
void SoftmaxAnL::forward(bool train)
{
float a = 1;
float b = 0;
callCudnn(cudnnSoftmaxForward(
cudnnHandle,
CUDNN_SOFTMAX_FAST,
CUDNN_SOFTMAX_MODE_CHANNEL,
&a,
t_data,
prev->data,
&b,
t_data,
tmp_data));
net_utils::setGpuValue(data, 1, 0); // loss = 0
hipLaunchKernelGGL(( corssEntropyLoss) , dim3((batch + 127) / 128), dim3(128) , 0, 0, tmp_data, label, class_num, batch, predict_label, data);
}
void SoftmaxAnL::backward()
{
callCuda(hipMemcpy(diff, tmp_data, sizeof(float) * prev->data_size, hipMemcpyDeviceToDevice));
hipLaunchKernelGGL(( softmaxDiff) , dim3((batch + 127) / 128), dim3(128) , 0, 0, label, class_num, batch, diff);
}
void SoftmaxAnL::update()
{
//# .::::.
//# .::::::::.
//# :::::::::::
//# ..:::::::::::'
//# '::::::::::::'
//# .::::::::::
//# '::::::::::::::..
//# ..::::::::::::.
//# ``::::::::::::::::
//# ::::``:::::::::' .:::.
//# ::::' ':::::' .::::::::.
//# .::::' :::: .:::::::'::::.
//# .:::' ::::: .:::::::::' ':::::.
//# .::' :::::.:::::::::' ':::::.
//# .::' ::::::::::::::' ``::::.
//# ...::: ::::::::::::' ``::.
//# ```` ':. ':::::::::' ::::..
//# '.:::::' ':'````..
	//#                  Goddess bless: forever bug-free
}
}
|
e30a27210950a983c8f002bbd43897d58afd6207.cu
|
#include "softmax_activationnloss.h"
using namespace global;
namespace layer {
SoftmaxAnL::SoftmaxAnL(Layer* _prev, float* _label, int _class_num, int _batch) : Layer()
{
prev = _prev;
prev->next = this;
label = _label; // gpu data
class_num = _class_num;
batch = _batch;
	data_size = batch; // output size
param_size = 0;
param_bias_size = 0;
callCudnn(cudnnCreateTensorDescriptor(&t_data));
callCudnn(cudnnSetTensor4dDescriptor(
t_data,
CUDNN_TENSOR_NCHW,
CUDNN_DATA_FLOAT,
batch,
class_num,
1,
1));
	callCuda(cudaMalloc(&tmp_data, sizeof(float) * prev->data_size)); // stores the softmax output probabilities a
callCuda(cudaMalloc(&data, sizeof(float) * 1)); // Loss
callCuda(cudaMalloc(&diff, sizeof(float) * prev->data_size)); // diff
	callCuda(cudaMalloc(&predict_label, sizeof(float) * data_size)); // used to store the predicted labels
}
SoftmaxAnL::~SoftmaxAnL()
{
callCudnn(cudnnDestroyTensorDescriptor(t_data));
callCuda(cudaFree(tmp_data));
callCuda(cudaFree(data));
callCuda(cudaFree(diff));
callCuda(cudaFree(predict_label));
}
__global__ void corssEntropyLoss(float *softmax_output_a, float *label, int class_num, int batch, float *predict_label, float *loss)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < batch)
{
int label_value = 0;
float max = -1;
for (int i = 0; i < class_num; i++) {
if (softmax_output_a[idx * class_num + i] > max) {
max = softmax_output_a[idx * class_num + i];
label_value = i;
}
}
predict_label[idx] = (float)label_value;
atomicAdd(loss, -log(softmax_output_a[idx * class_num + (int)label[idx]]));
}
}
// Derivative of the cross-entropy loss with respect to the (un-normalized) softmax input: diff = f(z_l) - 1, so first copy the softmax output probabilities (a) into diff, then subtract 1 at the true-label position.
__global__ void softmaxDiff(const float *label, int class_num, int batch, float *diff)
{
int idx = blockIdx.x * blockDim.x + threadIdx.x;
if (idx < batch)
{
const int label_value = static_cast<int>(label[idx]);
			diff[idx * class_num + label_value] -= 1.0f; // derivative with respect to z_i
}
}
void SoftmaxAnL::forward(bool train)
{
float a = 1;
float b = 0;
callCudnn(cudnnSoftmaxForward(
cudnnHandle,
CUDNN_SOFTMAX_FAST,
CUDNN_SOFTMAX_MODE_CHANNEL,
&a,
t_data,
prev->data,
&b,
t_data,
tmp_data));
net_utils::setGpuValue(data, 1, 0); // loss = 0
corssEntropyLoss <<< (batch + 127) / 128, 128 >>> (tmp_data, label, class_num, batch, predict_label, data);
}
void SoftmaxAnL::backward()
{
callCuda(cudaMemcpy(diff, tmp_data, sizeof(float) * prev->data_size, cudaMemcpyDeviceToDevice));
softmaxDiff <<< (batch + 127) / 128, 128 >>> (label, class_num, batch, diff);
}
void SoftmaxAnL::update()
{
//# .::::.
//# .::::::::.
//# :::::::::::
//# ..:::::::::::'
//# '::::::::::::'
//# .::::::::::
//# '::::::::::::::..
//# ..::::::::::::.
//# ``::::::::::::::::
//# ::::``:::::::::' .:::.
//# ::::' ':::::' .::::::::.
//# .::::' :::: .:::::::'::::.
//# .:::' ::::: .:::::::::' ':::::.
//# .::' :::::.:::::::::' ':::::.
//# .::' ::::::::::::::' ``::::.
//# ...::: ::::::::::::' ``::.
//# ```` ':. ':::::::::' ::::..
//# '.:::::' ':'````..
	//#                  Goddess bless: forever bug-free
}
}
|
9bee43ad0e54a5328da653fe408dabc990a703be.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cudaCompress/util/DWT.h>
#include <assert.h>
#include <cudaCompress/cudaUtil.h>
#include "DWTFloatKernels.cui"
#include "DWTFloatFromSymbolsKernels.cui"
#include "DWTFloat2DLowpassKernels.cui"
namespace cudaCompress {
namespace util {
template<typename TIn, int channelCountIn>
static void dwtFloatForwardLowpassOnly2D(
float* dpDest, float* dpBuffer, const TIn* dpSource,
int sizeX, int sizeY,
int dstRowPitch, int srcRowPitch,
hipStream_t stream)
{
const int xBlockSizeX = 32;
const int xBlockSizeY = 4;
const dim3 xBlockSize(xBlockSizeX, xBlockSizeY);
const int xResultBlockCount = 8;
const int yBlockSizeX = 32;
const int yBlockSizeY = 4;
const dim3 yBlockSize(yBlockSizeX, yBlockSizeY);
const int yResultBlockCount = 8;
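    // Horizontal (X) lowpass pass into dpBuffer: full-size blocks cover whole
    // multiples of xResultBlockCount*xBlockSizeX columns, and the "Rest" kernel
    // below handles any remaining columns using dynamically sized shared memory.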
dim3 xBlockCount(sizeX / (xResultBlockCount * xBlockSizeX), (sizeY + xBlockSizeY - 1) / xBlockSizeY);
if(xBlockCount.x > 0) {
hipLaunchKernelGGL(( forwardDWT9XLowpassKernel2D
<TIn, channelCountIn, xBlockSizeX, xBlockSizeY, xResultBlockCount>)
, dim3(xBlockCount), dim3(xBlockSize), 0, stream,
dpBuffer, dpSource, sizeX, sizeY, dstRowPitch, srcRowPitch);
cudaCheckMsg("forwardDWT9XLowpassKernel2D execution failed");
}
int sizeXdone = xBlockCount.x * xResultBlockCount * xBlockSizeX;
int sizeXrest = sizeX - sizeXdone;
if(sizeXrest > 0) {
dim3 xBlockCountRest(1, xBlockCount.y);
int xResultBlockCountRest = (sizeXrest + xBlockSizeX - 1) / xBlockSizeX;
uint sharedSize = (xBlockSizeY * (xResultBlockCountRest * xBlockSizeX + (FILTER_LENGTH-1))) * sizeof(float);
hipLaunchKernelGGL(( forwardDWT9XLowpassRestKernel2D
<TIn, channelCountIn, xBlockSizeX, xBlockSizeY>)
, dim3(xBlockCountRest), dim3(xBlockSize), sharedSize, stream,
dpBuffer, dpSource, sizeXdone, sizeX, sizeY, xResultBlockCountRest, dstRowPitch, srcRowPitch);
cudaCheckMsg("forwardDWT9XLowpassRestKernel2D execution failed");
}
dim3 yBlockCount((sizeX/2 + yBlockSizeX - 1) / yBlockSizeX, sizeY / (yResultBlockCount * yBlockSizeY));
if(yBlockCount.y > 0) {
hipLaunchKernelGGL(( forwardDWT9YLowpassKernel2D
<yBlockSizeX, yBlockSizeY, yResultBlockCount>)
, dim3(yBlockCount), dim3(yBlockSize), 0, stream,
dpDest, dpBuffer, sizeX/2, sizeY, dstRowPitch);
cudaCheckMsg("forwardDWT9YLowpassKernel2D execution failed");
}
int sizeYdone = yBlockCount.y * yResultBlockCount * yBlockSizeY;
int sizeYrest = sizeY - sizeYdone;
if(sizeYrest > 0) {
dim3 yBlockCountRest(yBlockCount.x, 1);
int yResultBlockCountRest = (sizeYrest + yBlockSizeY - 1) / yBlockSizeY;
uint sharedSize = (yBlockSizeX * (yResultBlockCountRest * yBlockSizeY + (FILTER_LENGTH-1) + 1)) * sizeof(float);
hipLaunchKernelGGL(( forwardDWT9YLowpassRestKernel2D
<yBlockSizeX, yBlockSizeY>)
, dim3(yBlockCountRest), dim3(yBlockSize), sharedSize, stream,
dpDest, dpBuffer, sizeYdone, sizeX/2, sizeY, yResultBlockCountRest, dstRowPitch);
cudaCheckMsg("forwardDWT9YLowpassRestKernel2D execution failed");
}
}
template<typename TIn, int channelCountIn>
static void dwtFloatForward(
float* dpDest, float* dpBuffer2, float* dpBuffer1, const TIn* dpSource,
int sizeX, int sizeY, int sizeZ,
int dstRowPitch, int dstSlicePitch,
int srcRowPitch, int srcSlicePitch,
hipStream_t stream)
{
const int xBlockSizeX = 32;
const int xBlockSizeY = 4;
const dim3 xBlockSize(xBlockSizeX, xBlockSizeY);
const int xResultBlockCount = 8;
const int xResultBlockCount2 = 4;
const int xResultBlockCount3 = 2;
const int yBlockSizeX = 32;
const int yBlockSizeY = 4;
const dim3 yBlockSize(yBlockSizeX, yBlockSizeY);
const int yResultBlockCount = 8;
const int zBlockSizeX = 32;
const int zBlockSizeY = 4;
const dim3 zBlockSize(zBlockSizeX, zBlockSizeY);
const int zResultBlockCount = 8;
bool do3D = (sizeZ > 1);
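    // Separable forward DWT: the X pass writes to dpBuffer1, the Y pass to
    // dpBuffer2 (or straight to dpDest in the 2D case), and the Z pass (3D only)
    // writes the final result to dpDest.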
dim3 xBlockCount(sizeX / (xResultBlockCount * xBlockSizeX), (sizeY + xBlockSizeY - 1) / xBlockSizeY, sizeZ);
if(xBlockCount.x > 0) {
hipLaunchKernelGGL(( forwardDWT9XKernel
<TIn, channelCountIn, xBlockSizeX, xBlockSizeY, xResultBlockCount>)
, dim3(xBlockCount), dim3(xBlockSize), 0, stream,
dpBuffer1, dpSource, sizeX, sizeY, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch);
cudaCheckMsg("forwardDWT9XKernel execution failed");
} else if(sizeX == (xResultBlockCount2 * xBlockSizeX)) {
// special case for sizeX == 128
xBlockCount.x = 1;
hipLaunchKernelGGL(( forwardDWT9XKernel
<TIn, channelCountIn, xBlockSizeX, xBlockSizeY, xResultBlockCount2>)
, dim3(xBlockCount), dim3(xBlockSize), 0, stream,
dpBuffer1, dpSource, sizeX, sizeY, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch);
cudaCheckMsg("forwardDWT9XKernel execution failed");
} else if(sizeX == (xResultBlockCount3 * xBlockSizeX)) {
// special case for sizeX == 64
xBlockCount.x = 1;
hipLaunchKernelGGL(( forwardDWT9XKernel
<TIn, channelCountIn, xBlockSizeX, xBlockSizeY, xResultBlockCount3>)
, dim3(xBlockCount), dim3(xBlockSize), 0, stream,
dpBuffer1, dpSource, sizeX, sizeY, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch);
cudaCheckMsg("forwardDWT9XKernel execution failed");
}
int sizeXdone = xBlockCount.x * xResultBlockCount * xBlockSizeX;
int sizeXrest = sizeX - sizeXdone;
if(sizeXrest > 0) {
dim3 xBlockCountRest(1, xBlockCount.y, xBlockCount.z);
int xResultBlockCountRest = (sizeXrest + xBlockSizeX - 1) / xBlockSizeX;
uint sharedSize = (xBlockSizeY * (xResultBlockCountRest * xBlockSizeX + (FILTER_LENGTH-1))) * sizeof(float);
hipLaunchKernelGGL(( forwardDWT9XRestKernel
<TIn, channelCountIn, xBlockSizeX, xBlockSizeY>)
, dim3(xBlockCountRest), dim3(xBlockSize), sharedSize, stream,
dpBuffer1, dpSource, sizeXdone, sizeX, sizeY, xResultBlockCountRest, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch);
cudaCheckMsg("forwardDWT9XRestKernel execution failed");
}
float* dpDestY = (do3D ? dpBuffer2 : dpDest);
dim3 yBlockCount((sizeX + yBlockSizeX - 1) / yBlockSizeX, sizeY / (yResultBlockCount * yBlockSizeY), sizeZ);
if(yBlockCount.y > 0) {
hipLaunchKernelGGL(( forwardDWT9YKernel
<yBlockSizeX, yBlockSizeY, yResultBlockCount>)
, dim3(yBlockCount), dim3(yBlockSize), 0, stream,
dpDestY, dpBuffer1, sizeX, sizeY, dstRowPitch, dstSlicePitch);
cudaCheckMsg("forwardDWT9YKernel execution failed");
}
int sizeYdone = yBlockCount.y * yResultBlockCount * yBlockSizeY;
int sizeYrest = sizeY - sizeYdone;
if(sizeYrest > 0) {
dim3 yBlockCountRest(yBlockCount.x, 1, yBlockCount.z);
int yResultBlockCountRest = (sizeYrest + yBlockSizeY - 1) / yBlockSizeY;
uint sharedSize = (yBlockSizeX * (yResultBlockCountRest * yBlockSizeY + (FILTER_LENGTH-1) + 1)) * sizeof(float);
hipLaunchKernelGGL(( forwardDWT9YRestKernel
<yBlockSizeX, yBlockSizeY>)
, dim3(yBlockCountRest), dim3(yBlockSize), sharedSize, stream,
dpDestY, dpBuffer1, sizeYdone, sizeX, sizeY, yResultBlockCountRest, dstRowPitch, dstSlicePitch);
cudaCheckMsg("forwardDWT9YRestKernel execution failed");
}
if(do3D) {
dim3 zBlockCount((sizeX + zBlockSizeX - 1) / zBlockSizeX, sizeY, sizeZ / (zResultBlockCount * zBlockSizeY));
if(zBlockCount.z > 0) {
hipLaunchKernelGGL(( forwardDWT9ZKernel
<zBlockSizeX, zBlockSizeY, zResultBlockCount>)
, dim3(zBlockCount), dim3(zBlockSize), 0, stream,
dpDest, dpBuffer2, sizeX, sizeZ, dstRowPitch, dstSlicePitch);
cudaCheckMsg("forwardDWT9ZKernel execution failed");
}
int sizeZdone = zBlockCount.z * zResultBlockCount * zBlockSizeY;
int sizeZrest = sizeZ - sizeZdone;
if(sizeZrest > 0) {
dim3 zBlockCountRest(zBlockCount.x, zBlockCount.y, 1);
int zResultBlockCountRest = (sizeZrest + zBlockSizeY - 1) / zBlockSizeY;
uint sharedSize = (zBlockSizeX * (zResultBlockCountRest * zBlockSizeY + (FILTER_LENGTH-1) + 1)) * sizeof(float);
hipLaunchKernelGGL(( forwardDWT9ZRestKernel
<zBlockSizeX, zBlockSizeY>)
, dim3(zBlockCountRest), dim3(zBlockSize), sharedSize, stream,
dpDest, dpBuffer2, sizeZdone, sizeX, sizeZ, zResultBlockCountRest, dstRowPitch, dstSlicePitch);
cudaCheckMsg("forwardDWT9ZRestKernel execution failed");
}
}
}
template<typename TOut, int channelCountOut>
static void dwtFloatInverse(
TOut* dpDest, float* dpBuffer2, float* dpBuffer1, const float* dpSource,
int sizeX, int sizeY, int sizeZ,
int dstRowPitch, int dstSlicePitch,
int srcRowPitch, int srcSlicePitch,
hipStream_t stream)
{
const int xBlockSizeX = 32;
const int xBlockSizeY = 4;
const dim3 xBlockSize(xBlockSizeX, xBlockSizeY);
const int xResultBlockCount = 8;
const int xResultBlockCount2 = 4;
const int xResultBlockCount3 = 2;
const int yBlockSizeX = 32;
const int yBlockSizeY = 4;
const dim3 yBlockSize(yBlockSizeX, yBlockSizeY);
const int yResultBlockCount = 8;
const int zBlockSizeX = 32;
const int zBlockSizeY = 4;
const dim3 zBlockSize(zBlockSizeX, zBlockSizeY);
const int zResultBlockCount = 8;
bool do3D = (sizeZ > 1);
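    // Inverse transform applies the passes in reverse order: Z (3D only), then Y, then X.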
if(do3D) {
dim3 zBlockCount(((sizeX + zBlockSizeX - 1) / zBlockSizeX), sizeY, sizeZ / (zResultBlockCount * zBlockSizeY));
if(zBlockCount.z > 0) {
hipLaunchKernelGGL(( inverseDWT9ZKernel
<zBlockSizeX, zBlockSizeY, zResultBlockCount>)
, dim3(zBlockCount), dim3(zBlockSize), 0, stream,
dpBuffer2, dpSource, sizeX, sizeZ, srcRowPitch, srcSlicePitch);
cudaCheckMsg("inverseDWT9ZKernel execution failed");
}
int sizeZdone = zBlockCount.z * zResultBlockCount * zBlockSizeY;
int sizeZrest = sizeZ - sizeZdone;
if(sizeZrest > 0) {
dim3 zBlockCountRest(zBlockCount.x, zBlockCount.y, 1);
int zResultBlockCountRest = (sizeZrest + zBlockSizeY - 1) / zBlockSizeY;
uint sharedSize = (zBlockSizeX * (zResultBlockCountRest * zBlockSizeY + (FILTER_LENGTH-1) + 1)) * sizeof(float);
hipLaunchKernelGGL(( inverseDWT9ZRestKernel
<zBlockSizeX, zBlockSizeY>)
, dim3(zBlockCountRest), dim3(zBlockSize), sharedSize, stream,
dpBuffer2, dpSource, sizeZdone, sizeX, sizeZ, zResultBlockCountRest, srcRowPitch, srcSlicePitch);
cudaCheckMsg("inverseDWT9ZRestKernel execution failed");
}
}
const float* dpSourceY = (do3D ? dpBuffer2 : dpSource);
dim3 yBlockCount((sizeX + yBlockSizeX - 1) / yBlockSizeX, sizeY / (yResultBlockCount * yBlockSizeY), sizeZ);
if(yBlockCount.y > 0) {
hipLaunchKernelGGL(( inverseDWT9YKernel
<yBlockSizeX, yBlockSizeY, yResultBlockCount>)
, dim3(yBlockCount), dim3(yBlockSize), 0, stream,
dpBuffer1, dpSourceY, sizeX, sizeY, srcRowPitch, srcSlicePitch);
cudaCheckMsg("inverseDWT9YKernel execution failed");
}
int sizeYdone = yBlockCount.y * yResultBlockCount * yBlockSizeY;
int sizeYrest = sizeY - sizeYdone;
if(sizeYrest > 0) {
dim3 yBlockCountRest(yBlockCount.x, 1, yBlockCount.z);
int yResultBlockCountRest = (sizeYrest + yBlockSizeY - 1) / yBlockSizeY;
uint sharedSize = (yBlockSizeX * (yResultBlockCountRest * yBlockSizeY + (FILTER_LENGTH-1) + 1)) * sizeof(float);
hipLaunchKernelGGL(( inverseDWT9YRestKernel
<yBlockSizeX, yBlockSizeY>)
, dim3(yBlockCountRest), dim3(yBlockSize), sharedSize, stream,
dpBuffer1, dpSourceY, sizeYdone, sizeX, sizeY, yResultBlockCountRest, srcRowPitch, srcSlicePitch);
cudaCheckMsg("inverseDWT9YRestKernel execution failed");
}
dim3 xBlockCount(sizeX / (xResultBlockCount * xBlockSizeX), ((sizeY + xBlockSizeY - 1) / xBlockSizeY), sizeZ);
if(xBlockCount.x > 0) {
hipLaunchKernelGGL(( inverseDWT9XKernel
<TOut, channelCountOut, xBlockSizeX, xBlockSizeY, xResultBlockCount>)
, dim3(xBlockCount), dim3(xBlockSize), 0, stream,
dpDest, dpBuffer1, sizeX, sizeY, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch);
cudaCheckMsg("inverseDWT9XKernel execution failed");
} else if(sizeX == (xResultBlockCount2 * xBlockSizeX)) {
// special case for sizeX == 128
xBlockCount.x = 1;
hipLaunchKernelGGL(( inverseDWT9XKernel
<TOut, channelCountOut, xBlockSizeX, xBlockSizeY, xResultBlockCount2>)
, dim3(xBlockCount), dim3(xBlockSize), 0, stream,
dpDest, dpBuffer1, sizeX, sizeY, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch);
cudaCheckMsg("inverseDWT9XKernel execution failed");
} else if(sizeX == (xResultBlockCount3 * xBlockSizeX)) {
// special case for sizeX == 64
xBlockCount.x = 1;
hipLaunchKernelGGL(( inverseDWT9XKernel
<TOut, channelCountOut, xBlockSizeX, xBlockSizeY, xResultBlockCount3>)
, dim3(xBlockCount), dim3(xBlockSize), 0, stream,
dpDest, dpBuffer1, sizeX, sizeY, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch);
cudaCheckMsg("inverseDWT9XKernel execution failed");
}
int sizeXdone = xBlockCount.x * xResultBlockCount * xBlockSizeX;
int sizeXrest = sizeX - sizeXdone;
if(sizeXrest > 0) {
dim3 xBlockCountRest(1, xBlockCount.y, xBlockCount.z);
int xResultBlockCountRest = (sizeXrest + xBlockSizeX - 1) / xBlockSizeX;
uint sharedSize = (xBlockSizeY * (xResultBlockCountRest * xBlockSizeX + (FILTER_LENGTH-1))) * sizeof(float);
hipLaunchKernelGGL(( inverseDWT9XRestKernel
<TOut, channelCountOut, xBlockSizeX, xBlockSizeY>)
, dim3(xBlockCountRest), dim3(xBlockSize), sharedSize, stream,
dpDest, dpBuffer1, sizeXdone, sizeX, sizeY, xResultBlockCountRest, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch);
cudaCheckMsg("inverseDWT9XRestKernel execution failed");
}
}
template<typename TOut, typename THigh, int channelCountOut>
static void dwtFloatInverseFromSymbols(
TOut* dpDest, float* dpBuffer2, float* dpBuffer1, const float* dpLowpass,
const THigh*const* dppHighpass, float quantStep,
int sizeX, int sizeY, int sizeZ,
int dstRowPitch, int dstSlicePitch,
int lowpassRowPitch, int lowpassSlicePitch,
hipStream_t stream)
{
const int xBlockSizeX = 32;
const int xBlockSizeY = 4;
const dim3 xBlockSize(xBlockSizeX, xBlockSizeY);
const int xResultBlockCount = 8;
const int xResultBlockCount2 = 4;
const int xResultBlockCount3 = 2;
const int yBlockSizeX = 32;
const int yBlockSizeY = 4;
const dim3 yBlockSize(yBlockSizeX, yBlockSizeY);
const int yResultBlockCount = 8;
const int zBlockSizeX = 32;
const int zBlockSizeY = 4;
const dim3 zBlockSize(zBlockSizeX, zBlockSizeY);
const int zResultBlockCount = 8;
bool do3D = (sizeZ > 1);
int bufferRowPitch = sizeX;
int bufferSlicePitch = bufferRowPitch * sizeY;
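    // The first inverse pass reads the lowpass band plus the quantized highpass
    // symbols (dequantized with quantStep) via the *FromSymbols kernels; the
    // remaining passes reuse the plain inverse kernels on the float buffers.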
if(do3D) {
dim3 zBlockCount(((sizeX + zBlockSizeX - 1) / zBlockSizeX), sizeY, sizeZ / (zResultBlockCount * zBlockSizeY));
if(zBlockCount.z > 0) {
hipLaunchKernelGGL(( inverseDWT9ZFromSymbolsKernel
<THigh, zBlockSizeX, zBlockSizeY, zResultBlockCount>)
, dim3(zBlockCount), dim3(zBlockSize), 0, stream,
dpBuffer2, dpLowpass, dppHighpass, quantStep, sizeX, sizeY, sizeZ, bufferRowPitch, bufferSlicePitch, lowpassRowPitch, lowpassSlicePitch);
cudaCheckMsg("inverseDWT9ZFromSymbolsKernel execution failed");
}
int sizeZdone = zBlockCount.z * zResultBlockCount * zBlockSizeY;
int sizeZrest = sizeZ - sizeZdone;
if(sizeZrest > 0) {
dim3 zBlockCountRest(zBlockCount.x, zBlockCount.y, 1);
int zResultBlockCountRest = (sizeZrest + zBlockSizeY - 1) / zBlockSizeY;
uint sharedSize = (zBlockSizeX * (zResultBlockCountRest * zBlockSizeY + (FILTER_LENGTH-1) + 1)) * sizeof(float);
hipLaunchKernelGGL(( inverseDWT9ZFromSymbolsRestKernel
<THigh, zBlockSizeX, zBlockSizeY>)
, dim3(zBlockCountRest), dim3(zBlockSize), sharedSize, stream,
dpBuffer2, dpLowpass, dppHighpass, quantStep, sizeZdone, sizeX, sizeY, sizeZ, zResultBlockCountRest, bufferRowPitch, bufferSlicePitch, lowpassRowPitch, lowpassSlicePitch);
cudaCheckMsg("inverseDWT9ZFromSymbolsRestKernel execution failed");
}
}
dim3 yBlockCount((sizeX + yBlockSizeX - 1) / yBlockSizeX, sizeY / (yResultBlockCount * yBlockSizeY), sizeZ);
if(yBlockCount.y > 0) {
if(do3D) {
hipLaunchKernelGGL(( inverseDWT9YKernel
<yBlockSizeX, yBlockSizeY, yResultBlockCount>)
, dim3(yBlockCount), dim3(yBlockSize), 0, stream,
dpBuffer1, dpBuffer2, sizeX, sizeY, bufferRowPitch, bufferSlicePitch);
cudaCheckMsg("inverseDWT9YKernel execution failed");
} else {
hipLaunchKernelGGL(( inverseDWT9YFromSymbolsKernel
<THigh, yBlockSizeX, yBlockSizeY, yResultBlockCount>)
, dim3(yBlockCount), dim3(yBlockSize), 0, stream,
dpBuffer1, dpLowpass, dppHighpass, quantStep, sizeX, sizeY, bufferRowPitch, lowpassRowPitch);
cudaCheckMsg("inverseDWT9YFromSymbolsKernel execution failed");
}
}
int sizeYdone = yBlockCount.y * yResultBlockCount * yBlockSizeY;
int sizeYrest = sizeY - sizeYdone;
if(sizeYrest > 0) {
dim3 yBlockCountRest(yBlockCount.x, 1, yBlockCount.z);
int yResultBlockCountRest = (sizeYrest + yBlockSizeY - 1) / yBlockSizeY;
uint sharedSize = (yBlockSizeX * (yResultBlockCountRest * yBlockSizeY + (FILTER_LENGTH-1) + 1)) * sizeof(float);
if(do3D) {
hipLaunchKernelGGL(( inverseDWT9YRestKernel
<yBlockSizeX, yBlockSizeY>)
, dim3(yBlockCountRest), dim3(yBlockSize), sharedSize, stream,
dpBuffer1, dpBuffer2, sizeYdone, sizeX, sizeY, yResultBlockCountRest, bufferRowPitch, bufferSlicePitch);
cudaCheckMsg("inverseDWT9YRestKernel execution failed");
} else {
hipLaunchKernelGGL(( inverseDWT9YFromSymbolsRestKernel
<THigh, yBlockSizeX, yBlockSizeY>)
, dim3(yBlockCountRest), dim3(yBlockSize), sharedSize, stream,
dpBuffer1, dpLowpass, dppHighpass, quantStep, sizeYdone, sizeX, sizeY, yResultBlockCountRest, bufferRowPitch, lowpassRowPitch);
cudaCheckMsg("inverseDWT9YFromSymbolsRestKernel execution failed");
}
}
dim3 xBlockCount(sizeX / (xResultBlockCount * xBlockSizeX), ((sizeY + xBlockSizeY - 1) / xBlockSizeY), sizeZ);
if(xBlockCount.x > 0) {
hipLaunchKernelGGL(( inverseDWT9XKernel
<TOut, channelCountOut, xBlockSizeX, xBlockSizeY, xResultBlockCount>)
, dim3(xBlockCount), dim3(xBlockSize), 0, stream,
dpDest, dpBuffer1, sizeX, sizeY, dstRowPitch, dstSlicePitch, bufferRowPitch, bufferSlicePitch);
cudaCheckMsg("inverseDWT9XKernel execution failed");
} else if(sizeX == (xResultBlockCount2 * xBlockSizeX)) {
// special case for sizeX == 128
xBlockCount.x = 1;
hipLaunchKernelGGL(( inverseDWT9XKernel
<TOut, channelCountOut, xBlockSizeX, xBlockSizeY, xResultBlockCount2>)
, dim3(xBlockCount), dim3(xBlockSize), 0, stream,
dpDest, dpBuffer1, sizeX, sizeY, dstRowPitch, dstSlicePitch, bufferRowPitch, bufferSlicePitch);
cudaCheckMsg("inverseDWT9XKernel execution failed");
} else if(sizeX == (xResultBlockCount3 * xBlockSizeX)) {
// special case for sizeX == 64
xBlockCount.x = 1;
hipLaunchKernelGGL(( inverseDWT9XKernel
<TOut, channelCountOut, xBlockSizeX, xBlockSizeY, xResultBlockCount3>)
, dim3(xBlockCount), dim3(xBlockSize), 0, stream,
dpDest, dpBuffer1, sizeX, sizeY, dstRowPitch, dstSlicePitch, bufferRowPitch, bufferSlicePitch);
cudaCheckMsg("inverseDWT9XKernel execution failed");
}
int sizeXdone = xBlockCount.x * xResultBlockCount * xBlockSizeX;
int sizeXrest = sizeX - sizeXdone;
if(sizeXrest > 0) {
dim3 xBlockCountRest(1, xBlockCount.y, xBlockCount.z);
int xResultBlockCountRest = (sizeXrest + xBlockSizeX - 1) / xBlockSizeX;
uint sharedSize = (xBlockSizeY * (xResultBlockCountRest * xBlockSizeX + (FILTER_LENGTH-1))) * sizeof(float);
hipLaunchKernelGGL(( inverseDWT9XRestKernel
<TOut, channelCountOut, xBlockSizeX, xBlockSizeY>)
, dim3(xBlockCountRest), dim3(xBlockSize), sharedSize, stream,
dpDest, dpBuffer1, sizeXdone, sizeX, sizeY, xResultBlockCountRest, dstRowPitch, dstSlicePitch, bufferRowPitch, bufferSlicePitch);
cudaCheckMsg("inverseDWT9XRestKernel execution failed");
}
}
template<typename TIn>
static void dwtFloatForward(
float* dpDest, float* dpBuffer2, float* dpBuffer1, const TIn* dpSource,
int sizeX, int sizeY, int sizeZ, int srcChannelCount,
int dstRowPitch, int dstSlicePitch,
int srcRowPitch, int srcSlicePitch,
hipStream_t stream)
{
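    // A non-positive pitch means tightly packed data: derive row/slice pitches
    // from the logical extents (and, for the source, the channel count).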
if(dstRowPitch <= 0) dstRowPitch = sizeX;
if(dstSlicePitch <= 0) dstSlicePitch = sizeY * dstRowPitch;
if(srcRowPitch <= 0) srcRowPitch = sizeX * srcChannelCount;
if(srcSlicePitch <= 0) srcSlicePitch = sizeY * srcRowPitch;
switch(srcChannelCount) {
case 1: dwtFloatForward<TIn, 1>(dpDest, dpBuffer2, dpBuffer1, dpSource, sizeX, sizeY, sizeZ, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch, stream); break;
case 2: dwtFloatForward<TIn, 2>(dpDest, dpBuffer2, dpBuffer1, dpSource, sizeX, sizeY, sizeZ, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch, stream); break;
case 3: dwtFloatForward<TIn, 3>(dpDest, dpBuffer2, dpBuffer1, dpSource, sizeX, sizeY, sizeZ, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch, stream); break;
case 4: dwtFloatForward<TIn, 4>(dpDest, dpBuffer2, dpBuffer1, dpSource, sizeX, sizeY, sizeZ, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch, stream); break;
default: assert(false);
}
}
template<typename TOut>
static void dwtFloatInverse(
TOut* dpDest, float* dpBuffer2, float* dpBuffer1, const float* dpSource,
int sizeX, int sizeY, int sizeZ, int dstChannelCount,
int dstRowPitch, int dstSlicePitch,
int srcRowPitch, int srcSlicePitch,
hipStream_t stream)
{
if(dstRowPitch <= 0) dstRowPitch = sizeX * dstChannelCount;
if(dstSlicePitch <= 0) dstSlicePitch = sizeY * dstRowPitch;
if(srcRowPitch <= 0) srcRowPitch = sizeX;
if(srcSlicePitch <= 0) srcSlicePitch = sizeY * srcRowPitch;
switch(dstChannelCount) {
case 1: dwtFloatInverse<TOut, 1>(dpDest, dpBuffer2, dpBuffer1, dpSource, sizeX, sizeY, sizeZ, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch, stream); break;
case 2: dwtFloatInverse<TOut, 2>(dpDest, dpBuffer2, dpBuffer1, dpSource, sizeX, sizeY, sizeZ, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch, stream); break;
case 3: dwtFloatInverse<TOut, 3>(dpDest, dpBuffer2, dpBuffer1, dpSource, sizeX, sizeY, sizeZ, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch, stream); break;
case 4: dwtFloatInverse<TOut, 4>(dpDest, dpBuffer2, dpBuffer1, dpSource, sizeX, sizeY, sizeZ, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch, stream); break;
default: assert(false);
}
}
template<typename TOut, typename THigh>
void dwtFloatInverseFromSymbols(
TOut* dpDest, float* dpBuffer2, float* dpBuffer1, const float* dpLowpass, const THigh*const* dppHighpass, float quantStep,
int sizeX, int sizeY, int sizeZ, int dstChannelCount,
int dstRowPitch, int dstSlicePitch,
int lowpassRowPitch, int lowpassSlicePitch,
hipStream_t stream)
{
if(dstRowPitch <= 0) dstRowPitch = sizeX * dstChannelCount;
if(dstSlicePitch <= 0) dstSlicePitch = sizeY * dstRowPitch;
if(lowpassRowPitch <= 0) lowpassRowPitch = sizeX;
if(lowpassSlicePitch <= 0) lowpassSlicePitch = sizeY * lowpassRowPitch;
switch(dstChannelCount) {
case 1: dwtFloatInverseFromSymbols<TOut, THigh, 1>(dpDest, dpBuffer2, dpBuffer1, dpLowpass, dppHighpass, quantStep, sizeX, sizeY, sizeZ, dstRowPitch, dstSlicePitch, lowpassRowPitch, lowpassSlicePitch, stream); break;
case 2: dwtFloatInverseFromSymbols<TOut, THigh, 2>(dpDest, dpBuffer2, dpBuffer1, dpLowpass, dppHighpass, quantStep, sizeX, sizeY, sizeZ, dstRowPitch, dstSlicePitch, lowpassRowPitch, lowpassSlicePitch, stream); break;
case 3: dwtFloatInverseFromSymbols<TOut, THigh, 3>(dpDest, dpBuffer2, dpBuffer1, dpLowpass, dppHighpass, quantStep, sizeX, sizeY, sizeZ, dstRowPitch, dstSlicePitch, lowpassRowPitch, lowpassSlicePitch, stream); break;
case 4: dwtFloatInverseFromSymbols<TOut, THigh, 4>(dpDest, dpBuffer2, dpBuffer1, dpLowpass, dppHighpass, quantStep, sizeX, sizeY, sizeZ, dstRowPitch, dstSlicePitch, lowpassRowPitch, lowpassSlicePitch, stream); break;
default: assert(false);
}
}
template<typename T>
static void dwtFloatForwardLowpassOnly(
float* dpDest, float* dpBuffer, const T* dpSource,
int sizeX, int sizeY, int srcChannelCount,
int dstRowPitch,
int srcRowPitch,
hipStream_t stream)
{
if(dstRowPitch <= 0) dstRowPitch = sizeX / 2;
if(srcRowPitch <= 0) srcRowPitch = sizeX * srcChannelCount;
switch(srcChannelCount) {
case 1: dwtFloatForwardLowpassOnly2D<T, 1>(dpDest, dpBuffer, dpSource, sizeX, sizeY, dstRowPitch, srcRowPitch, stream); break;
case 2: dwtFloatForwardLowpassOnly2D<T, 2>(dpDest, dpBuffer, dpSource, sizeX, sizeY, dstRowPitch, srcRowPitch, stream); break;
case 3: dwtFloatForwardLowpassOnly2D<T, 3>(dpDest, dpBuffer, dpSource, sizeX, sizeY, dstRowPitch, srcRowPitch, stream); break;
case 4: dwtFloatForwardLowpassOnly2D<T, 4>(dpDest, dpBuffer, dpSource, sizeX, sizeY, dstRowPitch, srcRowPitch, stream); break;
default: assert(false);
}
}
void dwtFloat2DForward(
float* dpDest, float* dpBuffer, const float* dpSource,
int sizeX, int sizeY, int srcChannelCount,
int dstRowPitch,
int srcRowPitch,
hipStream_t stream)
{
dwtFloatForward<float>(dpDest, nullptr, dpBuffer, dpSource, sizeX, sizeY, 1, srcChannelCount, dstRowPitch, 0, srcRowPitch, 0, stream);
}
void dwtFloat3DForward(
float* dpDest, float* dpBuffer2, float* dpBuffer1, const float* dpSource,
int sizeX, int sizeY, int sizeZ, int srcChannelCount,
int dstRowPitch, int dstSlicePitch,
int srcRowPitch, int srcSlicePitch,
hipStream_t stream)
{
dwtFloatForward<float>(dpDest, dpBuffer2, dpBuffer1, dpSource, sizeX, sizeY, sizeZ, srcChannelCount, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch, stream);
}
void dwtFloat2DInverse(
float* dpDest, float* dpBuffer, const float* dpSource,
int sizeX, int sizeY, int dstChannelCount,
int dstRowPitch,
int srcRowPitch,
hipStream_t stream)
{
dwtFloatInverse<float>(dpDest, nullptr, dpBuffer, dpSource, sizeX, sizeY, 1, dstChannelCount, dstRowPitch, 0, srcRowPitch, 0, stream);
}
void dwtFloat3DInverse(
float* dpDest, float* dpBuffer2, float* dpBuffer1, const float* dpSource,
int sizeX, int sizeY, int sizeZ, int dstChannelCount,
int dstRowPitch, int dstSlicePitch,
int srcRowPitch, int srcSlicePitch,
hipStream_t stream)
{
dwtFloatInverse<float>(dpDest, dpBuffer2, dpBuffer1, dpSource, sizeX, sizeY, sizeZ, dstChannelCount, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch, stream);
}
void dwtFloat2DInverseFromSymbols(
float* dpDest, float* dpBuffer,
const float* dpLowpass, const ushort*const* dppHighpass, float quantStep,
int sizeX, int sizeY, int dstChannelCount,
int dstRowPitch,
int lowpassRowPitch,
hipStream_t stream)
{
dwtFloatInverseFromSymbols<float, ushort>(dpDest, nullptr, dpBuffer, dpLowpass, dppHighpass, quantStep, sizeX, sizeY, 1, dstChannelCount, dstRowPitch, 0, lowpassRowPitch, 0, stream);
}
void dwtFloat3DInverseFromSymbols(
float* dpDest, float* dpBuffer2, float* dpBuffer1,
const float* dpLowpass, const ushort*const* dppHighpass, float quantStep,
int sizeX, int sizeY, int sizeZ, int dstChannelCount,
int dstRowPitch, int dstSlicePitch,
int lowpassRowPitch, int lowpassSlicePitch,
hipStream_t stream)
{
dwtFloatInverseFromSymbols<float, ushort>(dpDest, dpBuffer2, dpBuffer1, dpLowpass, dppHighpass, quantStep, sizeX, sizeY, sizeZ, dstChannelCount, dstRowPitch, dstSlicePitch, lowpassRowPitch, lowpassSlicePitch, stream);
}
void dwtFloat2DInverseFromSymbols(
float* dpDest, float* dpBuffer,
const float* dpLowpass, const uint*const* dppHighpass, float quantStep,
int sizeX, int sizeY, int dstChannelCount,
int dstRowPitch,
int lowpassRowPitch,
hipStream_t stream)
{
dwtFloatInverseFromSymbols<float, uint>(dpDest, nullptr, dpBuffer, dpLowpass, dppHighpass, quantStep, sizeX, sizeY, 1, dstChannelCount, dstRowPitch, 0, lowpassRowPitch, 0, stream);
}
void dwtFloat3DInverseFromSymbols(
float* dpDest, float* dpBuffer2, float* dpBuffer1,
const float* dpLowpass, const uint*const* dppHighpass, float quantStep,
int sizeX, int sizeY, int sizeZ, int dstChannelCount,
int dstRowPitch, int dstSlicePitch,
int lowpassRowPitch, int lowpassSlicePitch,
hipStream_t stream)
{
dwtFloatInverseFromSymbols<float, uint>(dpDest, dpBuffer2, dpBuffer1, dpLowpass, dppHighpass, quantStep, sizeX, sizeY, sizeZ, dstChannelCount, dstRowPitch, dstSlicePitch, lowpassRowPitch, lowpassSlicePitch, stream);
}
void dwtFloat2DForwardFromByte(
float* dpDest, float* dpBuffer, const byte* dpSource,
int sizeX, int sizeY, int srcChannelCount,
int dstRowPitch,
int srcRowPitch,
hipStream_t stream)
{
dwtFloatForward<byte>(dpDest, nullptr, dpBuffer, dpSource, sizeX, sizeY, 1, srcChannelCount, dstRowPitch, 0, srcRowPitch, 0, stream);
}
void dwtFloat3DForwardFromByte(
float* dpDest, float* dpBuffer2, float* dpBuffer1, const byte* dpSource,
int sizeX, int sizeY, int sizeZ, int srcChannelCount,
int dstRowPitch, int dstSlicePitch,
int srcRowPitch, int srcSlicePitch,
hipStream_t stream)
{
dwtFloatForward<byte>(dpDest, dpBuffer2, dpBuffer1, dpSource, sizeX, sizeY, sizeZ, srcChannelCount, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch, stream);
}
void dwtFloat2DInverseToByte(
byte* dpDest, float* dpBuffer, const float* dpSource,
int sizeX, int sizeY, int dstChannelCount,
int dstRowPitch,
int srcRowPitch,
hipStream_t stream)
{
dwtFloatInverse<byte>(dpDest, nullptr, dpBuffer, dpSource, sizeX, sizeY, 1, dstChannelCount, dstRowPitch, 0, srcRowPitch, 0, stream);
}
void dwtFloat3DInverseToByte(
byte* dpDest, float* dpBuffer2, float* dpBuffer1, const float* dpSource,
int sizeX, int sizeY, int sizeZ, int dstChannelCount,
int dstRowPitch, int dstSlicePitch,
int srcRowPitch, int srcSlicePitch,
hipStream_t stream)
{
dwtFloatInverse<byte>(dpDest, dpBuffer2, dpBuffer1, dpSource, sizeX, sizeY, sizeZ, dstChannelCount, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch, stream);
}
void dwtFloat2DForwardFromUshort(
float* dpDest, float* dpBuffer, const ushort* dpSource,
int sizeX, int sizeY, int srcChannelCount,
int dstRowPitch,
int srcRowPitch,
hipStream_t stream)
{
dwtFloatForward<ushort>(dpDest, nullptr, dpBuffer, dpSource, sizeX, sizeY, 1, srcChannelCount, dstRowPitch, 0, srcRowPitch, 0, stream);
}
void dwtFloat3DForwardFromUshort(
float* dpDest, float* dpBuffer2, float* dpBuffer1, const ushort* dpSource,
int sizeX, int sizeY, int sizeZ, int srcChannelCount,
int dstRowPitch, int dstSlicePitch,
int srcRowPitch, int srcSlicePitch,
hipStream_t stream)
{
dwtFloatForward<ushort>(dpDest, dpBuffer2, dpBuffer1, dpSource, sizeX, sizeY, sizeZ, srcChannelCount, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch, stream);
}
void dwtFloat2DInverseToUshort(
ushort* dpDest, float* dpBuffer, const float* dpSource,
int sizeX, int sizeY, int dstChannelCount,
int dstRowPitch,
int srcRowPitch,
hipStream_t stream)
{
dwtFloatInverse<ushort>(dpDest, nullptr, dpBuffer, dpSource, sizeX, sizeY, 1, dstChannelCount, dstRowPitch, 0, srcRowPitch, 0, stream);
}
void dwtFloat3DInverseToUshort(
ushort* dpDest, float* dpBuffer2, float* dpBuffer1, const float* dpSource,
int sizeX, int sizeY, int sizeZ, int dstChannelCount,
int dstRowPitch, int dstSlicePitch,
int srcRowPitch, int srcSlicePitch,
hipStream_t stream)
{
dwtFloatInverse<ushort>(dpDest, dpBuffer2, dpBuffer1, dpSource, sizeX, sizeY, sizeZ, dstChannelCount, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch, stream);
}
void dwtFloat2DInverseFromSymbolsToByte(
byte* dpDest, float* dpBuffer,
const float* dpLowpass, const ushort*const* dppHighpass, float quantStep,
int sizeX, int sizeY, int dstChannelCount,
int dstRowPitch,
int lowpassRowPitch,
hipStream_t stream)
{
dwtFloatInverseFromSymbols<byte>(dpDest, nullptr, dpBuffer, dpLowpass, dppHighpass, quantStep, sizeX, sizeY, 1, dstChannelCount, dstRowPitch, 0, lowpassRowPitch, 0, stream);
}
void dwtFloat3DInverseFromSymbolsToByte(
byte* dpDest, float* dpBuffer2, float* dpBuffer1,
const float* dpLowpass, const ushort*const* dppHighpass, float quantStep,
int sizeX, int sizeY, int sizeZ, int dstChannelCount,
int dstRowPitch, int dstSlicePitch,
int lowpassRowPitch, int lowpassSlicePitch,
hipStream_t stream)
{
dwtFloatInverseFromSymbols<byte>(dpDest, dpBuffer2, dpBuffer1, dpLowpass, dppHighpass, quantStep, sizeX, sizeY, sizeZ, dstChannelCount, dstRowPitch, dstSlicePitch, lowpassRowPitch, lowpassSlicePitch, stream);
}
void dwtFloat2DForwardLowpassOnlyFromByte(
float* dpDest, float* dpBuffer, const byte* dpSource,
int sizeX, int sizeY, int srcChannelCount,
int dstRowPitch,
int srcRowPitch,
hipStream_t stream)
{
dwtFloatForwardLowpassOnly<byte>(dpDest, dpBuffer, dpSource, sizeX, sizeY, srcChannelCount, dstRowPitch, srcRowPitch, stream);
}
}
}
|
9bee43ad0e54a5328da653fe408dabc990a703be.cu
|
#include <cudaCompress/util/DWT.h>
#include <assert.h>
#include <cudaCompress/cudaUtil.h>
#include "DWTFloatKernels.cui"
#include "DWTFloatFromSymbolsKernels.cui"
#include "DWTFloat2DLowpassKernels.cui"
namespace cudaCompress {
namespace util {
template<typename TIn, int channelCountIn>
static void dwtFloatForwardLowpassOnly2D(
float* dpDest, float* dpBuffer, const TIn* dpSource,
int sizeX, int sizeY,
int dstRowPitch, int srcRowPitch,
cudaStream_t stream)
{
const int xBlockSizeX = 32;
const int xBlockSizeY = 4;
const dim3 xBlockSize(xBlockSizeX, xBlockSizeY);
const int xResultBlockCount = 8;
const int yBlockSizeX = 32;
const int yBlockSizeY = 4;
const dim3 yBlockSize(yBlockSizeX, yBlockSizeY);
const int yResultBlockCount = 8;
dim3 xBlockCount(sizeX / (xResultBlockCount * xBlockSizeX), (sizeY + xBlockSizeY - 1) / xBlockSizeY);
if(xBlockCount.x > 0) {
forwardDWT9XLowpassKernel2D
<TIn, channelCountIn, xBlockSizeX, xBlockSizeY, xResultBlockCount>
<<<xBlockCount, xBlockSize, 0, stream>>>
(dpBuffer, dpSource, sizeX, sizeY, dstRowPitch, srcRowPitch);
cudaCheckMsg("forwardDWT9XLowpassKernel2D execution failed");
}
int sizeXdone = xBlockCount.x * xResultBlockCount * xBlockSizeX;
int sizeXrest = sizeX - sizeXdone;
if(sizeXrest > 0) {
dim3 xBlockCountRest(1, xBlockCount.y);
int xResultBlockCountRest = (sizeXrest + xBlockSizeX - 1) / xBlockSizeX;
uint sharedSize = (xBlockSizeY * (xResultBlockCountRest * xBlockSizeX + (FILTER_LENGTH-1))) * sizeof(float);
forwardDWT9XLowpassRestKernel2D
<TIn, channelCountIn, xBlockSizeX, xBlockSizeY>
<<<xBlockCountRest, xBlockSize, sharedSize, stream>>>
(dpBuffer, dpSource, sizeXdone, sizeX, sizeY, xResultBlockCountRest, dstRowPitch, srcRowPitch);
cudaCheckMsg("forwardDWT9XLowpassRestKernel2D execution failed");
}
dim3 yBlockCount((sizeX/2 + yBlockSizeX - 1) / yBlockSizeX, sizeY / (yResultBlockCount * yBlockSizeY));
if(yBlockCount.y > 0) {
forwardDWT9YLowpassKernel2D
<yBlockSizeX, yBlockSizeY, yResultBlockCount>
<<<yBlockCount, yBlockSize, 0, stream>>>
(dpDest, dpBuffer, sizeX/2, sizeY, dstRowPitch);
cudaCheckMsg("forwardDWT9YLowpassKernel2D execution failed");
}
int sizeYdone = yBlockCount.y * yResultBlockCount * yBlockSizeY;
int sizeYrest = sizeY - sizeYdone;
if(sizeYrest > 0) {
dim3 yBlockCountRest(yBlockCount.x, 1);
int yResultBlockCountRest = (sizeYrest + yBlockSizeY - 1) / yBlockSizeY;
uint sharedSize = (yBlockSizeX * (yResultBlockCountRest * yBlockSizeY + (FILTER_LENGTH-1) + 1)) * sizeof(float);
forwardDWT9YLowpassRestKernel2D
<yBlockSizeX, yBlockSizeY>
<<<yBlockCountRest, yBlockSize, sharedSize, stream>>>
(dpDest, dpBuffer, sizeYdone, sizeX/2, sizeY, yResultBlockCountRest, dstRowPitch);
cudaCheckMsg("forwardDWT9YLowpassRestKernel2D execution failed");
}
}
template<typename TIn, int channelCountIn>
static void dwtFloatForward(
float* dpDest, float* dpBuffer2, float* dpBuffer1, const TIn* dpSource,
int sizeX, int sizeY, int sizeZ,
int dstRowPitch, int dstSlicePitch,
int srcRowPitch, int srcSlicePitch,
cudaStream_t stream)
{
const int xBlockSizeX = 32;
const int xBlockSizeY = 4;
const dim3 xBlockSize(xBlockSizeX, xBlockSizeY);
const int xResultBlockCount = 8;
const int xResultBlockCount2 = 4;
const int xResultBlockCount3 = 2;
const int yBlockSizeX = 32;
const int yBlockSizeY = 4;
const dim3 yBlockSize(yBlockSizeX, yBlockSizeY);
const int yResultBlockCount = 8;
const int zBlockSizeX = 32;
const int zBlockSizeY = 4;
const dim3 zBlockSize(zBlockSizeX, zBlockSizeY);
const int zResultBlockCount = 8;
bool do3D = (sizeZ > 1);
dim3 xBlockCount(sizeX / (xResultBlockCount * xBlockSizeX), (sizeY + xBlockSizeY - 1) / xBlockSizeY, sizeZ);
if(xBlockCount.x > 0) {
forwardDWT9XKernel
<TIn, channelCountIn, xBlockSizeX, xBlockSizeY, xResultBlockCount>
<<<xBlockCount, xBlockSize, 0, stream>>>
(dpBuffer1, dpSource, sizeX, sizeY, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch);
cudaCheckMsg("forwardDWT9XKernel execution failed");
} else if(sizeX == (xResultBlockCount2 * xBlockSizeX)) {
// special case for sizeX == 128
xBlockCount.x = 1;
forwardDWT9XKernel
<TIn, channelCountIn, xBlockSizeX, xBlockSizeY, xResultBlockCount2>
<<<xBlockCount, xBlockSize, 0, stream>>>
(dpBuffer1, dpSource, sizeX, sizeY, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch);
cudaCheckMsg("forwardDWT9XKernel execution failed");
} else if(sizeX == (xResultBlockCount3 * xBlockSizeX)) {
// special case for sizeX == 64
xBlockCount.x = 1;
forwardDWT9XKernel
<TIn, channelCountIn, xBlockSizeX, xBlockSizeY, xResultBlockCount3>
<<<xBlockCount, xBlockSize, 0, stream>>>
(dpBuffer1, dpSource, sizeX, sizeY, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch);
cudaCheckMsg("forwardDWT9XKernel execution failed");
}
int sizeXdone = xBlockCount.x * xResultBlockCount * xBlockSizeX;
int sizeXrest = sizeX - sizeXdone;
if(sizeXrest > 0) {
dim3 xBlockCountRest(1, xBlockCount.y, xBlockCount.z);
int xResultBlockCountRest = (sizeXrest + xBlockSizeX - 1) / xBlockSizeX;
uint sharedSize = (xBlockSizeY * (xResultBlockCountRest * xBlockSizeX + (FILTER_LENGTH-1))) * sizeof(float);
forwardDWT9XRestKernel
<TIn, channelCountIn, xBlockSizeX, xBlockSizeY>
<<<xBlockCountRest, xBlockSize, sharedSize, stream>>>
(dpBuffer1, dpSource, sizeXdone, sizeX, sizeY, xResultBlockCountRest, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch);
cudaCheckMsg("forwardDWT9XRestKernel execution failed");
}
float* dpDestY = (do3D ? dpBuffer2 : dpDest);
dim3 yBlockCount((sizeX + yBlockSizeX - 1) / yBlockSizeX, sizeY / (yResultBlockCount * yBlockSizeY), sizeZ);
if(yBlockCount.y > 0) {
forwardDWT9YKernel
<yBlockSizeX, yBlockSizeY, yResultBlockCount>
<<<yBlockCount, yBlockSize, 0, stream>>>
(dpDestY, dpBuffer1, sizeX, sizeY, dstRowPitch, dstSlicePitch);
cudaCheckMsg("forwardDWT9YKernel execution failed");
}
int sizeYdone = yBlockCount.y * yResultBlockCount * yBlockSizeY;
int sizeYrest = sizeY - sizeYdone;
if(sizeYrest > 0) {
dim3 yBlockCountRest(yBlockCount.x, 1, yBlockCount.z);
int yResultBlockCountRest = (sizeYrest + yBlockSizeY - 1) / yBlockSizeY;
uint sharedSize = (yBlockSizeX * (yResultBlockCountRest * yBlockSizeY + (FILTER_LENGTH-1) + 1)) * sizeof(float);
forwardDWT9YRestKernel
<yBlockSizeX, yBlockSizeY>
<<<yBlockCountRest, yBlockSize, sharedSize, stream>>>
(dpDestY, dpBuffer1, sizeYdone, sizeX, sizeY, yResultBlockCountRest, dstRowPitch, dstSlicePitch);
cudaCheckMsg("forwardDWT9YRestKernel execution failed");
}
if(do3D) {
dim3 zBlockCount((sizeX + zBlockSizeX - 1) / zBlockSizeX, sizeY, sizeZ / (zResultBlockCount * zBlockSizeY));
if(zBlockCount.z > 0) {
forwardDWT9ZKernel
<zBlockSizeX, zBlockSizeY, zResultBlockCount>
<<<zBlockCount, zBlockSize, 0, stream>>>
(dpDest, dpBuffer2, sizeX, sizeZ, dstRowPitch, dstSlicePitch);
cudaCheckMsg("forwardDWT9ZKernel execution failed");
}
int sizeZdone = zBlockCount.z * zResultBlockCount * zBlockSizeY;
int sizeZrest = sizeZ - sizeZdone;
if(sizeZrest > 0) {
dim3 zBlockCountRest(zBlockCount.x, zBlockCount.y, 1);
int zResultBlockCountRest = (sizeZrest + zBlockSizeY - 1) / zBlockSizeY;
uint sharedSize = (zBlockSizeX * (zResultBlockCountRest * zBlockSizeY + (FILTER_LENGTH-1) + 1)) * sizeof(float);
forwardDWT9ZRestKernel
<zBlockSizeX, zBlockSizeY>
<<<zBlockCountRest, zBlockSize, sharedSize, stream>>>
(dpDest, dpBuffer2, sizeZdone, sizeX, sizeZ, zResultBlockCountRest, dstRowPitch, dstSlicePitch);
cudaCheckMsg("forwardDWT9ZRestKernel execution failed");
}
}
}
template<typename TOut, int channelCountOut>
static void dwtFloatInverse(
TOut* dpDest, float* dpBuffer2, float* dpBuffer1, const float* dpSource,
int sizeX, int sizeY, int sizeZ,
int dstRowPitch, int dstSlicePitch,
int srcRowPitch, int srcSlicePitch,
cudaStream_t stream)
{
const int xBlockSizeX = 32;
const int xBlockSizeY = 4;
const dim3 xBlockSize(xBlockSizeX, xBlockSizeY);
const int xResultBlockCount = 8;
const int xResultBlockCount2 = 4;
const int xResultBlockCount3 = 2;
const int yBlockSizeX = 32;
const int yBlockSizeY = 4;
const dim3 yBlockSize(yBlockSizeX, yBlockSizeY);
const int yResultBlockCount = 8;
const int zBlockSizeX = 32;
const int zBlockSizeY = 4;
const dim3 zBlockSize(zBlockSizeX, zBlockSizeY);
const int zResultBlockCount = 8;
bool do3D = (sizeZ > 1);
if(do3D) {
dim3 zBlockCount(((sizeX + zBlockSizeX - 1) / zBlockSizeX), sizeY, sizeZ / (zResultBlockCount * zBlockSizeY));
if(zBlockCount.z > 0) {
inverseDWT9ZKernel
<zBlockSizeX, zBlockSizeY, zResultBlockCount>
<<<zBlockCount, zBlockSize, 0, stream>>>
(dpBuffer2, dpSource, sizeX, sizeZ, srcRowPitch, srcSlicePitch);
cudaCheckMsg("inverseDWT9ZKernel execution failed");
}
int sizeZdone = zBlockCount.z * zResultBlockCount * zBlockSizeY;
int sizeZrest = sizeZ - sizeZdone;
if(sizeZrest > 0) {
dim3 zBlockCountRest(zBlockCount.x, zBlockCount.y, 1);
int zResultBlockCountRest = (sizeZrest + zBlockSizeY - 1) / zBlockSizeY;
uint sharedSize = (zBlockSizeX * (zResultBlockCountRest * zBlockSizeY + (FILTER_LENGTH-1) + 1)) * sizeof(float);
inverseDWT9ZRestKernel
<zBlockSizeX, zBlockSizeY>
<<<zBlockCountRest, zBlockSize, sharedSize, stream>>>
(dpBuffer2, dpSource, sizeZdone, sizeX, sizeZ, zResultBlockCountRest, srcRowPitch, srcSlicePitch);
cudaCheckMsg("inverseDWT9ZRestKernel execution failed");
}
}
const float* dpSourceY = (do3D ? dpBuffer2 : dpSource);
dim3 yBlockCount((sizeX + yBlockSizeX - 1) / yBlockSizeX, sizeY / (yResultBlockCount * yBlockSizeY), sizeZ);
if(yBlockCount.y > 0) {
inverseDWT9YKernel
<yBlockSizeX, yBlockSizeY, yResultBlockCount>
<<<yBlockCount, yBlockSize, 0, stream>>>
(dpBuffer1, dpSourceY, sizeX, sizeY, srcRowPitch, srcSlicePitch);
cudaCheckMsg("inverseDWT9YKernel execution failed");
}
int sizeYdone = yBlockCount.y * yResultBlockCount * yBlockSizeY;
int sizeYrest = sizeY - sizeYdone;
if(sizeYrest > 0) {
dim3 yBlockCountRest(yBlockCount.x, 1, yBlockCount.z);
int yResultBlockCountRest = (sizeYrest + yBlockSizeY - 1) / yBlockSizeY;
uint sharedSize = (yBlockSizeX * (yResultBlockCountRest * yBlockSizeY + (FILTER_LENGTH-1) + 1)) * sizeof(float);
inverseDWT9YRestKernel
<yBlockSizeX, yBlockSizeY>
<<<yBlockCountRest, yBlockSize, sharedSize, stream>>>
(dpBuffer1, dpSourceY, sizeYdone, sizeX, sizeY, yResultBlockCountRest, srcRowPitch, srcSlicePitch);
cudaCheckMsg("inverseDWT9YRestKernel execution failed");
}
dim3 xBlockCount(sizeX / (xResultBlockCount * xBlockSizeX), ((sizeY + xBlockSizeY - 1) / xBlockSizeY), sizeZ);
if(xBlockCount.x > 0) {
inverseDWT9XKernel
<TOut, channelCountOut, xBlockSizeX, xBlockSizeY, xResultBlockCount>
<<<xBlockCount, xBlockSize, 0, stream>>>
(dpDest, dpBuffer1, sizeX, sizeY, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch);
cudaCheckMsg("inverseDWT9XKernel execution failed");
} else if(sizeX == (xResultBlockCount2 * xBlockSizeX)) {
// special case for sizeX == 128
xBlockCount.x = 1;
inverseDWT9XKernel
<TOut, channelCountOut, xBlockSizeX, xBlockSizeY, xResultBlockCount2>
<<<xBlockCount, xBlockSize, 0, stream>>>
(dpDest, dpBuffer1, sizeX, sizeY, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch);
cudaCheckMsg("inverseDWT9XKernel execution failed");
} else if(sizeX == (xResultBlockCount3 * xBlockSizeX)) {
// special case for sizeX == 64
xBlockCount.x = 1;
inverseDWT9XKernel
<TOut, channelCountOut, xBlockSizeX, xBlockSizeY, xResultBlockCount3>
<<<xBlockCount, xBlockSize, 0, stream>>>
(dpDest, dpBuffer1, sizeX, sizeY, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch);
cudaCheckMsg("inverseDWT9XKernel execution failed");
}
int sizeXdone = xBlockCount.x * xResultBlockCount * xBlockSizeX;
int sizeXrest = sizeX - sizeXdone;
if(sizeXrest > 0) {
dim3 xBlockCountRest(1, xBlockCount.y, xBlockCount.z);
int xResultBlockCountRest = (sizeXrest + xBlockSizeX - 1) / xBlockSizeX;
uint sharedSize = (xBlockSizeY * (xResultBlockCountRest * xBlockSizeX + (FILTER_LENGTH-1))) * sizeof(float);
inverseDWT9XRestKernel
<TOut, channelCountOut, xBlockSizeX, xBlockSizeY>
<<<xBlockCountRest, xBlockSize, sharedSize, stream>>>
(dpDest, dpBuffer1, sizeXdone, sizeX, sizeY, xResultBlockCountRest, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch);
cudaCheckMsg("inverseDWT9XRestKernel execution failed");
}
}
template<typename TOut, typename THigh, int channelCountOut>
static void dwtFloatInverseFromSymbols(
TOut* dpDest, float* dpBuffer2, float* dpBuffer1, const float* dpLowpass,
const THigh*const* dppHighpass, float quantStep,
int sizeX, int sizeY, int sizeZ,
int dstRowPitch, int dstSlicePitch,
int lowpassRowPitch, int lowpassSlicePitch,
cudaStream_t stream)
{
const int xBlockSizeX = 32;
const int xBlockSizeY = 4;
const dim3 xBlockSize(xBlockSizeX, xBlockSizeY);
const int xResultBlockCount = 8;
const int xResultBlockCount2 = 4;
const int xResultBlockCount3 = 2;
const int yBlockSizeX = 32;
const int yBlockSizeY = 4;
const dim3 yBlockSize(yBlockSizeX, yBlockSizeY);
const int yResultBlockCount = 8;
const int zBlockSizeX = 32;
const int zBlockSizeY = 4;
const dim3 zBlockSize(zBlockSizeX, zBlockSizeY);
const int zResultBlockCount = 8;
bool do3D = (sizeZ > 1);
int bufferRowPitch = sizeX;
int bufferSlicePitch = bufferRowPitch * sizeY;
if(do3D) {
dim3 zBlockCount(((sizeX + zBlockSizeX - 1) / zBlockSizeX), sizeY, sizeZ / (zResultBlockCount * zBlockSizeY));
if(zBlockCount.z > 0) {
inverseDWT9ZFromSymbolsKernel
<THigh, zBlockSizeX, zBlockSizeY, zResultBlockCount>
<<<zBlockCount, zBlockSize, 0, stream>>>
(dpBuffer2, dpLowpass, dppHighpass, quantStep, sizeX, sizeY, sizeZ, bufferRowPitch, bufferSlicePitch, lowpassRowPitch, lowpassSlicePitch);
cudaCheckMsg("inverseDWT9ZFromSymbolsKernel execution failed");
}
int sizeZdone = zBlockCount.z * zResultBlockCount * zBlockSizeY;
int sizeZrest = sizeZ - sizeZdone;
if(sizeZrest > 0) {
dim3 zBlockCountRest(zBlockCount.x, zBlockCount.y, 1);
int zResultBlockCountRest = (sizeZrest + zBlockSizeY - 1) / zBlockSizeY;
uint sharedSize = (zBlockSizeX * (zResultBlockCountRest * zBlockSizeY + (FILTER_LENGTH-1) + 1)) * sizeof(float);
inverseDWT9ZFromSymbolsRestKernel
<THigh, zBlockSizeX, zBlockSizeY>
<<<zBlockCountRest, zBlockSize, sharedSize, stream>>>
(dpBuffer2, dpLowpass, dppHighpass, quantStep, sizeZdone, sizeX, sizeY, sizeZ, zResultBlockCountRest, bufferRowPitch, bufferSlicePitch, lowpassRowPitch, lowpassSlicePitch);
cudaCheckMsg("inverseDWT9ZFromSymbolsRestKernel execution failed");
}
}
dim3 yBlockCount((sizeX + yBlockSizeX - 1) / yBlockSizeX, sizeY / (yResultBlockCount * yBlockSizeY), sizeZ);
if(yBlockCount.y > 0) {
if(do3D) {
inverseDWT9YKernel
<yBlockSizeX, yBlockSizeY, yResultBlockCount>
<<<yBlockCount, yBlockSize, 0, stream>>>
(dpBuffer1, dpBuffer2, sizeX, sizeY, bufferRowPitch, bufferSlicePitch);
cudaCheckMsg("inverseDWT9YKernel execution failed");
} else {
inverseDWT9YFromSymbolsKernel
<THigh, yBlockSizeX, yBlockSizeY, yResultBlockCount>
<<<yBlockCount, yBlockSize, 0, stream>>>
(dpBuffer1, dpLowpass, dppHighpass, quantStep, sizeX, sizeY, bufferRowPitch, lowpassRowPitch);
cudaCheckMsg("inverseDWT9YFromSymbolsKernel execution failed");
}
}
int sizeYdone = yBlockCount.y * yResultBlockCount * yBlockSizeY;
int sizeYrest = sizeY - sizeYdone;
if(sizeYrest > 0) {
dim3 yBlockCountRest(yBlockCount.x, 1, yBlockCount.z);
int yResultBlockCountRest = (sizeYrest + yBlockSizeY - 1) / yBlockSizeY;
uint sharedSize = (yBlockSizeX * (yResultBlockCountRest * yBlockSizeY + (FILTER_LENGTH-1) + 1)) * sizeof(float);
if(do3D) {
inverseDWT9YRestKernel
<yBlockSizeX, yBlockSizeY>
<<<yBlockCountRest, yBlockSize, sharedSize, stream>>>
(dpBuffer1, dpBuffer2, sizeYdone, sizeX, sizeY, yResultBlockCountRest, bufferRowPitch, bufferSlicePitch);
cudaCheckMsg("inverseDWT9YRestKernel execution failed");
} else {
inverseDWT9YFromSymbolsRestKernel
<THigh, yBlockSizeX, yBlockSizeY>
<<<yBlockCountRest, yBlockSize, sharedSize, stream>>>
(dpBuffer1, dpLowpass, dppHighpass, quantStep, sizeYdone, sizeX, sizeY, yResultBlockCountRest, bufferRowPitch, lowpassRowPitch);
cudaCheckMsg("inverseDWT9YFromSymbolsRestKernel execution failed");
}
}
dim3 xBlockCount(sizeX / (xResultBlockCount * xBlockSizeX), ((sizeY + xBlockSizeY - 1) / xBlockSizeY), sizeZ);
if(xBlockCount.x > 0) {
inverseDWT9XKernel
<TOut, channelCountOut, xBlockSizeX, xBlockSizeY, xResultBlockCount>
<<<xBlockCount, xBlockSize, 0, stream>>>
(dpDest, dpBuffer1, sizeX, sizeY, dstRowPitch, dstSlicePitch, bufferRowPitch, bufferSlicePitch);
cudaCheckMsg("inverseDWT9XKernel execution failed");
} else if(sizeX == (xResultBlockCount2 * xBlockSizeX)) {
// special case for sizeX == 128
xBlockCount.x = 1;
inverseDWT9XKernel
<TOut, channelCountOut, xBlockSizeX, xBlockSizeY, xResultBlockCount2>
<<<xBlockCount, xBlockSize, 0, stream>>>
(dpDest, dpBuffer1, sizeX, sizeY, dstRowPitch, dstSlicePitch, bufferRowPitch, bufferSlicePitch);
cudaCheckMsg("inverseDWT9XKernel execution failed");
} else if(sizeX == (xResultBlockCount3 * xBlockSizeX)) {
// special case for sizeX == 64
xBlockCount.x = 1;
inverseDWT9XKernel
<TOut, channelCountOut, xBlockSizeX, xBlockSizeY, xResultBlockCount3>
<<<xBlockCount, xBlockSize, 0, stream>>>
(dpDest, dpBuffer1, sizeX, sizeY, dstRowPitch, dstSlicePitch, bufferRowPitch, bufferSlicePitch);
cudaCheckMsg("inverseDWT9XKernel execution failed");
}
int sizeXdone = xBlockCount.x * xResultBlockCount * xBlockSizeX;
int sizeXrest = sizeX - sizeXdone;
if(sizeXrest > 0) {
dim3 xBlockCountRest(1, xBlockCount.y, xBlockCount.z);
int xResultBlockCountRest = (sizeXrest + xBlockSizeX - 1) / xBlockSizeX;
uint sharedSize = (xBlockSizeY * (xResultBlockCountRest * xBlockSizeX + (FILTER_LENGTH-1))) * sizeof(float);
inverseDWT9XRestKernel
<TOut, channelCountOut, xBlockSizeX, xBlockSizeY>
<<<xBlockCountRest, xBlockSize, sharedSize, stream>>>
(dpDest, dpBuffer1, sizeXdone, sizeX, sizeY, xResultBlockCountRest, dstRowPitch, dstSlicePitch, bufferRowPitch, bufferSlicePitch);
cudaCheckMsg("inverseDWT9XRestKernel execution failed");
}
}
template<typename TIn>
static void dwtFloatForward(
float* dpDest, float* dpBuffer2, float* dpBuffer1, const TIn* dpSource,
int sizeX, int sizeY, int sizeZ, int srcChannelCount,
int dstRowPitch, int dstSlicePitch,
int srcRowPitch, int srcSlicePitch,
cudaStream_t stream)
{
if(dstRowPitch <= 0) dstRowPitch = sizeX;
if(dstSlicePitch <= 0) dstSlicePitch = sizeY * dstRowPitch;
if(srcRowPitch <= 0) srcRowPitch = sizeX * srcChannelCount;
if(srcSlicePitch <= 0) srcSlicePitch = sizeY * srcRowPitch;
switch(srcChannelCount) {
case 1: dwtFloatForward<TIn, 1>(dpDest, dpBuffer2, dpBuffer1, dpSource, sizeX, sizeY, sizeZ, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch, stream); break;
case 2: dwtFloatForward<TIn, 2>(dpDest, dpBuffer2, dpBuffer1, dpSource, sizeX, sizeY, sizeZ, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch, stream); break;
case 3: dwtFloatForward<TIn, 3>(dpDest, dpBuffer2, dpBuffer1, dpSource, sizeX, sizeY, sizeZ, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch, stream); break;
case 4: dwtFloatForward<TIn, 4>(dpDest, dpBuffer2, dpBuffer1, dpSource, sizeX, sizeY, sizeZ, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch, stream); break;
default: assert(false);
}
}
template<typename TOut>
static void dwtFloatInverse(
TOut* dpDest, float* dpBuffer2, float* dpBuffer1, const float* dpSource,
int sizeX, int sizeY, int sizeZ, int dstChannelCount,
int dstRowPitch, int dstSlicePitch,
int srcRowPitch, int srcSlicePitch,
cudaStream_t stream)
{
if(dstRowPitch <= 0) dstRowPitch = sizeX * dstChannelCount;
if(dstSlicePitch <= 0) dstSlicePitch = sizeY * dstRowPitch;
if(srcRowPitch <= 0) srcRowPitch = sizeX;
if(srcSlicePitch <= 0) srcSlicePitch = sizeY * srcRowPitch;
switch(dstChannelCount) {
case 1: dwtFloatInverse<TOut, 1>(dpDest, dpBuffer2, dpBuffer1, dpSource, sizeX, sizeY, sizeZ, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch, stream); break;
case 2: dwtFloatInverse<TOut, 2>(dpDest, dpBuffer2, dpBuffer1, dpSource, sizeX, sizeY, sizeZ, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch, stream); break;
case 3: dwtFloatInverse<TOut, 3>(dpDest, dpBuffer2, dpBuffer1, dpSource, sizeX, sizeY, sizeZ, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch, stream); break;
case 4: dwtFloatInverse<TOut, 4>(dpDest, dpBuffer2, dpBuffer1, dpSource, sizeX, sizeY, sizeZ, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch, stream); break;
default: assert(false);
}
}
template<typename TOut, typename THigh>
void dwtFloatInverseFromSymbols(
TOut* dpDest, float* dpBuffer2, float* dpBuffer1, const float* dpLowpass, const THigh*const* dppHighpass, float quantStep,
int sizeX, int sizeY, int sizeZ, int dstChannelCount,
int dstRowPitch, int dstSlicePitch,
int lowpassRowPitch, int lowpassSlicePitch,
cudaStream_t stream)
{
if(dstRowPitch <= 0) dstRowPitch = sizeX * dstChannelCount;
if(dstSlicePitch <= 0) dstSlicePitch = sizeY * dstRowPitch;
if(lowpassRowPitch <= 0) lowpassRowPitch = sizeX;
if(lowpassSlicePitch <= 0) lowpassSlicePitch = sizeY * lowpassRowPitch;
switch(dstChannelCount) {
case 1: dwtFloatInverseFromSymbols<TOut, THigh, 1>(dpDest, dpBuffer2, dpBuffer1, dpLowpass, dppHighpass, quantStep, sizeX, sizeY, sizeZ, dstRowPitch, dstSlicePitch, lowpassRowPitch, lowpassSlicePitch, stream); break;
case 2: dwtFloatInverseFromSymbols<TOut, THigh, 2>(dpDest, dpBuffer2, dpBuffer1, dpLowpass, dppHighpass, quantStep, sizeX, sizeY, sizeZ, dstRowPitch, dstSlicePitch, lowpassRowPitch, lowpassSlicePitch, stream); break;
case 3: dwtFloatInverseFromSymbols<TOut, THigh, 3>(dpDest, dpBuffer2, dpBuffer1, dpLowpass, dppHighpass, quantStep, sizeX, sizeY, sizeZ, dstRowPitch, dstSlicePitch, lowpassRowPitch, lowpassSlicePitch, stream); break;
case 4: dwtFloatInverseFromSymbols<TOut, THigh, 4>(dpDest, dpBuffer2, dpBuffer1, dpLowpass, dppHighpass, quantStep, sizeX, sizeY, sizeZ, dstRowPitch, dstSlicePitch, lowpassRowPitch, lowpassSlicePitch, stream); break;
default: assert(false);
}
}
template<typename T>
static void dwtFloatForwardLowpassOnly(
float* dpDest, float* dpBuffer, const T* dpSource,
int sizeX, int sizeY, int srcChannelCount,
int dstRowPitch,
int srcRowPitch,
cudaStream_t stream)
{
if(dstRowPitch <= 0) dstRowPitch = sizeX / 2;
if(srcRowPitch <= 0) srcRowPitch = sizeX * srcChannelCount;
switch(srcChannelCount) {
case 1: dwtFloatForwardLowpassOnly2D<T, 1>(dpDest, dpBuffer, dpSource, sizeX, sizeY, dstRowPitch, srcRowPitch, stream); break;
case 2: dwtFloatForwardLowpassOnly2D<T, 2>(dpDest, dpBuffer, dpSource, sizeX, sizeY, dstRowPitch, srcRowPitch, stream); break;
case 3: dwtFloatForwardLowpassOnly2D<T, 3>(dpDest, dpBuffer, dpSource, sizeX, sizeY, dstRowPitch, srcRowPitch, stream); break;
case 4: dwtFloatForwardLowpassOnly2D<T, 4>(dpDest, dpBuffer, dpSource, sizeX, sizeY, dstRowPitch, srcRowPitch, stream); break;
default: assert(false);
}
}
void dwtFloat2DForward(
float* dpDest, float* dpBuffer, const float* dpSource,
int sizeX, int sizeY, int srcChannelCount,
int dstRowPitch,
int srcRowPitch,
cudaStream_t stream)
{
dwtFloatForward<float>(dpDest, nullptr, dpBuffer, dpSource, sizeX, sizeY, 1, srcChannelCount, dstRowPitch, 0, srcRowPitch, 0, stream);
}
void dwtFloat3DForward(
float* dpDest, float* dpBuffer2, float* dpBuffer1, const float* dpSource,
int sizeX, int sizeY, int sizeZ, int srcChannelCount,
int dstRowPitch, int dstSlicePitch,
int srcRowPitch, int srcSlicePitch,
cudaStream_t stream)
{
dwtFloatForward<float>(dpDest, dpBuffer2, dpBuffer1, dpSource, sizeX, sizeY, sizeZ, srcChannelCount, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch, stream);
}
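// Illustrative usage sketch (not part of the original source): a 3D forward
// transform of a single-channel float volume from dpSource into dpDest.
// Passing 0 for the pitches lets the dwtFloatForward dispatcher above
// substitute tightly packed row/slice pitches.
//
//   dwtFloat3DForward(dpDest, dpBuffer2, dpBuffer1, dpSource,
//                     sizeX, sizeY, sizeZ, /*srcChannelCount=*/1,
//                     /*dstRowPitch=*/0, /*dstSlicePitch=*/0,
//                     /*srcRowPitch=*/0, /*srcSlicePitch=*/0, stream);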
void dwtFloat2DInverse(
float* dpDest, float* dpBuffer, const float* dpSource,
int sizeX, int sizeY, int dstChannelCount,
int dstRowPitch,
int srcRowPitch,
cudaStream_t stream)
{
dwtFloatInverse<float>(dpDest, nullptr, dpBuffer, dpSource, sizeX, sizeY, 1, dstChannelCount, dstRowPitch, 0, srcRowPitch, 0, stream);
}
void dwtFloat3DInverse(
float* dpDest, float* dpBuffer2, float* dpBuffer1, const float* dpSource,
int sizeX, int sizeY, int sizeZ, int dstChannelCount,
int dstRowPitch, int dstSlicePitch,
int srcRowPitch, int srcSlicePitch,
cudaStream_t stream)
{
dwtFloatInverse<float>(dpDest, dpBuffer2, dpBuffer1, dpSource, sizeX, sizeY, sizeZ, dstChannelCount, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch, stream);
}
void dwtFloat2DInverseFromSymbols(
float* dpDest, float* dpBuffer,
const float* dpLowpass, const ushort*const* dppHighpass, float quantStep,
int sizeX, int sizeY, int dstChannelCount,
int dstRowPitch,
int lowpassRowPitch,
cudaStream_t stream)
{
dwtFloatInverseFromSymbols<float, ushort>(dpDest, nullptr, dpBuffer, dpLowpass, dppHighpass, quantStep, sizeX, sizeY, 1, dstChannelCount, dstRowPitch, 0, lowpassRowPitch, 0, stream);
}
void dwtFloat3DInverseFromSymbols(
float* dpDest, float* dpBuffer2, float* dpBuffer1,
const float* dpLowpass, const ushort*const* dppHighpass, float quantStep,
int sizeX, int sizeY, int sizeZ, int dstChannelCount,
int dstRowPitch, int dstSlicePitch,
int lowpassRowPitch, int lowpassSlicePitch,
cudaStream_t stream)
{
dwtFloatInverseFromSymbols<float, ushort>(dpDest, dpBuffer2, dpBuffer1, dpLowpass, dppHighpass, quantStep, sizeX, sizeY, sizeZ, dstChannelCount, dstRowPitch, dstSlicePitch, lowpassRowPitch, lowpassSlicePitch, stream);
}
void dwtFloat2DInverseFromSymbols(
float* dpDest, float* dpBuffer,
const float* dpLowpass, const uint*const* dppHighpass, float quantStep,
int sizeX, int sizeY, int dstChannelCount,
int dstRowPitch,
int lowpassRowPitch,
cudaStream_t stream)
{
dwtFloatInverseFromSymbols<float, uint>(dpDest, nullptr, dpBuffer, dpLowpass, dppHighpass, quantStep, sizeX, sizeY, 1, dstChannelCount, dstRowPitch, 0, lowpassRowPitch, 0, stream);
}
void dwtFloat3DInverseFromSymbols(
float* dpDest, float* dpBuffer2, float* dpBuffer1,
const float* dpLowpass, const uint*const* dppHighpass, float quantStep,
int sizeX, int sizeY, int sizeZ, int dstChannelCount,
int dstRowPitch, int dstSlicePitch,
int lowpassRowPitch, int lowpassSlicePitch,
cudaStream_t stream)
{
dwtFloatInverseFromSymbols<float, uint>(dpDest, dpBuffer2, dpBuffer1, dpLowpass, dppHighpass, quantStep, sizeX, sizeY, sizeZ, dstChannelCount, dstRowPitch, dstSlicePitch, lowpassRowPitch, lowpassSlicePitch, stream);
}
void dwtFloat2DForwardFromByte(
float* dpDest, float* dpBuffer, const byte* dpSource,
int sizeX, int sizeY, int srcChannelCount,
int dstRowPitch,
int srcRowPitch,
cudaStream_t stream)
{
dwtFloatForward<byte>(dpDest, nullptr, dpBuffer, dpSource, sizeX, sizeY, 1, srcChannelCount, dstRowPitch, 0, srcRowPitch, 0, stream);
}
void dwtFloat3DForwardFromByte(
float* dpDest, float* dpBuffer2, float* dpBuffer1, const byte* dpSource,
int sizeX, int sizeY, int sizeZ, int srcChannelCount,
int dstRowPitch, int dstSlicePitch,
int srcRowPitch, int srcSlicePitch,
cudaStream_t stream)
{
dwtFloatForward<byte>(dpDest, dpBuffer2, dpBuffer1, dpSource, sizeX, sizeY, sizeZ, srcChannelCount, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch, stream);
}
void dwtFloat2DInverseToByte(
byte* dpDest, float* dpBuffer, const float* dpSource,
int sizeX, int sizeY, int dstChannelCount,
int dstRowPitch,
int srcRowPitch,
cudaStream_t stream)
{
dwtFloatInverse<byte>(dpDest, nullptr, dpBuffer, dpSource, sizeX, sizeY, 1, dstChannelCount, dstRowPitch, 0, srcRowPitch, 0, stream);
}
void dwtFloat3DInverseToByte(
byte* dpDest, float* dpBuffer2, float* dpBuffer1, const float* dpSource,
int sizeX, int sizeY, int sizeZ, int dstChannelCount,
int dstRowPitch, int dstSlicePitch,
int srcRowPitch, int srcSlicePitch,
cudaStream_t stream)
{
dwtFloatInverse<byte>(dpDest, dpBuffer2, dpBuffer1, dpSource, sizeX, sizeY, sizeZ, dstChannelCount, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch, stream);
}
void dwtFloat2DForwardFromUshort(
float* dpDest, float* dpBuffer, const ushort* dpSource,
int sizeX, int sizeY, int srcChannelCount,
int dstRowPitch,
int srcRowPitch,
cudaStream_t stream)
{
dwtFloatForward<ushort>(dpDest, nullptr, dpBuffer, dpSource, sizeX, sizeY, 1, srcChannelCount, dstRowPitch, 0, srcRowPitch, 0, stream);
}
void dwtFloat3DForwardFromUshort(
float* dpDest, float* dpBuffer2, float* dpBuffer1, const ushort* dpSource,
int sizeX, int sizeY, int sizeZ, int srcChannelCount,
int dstRowPitch, int dstSlicePitch,
int srcRowPitch, int srcSlicePitch,
cudaStream_t stream)
{
dwtFloatForward<ushort>(dpDest, dpBuffer2, dpBuffer1, dpSource, sizeX, sizeY, sizeZ, srcChannelCount, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch, stream);
}
void dwtFloat2DInverseToUshort(
ushort* dpDest, float* dpBuffer, const float* dpSource,
int sizeX, int sizeY, int dstChannelCount,
int dstRowPitch,
int srcRowPitch,
cudaStream_t stream)
{
dwtFloatInverse<ushort>(dpDest, nullptr, dpBuffer, dpSource, sizeX, sizeY, 1, dstChannelCount, dstRowPitch, 0, srcRowPitch, 0, stream);
}
void dwtFloat3DInverseToUshort(
ushort* dpDest, float* dpBuffer2, float* dpBuffer1, const float* dpSource,
int sizeX, int sizeY, int sizeZ, int dstChannelCount,
int dstRowPitch, int dstSlicePitch,
int srcRowPitch, int srcSlicePitch,
cudaStream_t stream)
{
dwtFloatInverse<ushort>(dpDest, dpBuffer2, dpBuffer1, dpSource, sizeX, sizeY, sizeZ, dstChannelCount, dstRowPitch, dstSlicePitch, srcRowPitch, srcSlicePitch, stream);
}
void dwtFloat2DInverseFromSymbolsToByte(
byte* dpDest, float* dpBuffer,
const float* dpLowpass, const ushort*const* dppHighpass, float quantStep,
int sizeX, int sizeY, int dstChannelCount,
int dstRowPitch,
int lowpassRowPitch,
cudaStream_t stream)
{
dwtFloatInverseFromSymbols<byte>(dpDest, nullptr, dpBuffer, dpLowpass, dppHighpass, quantStep, sizeX, sizeY, 1, dstChannelCount, dstRowPitch, 0, lowpassRowPitch, 0, stream);
}
void dwtFloat3DInverseFromSymbolsToByte(
byte* dpDest, float* dpBuffer2, float* dpBuffer1,
const float* dpLowpass, const ushort*const* dppHighpass, float quantStep,
int sizeX, int sizeY, int sizeZ, int dstChannelCount,
int dstRowPitch, int dstSlicePitch,
int lowpassRowPitch, int lowpassSlicePitch,
cudaStream_t stream)
{
dwtFloatInverseFromSymbols<byte>(dpDest, dpBuffer2, dpBuffer1, dpLowpass, dppHighpass, quantStep, sizeX, sizeY, sizeZ, dstChannelCount, dstRowPitch, dstSlicePitch, lowpassRowPitch, lowpassSlicePitch, stream);
}
void dwtFloat2DForwardLowpassOnlyFromByte(
float* dpDest, float* dpBuffer, const byte* dpSource,
int sizeX, int sizeY, int srcChannelCount,
int dstRowPitch,
int srcRowPitch,
cudaStream_t stream)
{
dwtFloatForwardLowpassOnly<byte>(dpDest, dpBuffer, dpSource, sizeX, sizeY, srcChannelCount, dstRowPitch, srcRowPitch, stream);
}
}
}
|
a8ccb301faf2c648c163e3a591993e443d2d18b4.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "find_all_sums_hub_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *hub = NULL;
hipMalloc(&hub, XSIZE*YSIZE);
int nhub = 1;
double *node_weight = NULL;
hipMalloc(&node_weight, XSIZE*YSIZE);
int *neighbor = NULL;
hipMalloc(&neighbor, XSIZE*YSIZE);
int *neighbor_start = NULL;
hipMalloc(&neighbor_start, XSIZE*YSIZE);
double *neighbor_accum_weight_result = NULL;
hipMalloc(&neighbor_accum_weight_result, XSIZE*YSIZE);
double *sum_weight_result = NULL;
hipMalloc(&sum_weight_result, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
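// (Clarifying note, not in the original source: the two loops above round the
//  sizes up to the next multiple of the block size, i.e. the closed form
//  iXSIZE = ((XSIZE + BLOCKX - 1) / BLOCKX) * BLOCKX, so the grid below covers
//  the matrix with whole blocks.)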
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((find_all_sums_hub_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, hub, nhub, node_weight, neighbor, neighbor_start, neighbor_accum_weight_result, sum_weight_result);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((find_all_sums_hub_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, hub, nhub, node_weight, neighbor, neighbor_start, neighbor_accum_weight_result, sum_weight_result);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((find_all_sums_hub_kernel), dim3(gridBlock), dim3(threadBlock), 0, 0, hub, nhub, node_weight, neighbor, neighbor_start, neighbor_accum_weight_result, sum_weight_result);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
a8ccb301faf2c648c163e3a591993e443d2d18b4.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "find_all_sums_hub_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *hub = NULL;
cudaMalloc(&hub, XSIZE*YSIZE);
int nhub = 1;
double *node_weight = NULL;
cudaMalloc(&node_weight, XSIZE*YSIZE);
int *neighbor = NULL;
cudaMalloc(&neighbor, XSIZE*YSIZE);
int *neighbor_start = NULL;
cudaMalloc(&neighbor_start, XSIZE*YSIZE);
double *neighbor_accum_weight_result = NULL;
cudaMalloc(&neighbor_accum_weight_result, XSIZE*YSIZE);
double *sum_weight_result = NULL;
cudaMalloc(&sum_weight_result, XSIZE*YSIZE);
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
find_all_sums_hub_kernel<<<gridBlock,threadBlock>>>(hub,nhub,node_weight,neighbor,neighbor_start,neighbor_accum_weight_result,sum_weight_result);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
find_all_sums_hub_kernel<<<gridBlock,threadBlock>>>(hub,nhub,node_weight,neighbor,neighbor_start,neighbor_accum_weight_result,sum_weight_result);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
find_all_sums_hub_kernel<<<gridBlock,threadBlock>>>(hub,nhub,node_weight,neighbor,neighbor_start,neighbor_accum_weight_result,sum_weight_result);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
4052c41067c83085685b52ad81facf6840791844.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@author Mark Gates
@author Azzam Haidar
@precisions normal z -> s d c
*/
#include "common_magma.h"
// To deal with really large matrices, this launches multiple super blocks,
// each with up to 64K-1 x 64K-1 thread blocks, which is up to 4194240 x 4194240 matrix with BLK=64.
// CUDA architecture 2.0 limits each grid dimension to 64K-1.
// Instances arose for vectors used by sparse matrices with M > 4194240, though N is small.
const magma_int_t max_blocks = 65535;
// BLK_X and BLK_Y need to be equal for zlaset_q to deal with diag & offdiag
// when looping over super blocks.
// Formerly, BLK_X and BLK_Y could be different.
#define BLK_X 64
#define BLK_Y BLK_X
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to zlaset, zlacpy, zlag2c, clag2z, zgeadd.
*/
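// Worked example (illustrative, not from the original source): for m = 100,
// n = 200 with BLK_X = BLK_Y = 64, the grid is ceil(100/64) x ceil(200/64)
// = 2 x 4 blocks of 64 threads each; thread `ind` of a block copies up to
// BLK_Y = 64 consecutive column entries of row `ind`.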
static __device__
void zlacpy_full_device(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/*
Similar to zlacpy_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to zlaset, zlacpy, zlat2c, clat2z.
*/
static __device__
void zlacpy_lower_device(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m && ind + BLK_X > iby ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/*
Similar to zlacpy_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to zlaset, zlacpy, zlat2c, clat2z.
*/
static __device__
void zlacpy_upper_device(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( ind <= iby+j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
}
//////////////////////////////////////////////////////////////////////////////////////
/*
kernel wrappers to call the device functions.
*/
__global__
void zlacpy_full_kernel(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
zlacpy_full_device(m, n, dA, ldda, dB, lddb);
}
__global__
void zlacpy_lower_kernel(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
zlacpy_lower_device(m, n, dA, ldda, dB, lddb);
}
__global__
void zlacpy_upper_kernel(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
zlacpy_upper_device(m, n, dA, ldda, dB, lddb);
}
//////////////////////////////////////////////////////////////////////////////////////
/*
kernel wrappers to call the device functions for the batched routine.
*/
__global__
void zlacpy_full_kernel_batched(
int m, int n,
magmaDoubleComplex const * const *dAarray, int ldda,
magmaDoubleComplex **dBarray, int lddb )
{
int batchid = blockIdx.z;
zlacpy_full_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
__global__
void zlacpy_lower_kernel_batched(
int m, int n,
magmaDoubleComplex const * const *dAarray, int ldda,
magmaDoubleComplex **dBarray, int lddb )
{
int batchid = blockIdx.z;
zlacpy_lower_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
__global__
void zlacpy_upper_kernel_batched(
int m, int n,
magmaDoubleComplex const * const *dAarray, int ldda,
magmaDoubleComplex **dBarray, int lddb )
{
int batchid = blockIdx.z;
zlacpy_upper_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
//////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
ZLACPY_Q copies all or part of a two-dimensional matrix dA to another
matrix dB.
This is the same as ZLACPY, but adds queue argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
- = MagmaFull: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
dA COMPLEX_16 array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[out]
dB COMPLEX_16 array, dimension (LDDB,N)
The M-by-N matrix dB.
On exit, dB = dA in the locations specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlacpy_q(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
magmaDoubleComplex_ptr dB, magma_int_t lddb,
magma_queue_t queue )
{
#define dA(i_, j_) (dA + (i_) + (j_)*ldda)
#define dB(i_, j_) (dB + (i_) + (j_)*lddb)
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
assert( BLK_X == BLK_Y );
const magma_int_t super_NB = max_blocks*BLK_X;
dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) );
dim3 threads( BLK_X, 1 );
dim3 grid;
magma_int_t mm, nn;
if ( uplo == MagmaLower ) {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=0; j < super_grid.y && j <= i; ++j ) { // from left to diagonal
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
hipLaunchKernelGGL(( zlacpy_lower_kernel), dim3(grid), dim3(threads), 0, queue ,
mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
else { // off diagonal super block
hipLaunchKernelGGL(( zlacpy_full_kernel) , dim3(grid), dim3(threads), 0, queue ,
mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
else if ( uplo == MagmaUpper ) {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=i; j < super_grid.y; ++j ) { // from diagonal to right
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
hipLaunchKernelGGL(( zlacpy_upper_kernel), dim3(grid), dim3(threads), 0, queue ,
mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
else { // off diagonal super block
hipLaunchKernelGGL(( zlacpy_full_kernel) , dim3(grid), dim3(threads), 0, queue ,
mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
else {
// TODO: use hipMemcpy or hipMemcpy2D ?
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=0; j < super_grid.y; ++j ) { // full row
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
hipLaunchKernelGGL(( zlacpy_full_kernel) , dim3(grid), dim3(threads), 0, queue ,
mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
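// Illustrative usage sketch (not part of the original source): copy only the
// lower triangle of an m-by-n device matrix dA into dB on a caller-supplied
// queue, assuming both matrices are already on the device with leading
// dimensions ldda, lddb >= max(1,m):
//
//     magmablas_zlacpy_q( MagmaLower, m, n, dA, ldda, dB, lddb, queue );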
/**
@see magmablas_zlacpy_q
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlacpy(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
magmaDoubleComplex_ptr dB, magma_int_t lddb )
{
magmablas_zlacpy_q( uplo, m, n, dA, ldda, dB, lddb, magma_stream );
}
////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
ZLACPY_BATCHED copies all or part of each two-dimensional matrix
dAarray[i] to matrix dBarray[i], for 0 <= i < batchCount.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of each matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
Otherwise: All of each matrix dA
@param[in]
m INTEGER
The number of rows of each matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dA. N >= 0.
@param[in]
dAarray COMPLEX_16* array, dimension (batchCount)
Array of pointers to the matrices dA, where each dA is of dimension (LDDA,N).
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of each array dA. LDDA >= max(1,M).
@param[out]
dBarray COMPLEX_16* array, dimension (batchCount)
Array of pointers to the matrices dB, where each dB is of dimension (LDDB,N).
The M-by-N matrix dB.
On exit, dB = dA in the locations specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of each array dB. LDDB >= max(1,M).
@param[in]
batchCount Number of matrices in dAarray and dBarray.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlacpy_batched(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaDoubleComplex_const_ptr const dAarray[], magma_int_t ldda,
magmaDoubleComplex_ptr dBarray[], magma_int_t lddb,
magma_int_t batchCount, magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 || batchCount == 0 ) {
return;
}
dim3 threads( BLK_X, 1, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), batchCount );
if ( uplo == MagmaLower ) {
hipLaunchKernelGGL(( zlacpy_lower_kernel_batched), dim3(grid), dim3(threads), 0, queue , m, n, dAarray, ldda, dBarray, lddb );
}
else if ( uplo == MagmaUpper ) {
hipLaunchKernelGGL(( zlacpy_upper_kernel_batched), dim3(grid), dim3(threads), 0, queue , m, n, dAarray, ldda, dBarray, lddb );
}
else {
hipLaunchKernelGGL(( zlacpy_full_kernel_batched) , dim3(grid), dim3(threads), 0, queue , m, n, dAarray, ldda, dBarray, lddb );
}
}
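// Illustrative usage sketch (not part of the original source): copy all of
// batchCount same-sized matrices, assuming dAarray/dBarray are device arrays
// of batchCount pointers, each pointing to an m-by-n device matrix:
//
//     magmablas_zlacpy_batched( MagmaFull, m, n, dAarray, ldda, dBarray, lddb, batchCount, queue );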
|
4052c41067c83085685b52ad81facf6840791844.cu
|
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@author Mark Gates
@author Azzam Haidar
@precisions normal z -> s d c
*/
#include "common_magma.h"
// To deal with really large matrices, this launches multiple super blocks,
// each with up to 64K-1 x 64K-1 thread blocks, which is up to 4194240 x 4194240 matrix with BLK=64.
// CUDA architecture 2.0 limits each grid dimension to 64K-1.
// Instances arose for vectors used by sparse matrices with M > 4194240, though N is small.
const magma_int_t max_blocks = 65535;
// BLK_X and BLK_Y need to be equal for zlaset_q to deal with diag & offdiag
// when looping over super blocks.
// Formerly, BLK_X and BLK_Y could be different.
#define BLK_X 64
#define BLK_Y BLK_X
/*
Divides matrix into ceil( m/BLK_X ) x ceil( n/BLK_Y ) blocks.
Each block has BLK_X threads.
Each thread loops across one row, updating BLK_Y entries.
Code similar to zlaset, zlacpy, zlag2c, clag2z, zgeadd.
*/
static __device__
void zlacpy_full_device(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column */
bool full = (iby + BLK_Y <= n);
/* do only rows inside matrix */
if ( ind < m ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// partial block-column
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/*
Similar to zlacpy_full, but updates only the diagonal and below.
Blocks that are fully above the diagonal exit immediately.
Code similar to zlaset, zlacpy, zlat2c, clat2z.
*/
static __device__
void zlacpy_lower_device(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (below diag) */
bool full = (iby + BLK_Y <= n && (ind >= iby + BLK_Y));
/* do only rows inside matrix, and blocks not above diag */
if ( ind < m && ind + BLK_X > iby ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n && ind >= iby+j; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
/*
Similar to zlacpy_full, but updates only the diagonal and above.
Blocks that are fully below the diagonal exit immediately.
Code similar to zlaset, zlacpy, zlat2c, clat2z.
*/
static __device__
void zlacpy_upper_device(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
int ind = blockIdx.x*BLK_X + threadIdx.x;
int iby = blockIdx.y*BLK_Y;
/* check if full block-column && (above diag) */
bool full = (iby + BLK_Y <= n && (ind + BLK_X <= iby));
/* do only rows inside matrix, and blocks not below diag */
if ( ind < m && ind < iby + BLK_Y ) {
dA += ind + iby*ldda;
dB += ind + iby*lddb;
if ( full ) {
// full block-column, off-diagonal block
#pragma unroll
for( int j=0; j < BLK_Y; ++j ) {
dB[j*lddb] = dA[j*ldda];
}
}
else {
// either partial block-column or diagonal block
for( int j=0; j < BLK_Y && iby+j < n; ++j ) {
if ( ind <= iby+j ) {
dB[j*lddb] = dA[j*ldda];
}
}
}
}
}
//////////////////////////////////////////////////////////////////////////////////////
/*
kernel wrappers to call the device functions.
*/
__global__
void zlacpy_full_kernel(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
zlacpy_full_device(m, n, dA, ldda, dB, lddb);
}
__global__
void zlacpy_lower_kernel(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
zlacpy_lower_device(m, n, dA, ldda, dB, lddb);
}
__global__
void zlacpy_upper_kernel(
int m, int n,
const magmaDoubleComplex *dA, int ldda,
magmaDoubleComplex *dB, int lddb )
{
zlacpy_upper_device(m, n, dA, ldda, dB, lddb);
}
//////////////////////////////////////////////////////////////////////////////////////
/*
kernel wrappers to call the device functions for the batched routine.
*/
__global__
void zlacpy_full_kernel_batched(
int m, int n,
magmaDoubleComplex const * const *dAarray, int ldda,
magmaDoubleComplex **dBarray, int lddb )
{
int batchid = blockIdx.z;
zlacpy_full_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
__global__
void zlacpy_lower_kernel_batched(
int m, int n,
magmaDoubleComplex const * const *dAarray, int ldda,
magmaDoubleComplex **dBarray, int lddb )
{
int batchid = blockIdx.z;
zlacpy_lower_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
__global__
void zlacpy_upper_kernel_batched(
int m, int n,
magmaDoubleComplex const * const *dAarray, int ldda,
magmaDoubleComplex **dBarray, int lddb )
{
int batchid = blockIdx.z;
zlacpy_upper_device(m, n, dAarray[batchid], ldda, dBarray[batchid], lddb);
}
//////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
ZLACPY_Q copies all or part of a two-dimensional matrix dA to another
matrix dB.
This is the same as ZLACPY, but adds queue argument.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of the matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
- = MagmaFull: All of the matrix dA
@param[in]
m INTEGER
The number of rows of the matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of the matrix dA. N >= 0.
@param[in]
dA COMPLEX_16 array, dimension (LDDA,N)
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of the array dA. LDDA >= max(1,M).
@param[out]
dB COMPLEX_16 array, dimension (LDDB,N)
The M-by-N matrix dB.
On exit, dB = dA in the locations specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of the array dB. LDDB >= max(1,M).
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlacpy_q(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
magmaDoubleComplex_ptr dB, magma_int_t lddb,
magma_queue_t queue )
{
#define dA(i_, j_) (dA + (i_) + (j_)*ldda)
#define dB(i_, j_) (dB + (i_) + (j_)*lddb)
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return; //info;
}
if ( m == 0 || n == 0 ) {
return;
}
assert( BLK_X == BLK_Y );
const magma_int_t super_NB = max_blocks*BLK_X;
dim3 super_grid( magma_ceildiv( m, super_NB ), magma_ceildiv( n, super_NB ) );
dim3 threads( BLK_X, 1 );
dim3 grid;
magma_int_t mm, nn;
if ( uplo == MagmaLower ) {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=0; j < super_grid.y && j <= i; ++j ) { // from left to diagonal
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
zlacpy_lower_kernel<<< grid, threads, 0, queue >>>
( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
else { // off diagonal super block
zlacpy_full_kernel <<< grid, threads, 0, queue >>>
( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
else if ( uplo == MagmaUpper ) {
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=i; j < super_grid.y; ++j ) { // from diagonal to right
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
if ( i == j ) { // diagonal super block
zlacpy_upper_kernel<<< grid, threads, 0, queue >>>
( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
else { // off diagonal super block
zlacpy_full_kernel <<< grid, threads, 0, queue >>>
( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
else {
// TODO: use cudaMemcpy or cudaMemcpy2D ?
for( unsigned int i=0; i < super_grid.x; ++i ) {
mm = (i == super_grid.x-1 ? m % super_NB : super_NB);
grid.x = magma_ceildiv( mm, BLK_X );
for( unsigned int j=0; j < super_grid.y; ++j ) { // full row
nn = (j == super_grid.y-1 ? n % super_NB : super_NB);
grid.y = magma_ceildiv( nn, BLK_Y );
zlacpy_full_kernel <<< grid, threads, 0, queue >>>
( mm, nn, dA(i*super_NB, j*super_NB), ldda, dB(i*super_NB, j*super_NB), lddb );
}
}
}
}
/**
@see magmablas_zlacpy_q
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlacpy(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaDoubleComplex_const_ptr dA, magma_int_t ldda,
magmaDoubleComplex_ptr dB, magma_int_t lddb )
{
magmablas_zlacpy_q( uplo, m, n, dA, ldda, dB, lddb, magma_stream );
}
////////////////////////////////////////////////////////////////////////////////////////
/**
Purpose
-------
ZLACPY_BATCHED copies all or part of each two-dimensional matrix
dAarray[i] to matrix dBarray[i], for 0 <= i < batchCount.
Arguments
---------
@param[in]
uplo magma_uplo_t
Specifies the part of each matrix dA to be copied to dB.
- = MagmaUpper: Upper triangular part
- = MagmaLower: Lower triangular part
Otherwise: All of each matrix dA
@param[in]
m INTEGER
The number of rows of each matrix dA. M >= 0.
@param[in]
n INTEGER
The number of columns of each matrix dA. N >= 0.
@param[in]
dAarray COMPLEX_16* array, dimension (batchCount)
Array of pointers to the matrices dA, where each dA is of dimension (LDDA,N).
The M-by-N matrix dA.
If UPLO = MagmaUpper, only the upper triangle or trapezoid is accessed;
if UPLO = MagmaLower, only the lower triangle or trapezoid is accessed.
@param[in]
ldda INTEGER
The leading dimension of each array dA. LDDA >= max(1,M).
@param[out]
dBarray COMPLEX_16* array, dimension (batchCount)
Array of pointers to the matrices dB, where each dB is of dimension (LDDB,N).
The M-by-N matrix dB.
On exit, dB = dA in the locations specified by UPLO.
@param[in]
lddb INTEGER
The leading dimension of each array dB. LDDB >= max(1,M).
@param[in]
batchCount Number of matrices in dAarray and dBarray.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magma_zaux2
********************************************************************/
extern "C" void
magmablas_zlacpy_batched(
magma_uplo_t uplo, magma_int_t m, magma_int_t n,
magmaDoubleComplex_const_ptr const dAarray[], magma_int_t ldda,
magmaDoubleComplex_ptr dBarray[], magma_int_t lddb,
magma_int_t batchCount, magma_queue_t queue )
{
magma_int_t info = 0;
if ( uplo != MagmaLower && uplo != MagmaUpper && uplo != MagmaFull )
info = -1;
else if ( m < 0 )
info = -2;
else if ( n < 0 )
info = -3;
else if ( ldda < max(1,m))
info = -5;
else if ( lddb < max(1,m))
info = -7;
else if ( batchCount < 0 )
info = -8;
if ( info != 0 ) {
magma_xerbla( __func__, -(info) );
return;
}
if ( m == 0 || n == 0 || batchCount == 0 ) {
return;
}
dim3 threads( BLK_X, 1, 1 );
dim3 grid( magma_ceildiv( m, BLK_X ), magma_ceildiv( n, BLK_Y ), batchCount );
if ( uplo == MagmaLower ) {
zlacpy_lower_kernel_batched<<< grid, threads, 0, queue >>> ( m, n, dAarray, ldda, dBarray, lddb );
}
else if ( uplo == MagmaUpper ) {
zlacpy_upper_kernel_batched<<< grid, threads, 0, queue >>> ( m, n, dAarray, ldda, dBarray, lddb );
}
else {
zlacpy_full_kernel_batched <<< grid, threads, 0, queue >>> ( m, n, dAarray, ldda, dBarray, lddb );
}
}
|
8629814a7f722582359d6ab891d1d53ecbe008d7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by ops.py
//
__constant__ int dims_calupwindeff_kernel [7][1];
static int dims_calupwindeff_kernel_h [7][1] = {0};
//user function
__device__
void calupwindeff_kernel_gpu(const ACC<double>& cmp,
const ACC<double>& gt,
const ACC<double>& cf,
const ACC<double>& al,
const ACC<double>& ep2,
const ACC<double>& r,
ACC<double>& eff) {
double e1 = (cmp(0,0) * (gt(0,0) + gt(0,1)) - cf(0,0) * al(0,0)) * ep2(0,0);
double e2 = (cmp(1,0) * (gt(1,0) + gt(1,1)) - cf(1,0) * al(1,0)) * ep2(1,0);
double e3 = (cmp(2,0) * (gt(2,0) + gt(2,1)) - cf(2,0) * al(2,0)) * ep2(2,0);
eff(0,0)=e1 * r(0,0) + e2 * r(1,0) + e3 * r(2,0);
eff(1,0)=e1 * r(3,0) + e2 * r(4,0) + e3 * r(5,0);
eff(2,0)=e1 * r(6,0) + e2 * r(7,0) + e3 * r(8,0);
}
__global__ void ops_calupwindeff_kernel(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
int size0 ){
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
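// (Clarifying note, not in the generated source: each pointer below is advanced
//  to this thread's grid point; the multiplier appears to be the stencil stride
//  (1) times the dat's components per point - 3 for most arguments, 9 for arg5.)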
arg0 += idx_x * 1*3;
arg1 += idx_x * 1*3;
arg2 += idx_x * 1*3;
arg3 += idx_x * 1*3;
arg4 += idx_x * 1*3;
arg5 += idx_x * 1*9;
arg6 += idx_x * 1*3;
if (idx_x < size0) {
const ACC<double> argp0(3, dims_calupwindeff_kernel[0][0], arg0);
const ACC<double> argp1(3, dims_calupwindeff_kernel[1][0], arg1);
const ACC<double> argp2(3, dims_calupwindeff_kernel[2][0], arg2);
const ACC<double> argp3(3, dims_calupwindeff_kernel[3][0], arg3);
const ACC<double> argp4(3, dims_calupwindeff_kernel[4][0], arg4);
const ACC<double> argp5(9, dims_calupwindeff_kernel[5][0], arg5);
ACC<double> argp6(3, dims_calupwindeff_kernel[6][0], arg6);
calupwindeff_kernel_gpu(argp0, argp1, argp2, argp3,
argp4, argp5, argp6);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_calupwindeff_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6) {
#else
void ops_par_loop_calupwindeff_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[7] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,7,range,11)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(11,"calupwindeff_kernel");
OPS_kernels[11].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[1];
int end[1];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[1];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 7,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<1; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
int xdim4 = args[4].dat->size[0];
int xdim5 = args[5].dat->size[0];
int xdim6 = args[6].dat->size[0];
if (xdim0 != dims_calupwindeff_kernel_h[0][0] || xdim1 != dims_calupwindeff_kernel_h[1][0] || xdim2 != dims_calupwindeff_kernel_h[2][0] || xdim3 != dims_calupwindeff_kernel_h[3][0] || xdim4 != dims_calupwindeff_kernel_h[4][0] || xdim5 != dims_calupwindeff_kernel_h[5][0] || xdim6 != dims_calupwindeff_kernel_h[6][0]) {
dims_calupwindeff_kernel_h[0][0] = xdim0;
dims_calupwindeff_kernel_h[1][0] = xdim1;
dims_calupwindeff_kernel_h[2][0] = xdim2;
dims_calupwindeff_kernel_h[3][0] = xdim3;
dims_calupwindeff_kernel_h[4][0] = xdim4;
dims_calupwindeff_kernel_h[5][0] = xdim5;
dims_calupwindeff_kernel_h[6][0] = xdim6;
cutilSafeCall(hipMemcpyToSymbol( dims_calupwindeff_kernel, dims_calupwindeff_kernel_h, sizeof(dims_calupwindeff_kernel)));
}
int x_size = MAX(0,end[0]-start[0]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, 1, 1);
dim3 tblock(OPS_block_size_x,1,1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
char *p_a[7];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
p_a[6] = (char *)args[6].data_d + base6;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 7);
ops_halo_exchanges(args,7,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[11].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0)
hipLaunchKernelGGL(( ops_calupwindeff_kernel), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6],x_size);
cutilSafeCall(hipGetLastError());
if (OPS_diags>1) {
cutilSafeCall(hipDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[11].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 7);
ops_set_halo_dirtybit3(&args[6],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[11].mpi_time += t2-t1;
OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
#ifdef OPS_LAZY
void ops_par_loop_calupwindeff_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 11;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 11;
for ( int i=0; i<2; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 7;
desc->args = (ops_arg*)malloc(7*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->function = ops_par_loop_calupwindeff_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(11,"calupwindeff_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
|
8629814a7f722582359d6ab891d1d53ecbe008d7.cu
|
//
// auto-generated by ops.py
//
__constant__ int dims_calupwindeff_kernel [7][1];
static int dims_calupwindeff_kernel_h [7][1] = {0};
//user function
__device__
void calupwindeff_kernel_gpu(const ACC<double>& cmp,
const ACC<double>& gt,
const ACC<double>& cf,
const ACC<double>& al,
const ACC<double>& ep2,
const ACC<double>& r,
ACC<double>& eff) {
double e1 = (cmp(0,0) * (gt(0,0) + gt(0,1)) - cf(0,0) * al(0,0)) * ep2(0,0);
double e2 = (cmp(1,0) * (gt(1,0) + gt(1,1)) - cf(1,0) * al(1,0)) * ep2(1,0);
double e3 = (cmp(2,0) * (gt(2,0) + gt(2,1)) - cf(2,0) * al(2,0)) * ep2(2,0);
eff(0,0)=e1 * r(0,0) + e2 * r(1,0) + e3 * r(2,0);
eff(1,0)=e1 * r(3,0) + e2 * r(4,0) + e3 * r(5,0);
eff(2,0)=e1 * r(6,0) + e2 * r(7,0) + e3 * r(8,0);
}
__global__ void ops_calupwindeff_kernel(
double* __restrict arg0,
double* __restrict arg1,
double* __restrict arg2,
double* __restrict arg3,
double* __restrict arg4,
double* __restrict arg5,
double* __restrict arg6,
int size0 ){
int idx_x = blockDim.x * blockIdx.x + threadIdx.x;
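//each dat here is multi-component: args 0-4 and 6 store 3 values per grid point and arg5 stores 9,
//so every pointer below is advanced by idx_x * (number of components per point)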
arg0 += idx_x * 1*3;
arg1 += idx_x * 1*3;
arg2 += idx_x * 1*3;
arg3 += idx_x * 1*3;
arg4 += idx_x * 1*3;
arg5 += idx_x * 1*9;
arg6 += idx_x * 1*3;
if (idx_x < size0) {
const ACC<double> argp0(3, dims_calupwindeff_kernel[0][0], arg0);
const ACC<double> argp1(3, dims_calupwindeff_kernel[1][0], arg1);
const ACC<double> argp2(3, dims_calupwindeff_kernel[2][0], arg2);
const ACC<double> argp3(3, dims_calupwindeff_kernel[3][0], arg3);
const ACC<double> argp4(3, dims_calupwindeff_kernel[4][0], arg4);
const ACC<double> argp5(9, dims_calupwindeff_kernel[5][0], arg5);
ACC<double> argp6(3, dims_calupwindeff_kernel[6][0], arg6);
calupwindeff_kernel_gpu(argp0, argp1, argp2, argp3,
argp4, argp5, argp6);
}
}
// host stub function
#ifndef OPS_LAZY
void ops_par_loop_calupwindeff_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3,
ops_arg arg4, ops_arg arg5, ops_arg arg6) {
#else
void ops_par_loop_calupwindeff_kernel_execute(ops_kernel_descriptor *desc) {
int dim = desc->dim;
#if OPS_MPI
ops_block block = desc->block;
#endif
int *range = desc->range;
ops_arg arg0 = desc->args[0];
ops_arg arg1 = desc->args[1];
ops_arg arg2 = desc->args[2];
ops_arg arg3 = desc->args[3];
ops_arg arg4 = desc->args[4];
ops_arg arg5 = desc->args[5];
ops_arg arg6 = desc->args[6];
#endif
//Timing
double t1,t2,c1,c2;
ops_arg args[7] = { arg0, arg1, arg2, arg3, arg4, arg5, arg6};
#if CHECKPOINTING && !OPS_LAZY
if (!ops_checkpointing_before(args,7,range,11)) return;
#endif
if (OPS_diags > 1) {
ops_timing_realloc(11,"calupwindeff_kernel");
OPS_kernels[11].count++;
ops_timers_core(&c1,&t1);
}
//compute locally allocated range for the sub-block
int start[1];
int end[1];
#if OPS_MPI && !OPS_LAZY
sub_block_list sb = OPS_sub_block_list[block->index];
#endif //OPS_MPI
#ifdef OPS_MPI
int arg_idx[1];
#endif
#ifdef OPS_MPI
if (compute_ranges(args, 7,block, range, start, end, arg_idx) < 0) return;
#else //OPS_MPI
for ( int n=0; n<1; n++ ){
start[n] = range[2*n];end[n] = range[2*n+1];
}
#endif
int xdim0 = args[0].dat->size[0];
int xdim1 = args[1].dat->size[0];
int xdim2 = args[2].dat->size[0];
int xdim3 = args[3].dat->size[0];
int xdim4 = args[4].dat->size[0];
int xdim5 = args[5].dat->size[0];
int xdim6 = args[6].dat->size[0];
if (xdim0 != dims_calupwindeff_kernel_h[0][0] || xdim1 != dims_calupwindeff_kernel_h[1][0] || xdim2 != dims_calupwindeff_kernel_h[2][0] || xdim3 != dims_calupwindeff_kernel_h[3][0] || xdim4 != dims_calupwindeff_kernel_h[4][0] || xdim5 != dims_calupwindeff_kernel_h[5][0] || xdim6 != dims_calupwindeff_kernel_h[6][0]) {
dims_calupwindeff_kernel_h[0][0] = xdim0;
dims_calupwindeff_kernel_h[1][0] = xdim1;
dims_calupwindeff_kernel_h[2][0] = xdim2;
dims_calupwindeff_kernel_h[3][0] = xdim3;
dims_calupwindeff_kernel_h[4][0] = xdim4;
dims_calupwindeff_kernel_h[5][0] = xdim5;
dims_calupwindeff_kernel_h[6][0] = xdim6;
cutilSafeCall(cudaMemcpyToSymbol( dims_calupwindeff_kernel, dims_calupwindeff_kernel_h, sizeof(dims_calupwindeff_kernel)));
}
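//the dat x-dimensions are cached host-side in dims_calupwindeff_kernel_h, so the constant-memory
//upload above only happens when a dat size has changed since the previous call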
int x_size = MAX(0,end[0]-start[0]);
dim3 grid( (x_size-1)/OPS_block_size_x+ 1, 1, 1);
dim3 tblock(OPS_block_size_x,1,1);
int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size);
int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size);
int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size);
int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size);
int dat4 = (OPS_soa ? args[4].dat->type_size : args[4].dat->elem_size);
int dat5 = (OPS_soa ? args[5].dat->type_size : args[5].dat->elem_size);
int dat6 = (OPS_soa ? args[6].dat->type_size : args[6].dat->elem_size);
char *p_a[7];
//set up initial pointers
int base0 = args[0].dat->base_offset +
dat0 * 1 * (start[0] * args[0].stencil->stride[0]);
p_a[0] = (char *)args[0].data_d + base0;
int base1 = args[1].dat->base_offset +
dat1 * 1 * (start[0] * args[1].stencil->stride[0]);
p_a[1] = (char *)args[1].data_d + base1;
int base2 = args[2].dat->base_offset +
dat2 * 1 * (start[0] * args[2].stencil->stride[0]);
p_a[2] = (char *)args[2].data_d + base2;
int base3 = args[3].dat->base_offset +
dat3 * 1 * (start[0] * args[3].stencil->stride[0]);
p_a[3] = (char *)args[3].data_d + base3;
int base4 = args[4].dat->base_offset +
dat4 * 1 * (start[0] * args[4].stencil->stride[0]);
p_a[4] = (char *)args[4].data_d + base4;
int base5 = args[5].dat->base_offset +
dat5 * 1 * (start[0] * args[5].stencil->stride[0]);
p_a[5] = (char *)args[5].data_d + base5;
int base6 = args[6].dat->base_offset +
dat6 * 1 * (start[0] * args[6].stencil->stride[0]);
p_a[6] = (char *)args[6].data_d + base6;
#ifndef OPS_LAZY
ops_H_D_exchanges_device(args, 7);
ops_halo_exchanges(args,7,range);
#endif
if (OPS_diags > 1) {
ops_timers_core(&c2,&t2);
OPS_kernels[11].mpi_time += t2-t1;
}
//call kernel wrapper function, passing in pointers to data
if (x_size > 0)
ops_calupwindeff_kernel<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1],
(double *)p_a[2], (double *)p_a[3],
(double *)p_a[4], (double *)p_a[5],
(double *)p_a[6],x_size);
cutilSafeCall(cudaGetLastError());
if (OPS_diags>1) {
cutilSafeCall(cudaDeviceSynchronize());
ops_timers_core(&c1,&t1);
OPS_kernels[11].time += t1-t2;
}
#ifndef OPS_LAZY
ops_set_dirtybit_device(args, 7);
ops_set_halo_dirtybit3(&args[6],range);
#endif
if (OPS_diags > 1) {
//Update kernel record
ops_timers_core(&c2,&t2);
OPS_kernels[11].mpi_time += t2-t1;
OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg0);
OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg1);
OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg2);
OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg3);
OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg4);
OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg5);
OPS_kernels[11].transfer += ops_compute_transfer(dim, start, end, &arg6);
}
}
#ifdef OPS_LAZY
void ops_par_loop_calupwindeff_kernel(char const *name, ops_block block, int dim, int* range,
ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4, ops_arg arg5, ops_arg arg6) {
ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor));
desc->name = name;
desc->block = block;
desc->dim = dim;
desc->device = 1;
desc->index = 11;
desc->hash = 5381;
desc->hash = ((desc->hash << 5) + desc->hash) + 11;
for ( int i=0; i<2; i++ ){
desc->range[i] = range[i];
desc->orig_range[i] = range[i];
desc->hash = ((desc->hash << 5) + desc->hash) + range[i];
}
desc->nargs = 7;
desc->args = (ops_arg*)malloc(7*sizeof(ops_arg));
desc->args[0] = arg0;
desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index;
desc->args[1] = arg1;
desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index;
desc->args[2] = arg2;
desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index;
desc->args[3] = arg3;
desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index;
desc->args[4] = arg4;
desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index;
desc->args[5] = arg5;
desc->hash = ((desc->hash << 5) + desc->hash) + arg5.dat->index;
desc->args[6] = arg6;
desc->hash = ((desc->hash << 5) + desc->hash) + arg6.dat->index;
desc->function = ops_par_loop_calupwindeff_kernel_execute;
if (OPS_diags > 1) {
ops_timing_realloc(11,"calupwindeff_kernel");
}
ops_enqueue_kernel(desc);
}
#endif
|
d0f581c2c8274c8353d4eb5586a2e1918e5c23e9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <hip/hip_fp16.h>
#include <stdio.h>
using namespace std;
#include <sys/time.h>
#include <unistd.h>
#include <stdlib.h>
#include <cstdlib>
union FP32
{
unsigned int i;
float f;
};
union FP16
{
unsigned short int i;
__half f;
};
__global__ void test(float* dst, __half* a, __half* b, float* c){
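// Inline PTX below: load one 16x16x16 WMMA tile of A (row-major f16), B (col-major f16)
// and C (row-major f32) straight from global memory, compute D = A*B + C with a single
// wmma.mma instruction, and store D back to dst in row-major f32.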
asm volatile(
"ld.param.u64 %rd1, [_Z4testPfP6__halfS1_S__param_0];\n\t"
".reg .b32 a<8>, b<8>, c<8>,d<8>;\n\t"
"wmma.load.a.sync.aligned.m16n16k16.global.row.f16 {a0, a1, a2, a3, a4, a5, a6, a7}, [%1];\n\t"
"wmma.load.b.sync.aligned.m16n16k16.global.col.f16 {b0, b1, b2, b3, b4, b5, b6, b7}, [%2];\n\t"
"wmma.load.c.sync.aligned.m16n16k16.global.row.f32 {c0, c1, c2, c3, c4, c5, c6, c7}, [%3];\n\t"
"wmma.mma.sync.aligned.m16n16k16.row.col.f32.f32 {d0,d1,d2,d3,d4,d5,d6,d7}, {a0, a1, a2, a3, a4, a5, a6, a7}, {b0, b1, b2, b3, b4, b5, b6, b7}, {c0, c1, c2, c3, c4, c5, c6, c7};\n\t"
"wmma.store.d.sync.aligned.m16n16k16.global.row.f32 [%0], {d0,d1,d2,d3,d4,d5,d6,d7};" : "=l"(dst): "l"(a), "l"(b), "l"(c));
}
void InitOne(__half* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 1.0;
}
}
void InitZero(__half* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 0.0;
}
}
void InitZero_float(float* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 0.0;
}
}
void show(float * a, const int n) {
std::cout << std::endl;
for ( int i=0; i<n; i++){
std::cout<<a[i] << std::endl;
}
std::cout << std::endl;
}
int main(int argc, char** argv){
int size = 256;
__half* host_a=(__half*)malloc(sizeof(__half) * size);
__half* host_b=(__half*)malloc(sizeof(__half) * size);
float* host_c=(float*)malloc(sizeof(float) * size);
float* host_d=(float*)malloc(sizeof(float) * size);
__half* device_a=NULL;
__half* device_b=NULL;
float* device_c=NULL;
float* device_d=NULL;
hipMalloc((void**)(&device_a), sizeof(__half) * size);
hipMalloc((void**)(&device_b), sizeof(__half) * size);
hipMalloc((void**)(&device_c), sizeof(float) * size);
hipMalloc((void**)(&device_d), sizeof(float) * size);
InitZero(host_a, size);
InitOne(host_b, size);
InitZero_float(host_c, size);
InitZero_float(host_d, size);
FP16 fp16;
fp16.i = 0x3c00; host_a[0]=fp16.f;
fp16.i = 0x3c00; host_a[1]=fp16.f;
fp16.i = 0x3c00; host_a[2]=fp16.f;
fp16.i = 0x3c00; host_a[3]=fp16.f;
fp16.i = 0x3c00; host_a[4]=fp16.f;
fp16.i = 0x3c00; host_a[5]=fp16.f;
fp16.i = 0x3c00; host_a[6]=fp16.f;
fp16.i = 0x3c00; host_a[7]=fp16.f;
FP32 fp32;
fp32.i = 0x4c000000; host_c[0]=fp32.f;
hipMemcpy((void*)device_a, (void*)host_a, sizeof(__half)* size, hipMemcpyHostToDevice);
hipMemcpy((void*)device_b, (void*)host_b, sizeof(__half)* size, hipMemcpyHostToDevice);
hipMemcpy((void*)device_c, (void*)host_c, sizeof(float)* size, hipMemcpyHostToDevice);
hipMemcpy((void*)device_d, (void*)host_d, sizeof(float)* size, hipMemcpyHostToDevice);
hipLaunchKernelGGL(( test), dim3(1),dim3(32), 0, 0, device_d, device_a, device_b, device_c);
hipDeviceSynchronize();
hipMemcpy((void*)host_d, (void*)device_d, sizeof(float) * size, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
//FP32 fp32;
fp32.f=host_d[0];
//std::cout<< host_d[0] << std::endl;
std::cout<< hex << fp32.i << std::endl;
//show(host_d, size);
}
|
d0f581c2c8274c8353d4eb5586a2e1918e5c23e9.cu
|
#include <iostream>
#include <cuda.h>
#include <cuda_fp16.h>
#include <stdio.h>
using namespace std;
#include <sys/time.h>
#include <unistd.h>
#include <stdlib.h>
#include <cstdlib>
union FP32
{
unsigned int i;
float f;
};
union FP16
{
unsigned short int i;
__half f;
};
__global__ void test(float* dst, __half* a, __half* b, float* c){
asm volatile(
"ld.param.u64 %rd1, [_Z4testPfP6__halfS1_S__param_0];\n\t"
".reg .b32 a<8>, b<8>, c<8>,d<8>;\n\t"
"wmma.load.a.sync.aligned.m16n16k16.global.row.f16 {a0, a1, a2, a3, a4, a5, a6, a7}, [%1];\n\t"
"wmma.load.b.sync.aligned.m16n16k16.global.col.f16 {b0, b1, b2, b3, b4, b5, b6, b7}, [%2];\n\t"
"wmma.load.c.sync.aligned.m16n16k16.global.row.f32 {c0, c1, c2, c3, c4, c5, c6, c7}, [%3];\n\t"
"wmma.mma.sync.aligned.m16n16k16.row.col.f32.f32 {d0,d1,d2,d3,d4,d5,d6,d7}, {a0, a1, a2, a3, a4, a5, a6, a7}, {b0, b1, b2, b3, b4, b5, b6, b7}, {c0, c1, c2, c3, c4, c5, c6, c7};\n\t"
"wmma.store.d.sync.aligned.m16n16k16.global.row.f32 [%0], {d0,d1,d2,d3,d4,d5,d6,d7};" : "=l"(dst): "l"(a), "l"(b), "l"(c));
}
void InitOne(__half* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 1.0;
}
}
void InitZero(__half* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 0.0;
}
}
void InitZero_float(float* a, const int n) {
for ( int i = 0; i < n; i++ ) {
a[i] = 0.0;
}
}
void show(float * a, const int n) {
std::cout << std::endl;
for ( int i=0; i<n; i++){
std::cout<<a[i] << std::endl;
}
std::cout << std::endl;
}
int main(int argc, char** argv){
int size = 256;
__half* host_a=(__half*)malloc(sizeof(__half) * size);
__half* host_b=(__half*)malloc(sizeof(__half) * size);
float* host_c=(float*)malloc(sizeof(float) * size);
float* host_d=(float*)malloc(sizeof(float) * size);
__half* device_a=NULL;
__half* device_b=NULL;
float* device_c=NULL;
float* device_d=NULL;
cudaMalloc((void**)(&device_a), sizeof(__half) * size);
cudaMalloc((void**)(&device_b), sizeof(__half) * size);
cudaMalloc((void**)(&device_c), sizeof(float) * size);
cudaMalloc((void**)(&device_d), sizeof(float) * size);
InitZero(host_a, size);
InitOne(host_b, size);
InitZero_float(host_c, size);
InitZero_float(host_d, size);
FP16 fp16;
fp16.i = 0x3c00; host_a[0]=fp16.f;
fp16.i = 0x3c00; host_a[1]=fp16.f;
fp16.i = 0x3c00; host_a[2]=fp16.f;
fp16.i = 0x3c00; host_a[3]=fp16.f;
fp16.i = 0x3c00; host_a[4]=fp16.f;
fp16.i = 0x3c00; host_a[5]=fp16.f;
fp16.i = 0x3c00; host_a[6]=fp16.f;
fp16.i = 0x3c00; host_a[7]=fp16.f;
FP32 fp32;
fp32.i = 0x4c000000; host_c[0]=fp32.f;
cudaMemcpy((void*)device_a, (void*)host_a, sizeof(__half)* size, cudaMemcpyHostToDevice);
cudaMemcpy((void*)device_b, (void*)host_b, sizeof(__half)* size, cudaMemcpyHostToDevice);
cudaMemcpy((void*)device_c, (void*)host_c, sizeof(float)* size, cudaMemcpyHostToDevice);
cudaMemcpy((void*)device_d, (void*)host_d, sizeof(float)* size, cudaMemcpyHostToDevice);
test<<<1,32>>>(device_d, device_a, device_b, device_c);
cudaDeviceSynchronize();
cudaMemcpy((void*)host_d, (void*)device_d, sizeof(float) * size, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
//FP32 fp32;
fp32.f=host_d[0];
//std::cout<< host_d[0] << std::endl;
std::cout<< hex << fp32.i << std::endl;
//show(host_d, size);
}
|
3632d5774efb42e8199dccc73b9c1194da1c34ed.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <limits.h>
#include <sys/time.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#define THREADS 512
#define MAXCITIES 1296
extern "C" int tsp(int, int,int, int, int, float *, float *);
__global__ void TspKernel(int kCities, int kSamples, float *kPosx, float *kPosy, int *dlength)
{
__shared__ int local_length;
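/* local_length holds this block's minimum tour length: every thread folds its own
   result in with atomicMin, and thread 0 then merges the block minimum into the
   global result *dlength at the end of the kernel */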
register int iter, i, j, len, from, to;
register float dx, dy;
register unsigned short tmp;
unsigned short tour[MAXCITIES+1];
hiprandState_t rndstate;
iter = threadIdx.x + blockIdx.x * blockDim.x;
tour[kCities] = 0;
local_length = INT_MAX;
if(iter==0)
{
*dlength = INT_MAX;
}
__syncthreads();
/* iterate number of sample times */
if (iter < kSamples) {
/* generate a random tour */
hiprand_init(iter, 0, 0, &rndstate);
for (i = 1; i < kCities; i++) tour[i] = i;
for (i = 1; i < kCities; i++) {
j = hiprand(&rndstate) % (kCities - 1) + 1;
tmp = tour[i];
tour[i] = tour[j];
tour[j] = tmp;
}
/* compute tour length */
len = 0;
from = 0;
for (i = 1; i <= kCities; i++) {
to = tour[i];
dx = kPosx[to] - kPosx[from];
dy = kPosy[to] - kPosy[from];
len += (int)(sqrtf(dx * dx + dy * dy) + 0.5f);
from = to;
}
/* check if new shortest tour */
atomicMin(&local_length, len);
}
__syncthreads();
if (threadIdx.x == 0) {
atomicMin(dlength, local_length);
}
}
static int read_input(char *filename, float *posx, float *posy)
{
register int cnt;
int i1, cities;
float i2, i3;
register FILE *f;
/* open input text file */
f = fopen(filename, "r+t");
if (f == NULL) {fprintf(stderr, "could not open file %s\n", filename); exit(-1);}
/* read the number of cities from first line */
cities = -1;
fscanf(f, "%d\n", &cities);
if ((cities < 1) || (cities >= MAXCITIES)) {fprintf(stderr, "cities out of range\n"); exit(-1);}
/* read in the cities' coordinates */
cnt = 0;
while (fscanf(f, "%d %f %f\n", &i1, &i2, &i3) == 3) {
posx[cnt] = i2;
posy[cnt] = i3;
cnt++;
if (cnt > cities) {fprintf(stderr, "input too long\n"); exit(-1);}
if (cnt != i1) {fprintf(stderr, "input line mismatch\n"); exit(-1);}
}
if (cnt != cities) {fprintf(stderr, "wrong number of cities read\n"); exit(-1);}
/* return the number of cities */
fclose(f);
return cities;
}
int main(int argc, char *argv[])
{
register int blocks, samples, c_samples, o_samples, cities;
float posx[MAXCITIES], posy[MAXCITIES], *dposx, *dposy;
struct timeval start, end;
int *dlength, length, o_length, final_length, thread_count;
printf("TSP v1.0(CUDA)\n");
/* check command line */
if (argc != 4) {fprintf(stderr, "usage: %s input_file_name number_of_samples\n", argv[0]); exit(-1);}
cities = read_input(argv[1], posx, posy);
samples = atoi(argv[2]);
if (samples < 1) {fprintf(stderr, "number of samples must be at least 1\n"); exit(-1);}
printf("%d cities and %d samples (%s)\n", cities, samples, argv[1]);
o_length = INT_MAX;
thread_count = strtol(argv[3],NULL,10);
c_samples = (int)ceil(samples/2.0);
o_samples = (int)floor(samples/2.0);
blocks = (c_samples + THREADS - 1) / THREADS;
if (hipSuccess != hipMalloc((void **)&dlength, sizeof(int))) fprintf(stderr, "could not allocate array\n");
if (hipSuccess != hipMalloc((void **)&dposx, (cities*sizeof(float)))) fprintf(stderr, "could not allocate array\n");
if (hipSuccess != hipMalloc((void **)&dposy, (cities*sizeof(float)))) fprintf(stderr, "could not allocate array\n");
/* start time */
gettimeofday(&start, NULL);
if (hipSuccess != hipMemcpy(dposx, posx, (cities*sizeof(float)), hipMemcpyHostToDevice)) fprintf(stderr, "copying of posx to device failed\n");
if (hipSuccess != hipMemcpy(dposy, posy, (cities*sizeof(float)), hipMemcpyHostToDevice)) fprintf(stderr, "copying of posy to device failed\n");
hipLaunchKernelGGL(( TspKernel), dim3(blocks), dim3(THREADS), 0, 0, cities, c_samples, dposx, dposy, dlength);
o_length = tsp(thread_count, samples, o_samples, cities, o_length, posx, posy);
if (hipSuccess != hipMemcpy(&length, dlength, sizeof(int), hipMemcpyDeviceToHost)) fprintf(stderr, "copying of dlength from device failed\n");
/* end time */
gettimeofday(&end, NULL);
printf("runtime: %.4lf s\n", end.tv_sec + end.tv_usec / 1000000.0 - start.tv_sec - start.tv_usec / 1000000.0);
/* output result */
if(length < o_length) final_length = length;
else final_length = o_length;
printf("length of shortest found tour: %d\n\n", final_length);
/* freeing memory */
hipFree(dlength);
hipFree(dposx);
hipFree(dposy);
return 0;
}
|
3632d5774efb42e8199dccc73b9c1194da1c34ed.cu
|
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <limits.h>
#include <sys/time.h>
#include <cuda.h>
#include <curand_kernel.h>
#define THREADS 512
#define MAXCITIES 1296
extern "C" int tsp(int, int,int, int, int, float *, float *);
__global__ void TspKernel(int kCities, int kSamples, float *kPosx, float *kPosy, int *dlength)
{
__shared__ int local_length;
register int iter, i, j, len, from, to;
register float dx, dy;
register unsigned short tmp;
unsigned short tour[MAXCITIES+1];
curandState rndstate;
iter = threadIdx.x + blockIdx.x * blockDim.x;
tour[kCities] = 0;
local_length = INT_MAX;
if(iter==0)
{
*dlength = INT_MAX;
}
__syncthreads();
/* iterate number of sample times */
if (iter < kSamples) {
/* generate a random tour */
curand_init(iter, 0, 0, &rndstate);
for (i = 1; i < kCities; i++) tour[i] = i;
for (i = 1; i < kCities; i++) {
j = curand(&rndstate) % (kCities - 1) + 1;
tmp = tour[i];
tour[i] = tour[j];
tour[j] = tmp;
}
/* compute tour length */
len = 0;
from = 0;
for (i = 1; i <= kCities; i++) {
to = tour[i];
dx = kPosx[to] - kPosx[from];
dy = kPosy[to] - kPosy[from];
len += (int)(sqrtf(dx * dx + dy * dy) + 0.5f);
from = to;
}
/* check if new shortest tour */
atomicMin(&local_length, len);
}
__syncthreads();
if (threadIdx.x == 0) {
atomicMin(dlength, local_length);
}
}
static int read_input(char *filename, float *posx, float *posy)
{
register int cnt;
int i1, cities;
float i2, i3;
register FILE *f;
/* open input text file */
f = fopen(filename, "r+t");
if (f == NULL) {fprintf(stderr, "could not open file %s\n", filename); exit(-1);}
/* read the number of cities from first line */
cities = -1;
fscanf(f, "%d\n", &cities);
if ((cities < 1) || (cities >= MAXCITIES)) {fprintf(stderr, "cities out of range\n"); exit(-1);}
/* read in the cities' coordinates */
cnt = 0;
while (fscanf(f, "%d %f %f\n", &i1, &i2, &i3) == 3) {
posx[cnt] = i2;
posy[cnt] = i3;
cnt++;
if (cnt > cities) {fprintf(stderr, "input too long\n"); exit(-1);}
if (cnt != i1) {fprintf(stderr, "input line mismatch\n"); exit(-1);}
}
if (cnt != cities) {fprintf(stderr, "wrong number of cities read\n"); exit(-1);}
/* return the number of cities */
fclose(f);
return cities;
}
int main(int argc, char *argv[])
{
register int blocks, samples, c_samples, o_samples, cities;
float posx[MAXCITIES], posy[MAXCITIES], *dposx, *dposy;
struct timeval start, end;
int *dlength, length, o_length, final_length, thread_count;
printf("TSP v1.0(CUDA)\n");
/* check command line */
if (argc != 4) {fprintf(stderr, "usage: %s input_file_name number_of_samples\n", argv[0]); exit(-1);}
cities = read_input(argv[1], posx, posy);
samples = atoi(argv[2]);
if (samples < 1) {fprintf(stderr, "number of samples must be at least 1\n"); exit(-1);}
printf("%d cities and %d samples (%s)\n", cities, samples, argv[1]);
o_length = INT_MAX;
thread_count = strtol(argv[3],NULL,10);
c_samples = (int)ceil(samples/2.0);
o_samples = (int)floor(samples/2.0);
blocks = (c_samples + THREADS - 1) / THREADS;
if (cudaSuccess != cudaMalloc((void **)&dlength, sizeof(int))) fprintf(stderr, "could not allocate array\n");
if (cudaSuccess != cudaMalloc((void **)&dposx, (cities*sizeof(float)))) fprintf(stderr, "could not allocate array\n");
if (cudaSuccess != cudaMalloc((void **)&dposy, (cities*sizeof(float)))) fprintf(stderr, "could not allocate array\n");
/* start time */
gettimeofday(&start, NULL);
if (cudaSuccess != cudaMemcpy(dposx, posx, (cities*sizeof(float)), cudaMemcpyHostToDevice)) fprintf(stderr, "copying of posx to device failed\n");
if (cudaSuccess != cudaMemcpy(dposy, posy, (cities*sizeof(float)), cudaMemcpyHostToDevice)) fprintf(stderr, "copying of posy to device failed\n");
TspKernel<<<blocks, THREADS>>>(cities, c_samples, dposx, dposy, dlength);
o_length = tsp(thread_count, samples, o_samples, cities, o_length, posx, posy);
if (cudaSuccess != cudaMemcpy(&length, dlength, sizeof(int), cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of dlength from device failed\n");
/* end time */
gettimeofday(&end, NULL);
printf("runtime: %.4lf s\n", end.tv_sec + end.tv_usec / 1000000.0 - start.tv_sec - start.tv_usec / 1000000.0);
/* output result */
if(length < o_length) final_length = length;
else final_length = o_length;
printf("length of shortest found tour: %d\n\n", final_length);
/* freeing memory */
cudaFree(dlength);
cudaFree(dposx);
cudaFree(dposy);
return 0;
}
|
e3e4973f2057ac38b30c58d84fa1e2173e8b9659.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void kApplySin(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
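// grid-stride loop: each thread handles elements idx, idx+numThreads, ... so any grid size covers all len elements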
for (unsigned int i = idx; i < len; i += numThreads) target[i] = __sinf(mat[i]);
}
|
e3e4973f2057ac38b30c58d84fa1e2173e8b9659.cu
|
#include "includes.h"
__global__ void kApplySin(float* mat, float* target, unsigned int len) {
const unsigned int idx = blockIdx.x * blockDim.x + threadIdx.x;
const unsigned int numThreads = blockDim.x * gridDim.x;
for (unsigned int i = idx; i < len; i += numThreads) target[i] = __sinf(mat[i]);
}
|
max.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <iostream>
using namespace std;
#define SIZE 7
__global__ void max(int *a , int *c)
{
int i = threadIdx.x;
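/* note: all threads write *c concurrently with no synchronization, so the result is
   not guaranteed to be the true maximum; a robust version would use atomicMax or a
   shared-memory reduction */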
*c = a[0];
if(a[i] > *c)
{
*c = a[i];
}
}
int main()
{
int i;
int a[SIZE];
int c;
int *dev_a, *dev_c;
hipMalloc((void **) &dev_a, SIZE*sizeof(int));
hipMalloc((void **) &dev_c, SIZE*sizeof(int));
cout<<"Enter the numbers : \n";
for( i = 0 ; i < SIZE ; i++)
{
cin>>a[i];
}
for( i = 0 ; i < SIZE ; i++)
{
cout<<a[i]<<" ";
}
hipMemcpy(dev_a , a, SIZE*sizeof(int),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( max), dim3(1),dim3(SIZE), 0, 0, dev_a,dev_c);
hipMemcpy(&c, dev_c, SIZE*sizeof(int),hipMemcpyDeviceToHost);
cout<<"\n max value = ";
cout<<c;
hipFree(dev_a);
hipFree(dev_c);
return 0;
}
|
max.cu
|
#include <cuda.h>
#include <iostream>
using namespace std;
#define SIZE 7
__global__ void max(int *a , int *c)
{
int i = threadIdx.x;
*c = a[0];
if(a[i] > *c)
{
*c = a[i];
}
}
int main()
{
int i;
int a[SIZE];
int c;
int *dev_a, *dev_c;
cudaMalloc((void **) &dev_a, SIZE*sizeof(int));
cudaMalloc((void **) &dev_c, SIZE*sizeof(int));
cout<<"Enter the numbers : \n";
for( i = 0 ; i < SIZE ; i++)
{
cin>>a[i];
}
for( i = 0 ; i < SIZE ; i++)
{
cout<<a[i]<<" ";
}
cudaMemcpy(dev_a , a, SIZE*sizeof(int),cudaMemcpyHostToDevice);
max<<<1,SIZE>>>(dev_a,dev_c);
cudaMemcpy(&c, dev_c, SIZE*sizeof(int),cudaMemcpyDeviceToHost);
cout<<"\n max value = ";
cout<<c;
cudaFree(dev_a);
cudaFree(dev_c);
return 0;
}
|
2a9728295f290820c5ee68303411271ae1b06ef1.hip
|
// !!! This is a file automatically generated by hipify!!!
//
// Created by steve on 17-3-9.
//
#include <iostream>
//#include "stdio.h"
//#include "hip/hip_runtime.h"
#include "/usr/include/cuda_runtime.h"
#include "../../../../usr/include/host_defines.h"
//#include "../../../../usr/include///hip/hip_runtime_api.h"
//#include "../../../../usr/include/c++/6/cstdio"
__global__ void test_add(int * a , int * b,int *c)
{
int ii= threadIdx.x;
c[ii] = a[ii]+b[ii];
return;
}
int main()
{
int **t;
hipMalloc((void**)(&t),100*sizeof(int*));
// printf("in the function");
std::cout << "in function " << std::endl;
int a[10],b[10],c[10];
for(int i(0);i<10;++i)
{
a[i] = i*2;
b[i]=i*10;
std::cout << "a,b:"<<a[i]<<" "<<b[i]<<std::endl;
}
int *da,*db,*dc;
hipMalloc((void**)&da,10*sizeof(int*));
hipMalloc((void**)&db,10*sizeof(int*));
hipMalloc((void**)&dc,10*sizeof(int*));
hipMemcpy(da,a,10*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(db,b,10*sizeof(int),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( test_add), dim3(1),dim3(10), 0, 0, da,db,dc);
hipMemcpy(c,dc,10*sizeof(int),hipMemcpyDeviceToHost);
for(int i(0);i<10;++i)
{
std::cout << "c:"<<i<<":"<<c[i]<<std::endl;
};
return 0;
}
|
2a9728295f290820c5ee68303411271ae1b06ef1.cu
|
//
// Created by steve on 17-3-9.
//
#include <iostream>
//#include "stdio.h"
//#include "cuda_runtime.h"
#include "/usr/include/cuda_runtime.h"
#include "../../../../usr/include/host_defines.h"
//#include "../../../../usr/include///cuda_runtime_api.h"
//#include "../../../../usr/include/c++/6/cstdio"
__global__ void test_add(int * a , int * b,int *c)
{
int ii= threadIdx.x;
c[ii] = a[ii]+b[ii];
return;
}
int main()
{
int **t;
cudaMalloc((void**)(&t),100*sizeof(int*));
// printf("in the function");
std::cout << "in function " << std::endl;
int a[10],b[10],c[10];
for(int i(0);i<10;++i)
{
a[i] = i*2;
b[i]=i*10;
std::cout << "a,b:"<<a[i]<<" "<<b[i]<<std::endl;
}
int *da,*db,*dc;
cudaMalloc((void**)&da,10*sizeof(int*));
cudaMalloc((void**)&db,10*sizeof(int*));
cudaMalloc((void**)&dc,10*sizeof(int*));
cudaMemcpy(da,a,10*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(db,b,10*sizeof(int),cudaMemcpyHostToDevice);
test_add<<<1,10>>>(da,db,dc);
cudaMemcpy(c,dc,10*sizeof(int),cudaMemcpyDeviceToHost);
for(int i(0);i<10;++i)
{
std::cout << "c:"<<i<<":"<<c[i]<<std::endl;
};
return 0;
}
|
751bb3f6edc1e6959a9e06188c6468b63e650caa.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include "caffe/common_layers.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void forwardGPU(const int nthreads, const Dtype* bottom_data, const Dtype* weight,
const Dtype* bias, int inner_size, int K0, int group, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// index = (outer_idx * K0 + channel) * inner_size_ + inner_idx
// bottom_index = (outer_idx * group * K0 + i*K0 + channel) * inner_size_ + inner_idx
// nthreads = outer_num * channel_out_ * inner_size_
int channel = (index/inner_size) % K0;
int outer_idx = (index/inner_size) / K0;
bottom_data += outer_idx*group*K0*inner_size;
for (int i = 0; i < group; ++i)
{
top_data[index] += weight[channel+i*K0] * bottom_data[channel+inner_size*K0*i];
}
if (bias != NULL)
top_data[index] += bias[channel];
}
}
template <typename Dtype>
void WeightedSumLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
const Dtype* weight = this->blobs_[0]->gpu_data();
const Dtype* bias = NULL;
if (bias_term_) {
bias = this->blobs_[1]->gpu_data();
}
caffe_gpu_set(count, Dtype(0), top_data);
// The first "axis_" dimensions are independent inner products; the total
// number of these is M_, the product over these dimensions.
hipLaunchKernelGGL(( forwardGPU<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, weight, bias, inner_size_, channel_out_, group_, top_data);
}
template <typename Dtype>
__global__ void gpu_backward_bias(const int nthreads, const Dtype* top_diff,
int inner_size, int outer_num, Dtype* bias_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// top_index = (outer_idx * nthreads + index) * inner_size_ + inner_idx
// nthreads = channel_out_
int offset = nthreads*inner_size;
for (int i = 0; i < outer_num; ++i)
{
for (int j = 0; j < inner_size; ++j)
bias_diff[index] += top_diff[i*offset+index*inner_size+j];
}
}
}
template <typename Dtype>
__global__ void gpu_backward_weight(const int nthreads, const Dtype* top_diff, const Dtype* bottom_data,
int inner_size, int outer_num, int group, Dtype* weight_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// top_index = (outer_idx * K0 + channel) * inner_size_ + inner_idx
// bottom_index = (outer_idx * nthreads + index) * inner_size_ + inner_idx
// nthreads = K0*group
int top_offset = (nthreads/group)*inner_size;
int bottom_offset = nthreads*inner_size;
int channel = index % (nthreads/group);
for (int i = 0; i < outer_num; ++i)
{
for (int j = 0; j < inner_size; ++j){
weight_diff[index] += top_diff[channel*inner_size+j]*bottom_data[index*inner_size+j];
}
// shift batch
top_diff += top_offset;
bottom_data += bottom_offset;
}
}
}
template <typename Dtype>
void WeightedSumLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (this->param_propagate_down_[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
const int outer_num = bottom[0]->count(0, axis_);
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
const int count = this->blobs_[0]->count();
// Gradient with respect to weight
// sum_{out}(top_diff_patch.*bottom_data_patch)
hipLaunchKernelGGL(( gpu_backward_weight<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, bottom_data, inner_size_, outer_num, group_, weight_diff);
}
if (bias_term_ && this->param_propagate_down_[1]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff();
const int outer_num = bottom[0]->count(0, axis_);
const int count = this->blobs_[1]->count();
// Gradient with respect to bias
hipLaunchKernelGGL(( gpu_backward_bias<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, inner_size_, outer_num, bias_diff);
}
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
const int outer_num = bottom[0]->count(0, axis_);
const Dtype* weights = this->blobs_[0]->cpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int offset = top[0]->count(axis_);
// Gradient with respect to bottom data
for (int i = 0; i < outer_num; ++i)
{
for (int k = 0; k < group_; ++k)
{
for (int j = 0; j < channel_out_; ++j)
{
caffe_gpu_axpby(inner_size_, weights[j], top_diff+j*inner_size_, Dtype(0), bottom_diff+j*inner_size_);
}
bottom_diff += channel_out_*inner_size_;
}
top_diff += offset;
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(WeightedSumLayer);
} // namespace caffe
|
751bb3f6edc1e6959a9e06188c6468b63e650caa.cu
|
#include <vector>
#include "caffe/common_layers.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void forwardGPU(const int nthreads, const Dtype* bottom_data, const Dtype* weight,
const Dtype* bias, int inner_size, int K0, int group, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
// index = (outer_idx * K0 + channel) * inner_size_ + inner_idx
// bottom_index = (outer_idx * group * K0 + i*K0 + channel) * inner_size_ + inner_idx
// nthreads = outer_num * channel_out_ * inner_size_
int channel = (index/inner_size) % K0;
int outer_idx = (index/inner_size) / K0;
bottom_data += outer_idx*group*K0*inner_size;
for (int i = 0; i < group; ++i)
{
top_data[index] += weight[channel+i*K0] * bottom_data[channel+inner_size*K0*i];
}
if (bias != NULL)
top_data[index] += bias[channel];
}
}
template <typename Dtype>
void WeightedSumLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = top[0]->mutable_gpu_data();
int count = top[0]->count();
const Dtype* weight = this->blobs_[0]->gpu_data();
const Dtype* bias = NULL;
if (bias_term_) {
bias = this->blobs_[1]->gpu_data();
}
caffe_gpu_set(count, Dtype(0), top_data);
// The first "axis_" dimensions are independent inner products; the total
// number of these is M_, the product over these dimensions.
forwardGPU<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, weight, bias, inner_size_, channel_out_, group_, top_data);
}
template <typename Dtype>
__global__ void gpu_backward_bias(const int nthreads, const Dtype* top_diff,
int inner_size, int outer_num, Dtype* bias_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// top_index = (outer_idx * nthreads + index) * inner_size_ + inner_idx
// nthreads = channel_out_
int offset = nthreads*inner_size;
for (int i = 0; i < outer_num; ++i)
{
for (int j = 0; j < inner_size; ++j)
bias_diff[index] += top_diff[i*offset+index*inner_size+j];
}
}
}
template <typename Dtype>
__global__ void gpu_backward_weight(const int nthreads, const Dtype* top_diff, const Dtype* bottom_data,
int inner_size, int outer_num, int group, Dtype* weight_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// top_index = (outer_idx * K0 + channel) * inner_size_ + inner_idx
// bottom_index = (outer_idx * nthreads + index) * inner_size_ + inner_idx
// nthreads = K0*group
int top_offset = (nthreads/group)*inner_size;
int bottom_offset = nthreads*inner_size;
int channel = index % (nthreads/group);
for (int i = 0; i < outer_num; ++i)
{
for (int j = 0; j < inner_size; ++j){
weight_diff[index] += top_diff[channel*inner_size+j]*bottom_data[index*inner_size+j];
}
// shift batch
top_diff += top_offset;
bottom_data += bottom_offset;
}
}
}
template <typename Dtype>
void WeightedSumLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
const vector<Blob<Dtype>*>& bottom) {
if (this->param_propagate_down_[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
const Dtype* bottom_data = bottom[0]->gpu_data();
const int outer_num = bottom[0]->count(0, axis_);
Dtype* weight_diff = this->blobs_[0]->mutable_gpu_diff();
const int count = this->blobs_[0]->count();
// Gradient with respect to weight
// sum_{out}(top_diff_patch.*bottom_data_patch)
gpu_backward_weight<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, bottom_data, inner_size_, outer_num, group_, weight_diff);
}
if (bias_term_ && this->param_propagate_down_[1]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bias_diff = this->blobs_[1]->mutable_gpu_diff();
const int outer_num = bottom[0]->count(0, axis_);
const int count = this->blobs_[1]->count();
// Gradient with respect to bias
gpu_backward_bias<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, inner_size_, outer_num, bias_diff);
}
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
const int outer_num = bottom[0]->count(0, axis_);
const Dtype* weights = this->blobs_[0]->cpu_data();
Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
const int offset = top[0]->count(axis_);
// Gradient with respect to bottom data
for (int i = 0; i < outer_num; ++i)
{
for (int k = 0; k < group_; ++k)
{
for (int j = 0; j < channel_out_; ++j)
{
caffe_gpu_axpby(inner_size_, weights[j], top_diff+j*inner_size_, Dtype(0), bottom_diff+j*inner_size_);
}
bottom_diff += channel_out_*inner_size_;
}
top_diff += offset;
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(WeightedSumLayer);
} // namespace caffe
|
7abd0c0b20b8a2e34992b14beac7560f7b82eb62.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Pi - CUDA version 1 - uses integers for CUDA kernels
* Author: Felipe Gutierrez, SBEL, July 2015
*/
#include <iostream>
#include <stdio.h> /* fprintf() */
#include <cstdlib> /* malloc and free */
#include <float.h> /* DBL_EPSILON() */
#include <math.h> /* sqrt() */
#include <ctime>
#include "pi-kernel.h"
/* Only add openmp if it will be used */
#if OPENMP_ENABLED
#include <omp.h>
#endif
/**
* @brief CUDA macro
* @details
* If CUDA is enabled we need to define:
* * nthreads = Number of threads per block we want.
*
* * NUMBLOCKS = Gives the number of blocks we want to use to parallelize a problem of
* size n.
*
* * KERNEL = KERNEL(n) to specified the number of blocks and the number of threads
* per block if CUDA is ENABLED. If CUDA is not enabled then KERNEL(n) is just an empty
* piece of code.
*
*/
#if CUDA_ENABLED
#include "TimerGPU.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/system/hip/execution_policy.h>
#include <thrust/system/omp/execution_policy.h>
#define nthreads 1024
#define getGridDim(n) (int)ceil(sqrt(n/nthreads))
#define GRID(n) dim3(getGridDim(n), getGridDim(n))
#define BLOCK(n) dim3(nthreads)
#define KERNEL(n) <<<GRID(n), BLOCK(n)>>> /* Necessary for kernels; hip-clang accepts the triple-chevron launch syntax */
#else
#include "TimerCPU.h"
#define KERNEL(n) /* Empty code */
#endif
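/* Example of the macro at a call site (see calculateArea below):
 *   calculateAreas KERNEL(numRects) (numRects, (1.0 / numRects), areas);
 * expands to a <<<GRID(numRects), BLOCK(numRects)>>> launch when CUDA is enabled,
 * and to a plain host function call when KERNEL(n) is empty. */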
/**
* @brief calculateAreas kernel
* @details
* * threadId: Index in the areas area. Tells us where to store the calculated area. With
* CUDA this is calculated with threadId and blockId. In serial and OpenMP this is the
* obtained by the for loop counter.
* * x: Current x coordinate
* * heightSq: height of rectangle squared
*
* @param numRects numRects we are going to use to estimate the area under the curve. This defines
* how big our problem size will be. This is the n in KERNEL(n).
*
* @param width of rectangle
*
* @param areas Pre allocated array that will contain areas. --> This array was allocated with
* hipMallocManaged() function which is what leads to UnifiedMemory.
*
* @return fills the areas array
*/
#if CUDA_ENABLED
__global__
#endif
void calculateAreas(const long numRects, const double width, double *dev_areas)
{
/* If cuda is enabled calculate the threadId which gives us the index in dev_areas */
#if CUDA_ENABLED
/* Calculate threadId for 1D grid 1D block*/
//int threadId = threadIdx.x + (blockIdx.x * blockDim.x);
/* Calculate threadId for 2D grid 1D block*/
int threadId = (blockIdx.y*gridDim.x + blockIdx.x)*blockDim.x + threadIdx.x;
if(threadId >= numRects)
{
return;
}
#elif OPENMP_ENABLED
#pragma omp parallel for
#endif
#if !CUDA_ENABLED
/* Define the for loop if cuda is not enable. This is used in both the serial and openmp version */
for(int threadId = 0;threadId < numRects;threadId++)
#endif
{
double x = threadId * width;
double heightSq = 1 - (x*x);
double height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt(heightSq));
/* Add Extra computations in order to be able to see the performance difference between CPU and GPU */
x = sqrt((float)threadId) * pow(width,3);
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * pow(width,3);
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * pow(width,3);
heightSq = 1 - (sqrt((float)x)*pow(width,3));
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = sqrt((float)x) * sqrt((float)x);
heightSq = 1 - (pow(x,4)*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * width;
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * width;
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt(heightSq));
x = sqrt((float)threadId) * pow(width,3);
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * pow(width,3);
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * pow(width,3);
heightSq = 1 - (sqrt((float)x)*pow(width,3));
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = sqrt((float)x) * sqrt((float)x);
heightSq = 1 - (pow(x,4)*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * width;
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * width;
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt(heightSq));
x = sqrt((float)threadId) * pow(width,3);
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * pow(width,3);
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * pow(width,3);
heightSq = 1 - (sqrt((float)x)*pow(width,3));
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = sqrt((float)x) * sqrt((float)x);
heightSq = 1 - (pow(x,4)*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * width;
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * width;
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt(heightSq));
x = sqrt((float)threadId) * pow(width,3);
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * pow(width,3);
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * pow(width,3);
heightSq = 1 - (sqrt((float)x)*pow(width,3));
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = sqrt((float)x) * sqrt((float)x);
heightSq = 1 - (pow(x,4)*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * width;
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * width;
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt(heightSq));
dev_areas[threadId] = (width * height);
}
}
void calculateArea(const long numRects, double *area) {
double *hostAreas;
double *deviceAreas;
double *unifiedAreas;
thrust::host_vector<double> t_hostAreas;
thrust::device_vector<double> t_deviceAreas;
int i;
/////////////////////////////// MEMORY ALLOCATION SECTION ////////////////////////////////////////
/* If CUDA is enabled allocate memory in device either using hipMalloc or hipMallocManaged */
#if CUDA_ENABLED
hipError_t err = hipSuccess;
if(getGridDim(numRects) >= 65535)
{
fprintf(stderr, "Error: WAY TOO MANY RECTANGLES. Do you really want to compute more than 4.3979123e+12 rectangles!!!! Please input less rectangles");
return;
}
std::cout << "Grid Dimensions = " << getGridDim(numRects) << std::endl;
#if UNIFIEDMEM_ENABLED
printf("Unified Memory is Enabled. Allocating using hipMallocManaged \n");
err = hipMallocManaged(&unifiedAreas, numRects * sizeof(double));
#else
printf("Unified Memory is NOT Enabled. Allocating using hipMalloc \n");
//err = hipMalloc(&deviceAreas, numRects * sizeof(double));
t_deviceAreas = thrust::device_vector<double>(numRects);
#endif
/* Check for error in device memory allocation */
if (err != hipSuccess)
{
fprintf(stderr, "hipMalloc or hipMallocManaged failed: %s\n", hipGetErrorString(err));
}
/* If CUDA is not enabled we are running on the CPU either serially or with openmp so we allocate memory in the host */
#else
hostAreas = (double*)malloc(numRects * sizeof(double));
if (hostAreas == NULL)
{
fprintf(stderr, "malloc failed!\n");
}
#endif
/////////////////////////////// KERNEL CALL SECTION ////////////////////////////////////////
/* If CUDA is enabled do the kernel and reduce call either with unifiedMemory or with device memory*/
#if CUDA_ENABLED
/* Start all cudaEvents so we can record timings */
GpuTimer kernelTimer("Kernel");
GpuTimer reduceTimer("Reduce");
GpuTimer allTimer("All");
allTimer.Start();
kernelTimer.Start();
#if UNIFIEDMEM_ENABLED
calculateAreas KERNEL(numRects) (numRects, (1.0 / numRects), unifiedAreas);
#else
calculateAreas KERNEL(numRects) (numRects, (1.0 / numRects), (double*)thrust::raw_pointer_cast(&t_deviceAreas[0]));
#endif
kernelTimer.Stop();
reduceTimer.Start();
#if UNIFIEDMEM_ENABLED
(*area) = thrust::reduce(thrust::hip::par, unifiedAreas, unifiedAreas + numRects);
#else
(*area) = thrust::reduce(thrust::hip::par, t_deviceAreas.begin(), t_deviceAreas.end());
#endif
reduceTimer.Stop();
allTimer.Stop();
kernelTimer.print();
reduceTimer.print();
allTimer.print();
hipFree(deviceAreas);
hipFree(unifiedAreas);
/* If CUDA is not enabled calculateAreas is not a kernel but a normal function. */
#else
/* This kernel call could also be given unifiedMemory as argument but for organization purposes it is called with hostAreas */
CpuTimer kernelTimer("Kernel");
CpuTimer reduceTimer("Reduce");
CpuTimer allTimer("All");
allTimer.Start();
allTimer.Start_cputimer();
kernelTimer.Start();
kernelTimer.Start_cputimer();
calculateAreas KERNEL(numRects) (numRects, (1.0 / numRects), hostAreas);
kernelTimer.Stop_cputimer();
kernelTimer.Stop();
(*area) = 0.0;
reduceTimer.Start();
reduceTimer.Start_cputimer();
for (i = 0; i < numRects; i++)
{
(*area) += hostAreas[i];
}
reduceTimer.Stop_cputimer();
reduceTimer.Stop();
allTimer.Stop_cputimer();
allTimer.Stop();
kernelTimer.print();
reduceTimer.print();
allTimer.print();
free(hostAreas);
#endif
///////////////////// GPU OR CPU FREE THE MEMORY ////////////////////
}
#if CUDA_ENABLED
void printDeviceInfo()
{
int device;
struct hipDeviceProp_t props;
hipGetDevice(&device);
hipGetDeviceProperties(&props, device);
std::cout << "Device info: " <<std::endl;
std::cout << "Name: " << props.name <<std::endl;
std::cout << "version: " << props.major << "," << props.minor <<std::endl;
}
#endif
|
7abd0c0b20b8a2e34992b14beac7560f7b82eb62.cu
|
/* Pi - CUDA version 1 - uses integers for CUDA kernels
* Author: Felipe Gutierrez, SBEL, July 2015
*/
#include <iostream>
#include <stdio.h> /* fprintf() */
#include <cstdlib> /* malloc and free */
#include <float.h> /* DBL_EPSILON() */
#include <math.h> /* sqrt() */
#include <ctime>
#include "pi-kernel.h"
/* Only add openmp if it will be used */
#if OPENMP_ENABLED
#include <omp.h>
#endif
/**
* @brief CUDA macro
* @details
* If CUDA is enabled we need to define:
* * nthreads = Number of threads per block we want.
*
* * NUMBLOCKS = Gives the number of blocks we want to use to parallelize a problem of
* size n.
*
* * KERNEL = KERNEL(n) to specified the number of blocks and the number of threads
* per block if CUDA is ENABLED. If CUDA is not enabled then KERNEL(n) is just an empty
* piece of code.
*
*/
#if CUDA_ENABLED
#include "TimerGPU.h"
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/reduce.h>
#include <thrust/system/cuda/execution_policy.h>
#include <thrust/system/omp/execution_policy.h>
#define nthreads 1024
#define getGridDim(n) (int)ceil(sqrt(n/nthreads))
#define GRID(n) dim3(getGridDim(n), getGridDim(n))
#define BLOCK(n) dim3(nthreads)
#define KERNEL(n) <<<GRID(n), BLOCK(n)>>> /* Necessary for kernels */
#else
#include "TimerCPU.h"
#define KERNEL(n) /* Empty code */
#endif
/**
* @brief calculateAreas kernel
* @details
* * threadId: Index in the areas area. Tells us where to store the calculated area. With
* CUDA this is calculated with threadId and blockId. In serial and OpenMP this is the
* obtained by the for loop counter.
* * x: Current x coordinate
* * heightSq: height of rectangle squared
*
* @param numRects numRects we are going to use to estimate the area under the curve. This defines
* how big our problem size will be. This is the n in KERNEL(n).
*
* @param width of rectangle
*
* @param areas Pre allocated array that will contain areas. --> This array was allocated with
* cudaMallocManaged() function which is what leads to UnifiedMemory.
*
* @return fills the areas array
*/
#if CUDA_ENABLED
__global__
#endif
void calculateAreas(const long numRects, const double width, double *dev_areas)
{
/* If cuda is enabled calculate the threadId which gives us the index in dev_areas */
#if CUDA_ENABLED
/* Calculate threadId for 1D grid 1D block*/
//int threadId = threadIdx.x + (blockIdx.x * blockDim.x);
/* Calculate threadId for 2D grid 1D block*/
int threadId = (blockIdx.y*gridDim.x + blockIdx.x)*blockDim.x + threadIdx.x;
if(threadId >= numRects)
{
return;
}
#elif OPENMP_ENABLED
#pragma omp parallel for
#endif
#if !CUDA_ENABLED
/* Define the for loop if cuda is not enable. This is used in both the serial and openmp version */
for(int threadId = 0;threadId < numRects;threadId++)
#endif
{
double x = threadId * width;
double heightSq = 1 - (x*x);
double height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt(heightSq));
/* Add Extra computations in order to be able to see the performance difference between CPU and GPU */
x = sqrt((float)threadId) * pow(width,3);
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * pow(width,3);
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * pow(width,3);
heightSq = 1 - (sqrt((float)x)*pow(width,3));
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = sqrt((float)x) * sqrt((float)x);
heightSq = 1 - (pow(x,4)*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * width;
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * width;
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt(heightSq));
x = sqrt((float)threadId) * pow(width,3);
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * pow(width,3);
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * pow(width,3);
heightSq = 1 - (sqrt((float)x)*pow(width,3));
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = sqrt((float)x) * sqrt((float)x);
heightSq = 1 - (pow(x,4)*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * width;
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * width;
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt(heightSq));
x = sqrt((float)threadId) * pow(width,3);
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * pow(width,3);
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * pow(width,3);
heightSq = 1 - (sqrt((float)x)*pow(width,3));
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = sqrt((float)x) * sqrt((float)x);
heightSq = 1 - (pow(x,4)*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * width;
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * width;
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt(heightSq));
x = sqrt((float)threadId) * pow(width,3);
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * pow(width,3);
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * pow(width,3);
heightSq = 1 - (sqrt((float)x)*pow(width,3));
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = sqrt((float)x) * sqrt((float)x);
heightSq = 1 - (pow(x,4)*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * width;
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt((float)heightSq));
x = threadId * width;
heightSq = 1 - (x*x);
height = (heightSq < DBL_EPSILON) ? (0.0) : (sqrt(heightSq));
dev_areas[threadId] = (width * height);
}
}
void calculateArea(const long numRects, double *area) {
double *hostAreas;
double *deviceAreas;
double *unifiedAreas;
thrust::host_vector<double> t_hostAreas;
thrust::device_vector<double> t_deviceAreas;
int i;
/////////////////////////////// MEMORY ALLOCATION SECTION ////////////////////////////////////////
/* If CUDA is enabled allocate memory in device either using cudaMalloc or cudaMallocManaged */
#if CUDA_ENABLED
cudaError_t err = cudaSuccess;
if(getGridDim(numRects) >= 65535)
{
fprintf(stderr, "Error: WAY TOO MANY RECTANGLES. Do you really want to compute more than 4.3979123e+12 rectangles!!!! Please input less rectangles");
return;
}
std::cout << "Grid Dimensions = " << getGridDim(numRects) << std::endl;
#if UNIFIEDMEM_ENABLED
printf("Unified Memory is Enabled. Allocating using cudaMallocManaged \n");
err = cudaMallocManaged(&unifiedAreas, numRects * sizeof(double));
#else
printf("Unified Memory is NOT Enabled. Allocating using cudaMalloc \n");
//err = cudaMalloc(&deviceAreas, numRects * sizeof(double));
t_deviceAreas = thrust::device_vector<double>(numRects);
#endif
/* Check for error in device memory allocation */
if (err != cudaSuccess)
{
fprintf(stderr, "cudaMalloc or cudaMallocManaged failed: %s\n", cudaGetErrorString(err));
}
/* If CUDA is not enabled we are running on the CPU either serially or with openmp so we allocate memory in the host */
#else
hostAreas = (double*)malloc(numRects * sizeof(double));
if (hostAreas == NULL)
{
fprintf(stderr, "malloc failed!\n");
}
#endif
/////////////////////////////// KERNEL CALL SECTION ////////////////////////////////////////
/* If CUDA is enabled do the kernel and reduce call either with unifiedMemory or with device memory*/
#if CUDA_ENABLED
/* Start all cudaEvents so we can record timings */
GpuTimer kernelTimer("Kernel");
GpuTimer reduceTimer("Reduce");
GpuTimer allTimer("All");
allTimer.Start();
kernelTimer.Start();
#if UNIFIEDMEM_ENABLED
calculateAreas KERNEL(numRects) (numRects, (1.0 / numRects), unifiedAreas);
#else
calculateAreas KERNEL(numRects) (numRects, (1.0 / numRects), (double*)thrust::raw_pointer_cast(&t_deviceAreas[0]));
#endif
kernelTimer.Stop();
reduceTimer.Start();
#if UNIFIEDMEM_ENABLED
(*area) = thrust::reduce(thrust::cuda::par, unifiedAreas, unifiedAreas + numRects);
#else
(*area) = thrust::reduce(thrust::cuda::par, t_deviceAreas.begin(), t_deviceAreas.end());
#endif
reduceTimer.Stop();
allTimer.Stop();
kernelTimer.print();
reduceTimer.print();
allTimer.print();
cudaFree(deviceAreas);
cudaFree(unifiedAreas);
/* If CUDA is not enabled calculateAreas is not a kernel but a normal function. */
#else
/* This kernel call could also be given unifiedMemory as argument but for organization purposes it is called with hostAreas */
CpuTimer kernelTimer("Kernel");
CpuTimer reduceTimer("Reduce");
CpuTimer allTimer("All");
allTimer.Start();
allTimer.Start_cputimer();
kernelTimer.Start();
kernelTimer.Start_cputimer();
calculateAreas KERNEL(numRects) (numRects, (1.0 / numRects), hostAreas);
kernelTimer.Stop_cputimer();
kernelTimer.Stop();
(*area) = 0.0;
reduceTimer.Start();
reduceTimer.Start_cputimer();
for (i = 0; i < numRects; i++)
{
(*area) += hostAreas[i];
}
reduceTimer.Stop_cputimer();
reduceTimer.Stop();
allTimer.Stop_cputimer();
allTimer.Stop();
kernelTimer.print();
reduceTimer.print();
allTimer.print();
free(hostAreas);
#endif
///////////////////// GPU OR CPU FREE THE MEMORY ////////////////////
}
#if CUDA_ENABLED
void printDeviceInfo()
{
int device;
struct cudaDeviceProp props;
cudaGetDevice(&device);
cudaGetDeviceProperties(&props, device);
std::cout << "Device info: " <<std::endl;
std::cout << "Name: " << props.name <<std::endl;
std::cout << "version: " << props.major << "," << props.minor <<std::endl;
}
#endif
|
d1b92e79b38e9037bc131e1ce32f1c45420aaebe.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <math.h>
// to build on Titan V:
// nvcc -arch=sm_70 --ptxas-options=-v -o vanilladeriv vanilladeriv.cu;
#ifdef USE_DOUBLE
#define dfloat double
#else
#define dfloat float
#endif
#ifndef POLYNOMIAL_ORDER
#define POLYNOMIAL_ORDER 4
#endif
// note the order of the fields below is also assumed in the code.
const int64_t _nstate = 5;
const int64_t _R = 0, _U = 1, _V = 2, _W = 3, _E = 4;
const int64_t _nvgeo = 14;
const int64_t _XIx = 0;
const int64_t _ETAx = 1;
const int64_t _ZETAx = 2;
const int64_t _XIy = 3;
const int64_t _ETAy = 4;
const int64_t _ZETAy = 5;
const int64_t _XIz = 6;
const int64_t _ETAz = 7;
const int64_t _ZETAz = 8;
const int64_t _MJ = 9;
const int64_t _MJI = 10;
const int64_t _x = 11;
const int64_t _y = 12;
const int64_t _z = 13;
#define grav ((dfloat) 9.81)
#define gdm1 ((dfloat) 0.4)
template <int64_t Nq, int64_t Np, int64_t nvar>
__global__ void volumerhs(dfloat * __restrict__ rhs,
const dfloat * __restrict__ Q,
const dfloat * __restrict__ vgeo,
const dfloat gravity,
const dfloat * __restrict__ D,
const int64_t nelem){
__shared__ dfloat s_D[Nq][Nq];
__shared__ dfloat s_F[Nq][Nq][_nstate];
__shared__ dfloat s_G[Nq][Nq][_nstate];
dfloat r_rhsR[Nq];
dfloat r_rhsU[Nq];
dfloat r_rhsV[Nq];
dfloat r_rhsW[Nq];
dfloat r_rhsE[Nq];
int64_t e = blockIdx.x;
int64_t j = threadIdx.y;
int64_t i = threadIdx.x;
s_D[j][i] = D[j*Nq+i];
#pragma unroll Nq
for(int64_t k=0;k<Nq;++k){
r_rhsR[k] = 0;
r_rhsU[k] = 0;
r_rhsV[k] = 0;
r_rhsW[k] = 0;
r_rhsE[k] = 0;
}
#pragma unroll Nq
for(int64_t k=0;k<Nq;++k){
__syncthreads();
// Load the values we will need into registers
int64_t gid = i + j*Nq + k*Nq*Nq + e*Np*_nvgeo;
dfloat MJ = vgeo[gid + _MJ*Np];
dfloat XIx = vgeo[gid + _XIx*Np];
dfloat XIy = vgeo[gid + _XIy*Np];
dfloat XIz = vgeo[gid + _XIz*Np];
dfloat ETAx = vgeo[gid + _ETAx*Np];
dfloat ETAy = vgeo[gid + _ETAy*Np];
dfloat ETAz = vgeo[gid + _ETAz*Np];
dfloat ZETAx = vgeo[gid + _ZETAx*Np];
dfloat ZETAy = vgeo[gid + _ZETAy*Np];
dfloat ZETAz = vgeo[gid + _ZETAz*Np];
dfloat z = vgeo[gid + _z*Np];
int64_t qid = i + j*Nq + k*Nq*Nq + e*Np*nvar;
dfloat R = Q[qid + _R*Np];
dfloat U = Q[qid + _U*Np];
dfloat V = Q[qid + _V*Np];
dfloat W = Q[qid + _W*Np];
dfloat E = Q[qid + _E*Np];
dfloat P = gdm1*(E - (U*U + V*V + W*W)/(2*R) - R*gravity*z);
dfloat Rinv = 1 / R;
dfloat fluxR_x = U;
dfloat fluxU_x = Rinv * U * U + P;
dfloat fluxV_x = Rinv * U * V;
dfloat fluxW_x = Rinv * U * W;
dfloat fluxE_x = Rinv * U * (E + P);
dfloat fluxR_y = V;
dfloat fluxU_y = Rinv * V * U;
dfloat fluxV_y = Rinv * V * V + P;
dfloat fluxW_y = Rinv * V * W;
dfloat fluxE_y = Rinv * V * (E + P);
dfloat fluxR_z = W;
dfloat fluxU_z = Rinv * W * U;
dfloat fluxV_z = Rinv * W * V;
dfloat fluxW_z = Rinv * W * W + P;
dfloat fluxE_z = Rinv * W * (E + P);
s_F[i][j][ _R] = MJ * (XIx * fluxR_x + XIy * fluxR_y + XIz * fluxR_z);
s_F[i][j][ _U] = MJ * (XIx * fluxU_x + XIy * fluxU_y + XIz * fluxU_z);
s_F[i][j][ _V] = MJ * (XIx * fluxV_x + XIy * fluxV_y + XIz * fluxV_z);
s_F[i][j][ _W] = MJ * (XIx * fluxW_x + XIy * fluxW_y + XIz * fluxW_z);
s_F[i][j][ _E] = MJ * (XIx * fluxE_x + XIy * fluxE_y + XIz * fluxE_z);
s_G[i][j][ _R] = MJ * (ETAx * fluxR_x + ETAy * fluxR_y + ETAz * fluxR_z);
s_G[i][j][ _U] = MJ * (ETAx * fluxU_x + ETAy * fluxU_y + ETAz * fluxU_z);
s_G[i][j][ _V] = MJ * (ETAx * fluxV_x + ETAy * fluxV_y + ETAz * fluxV_z);
s_G[i][j][ _W] = MJ * (ETAx * fluxW_x + ETAy * fluxW_y + ETAz * fluxW_z);
s_G[i][j][ _E] = MJ * (ETAx * fluxE_x + ETAy * fluxE_y + ETAz * fluxE_z);
dfloat r_HR = MJ * (ZETAx * fluxR_x + ZETAy * fluxR_y + ZETAz * fluxR_z);
dfloat r_HU = MJ * (ZETAx * fluxU_x + ZETAy * fluxU_y + ZETAz * fluxU_z);
dfloat r_HV = MJ * (ZETAx * fluxV_x + ZETAy * fluxV_y + ZETAz * fluxV_z);
dfloat r_HW = MJ * (ZETAx * fluxW_x + ZETAy * fluxW_y + ZETAz * fluxW_z);
dfloat r_HE = MJ * (ZETAx * fluxE_x + ZETAy * fluxE_y + ZETAz * fluxE_z);
// one shared access per 10 flops
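    // The ZETA-direction flux computed at this k-slice (r_H*) is distributed into the
    // register accumulators r_rhs*[n] for every level n via the differentiation matrix,
    // while the XI/ETA fluxes are staged in shared memory (s_F, s_G) and gathered only
    // after the barrier below.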
#pragma unroll Nq
for(int64_t n=0;n<Nq;++n){
dfloat Dkn = s_D[k][n];
r_rhsR[n] += Dkn * r_HR;
r_rhsU[n] += Dkn * r_HU;
r_rhsV[n] += Dkn * r_HV;
r_rhsW[n] += Dkn * r_HW;
r_rhsE[n] += Dkn * r_HE;
}
r_rhsW[k] -= MJ * R * gravity;
__syncthreads();
// loop over the XI- and ETA-grid lines
#pragma unroll Nq
for(int64_t n=0;n<Nq;++n){
dfloat Dni = s_D[n][i];
dfloat Dnj = s_D[n][j];
r_rhsR[k] += Dni * s_F[n][j][_R];
r_rhsR[k] += Dnj * s_G[i][n][_R];
r_rhsU[k] += Dni * s_F[n][j][_U];
r_rhsU[k] += Dnj * s_G[i][n][_U];
r_rhsV[k] += Dni * s_F[n][j][_V];
r_rhsV[k] += Dnj * s_G[i][n][_V];
r_rhsW[k] += Dni * s_F[n][j][_W];
r_rhsW[k] += Dnj * s_G[i][n][_W];
r_rhsE[k] += Dni * s_F[n][j][_E];
r_rhsE[k] += Dnj * s_G[i][n][_E];
}
}
#pragma unroll Nq
for(int64_t k=0;k<Nq;++k){
int64_t gid = i + j*Nq + k*Nq*Nq + e*Np*_nvgeo;
dfloat MJI = vgeo[gid + _MJI*Np];
int64_t qid = i + j*Nq + k*Nq*Nq + e*Np*nvar;
rhs[qid+_U*Np] += MJI*r_rhsU[k];
rhs[qid+_V*Np] += MJI*r_rhsV[k];
rhs[qid+_W*Np] += MJI*r_rhsW[k];
rhs[qid+_R*Np] += MJI*r_rhsR[k];
rhs[qid+_E*Np] += MJI*r_rhsE[k];
}
}
void randArray(int64_t N, dfloat base, dfloat range, dfloat **q, dfloat **c_q){
*q = (dfloat*) calloc(N, sizeof(dfloat));
hipMalloc(c_q, N*sizeof(dfloat));
for(int64_t n=0;n<N;++n){
q[0][n] = base + drand48()*range;
}
hipMemcpy(c_q[0], q[0], N*sizeof(dfloat), hipMemcpyHostToDevice);
}
int main(int argc, char **argv){
srand48(1234);
const int64_t N = POLYNOMIAL_ORDER;
const int64_t nelem = 4000;
const int64_t Nq = N+1;
const int64_t Np = Nq*Nq*Nq;
const int64_t Ntotal = Np*nelem*_nstate;
dfloat *Q, *c_Q;
randArray(Ntotal, 0., 1., &Q, &c_Q);
for(int64_t e=0;e<nelem;++e){
for(int64_t n=0;n<Np;++n){
int64_t idR = n + _R*Np + e*_nstate*Np;
int64_t idE = n + _E*Np + e*_nstate*Np;
Q[idR] += 2.;
Q[idE] += 20.;
}
}
hipMemcpy(c_Q, Q, nelem*_nstate*Np*sizeof(dfloat), hipMemcpyHostToDevice);
const int64_t Gtotal = Np*nelem*_nvgeo;
dfloat *vgeo, *c_vgeo;
randArray(Gtotal, 0, 1., &vgeo, &c_vgeo);
// Make sure the entries of the mass matrix satisfy the inverse relation
for(int64_t e=0;e<nelem;++e){
for(int64_t n=0;n<Np;++n){
int64_t idMJ = n + _MJ*Np + e*_nvgeo*Np;
int64_t idMJI = n + _MJI*Np + e*_nvgeo*Np;
vgeo[idMJ] += 3;
vgeo[idMJI] = 1./vgeo[idMJ];
}
}
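// Note: the write-back loop in volumerhs scales each accumulated value by MJI, so
// setting vgeo[idMJI] = 1/vgeo[idMJ] here keeps MJ * MJI == 1 pointwise for the
// randomly generated geometry.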
hipMemcpy(c_vgeo, vgeo, nelem*_nvgeo*Np*sizeof(dfloat), hipMemcpyHostToDevice);
dfloat *D, *c_D;
randArray(Nq*Nq, 1., 1., &D, &c_D);
dfloat *rhs, *c_rhs;
srand48(1234);
randArray(Ntotal, 1., 1., &rhs, &c_rhs);
dim3 G(nelem,1,1);
dim3 B2(Nq,Nq,Nq);
dim3 B3(Nq,Nq,1);
hipLaunchKernelGGL(( volumerhs<Nq, Np, _nstate>) , dim3(G), dim3(B3) , 0, 0, c_rhs, c_Q, c_vgeo, grav, c_D, nelem);
hipDeviceSynchronize();
exit(0);
return 0;
}
|
d1b92e79b38e9037bc131e1ce32f1c45420aaebe.cu
|
#include <cuda.h>
#include <stdlib.h>
#include <stdio.h>
#include <unistd.h>
#include <math.h>
// to build on Titan V:
// nvcc -arch=sm_70 --ptxas-options=-v -o vanilladeriv vanilladeriv.cu;
#ifdef USE_DOUBLE
#define dfloat double
#else
#define dfloat float
#endif
#ifndef POLYNOMIAL_ORDER
#define POLYNOMIAL_ORDER 4
#endif
// note the order of the fields below is also assumed in the code.
const int64_t _nstate = 5;
const int64_t _R = 0, _U = 1, _V = 2, _W = 3, _E = 4;
const int64_t _nvgeo = 14;
const int64_t _XIx = 0;
const int64_t _ETAx = 1;
const int64_t _ZETAx = 2;
const int64_t _XIy = 3;
const int64_t _ETAy = 4;
const int64_t _ZETAy = 5;
const int64_t _XIz = 6;
const int64_t _ETAz = 7;
const int64_t _ZETAz = 8;
const int64_t _MJ = 9;
const int64_t _MJI = 10;
const int64_t _x = 11;
const int64_t _y = 12;
const int64_t _z = 13;
#define grav ((dfloat) 9.81)
#define gdm1 ((dfloat) 0.4)
template <int64_t Nq, int64_t Np, int64_t nvar>
__global__ void volumerhs(dfloat * __restrict__ rhs,
const dfloat * __restrict__ Q,
const dfloat * __restrict__ vgeo,
const dfloat gravity,
const dfloat * __restrict__ D,
const int64_t nelem){
__shared__ dfloat s_D[Nq][Nq];
__shared__ dfloat s_F[Nq][Nq][_nstate];
__shared__ dfloat s_G[Nq][Nq][_nstate];
dfloat r_rhsR[Nq];
dfloat r_rhsU[Nq];
dfloat r_rhsV[Nq];
dfloat r_rhsW[Nq];
dfloat r_rhsE[Nq];
int64_t e = blockIdx.x;
int64_t j = threadIdx.y;
int64_t i = threadIdx.x;
s_D[j][i] = D[j*Nq+i];
#pragma unroll Nq
for(int64_t k=0;k<Nq;++k){
r_rhsR[k] = 0;
r_rhsU[k] = 0;
r_rhsV[k] = 0;
r_rhsW[k] = 0;
r_rhsE[k] = 0;
}
#pragma unroll Nq
for(int64_t k=0;k<Nq;++k){
__syncthreads();
// Load the values we will need into registers
int64_t gid = i + j*Nq + k*Nq*Nq + e*Np*_nvgeo;
dfloat MJ = vgeo[gid + _MJ*Np];
dfloat XIx = vgeo[gid + _XIx*Np];
dfloat XIy = vgeo[gid + _XIy*Np];
dfloat XIz = vgeo[gid + _XIz*Np];
dfloat ETAx = vgeo[gid + _ETAx*Np];
dfloat ETAy = vgeo[gid + _ETAy*Np];
dfloat ETAz = vgeo[gid + _ETAz*Np];
dfloat ZETAx = vgeo[gid + _ZETAx*Np];
dfloat ZETAy = vgeo[gid + _ZETAy*Np];
dfloat ZETAz = vgeo[gid + _ZETAz*Np];
dfloat z = vgeo[gid + _z*Np];
int64_t qid = i + j*Nq + k*Nq*Nq + e*Np*nvar;
dfloat R = Q[qid + _R*Np];
dfloat U = Q[qid + _U*Np];
dfloat V = Q[qid + _V*Np];
dfloat W = Q[qid + _W*Np];
dfloat E = Q[qid + _E*Np];
dfloat P = gdm1*(E - (U*U + V*V + W*W)/(2*R) - R*gravity*z);
dfloat Rinv = 1 / R;
dfloat fluxR_x = U;
dfloat fluxU_x = Rinv * U * U + P;
dfloat fluxV_x = Rinv * U * V;
dfloat fluxW_x = Rinv * U * W;
dfloat fluxE_x = Rinv * U * (E + P);
dfloat fluxR_y = V;
dfloat fluxU_y = Rinv * V * U;
dfloat fluxV_y = Rinv * V * V + P;
dfloat fluxW_y = Rinv * V * W;
dfloat fluxE_y = Rinv * V * (E + P);
dfloat fluxR_z = W;
dfloat fluxU_z = Rinv * W * U;
dfloat fluxV_z = Rinv * W * V;
dfloat fluxW_z = Rinv * W * W + P;
dfloat fluxE_z = Rinv * W * (E + P);
s_F[i][j][ _R] = MJ * (XIx * fluxR_x + XIy * fluxR_y + XIz * fluxR_z);
s_F[i][j][ _U] = MJ * (XIx * fluxU_x + XIy * fluxU_y + XIz * fluxU_z);
s_F[i][j][ _V] = MJ * (XIx * fluxV_x + XIy * fluxV_y + XIz * fluxV_z);
s_F[i][j][ _W] = MJ * (XIx * fluxW_x + XIy * fluxW_y + XIz * fluxW_z);
s_F[i][j][ _E] = MJ * (XIx * fluxE_x + XIy * fluxE_y + XIz * fluxE_z);
s_G[i][j][ _R] = MJ * (ETAx * fluxR_x + ETAy * fluxR_y + ETAz * fluxR_z);
s_G[i][j][ _U] = MJ * (ETAx * fluxU_x + ETAy * fluxU_y + ETAz * fluxU_z);
s_G[i][j][ _V] = MJ * (ETAx * fluxV_x + ETAy * fluxV_y + ETAz * fluxV_z);
s_G[i][j][ _W] = MJ * (ETAx * fluxW_x + ETAy * fluxW_y + ETAz * fluxW_z);
s_G[i][j][ _E] = MJ * (ETAx * fluxE_x + ETAy * fluxE_y + ETAz * fluxE_z);
dfloat r_HR = MJ * (ZETAx * fluxR_x + ZETAy * fluxR_y + ZETAz * fluxR_z);
dfloat r_HU = MJ * (ZETAx * fluxU_x + ZETAy * fluxU_y + ZETAz * fluxU_z);
dfloat r_HV = MJ * (ZETAx * fluxV_x + ZETAy * fluxV_y + ZETAz * fluxV_z);
dfloat r_HW = MJ * (ZETAx * fluxW_x + ZETAy * fluxW_y + ZETAz * fluxW_z);
dfloat r_HE = MJ * (ZETAx * fluxE_x + ZETAy * fluxE_y + ZETAz * fluxE_z);
// one shared access per 10 flops
#pragma unroll Nq
for(int64_t n=0;n<Nq;++n){
dfloat Dkn = s_D[k][n];
r_rhsR[n] += Dkn * r_HR;
r_rhsU[n] += Dkn * r_HU;
r_rhsV[n] += Dkn * r_HV;
r_rhsW[n] += Dkn * r_HW;
r_rhsE[n] += Dkn * r_HE;
}
r_rhsW[k] -= MJ * R * gravity;
__syncthreads();
// loop over the XI- and ETA-grid lines
#pragma unroll Nq
for(int64_t n=0;n<Nq;++n){
dfloat Dni = s_D[n][i];
dfloat Dnj = s_D[n][j];
r_rhsR[k] += Dni * s_F[n][j][_R];
r_rhsR[k] += Dnj * s_G[i][n][_R];
r_rhsU[k] += Dni * s_F[n][j][_U];
r_rhsU[k] += Dnj * s_G[i][n][_U];
r_rhsV[k] += Dni * s_F[n][j][_V];
r_rhsV[k] += Dnj * s_G[i][n][_V];
r_rhsW[k] += Dni * s_F[n][j][_W];
r_rhsW[k] += Dnj * s_G[i][n][_W];
r_rhsE[k] += Dni * s_F[n][j][_E];
r_rhsE[k] += Dnj * s_G[i][n][_E];
}
}
#pragma unroll Nq
for(int64_t k=0;k<Nq;++k){
int64_t gid = i + j*Nq + k*Nq*Nq + e*Np*_nvgeo;
dfloat MJI = vgeo[gid + _MJI*Np];
int64_t qid = i + j*Nq + k*Nq*Nq + e*Np*nvar;
rhs[qid+_U*Np] += MJI*r_rhsU[k];
rhs[qid+_V*Np] += MJI*r_rhsV[k];
rhs[qid+_W*Np] += MJI*r_rhsW[k];
rhs[qid+_R*Np] += MJI*r_rhsR[k];
rhs[qid+_E*Np] += MJI*r_rhsE[k];
}
}
void randArray(int64_t N, dfloat base, dfloat range, dfloat **q, dfloat **c_q){
*q = (dfloat*) calloc(N, sizeof(dfloat));
cudaMalloc(c_q, N*sizeof(dfloat));
for(int64_t n=0;n<N;++n){
q[0][n] = base + drand48()*range;
}
cudaMemcpy(c_q[0], q[0], N*sizeof(dfloat), cudaMemcpyHostToDevice);
}
int main(int argc, char **argv){
srand48(1234);
const int64_t N = POLYNOMIAL_ORDER;
const int64_t nelem = 4000;
const int64_t Nq = N+1;
const int64_t Np = Nq*Nq*Nq;
const int64_t Ntotal = Np*nelem*_nstate;
dfloat *Q, *c_Q;
randArray(Ntotal, 0., 1., &Q, &c_Q);
for(int64_t e=0;e<nelem;++e){
for(int64_t n=0;n<Np;++n){
int64_t idR = n + _R*Np + e*_nstate*Np;
int64_t idE = n + _E*Np + e*_nstate*Np;
Q[idR] += 2.;
Q[idE] += 20.;
}
}
cudaMemcpy(c_Q, Q, nelem*_nstate*Np*sizeof(dfloat), cudaMemcpyHostToDevice);
const int64_t Gtotal = Np*nelem*_nvgeo;
dfloat *vgeo, *c_vgeo;
randArray(Gtotal, 0, 1., &vgeo, &c_vgeo);
// Make sure the entries of the mass matrix satisfy the inverse relation
for(int64_t e=0;e<nelem;++e){
for(int64_t n=0;n<Np;++n){
int64_t idMJ = n + _MJ*Np + e*_nvgeo*Np;
int64_t idMJI = n + _MJI*Np + e*_nvgeo*Np;
vgeo[idMJ] += 3;
vgeo[idMJI] = 1./vgeo[idMJ];
}
}
cudaMemcpy(c_vgeo, vgeo, nelem*_nvgeo*Np*sizeof(dfloat), cudaMemcpyHostToDevice);
dfloat *D, *c_D;
randArray(Nq*Nq, 1., 1., &D, &c_D);
dfloat *rhs, *c_rhs;
srand48(1234);
randArray(Ntotal, 1., 1., &rhs, &c_rhs);
dim3 G(nelem,1,1);
dim3 B2(Nq,Nq,Nq);
dim3 B3(Nq,Nq,1);
volumerhs<Nq, Np, _nstate> <<< G, B3 >>> (c_rhs, c_Q, c_vgeo, grav, c_D, nelem);
cudaDeviceSynchronize();
exit(0);
return 0;
}
|
a9f1809c66513b56acbef14b851f0d0c968b94a8.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/zmath.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
#include <c10/macros/Macros.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
void add_kernel_cuda(TensorIterator& iter, Scalar alpha_scalar) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBool, kBFloat16, iter.common_dtype(), "add_cuda/sub_cuda", [&]() {
auto alpha = alpha_scalar.to<scalar_t>();
gpu_kernel_with_scalars(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a + alpha * b;
});
});
}
static void sub_kernel_cuda(TensorIterator& iter, Scalar alpha_scalar) {
add_kernel_cuda(iter, -alpha_scalar);
}
void div_kernel_cuda(TensorIterator& iter) {
if (!isIntegralType(iter.common_dtype(), /*includeBool*/ false) && iter.is_cpu_scalar(2)) {
// optimization for floating-point types: if the second operand is a CPU
// scalar, compute a * reciprocal(b). Note that this may lose one bit of
// precision compared to computing the division.
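    // Worked example of the note above (values are illustrative, not taken from the
    // source): with b = 3.0f the reciprocal 1.0f/b is rounded once, so a * (1.0f/b)
    // may differ from a / b by one ulp for some a; the benefit is that the division
    // is hoisted out of the element-wise loop and replaced by a multiply.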
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "div_cuda", [&]() {
using thrust_t = typename ztype_cuda<scalar_t>::thrust_t;
auto inv_b = thrust_t(1.0) / thrust_t(iter.scalar_value<scalar_t>(2));
iter.remove_operand(2);
gpu_kernel(iter, [inv_b]GPU_LAMBDA(thrust_t a) -> thrust_t {
return a * inv_b;
});
});
} else {
AT_DISPATCH_ALL_TYPES_AND_C10_COMPLEX_AND2(kHalf, kBFloat16, iter.common_dtype(), "div_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a / b;
});
});
}
}
void mul_kernel_cuda(TensorIterator& iter) {
if (iter.common_dtype() == ScalarType::Bool) {
// Workaround for the error: '*' in boolean context, suggest '&&' instead [-Werror=int-in-bool-context]
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(bool a, bool b) -> bool {
return a && b;
});
} else {
AT_DISPATCH_ALL_TYPES_AND_C10_COMPLEX_AND2(kHalf, kBFloat16, iter.common_dtype(), "mul_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a * b;
});
});
}
}
void remainder_kernel_cuda(TensorIterator& iter) {
if (isIntegralType(iter.dtype(), /*includeBool*/ false)) {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "remainder_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
scalar_t r = a % b;
if ((r != 0) && ((r < 0) != (b < 0))) {
r += b;
}
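      // e.g. (-3) % 2 is -1 under C++ truncated division; adding b yields 1, matching
      // Python-style floored modulo.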
return r;
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "remainder_cuda", [&]() {
gpu_kernel_with_scalars(iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) __ubsan_ignore_float_divide_by_zero__ -> scalar_t {
return a - b * static_cast<scalar_t>(::floor(a / b));
});
});
}
}
REGISTER_DISPATCH(add_stub, &add_kernel_cuda);
REGISTER_DISPATCH(sub_stub, &sub_kernel_cuda);
REGISTER_DISPATCH(div_stub, &div_kernel_cuda);
REGISTER_DISPATCH(mul_stub, &mul_kernel_cuda);
REGISTER_DISPATCH(remainder_stub, &remainder_kernel_cuda);
}} // namespace at::native
|
a9f1809c66513b56acbef14b851f0d0c968b94a8.cu
|
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/zmath.cuh>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/BinaryOps.h>
#include <c10/macros/Macros.h>
// NOTE: CUDA on Windows requires that the enclosing function
// of a __device__ lambda not have internal linkage.
namespace at { namespace native {
void add_kernel_cuda(TensorIterator& iter, Scalar alpha_scalar) {
AT_DISPATCH_ALL_TYPES_AND_COMPLEX_AND3(kHalf, kBool, kBFloat16, iter.common_dtype(), "add_cuda/sub_cuda", [&]() {
auto alpha = alpha_scalar.to<scalar_t>();
gpu_kernel_with_scalars(iter, [alpha]GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a + alpha * b;
});
});
}
static void sub_kernel_cuda(TensorIterator& iter, Scalar alpha_scalar) {
add_kernel_cuda(iter, -alpha_scalar);
}
void div_kernel_cuda(TensorIterator& iter) {
if (!isIntegralType(iter.common_dtype(), /*includeBool*/ false) && iter.is_cpu_scalar(2)) {
// optimization for floating-point types: if the second operand is a CPU
// scalar, compute a * reciprocal(b). Note that this may lose one bit of
// precision compared to computing the division.
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(kHalf, kBFloat16, iter.common_dtype(), "div_cuda", [&]() {
using thrust_t = typename ztype_cuda<scalar_t>::thrust_t;
auto inv_b = thrust_t(1.0) / thrust_t(iter.scalar_value<scalar_t>(2));
iter.remove_operand(2);
gpu_kernel(iter, [inv_b]GPU_LAMBDA(thrust_t a) -> thrust_t {
return a * inv_b;
});
});
} else {
AT_DISPATCH_ALL_TYPES_AND_C10_COMPLEX_AND2(kHalf, kBFloat16, iter.common_dtype(), "div_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a / b;
});
});
}
}
void mul_kernel_cuda(TensorIterator& iter) {
if (iter.common_dtype() == ScalarType::Bool) {
// Workaround for the error: '*' in boolean context, suggest '&&' instead [-Werror=int-in-bool-context]
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(bool a, bool b) -> bool {
return a && b;
});
} else {
AT_DISPATCH_ALL_TYPES_AND_C10_COMPLEX_AND2(kHalf, kBFloat16, iter.common_dtype(), "mul_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
return a * b;
});
});
}
}
void remainder_kernel_cuda(TensorIterator& iter) {
if (isIntegralType(iter.dtype(), /*includeBool*/ false)) {
AT_DISPATCH_INTEGRAL_TYPES(iter.dtype(), "remainder_cuda", [&]() {
gpu_kernel_with_scalars(iter, []GPU_LAMBDA(scalar_t a, scalar_t b) -> scalar_t {
scalar_t r = a % b;
if ((r != 0) && ((r < 0) != (b < 0))) {
r += b;
}
return r;
});
});
} else {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.dtype(), "remainder_cuda", [&]() {
gpu_kernel_with_scalars(iter,
[]GPU_LAMBDA(scalar_t a, scalar_t b) __ubsan_ignore_float_divide_by_zero__ -> scalar_t {
return a - b * static_cast<scalar_t>(std::floor(a / b));
});
});
}
}
REGISTER_DISPATCH(add_stub, &add_kernel_cuda);
REGISTER_DISPATCH(sub_stub, &sub_kernel_cuda);
REGISTER_DISPATCH(div_stub, &div_kernel_cuda);
REGISTER_DISPATCH(mul_stub, &mul_kernel_cuda);
REGISTER_DISPATCH(remainder_stub, &remainder_kernel_cuda);
}} // namespace at::native
|
7c78ea5041c6d1615c66b0233dd94520f2e98215.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This file is part of the EPPM source code package.
*
* Copyright (c) 2013-2016 Linchao Bao ([email protected])
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "bao_basic_cuda.h"
#include "defs.h" //for parameters
#include "3rdparty/nv-cuda-v5.0/bicubicTexture_kernel.cuh"
texture<uchar4, 2, hipReadModeNormalizedFloat> rgbaImg1Tex; //in texture memory
texture<uchar4, 2, hipReadModeNormalizedFloat> rgbaImg2Tex; //in texture memory
#define BLOCK_DIM_X 16
#define BLOCK_DIM_Y 16
__device__ unsigned char _d_is_larger(float4 a, float4 b)
{
if (0.3f*a.x + 0.6f*a.y + 0.1f*a.z > 0.3f*b.x + 0.6f*b.y + 0.1f*b.z) return 0x1;
else return 0x0;
}
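// 3x3 census transform: each of the 8 neighbours is compared against the centre
// pixel by luminance (the 0.3/0.6/0.1 weighting above) and the 8 boolean results
// are packed into one byte per pixel; matching two census images then reduces to a
// Hamming distance on these bytes.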
__global__ void d_census_transform3x3(unsigned char* d_census1, unsigned char* d_census2, int w, int h, size_t census_mem_w)
{
int id_x = threadIdx.x + blockIdx.x * blockDim.x;
int id_y = threadIdx.y + blockIdx.y * blockDim.y;
if (id_x >= w || id_y >= h) return;
float4 centerPix = tex2D(rgbaImg1Tex, id_x, id_y);
float4 surroundPix[8];
surroundPix[0] = tex2D(rgbaImg1Tex, id_x-1, id_y-1);
surroundPix[1] = tex2D(rgbaImg1Tex, id_x, id_y-1);
surroundPix[2] = tex2D(rgbaImg1Tex, id_x+1, id_y-1);
surroundPix[3] = tex2D(rgbaImg1Tex, id_x-1, id_y);
surroundPix[4] = tex2D(rgbaImg1Tex, id_x+1, id_y);
surroundPix[5] = tex2D(rgbaImg1Tex, id_x-1, id_y+1);
surroundPix[6] = tex2D(rgbaImg1Tex, id_x, id_y+1);
surroundPix[7] = tex2D(rgbaImg1Tex, id_x+1, id_y+1);
unsigned char censusRes = _d_is_larger(surroundPix[0],centerPix);
censusRes += (_d_is_larger(surroundPix[1],centerPix) << 1);
censusRes += (_d_is_larger(surroundPix[2],centerPix) << 2);
censusRes += (_d_is_larger(surroundPix[3],centerPix) << 3);
censusRes += (_d_is_larger(surroundPix[4],centerPix) << 4);
censusRes += (_d_is_larger(surroundPix[5],centerPix) << 5);
censusRes += (_d_is_larger(surroundPix[6],centerPix) << 6);
censusRes += (_d_is_larger(surroundPix[7],centerPix) << 7);
d_census1[id_y*census_mem_w + id_x] = censusRes;
//the second image
centerPix = tex2D(rgbaImg2Tex, id_x, id_y);
surroundPix[0] = tex2D(rgbaImg2Tex, id_x-1, id_y-1);
surroundPix[1] = tex2D(rgbaImg2Tex, id_x, id_y-1);
surroundPix[2] = tex2D(rgbaImg2Tex, id_x+1, id_y-1);
surroundPix[3] = tex2D(rgbaImg2Tex, id_x-1, id_y);
surroundPix[4] = tex2D(rgbaImg2Tex, id_x+1, id_y);
surroundPix[5] = tex2D(rgbaImg2Tex, id_x-1, id_y+1);
surroundPix[6] = tex2D(rgbaImg2Tex, id_x, id_y+1);
surroundPix[7] = tex2D(rgbaImg2Tex, id_x+1, id_y+1);
censusRes = _d_is_larger(surroundPix[0],centerPix);
censusRes += (_d_is_larger(surroundPix[1],centerPix) << 1);
censusRes += (_d_is_larger(surroundPix[2],centerPix) << 2);
censusRes += (_d_is_larger(surroundPix[3],centerPix) << 3);
censusRes += (_d_is_larger(surroundPix[4],centerPix) << 4);
censusRes += (_d_is_larger(surroundPix[5],centerPix) << 5);
censusRes += (_d_is_larger(surroundPix[6],centerPix) << 6);
censusRes += (_d_is_larger(surroundPix[7],centerPix) << 7);
d_census2[id_y*census_mem_w + id_x] = censusRes;
}
extern "C"
void baoCudaCensusTransform(unsigned char* d_census1, unsigned char* d_census2, uchar4* d_img1, uchar4* d_img2, int w, int h, size_t img_pitch, size_t census_pitch)
{
//bind imgs
hipChannelFormatDesc desc_img = hipCreateChannelDesc<uchar4>();
checkCudaErrors(hipBindTexture2D(0, rgbaImg1Tex, d_img1, desc_img, w, h, img_pitch));
checkCudaErrors(hipBindTexture2D(0, rgbaImg2Tex, d_img2, desc_img, w, h, img_pitch));
// getLastCudaError("Census Bind Texture FAILED");
//compute census transform
dim3 gridSize(bao_div_ceil(w,BLOCK_DIM_X),bao_div_ceil(h,BLOCK_DIM_Y));
dim3 blockSize(BLOCK_DIM_X,BLOCK_DIM_Y);
size_t census_mem_w = census_pitch/sizeof(unsigned char);
// bao_timer_gpu timer;
// timer.start();
hipLaunchKernelGGL(( d_census_transform3x3), dim3(gridSize), dim3(blockSize), 0, 0, d_census1,d_census2,w,h,census_mem_w); //0.075ms
// timer.time_display("Pre: Census Transform");
// getLastCudaError("Census Transform FAILED");
}
__global__ void d_census_transform3x3_bicubic(unsigned char* d_census1, unsigned char* d_census2, int w, int h, size_t census_mem_w, float up_factor)
{
int id_x = threadIdx.x + blockIdx.x * blockDim.x;
int id_y = threadIdx.y + blockIdx.y * blockDim.y;
if (id_x >= w || id_y >= h) return;
float4 centerPix = tex2DBicubic<uchar4,float4>(rgbaImg1Tex, id_x*up_factor, id_y*up_factor);
float4 surroundPix[8];
surroundPix[0] = tex2DBicubic<uchar4,float4>(rgbaImg1Tex, (id_x-1)*up_factor, (id_y-1)*up_factor);
surroundPix[1] = tex2DBicubic<uchar4,float4>(rgbaImg1Tex, (id_x)*up_factor, (id_y-1)*up_factor);
surroundPix[2] = tex2DBicubic<uchar4,float4>(rgbaImg1Tex, (id_x+1)*up_factor, (id_y-1)*up_factor);
surroundPix[3] = tex2DBicubic<uchar4,float4>(rgbaImg1Tex, (id_x-1)*up_factor, (id_y)*up_factor);
surroundPix[4] = tex2DBicubic<uchar4,float4>(rgbaImg1Tex, (id_x+1)*up_factor, (id_y)*up_factor);
surroundPix[5] = tex2DBicubic<uchar4,float4>(rgbaImg1Tex, (id_x-1)*up_factor, (id_y+1)*up_factor);
surroundPix[6] = tex2DBicubic<uchar4,float4>(rgbaImg1Tex, (id_x)*up_factor, (id_y+1)*up_factor);
surroundPix[7] = tex2DBicubic<uchar4,float4>(rgbaImg1Tex, (id_x+1)*up_factor, (id_y+1)*up_factor);
unsigned char censusRes = _d_is_larger(surroundPix[0],centerPix);
censusRes += (_d_is_larger(surroundPix[1],centerPix) << 1);
censusRes += (_d_is_larger(surroundPix[2],centerPix) << 2);
censusRes += (_d_is_larger(surroundPix[3],centerPix) << 3);
censusRes += (_d_is_larger(surroundPix[4],centerPix) << 4);
censusRes += (_d_is_larger(surroundPix[5],centerPix) << 5);
censusRes += (_d_is_larger(surroundPix[6],centerPix) << 6);
censusRes += (_d_is_larger(surroundPix[7],centerPix) << 7);
d_census1[id_y*census_mem_w + id_x] = censusRes;
//the second image
centerPix = tex2DBicubic<uchar4,float4>(rgbaImg2Tex, id_x*up_factor, id_y*up_factor);
surroundPix[0] = tex2DBicubic<uchar4,float4>(rgbaImg2Tex, (id_x-1)*up_factor, (id_y-1)*up_factor);
surroundPix[1] = tex2DBicubic<uchar4,float4>(rgbaImg2Tex, (id_x)*up_factor, (id_y-1)*up_factor);
surroundPix[2] = tex2DBicubic<uchar4,float4>(rgbaImg2Tex, (id_x+1)*up_factor, (id_y-1)*up_factor);
surroundPix[3] = tex2DBicubic<uchar4,float4>(rgbaImg2Tex, (id_x-1)*up_factor, (id_y)*up_factor);
surroundPix[4] = tex2DBicubic<uchar4,float4>(rgbaImg2Tex, (id_x+1)*up_factor, (id_y)*up_factor);
surroundPix[5] = tex2DBicubic<uchar4,float4>(rgbaImg2Tex, (id_x-1)*up_factor, (id_y+1)*up_factor);
surroundPix[6] = tex2DBicubic<uchar4,float4>(rgbaImg2Tex, (id_x)*up_factor, (id_y+1)*up_factor);
surroundPix[7] = tex2DBicubic<uchar4,float4>(rgbaImg2Tex, (id_x+1)*up_factor, (id_y+1)*up_factor);
censusRes = _d_is_larger(surroundPix[0],centerPix);
censusRes += (_d_is_larger(surroundPix[1],centerPix) << 1);
censusRes += (_d_is_larger(surroundPix[2],centerPix) << 2);
censusRes += (_d_is_larger(surroundPix[3],centerPix) << 3);
censusRes += (_d_is_larger(surroundPix[4],centerPix) << 4);
censusRes += (_d_is_larger(surroundPix[5],centerPix) << 5);
censusRes += (_d_is_larger(surroundPix[6],centerPix) << 6);
censusRes += (_d_is_larger(surroundPix[7],centerPix) << 7);
d_census2[id_y*census_mem_w + id_x] = censusRes;
}
extern "C"
void baoCudaCensusTransform_Bicubic(unsigned char* d_census1, unsigned char* d_census2, int w_up, int h_up, size_t census_pitch, uchar4* d_img1, uchar4* d_img2, int w, int h, size_t img_pitch)
{
//bind imgs
hipChannelFormatDesc desc_img = hipCreateChannelDesc<uchar4>();
checkCudaErrors(hipBindTexture2D(0, rgbaImg1Tex, d_img1, desc_img, w, h, img_pitch));
checkCudaErrors(hipBindTexture2D(0, rgbaImg2Tex, d_img2, desc_img, w, h, img_pitch));
//compute census transform
dim3 gridSize(bao_div_ceil(w_up,BLOCK_DIM_X),bao_div_ceil(h_up,BLOCK_DIM_Y));
dim3 blockSize(BLOCK_DIM_X,BLOCK_DIM_Y);
float up_factor = float(w) / float(w_up); //e.g., 0.5f
size_t census_mem_w = census_pitch/sizeof(unsigned char);
bao_timer_gpu timer;
timer.start();
hipLaunchKernelGGL(( d_census_transform3x3_bicubic), dim3(gridSize), dim3(blockSize), 0, 0, d_census1,d_census2,w_up,h_up,census_mem_w,up_factor); //0.075ms
timer.time_display("Pre: Census Transform");
getLastCudaError("Census Transform FAILED");
}
|
7c78ea5041c6d1615c66b0233dd94520f2e98215.cu
|
/* This file is part of the EPPM source code package.
*
* Copyright (c) 2013-2016 Linchao Bao ([email protected])
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use,
* copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following
* conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
* OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
* HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
* WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*/
#include "bao_basic_cuda.h"
#include "defs.h" //for parameters
#include "3rdparty/nv-cuda-v5.0/bicubicTexture_kernel.cuh"
texture<uchar4, 2, cudaReadModeNormalizedFloat> rgbaImg1Tex; //in texture memory
texture<uchar4, 2, cudaReadModeNormalizedFloat> rgbaImg2Tex; //in texture memory
#define BLOCK_DIM_X 16
#define BLOCK_DIM_Y 16
__device__ unsigned char _d_is_larger(float4 a, float4 b)
{
if (0.3f*a.x + 0.6f*a.y + 0.1f*a.z > 0.3f*b.x + 0.6f*b.y + 0.1f*b.z) return 0x1;
else return 0x0;
}
__global__ void d_census_transform3x3(unsigned char* d_census1, unsigned char* d_census2, int w, int h, size_t census_mem_w)
{
int id_x = threadIdx.x + blockIdx.x * blockDim.x;
int id_y = threadIdx.y + blockIdx.y * blockDim.y;
if (id_x >= w || id_y >= h) return;
float4 centerPix = tex2D(rgbaImg1Tex, id_x, id_y);
float4 surroundPix[8];
surroundPix[0] = tex2D(rgbaImg1Tex, id_x-1, id_y-1);
surroundPix[1] = tex2D(rgbaImg1Tex, id_x, id_y-1);
surroundPix[2] = tex2D(rgbaImg1Tex, id_x+1, id_y-1);
surroundPix[3] = tex2D(rgbaImg1Tex, id_x-1, id_y);
surroundPix[4] = tex2D(rgbaImg1Tex, id_x+1, id_y);
surroundPix[5] = tex2D(rgbaImg1Tex, id_x-1, id_y+1);
surroundPix[6] = tex2D(rgbaImg1Tex, id_x, id_y+1);
surroundPix[7] = tex2D(rgbaImg1Tex, id_x+1, id_y+1);
unsigned char censusRes = _d_is_larger(surroundPix[0],centerPix);
censusRes += (_d_is_larger(surroundPix[1],centerPix) << 1);
censusRes += (_d_is_larger(surroundPix[2],centerPix) << 2);
censusRes += (_d_is_larger(surroundPix[3],centerPix) << 3);
censusRes += (_d_is_larger(surroundPix[4],centerPix) << 4);
censusRes += (_d_is_larger(surroundPix[5],centerPix) << 5);
censusRes += (_d_is_larger(surroundPix[6],centerPix) << 6);
censusRes += (_d_is_larger(surroundPix[7],centerPix) << 7);
d_census1[id_y*census_mem_w + id_x] = censusRes;
//the second image
centerPix = tex2D(rgbaImg2Tex, id_x, id_y);
surroundPix[0] = tex2D(rgbaImg2Tex, id_x-1, id_y-1);
surroundPix[1] = tex2D(rgbaImg2Tex, id_x, id_y-1);
surroundPix[2] = tex2D(rgbaImg2Tex, id_x+1, id_y-1);
surroundPix[3] = tex2D(rgbaImg2Tex, id_x-1, id_y);
surroundPix[4] = tex2D(rgbaImg2Tex, id_x+1, id_y);
surroundPix[5] = tex2D(rgbaImg2Tex, id_x-1, id_y+1);
surroundPix[6] = tex2D(rgbaImg2Tex, id_x, id_y+1);
surroundPix[7] = tex2D(rgbaImg2Tex, id_x+1, id_y+1);
censusRes = _d_is_larger(surroundPix[0],centerPix);
censusRes += (_d_is_larger(surroundPix[1],centerPix) << 1);
censusRes += (_d_is_larger(surroundPix[2],centerPix) << 2);
censusRes += (_d_is_larger(surroundPix[3],centerPix) << 3);
censusRes += (_d_is_larger(surroundPix[4],centerPix) << 4);
censusRes += (_d_is_larger(surroundPix[5],centerPix) << 5);
censusRes += (_d_is_larger(surroundPix[6],centerPix) << 6);
censusRes += (_d_is_larger(surroundPix[7],centerPix) << 7);
d_census2[id_y*census_mem_w + id_x] = censusRes;
}
extern "C"
void baoCudaCensusTransform(unsigned char* d_census1, unsigned char* d_census2, uchar4* d_img1, uchar4* d_img2, int w, int h, size_t img_pitch, size_t census_pitch)
{
//bind imgs
cudaChannelFormatDesc desc_img = cudaCreateChannelDesc<uchar4>();
checkCudaErrors(cudaBindTexture2D(0, rgbaImg1Tex, d_img1, desc_img, w, h, img_pitch));
checkCudaErrors(cudaBindTexture2D(0, rgbaImg2Tex, d_img2, desc_img, w, h, img_pitch));
// getLastCudaError("Census Bind Texture FAILED");
//compute census transform
dim3 gridSize(bao_div_ceil(w,BLOCK_DIM_X),bao_div_ceil(h,BLOCK_DIM_Y));
dim3 blockSize(BLOCK_DIM_X,BLOCK_DIM_Y);
size_t census_mem_w = census_pitch/sizeof(unsigned char);
// bao_timer_gpu timer;
// timer.start();
d_census_transform3x3<<<gridSize, blockSize>>>(d_census1,d_census2,w,h,census_mem_w); //0.075ms
// timer.time_display("Pre: Census Transform");
// getLastCudaError("Census Transform FAILED");
}
__global__ void d_census_transform3x3_bicubic(unsigned char* d_census1, unsigned char* d_census2, int w, int h, size_t census_mem_w, float up_factor)
{
int id_x = threadIdx.x + blockIdx.x * blockDim.x;
int id_y = threadIdx.y + blockIdx.y * blockDim.y;
if (id_x >= w || id_y >= h) return;
float4 centerPix = tex2DBicubic<uchar4,float4>(rgbaImg1Tex, id_x*up_factor, id_y*up_factor);
float4 surroundPix[8];
surroundPix[0] = tex2DBicubic<uchar4,float4>(rgbaImg1Tex, (id_x-1)*up_factor, (id_y-1)*up_factor);
surroundPix[1] = tex2DBicubic<uchar4,float4>(rgbaImg1Tex, (id_x)*up_factor, (id_y-1)*up_factor);
surroundPix[2] = tex2DBicubic<uchar4,float4>(rgbaImg1Tex, (id_x+1)*up_factor, (id_y-1)*up_factor);
surroundPix[3] = tex2DBicubic<uchar4,float4>(rgbaImg1Tex, (id_x-1)*up_factor, (id_y)*up_factor);
surroundPix[4] = tex2DBicubic<uchar4,float4>(rgbaImg1Tex, (id_x+1)*up_factor, (id_y)*up_factor);
surroundPix[5] = tex2DBicubic<uchar4,float4>(rgbaImg1Tex, (id_x-1)*up_factor, (id_y+1)*up_factor);
surroundPix[6] = tex2DBicubic<uchar4,float4>(rgbaImg1Tex, (id_x)*up_factor, (id_y+1)*up_factor);
surroundPix[7] = tex2DBicubic<uchar4,float4>(rgbaImg1Tex, (id_x+1)*up_factor, (id_y+1)*up_factor);
unsigned char censusRes = _d_is_larger(surroundPix[0],centerPix);
censusRes += (_d_is_larger(surroundPix[1],centerPix) << 1);
censusRes += (_d_is_larger(surroundPix[2],centerPix) << 2);
censusRes += (_d_is_larger(surroundPix[3],centerPix) << 3);
censusRes += (_d_is_larger(surroundPix[4],centerPix) << 4);
censusRes += (_d_is_larger(surroundPix[5],centerPix) << 5);
censusRes += (_d_is_larger(surroundPix[6],centerPix) << 6);
censusRes += (_d_is_larger(surroundPix[7],centerPix) << 7);
d_census1[id_y*census_mem_w + id_x] = censusRes;
//the second image
centerPix = tex2DBicubic<uchar4,float4>(rgbaImg2Tex, id_x*up_factor, id_y*up_factor);
surroundPix[0] = tex2DBicubic<uchar4,float4>(rgbaImg2Tex, (id_x-1)*up_factor, (id_y-1)*up_factor);
surroundPix[1] = tex2DBicubic<uchar4,float4>(rgbaImg2Tex, (id_x)*up_factor, (id_y-1)*up_factor);
surroundPix[2] = tex2DBicubic<uchar4,float4>(rgbaImg2Tex, (id_x+1)*up_factor, (id_y-1)*up_factor);
surroundPix[3] = tex2DBicubic<uchar4,float4>(rgbaImg2Tex, (id_x-1)*up_factor, (id_y)*up_factor);
surroundPix[4] = tex2DBicubic<uchar4,float4>(rgbaImg2Tex, (id_x+1)*up_factor, (id_y)*up_factor);
surroundPix[5] = tex2DBicubic<uchar4,float4>(rgbaImg2Tex, (id_x-1)*up_factor, (id_y+1)*up_factor);
surroundPix[6] = tex2DBicubic<uchar4,float4>(rgbaImg2Tex, (id_x)*up_factor, (id_y+1)*up_factor);
surroundPix[7] = tex2DBicubic<uchar4,float4>(rgbaImg2Tex, (id_x+1)*up_factor, (id_y+1)*up_factor);
censusRes = _d_is_larger(surroundPix[0],centerPix);
censusRes += (_d_is_larger(surroundPix[1],centerPix) << 1);
censusRes += (_d_is_larger(surroundPix[2],centerPix) << 2);
censusRes += (_d_is_larger(surroundPix[3],centerPix) << 3);
censusRes += (_d_is_larger(surroundPix[4],centerPix) << 4);
censusRes += (_d_is_larger(surroundPix[5],centerPix) << 5);
censusRes += (_d_is_larger(surroundPix[6],centerPix) << 6);
censusRes += (_d_is_larger(surroundPix[7],centerPix) << 7);
d_census2[id_y*census_mem_w + id_x] = censusRes;
}
extern "C"
void baoCudaCensusTransform_Bicubic(unsigned char* d_census1, unsigned char* d_census2, int w_up, int h_up, size_t census_pitch, uchar4* d_img1, uchar4* d_img2, int w, int h, size_t img_pitch)
{
//bind imgs
cudaChannelFormatDesc desc_img = cudaCreateChannelDesc<uchar4>();
checkCudaErrors(cudaBindTexture2D(0, rgbaImg1Tex, d_img1, desc_img, w, h, img_pitch));
checkCudaErrors(cudaBindTexture2D(0, rgbaImg2Tex, d_img2, desc_img, w, h, img_pitch));
//compute census transform
dim3 gridSize(bao_div_ceil(w_up,BLOCK_DIM_X),bao_div_ceil(h_up,BLOCK_DIM_Y));
dim3 blockSize(BLOCK_DIM_X,BLOCK_DIM_Y);
float up_factor = float(w) / float(w_up); //e.g., 0.5f
size_t census_mem_w = census_pitch/sizeof(unsigned char);
bao_timer_gpu timer;
timer.start();
d_census_transform3x3_bicubic<<<gridSize, blockSize>>>(d_census1,d_census2,w_up,h_up,census_mem_w,up_factor); //0.075ms
timer.time_display("Pre: Census Transform");
getLastCudaError("Census Transform FAILED");
}
|
efd4e5e47ddd400a65d319b208c073dee4bec0bd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/hip/HIPBlas.h>
#include <ATen/hip/HIPContext.h>
#include <ATen/hip/HIPApplyUtils.cuh>
#include <ATen/native/hip/im2col.cuh>
namespace at {
namespace native {
namespace {
static inline void slow_conv_transpose2d_shape_check(
const Tensor& input,
const Tensor& grad_output,
const Tensor& weight,
const Tensor& bias,
int kernel_height,
int kernel_width,
int stride_height,
int stride_width,
int pad_height,
int pad_width,
int output_padding_height,
int output_padding_width,
int dilation_height,
int dilation_width,
bool weight_nullable) {
TORCH_CHECK(
kernel_width > 0 && kernel_height > 0,
"kernel size should be greater than zero, but got kernel_height: ",
kernel_height,
" kernel_width: ",
kernel_width);
TORCH_CHECK(
stride_width > 0 && stride_height > 0,
"stride should be greater than zero, but got stride_height: ",
stride_height,
" stride_width: ",
stride_width);
TORCH_CHECK(
dilation_width > 0 && dilation_height > 0,
"dilation should be greater than zero, but got dilation_height: ",
dilation_height,
", dilation_width: ",
dilation_width);
TORCH_CHECK(
(output_padding_width < stride_width ||
output_padding_width < dilation_width) &&
(output_padding_height < stride_height ||
output_padding_height < dilation_height),
"output padding must be smaller than either stride or dilation, ",
"but got output_padding_height: ",
output_padding_height,
" output_padding_width: ",
output_padding_width,
" stride_height: ",
stride_height,
" stride_width: ",
stride_width,
" dilation_height: ",
dilation_height,
" dilation_width: ",
dilation_width);
if (weight.defined()) {
TORCH_CHECK(
weight.numel() != 0 && (weight.dim() == 2 || weight.dim() == 4),
"non-empty 2D or 4D weight tensor expected, but got: ",
weight.sizes());
if (bias.defined()) {
check_dim_size(bias, 1, 0, weight.size(1));
}
} else if (!weight_nullable) {
AT_ERROR("weight tensor is expected to be non-nullable");
}
int ndim = input.dim();
int dimf = 0;
int dimh = 1;
int dimw = 2;
if (ndim == 4) {
dimf++;
dimh++;
dimw++;
}
TORCH_CHECK(
input.numel() != 0 && (ndim == 3 || ndim == 4),
"non-empty 3D or 4D input tensor expected but got a tensor with size ",
input.sizes());
int64_t input_height = input.size(dimh);
int64_t input_width = input.size(dimw);
int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
(dilation_height * (kernel_height - 1) + 1) + output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
if (output_width < 1 || output_height < 1) {
AT_ERROR(
"Given input size per channel: (",
input_height,
" x ",
input_width,
"). Calculated output spatial size per channel: (",
output_height,
" x ",
output_width,
"). Output size is too small");
}
if (weight.defined()) {
int64_t n_input_plane = weight.size(0);
check_dim_size(input, ndim, dimf, n_input_plane);
}
if (grad_output.defined()) {
if (weight.defined()) {
int64_t n_output_plane = weight.size(1);
check_dim_size(grad_output, ndim, dimf, n_output_plane);
} else if (bias.defined()) {
int64_t n_output_plane = bias.size(0);
check_dim_size(grad_output, ndim, dimf, n_output_plane);
}
check_dim_size(grad_output, ndim, dimh, output_height);
check_dim_size(grad_output, ndim, dimw, output_width);
}
}
void slow_conv_transpose2d_out_cuda_template(
Tensor& output,
const Tensor& input_,
const Tensor& weight_,
IntArrayRef kernel_size,
const Tensor& bias_,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
Tensor& columns_,
Tensor& ones_) {
TORCH_CHECK(
kernel_size.size() == 2,
"It is expected kernel_size equals to 2, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 2,
"It is expected dilation equals to 2, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 2,
"It is expected padding equals to 2, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 2,
"It is expected stride equals to 2, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 2,
"It is expected stride equals to 2, but got size ",
output_padding.size());
TensorArg input_arg{input_, "input", 1}, output_arg{output, "output", 2},
weight_arg{weight_, "weight", 3}, bias_arg{bias_, "bias", 4},
columns_arg{columns_, "columns", 5}, ones_arg{ones_, "ones", 6};
checkAllSameGPU(
"slow_conv_transpose2d_out_cuda",
{input_arg, output_arg, weight_arg, bias_arg, columns_arg, ones_arg});
int n_input_plane = weight_.size(0);
int n_output_plane = weight_.size(1);
Tensor columns = columns_;
Tensor ones = ones_;
int64_t kernel_height = kernel_size[0];
int64_t kernel_width = kernel_size[1];
int64_t dilation_height = dilation[0];
int64_t dilation_width = dilation[1];
int64_t pad_height = padding[0];
int64_t pad_width = padding[1];
int64_t stride_height = stride[0];
int64_t stride_width = stride[1];
int64_t output_padding_height = output_padding[0];
int64_t output_padding_width = output_padding[1];
slow_conv_transpose2d_shape_check(
input_,
Tensor(),
weight_,
bias_,
kernel_height,
kernel_width,
stride_height,
stride_width,
pad_height,
pad_width,
output_padding_height,
output_padding_width,
dilation_height,
dilation_width,
false);
Tensor input = input_.contiguous();
Tensor weight = weight_.contiguous();
Tensor bias = Tensor();
if (bias_.defined()) {
bias = bias_.contiguous();
TORCH_CHECK(ones.is_contiguous(), "ones needs to be contiguous");
}
bool is_batch = false;
if (input.dim() == 3) {
// Force batch
is_batch = true;
input.resize_({1, input.size(0), input.size(1), input.size(2)});
}
int64_t input_height = input.size(2);
int64_t input_width = input.size(3);
int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
(dilation_height * (kernel_height - 1) + 1) + output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Resize output
output.resize_({batch_size, n_output_plane, output_height, output_width});
// Resize temporary columns
columns.resize_({n_output_plane * kernel_width * kernel_height,
input_height * input_width});
// Define a buffer of ones, for bias accumulation
// Note: this buffer can be shared with other modules, it only ever gets
// increased, and always contains ones.
if (ones.dim() != 2 ||
ones.size(0) * ones.size(1) < output_height * output_width) {
// Resize plane and fill with ones...
ones.resize_({output_height, output_width});
ones.fill_(1);
}
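  // The bias is added further below with a rank-1 GEMM: output_n(c, p) += bias(c) * ones(p),
  // i.e. the ones buffer is the all-ones vector that broadcasts each bias value across
  // the output_height * output_width positions.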
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "slow_conv_transpose2d_out_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
// Helpers
Tensor input_n;
Tensor output_n;
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
        // Matrix multiply per output:
input_n = input.select(0, elt);
output_n = output.select(0, elt);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = weight.size(1) * weight.size(2) * weight.size(3);
int64_t n = columns.size(1);
int64_t k = weight.size(0);
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
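          // The standard trick: a row-major matrix is what cuBLAS sees as the
          // transpose of a column-major one, and (A*B)^T = B^T * A^T, so the desired
          // row-major product is obtained by swapping operand order and dimensions
          // instead of transposing any data in memory.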
at::cuda::blas::gemm<scalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
'n',
't',
n,
m,
k,
1,
input_n.data_ptr<scalar_t>(),
n,
weight.data_ptr<scalar_t>(),
m,
0,
columns.data_ptr<scalar_t>(),
n);
// Unpack columns back into input:
col2im<scalar_t, accscalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
columns.data_ptr<scalar_t>(),
n_output_plane,
output_height,
output_width,
input_height,
input_width,
kernel_height,
kernel_width,
pad_height,
pad_width,
stride_height,
stride_width,
dilation_height,
dilation_width,
output_n.data_ptr<scalar_t>());
// Do Bias after:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m_ = n_output_plane;
int64_t n_ = output_height * output_width;
int64_t k_ = 1;
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
if (bias.defined()) {
at::cuda::blas::gemm<scalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
't',
'n',
n_,
m_,
k_,
1,
ones.data_ptr<scalar_t>(),
k_,
bias.data_ptr<scalar_t>(),
k_,
1,
output_n.data_ptr<scalar_t>(),
n_);
}
}
// Resize output
if (is_batch) {
output.resize_({n_output_plane, output_height, output_width});
input.resize_({n_input_plane, input_height, input_width});
}
}); // end of dispatch
}
static void slow_conv_transpose2d_backward_out_cuda_template(
const Tensor& input_,
const Tensor& grad_output_,
Tensor& grad_input,
const Tensor& weight_,
const Tensor& grad_columns_,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
TORCH_CHECK(
kernel_size.size() == 2,
"It is expected kernel_size equals to 2, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 2,
"It is expected dilation equals to 2, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 2,
"It is expected padding equals to 2, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 2,
"It is expected stride equals to 2, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 2,
"It is expected stride equals to 2, but got size ",
output_padding.size());
TensorArg input_arg{input_, "input", 1},
grad_output_arg{grad_output_, "grad_output", 2},
weight_arg{weight_, "weight", 3},
grad_columns_arg{grad_columns_, "grad_columns", 4},
grad_input_arg{grad_input, "grad_input", 5};
checkAllSameGPU(
"slow_conv_transpose2d_backward_out_cuda",
{input_arg,
grad_output_arg,
weight_arg,
grad_columns_arg,
grad_input_arg});
int n_input_plane = weight_.size(0);
int n_output_plane = weight_.size(1);
int64_t kernel_height = kernel_size[0];
int64_t kernel_width = kernel_size[1];
int64_t dilation_height = dilation[0];
int64_t dilation_width = dilation[1];
int64_t pad_height = padding[0];
int64_t pad_width = padding[1];
int64_t stride_height = stride[0];
int64_t stride_width = stride[1];
int64_t output_padding_height = output_padding[0];
int64_t output_padding_width = output_padding[1];
Tensor grad_columns = grad_columns_;
slow_conv_transpose2d_shape_check(
input_,
grad_output_,
weight_,
Tensor(),
kernel_height,
kernel_width,
stride_height,
stride_width,
pad_height,
pad_width,
output_padding_height,
output_padding_width,
dilation_height,
dilation_width,
false);
Tensor input = input_.contiguous();
Tensor grad_output = grad_output_.contiguous();
Tensor weight = weight_.contiguous();
bool is_batch = false;
if (input.dim() == 3) {
// Force batch
is_batch = true;
input.resize_({1, input.size(0), input.size(1), input.size(2)});
grad_output.resize_(
{1, grad_output.size(0), grad_output.size(1), grad_output.size(2)});
}
int64_t input_width = input.size(3);
int64_t input_height = input.size(2);
int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
(dilation_height * (kernel_height - 1) + 1) + output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Resize output
grad_input.resize_({batch_size, n_input_plane, input_height, input_width});
// Resize temporary columns
grad_columns.resize_({n_output_plane * kernel_width * kernel_height,
input_height * input_width});
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(), "slow_conv_transpose2d_backward_out_cuda", [&] {
// Helpers
Tensor grad_input_n = Tensor();
Tensor grad_output_n = Tensor();
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
        // Matrix multiply per sample:
grad_input_n = grad_input.select(0, elt);
grad_output_n = grad_output.select(0, elt);
// Extract columns:
im2col<scalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_output_n.data_ptr<scalar_t>(),
n_output_plane,
output_height,
output_width,
input_height,
input_width,
kernel_height,
kernel_width,
pad_height,
pad_width,
stride_height,
stride_width,
dilation_height,
dilation_width,
grad_columns.data_ptr<scalar_t>());
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = weight.size(0);
int64_t n = grad_columns.size(1);
int64_t k = weight.size(1) * weight.size(2) * weight.size(3);
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
at::cuda::blas::gemm<scalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
'n',
'n',
n,
m,
k,
1,
grad_columns.data_ptr<scalar_t>(),
n,
weight.data_ptr<scalar_t>(),
k,
0,
grad_input_n.data_ptr<scalar_t>(),
n);
}
// Resize output
if (is_batch) {
grad_output.resize_({n_output_plane, output_height, output_width});
input.resize_({n_input_plane, input_height, input_width});
grad_input.resize_({n_input_plane, input_height, input_width});
}
}); // end of dispatch
}
void slow_conv_transpose2d_acc_grad_parameters_cuda_template(
const Tensor& input_,
const Tensor& grad_output_,
Tensor& grad_weight,
Tensor& grad_bias,
const Tensor& columns_,
const Tensor& ones_,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
int scale_) {
TORCH_CHECK(
kernel_size.size() == 2,
"It is expected kernel_size equals to 2, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 2,
"It is expected dilation equals to 2, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 2,
"It is expected padding equals to 2, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 2,
"It is expected stride equals to 2, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 2,
"It is expected stride equals to 2, but got size ",
output_padding.size());
TensorArg input_arg{input_, "input", 1},
grad_output_arg{grad_output_, "grad_output", 2},
grad_weight_arg{grad_weight, "grad_weight", 3},
grad_bias_arg{grad_bias, "grad_bias", 4},
columns_arg{columns_, "columns", 5}, ones_arg{ones_, "ones", 6};
checkAllSameGPU(
"slow_conv_transpose2d_acc_grad_parameters_cuda",
{input_arg,
grad_output_arg,
grad_weight_arg,
grad_bias_arg,
columns_arg,
ones_arg});
int64_t kernel_height = kernel_size[0];
int64_t kernel_width = kernel_size[1];
int64_t dilation_height = dilation[0];
int64_t dilation_width = dilation[1];
int64_t pad_height = padding[0];
int64_t pad_width = padding[1];
int64_t stride_height = stride[0];
int64_t stride_width = stride[1];
int64_t output_padding_height = output_padding[0];
int64_t output_padding_width = output_padding[1];
Tensor columns = columns_;
Tensor ones = ones_;
slow_conv_transpose2d_shape_check(
input_,
grad_output_,
grad_weight,
grad_bias,
kernel_height,
kernel_width,
stride_height,
stride_width,
pad_height,
pad_width,
output_padding_height,
output_padding_width,
dilation_height,
dilation_width,
true);
Tensor input = input_.contiguous();
Tensor grad_output = grad_output_.contiguous();
int64_t n_output_plane;
if (grad_weight.defined()) {
n_output_plane = grad_weight.size(1);
} else if (grad_bias.defined()) {
n_output_plane = grad_bias.size(0);
} else {
return;
}
if (grad_weight.defined()) {
TORCH_CHECK(
grad_weight.is_contiguous(), "grad_weight needs to be contiguous");
}
TORCH_CHECK(columns.is_contiguous(), "columns needs to be contiguous");
if (grad_bias.defined()) {
TORCH_CHECK(grad_bias.is_contiguous(), "grad_bias needs to be contiguous");
TORCH_CHECK(ones.is_contiguous(), "ones needs to be contiguous");
}
bool is_batch = false;
if (input.dim() == 3) {
// Force batch
is_batch = true;
input.resize_({1, input.size(0), input.size(1), input.size(2)});
grad_output.resize_(
{1, grad_output.size(0), grad_output.size(1), grad_output.size(2)});
}
int64_t input_width = input.size(3);
int64_t input_height = input.size(2);
int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
(dilation_height * (kernel_height - 1) + 1) + output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Define a buffer of ones, for bias accumulation
if (ones.dim() != 2 ||
ones.size(0) * ones.size(1) < output_height * output_width) {
// Resize plane and fill with ones...
ones.resize_({output_height, output_width});
ones.fill_(1); // or static_cast<scalar_t>(1)
}
// Resize temporary columns
columns.resize_({n_output_plane * kernel_width * kernel_height,
input_height * input_width});
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "slow_conv_transpose2d_acc_grad_parameters_cuda", [&] {
// Helpers
Tensor input_n = Tensor();
Tensor grad_output_n = Tensor();
scalar_t scale = static_cast<scalar_t>(scale_);
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
        // Matrix multiply per output:
grad_output_n = grad_output.select(0, elt);
// Do Weight:
if (grad_weight.defined()) {
          // Matrix multiply per output:
input_n = input.select(0, elt);
// Extract columns:
im2col<scalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
grad_output_n.data_ptr<scalar_t>(),
n_output_plane,
output_height,
output_width,
input_height,
input_width,
kernel_height,
kernel_width,
pad_height,
pad_width,
stride_height,
stride_width,
dilation_height,
dilation_width,
columns.data_ptr<scalar_t>());
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t n = columns.size(0); // n_output_plane * kh * kw
int64_t m = input_n.size(0); // n_input_plane
int64_t k = columns.size(1); // input_height * input_width
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
at::cuda::blas::gemm<scalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
't',
'n',
n,
m,
k,
scale,
columns.data_ptr<scalar_t>(),
k,
input_n.data_ptr<scalar_t>(),
k,
1,
grad_weight.data_ptr<scalar_t>(),
n);
}
// Do Bias:
if (grad_bias.defined()) {
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m_ = n_output_plane;
int64_t k_ = output_height * output_width;
// Do GEMV (note: this is a bit confusing because gemv assumes
// column-major matrices)
at::cuda::blas::gemv<scalar_t>(
at::hip::getCurrentHIPStreamMasqueradingAsCUDA(),
't',
k_,
m_,
scale,
grad_output_n.data_ptr<scalar_t>(),
k_,
ones.data_ptr<scalar_t>(),
1,
1,
grad_bias.data_ptr<scalar_t>(),
1);
}
}
// Resize
if (is_batch) {
grad_output.resize_({n_output_plane, output_height, output_width});
input.resize_({input.size(1), input_height, input_width});
}
}); // end of dispatch
}
} // namespace
Tensor& slow_conv_transpose2d_out_cuda(
Tensor& output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
const Tensor& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
Tensor columns = at::empty_like(input, at::MemoryFormat::Contiguous);
Tensor ones = at::empty_like(input, at::MemoryFormat::Contiguous);
slow_conv_transpose2d_out_cuda_template(
output,
input,
weight,
kernel_size,
bias,
stride,
padding,
output_padding,
dilation,
columns,
ones);
return output;
}
Tensor slow_conv_transpose2d_cuda(
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
const Tensor& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
Tensor output = at::empty_like(input, at::MemoryFormat::Contiguous);
Tensor columns = at::empty_like(input, at::MemoryFormat::Contiguous);
Tensor ones = at::empty_like(input, at::MemoryFormat::Contiguous);
slow_conv_transpose2d_out_cuda_template(
output,
input,
weight,
kernel_size,
bias,
stride,
padding,
output_padding,
dilation,
columns,
ones);
return output;
}
std::tuple<Tensor&, Tensor&, Tensor&> slow_conv_transpose2d_backward_out_cuda(
Tensor& grad_input,
Tensor& grad_weight,
Tensor& grad_bias,
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
const Tensor& columns,
const Tensor& ones) {
if (grad_input.defined()) {
slow_conv_transpose2d_backward_out_cuda_template(
input,
grad_output,
grad_input,
weight,
columns,
kernel_size,
stride,
padding,
output_padding,
dilation);
}
if (grad_weight.defined()) {
grad_weight.resize_(weight.sizes());
grad_weight.zero_();
}
if (grad_bias.defined()) {
grad_bias.resize_({weight.size(1)});
grad_bias.zero_();
}
if (grad_weight.defined() || grad_bias.defined()) {
slow_conv_transpose2d_acc_grad_parameters_cuda_template(
input,
grad_output,
grad_weight,
grad_bias,
columns,
ones,
kernel_size,
stride,
padding,
output_padding,
dilation,
1);
}
return std::tuple<Tensor&, Tensor&, Tensor&>(
grad_input, grad_weight, grad_bias);
}
std::tuple<Tensor, Tensor, Tensor> slow_conv_transpose2d_backward_cuda(
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
const Tensor& columns,
const Tensor& ones,
std::array<bool, 3> output_mask) {
Tensor grad_input;
Tensor grad_weight;
Tensor grad_bias;
if (output_mask[0]) {
grad_input = at::empty({0}, grad_output.options());
} else {
grad_input = Tensor();
}
if (output_mask[1]) {
grad_weight = at::empty({0}, grad_output.options());
} else {
grad_weight = Tensor();
}
if (output_mask[2]) {
grad_bias = at::empty({0}, grad_output.options());
} else {
grad_bias = Tensor();
}
if (grad_input.defined()) {
slow_conv_transpose2d_backward_out_cuda_template(
input,
grad_output,
grad_input,
weight,
columns,
kernel_size,
stride,
padding,
output_padding,
dilation);
}
if (grad_weight.defined()) {
grad_weight.resize_(weight.sizes());
grad_weight.zero_();
}
if (grad_bias.defined()) {
grad_bias.resize_({weight.size(1)});
grad_bias.zero_();
}
if (grad_weight.defined() || grad_bias.defined()) {
slow_conv_transpose2d_acc_grad_parameters_cuda_template(
input,
grad_output,
grad_weight,
grad_bias,
columns,
ones,
kernel_size,
stride,
padding,
output_padding,
dilation,
1);
}
return std::tuple<Tensor, Tensor, Tensor>(grad_input, grad_weight, grad_bias);
}
} // namespace native
} // namespace at
|
efd4e5e47ddd400a65d319b208c073dee4bec0bd.cu
|
#include <ATen/ATen.h>
#include <ATen/AccumulateType.h>
#include <ATen/NativeFunctions.h>
#include <ATen/TensorUtils.h>
#include <ATen/Utils.h>
#include <ATen/cuda/CUDABlas.h>
#include <ATen/cuda/CUDAContext.h>
#include <ATen/cuda/CUDAApplyUtils.cuh>
#include <ATen/native/cuda/im2col.cuh>
namespace at {
namespace native {
namespace {
static inline void slow_conv_transpose2d_shape_check(
const Tensor& input,
const Tensor& grad_output,
const Tensor& weight,
const Tensor& bias,
int kernel_height,
int kernel_width,
int stride_height,
int stride_width,
int pad_height,
int pad_width,
int output_padding_height,
int output_padding_width,
int dilation_height,
int dilation_width,
bool weight_nullable) {
TORCH_CHECK(
kernel_width > 0 && kernel_height > 0,
"kernel size should be greater than zero, but got kernel_height: ",
kernel_height,
" kernel_width: ",
kernel_width);
TORCH_CHECK(
stride_width > 0 && stride_height > 0,
"stride should be greater than zero, but got stride_height: ",
stride_height,
" stride_width: ",
stride_width);
TORCH_CHECK(
dilation_width > 0 && dilation_height > 0,
"dilation should be greater than zero, but got dilation_height: ",
dilation_height,
", dilation_width: ",
dilation_width);
TORCH_CHECK(
(output_padding_width < stride_width ||
output_padding_width < dilation_width) &&
(output_padding_height < stride_height ||
output_padding_height < dilation_height),
"output padding must be smaller than either stride or dilation, ",
"but got output_padding_height: ",
output_padding_height,
" output_padding_width: ",
output_padding_width,
" stride_height: ",
stride_height,
" stride_width: ",
stride_width,
" dilation_height: ",
dilation_height,
" dilation_width: ",
dilation_width);
if (weight.defined()) {
TORCH_CHECK(
weight.numel() != 0 && (weight.dim() == 2 || weight.dim() == 4),
"non-empty 2D or 4D weight tensor expected, but got: ",
weight.sizes());
if (bias.defined()) {
check_dim_size(bias, 1, 0, weight.size(1));
}
} else if (!weight_nullable) {
AT_ERROR("weight tensor is expected to be non-nullable");
}
int ndim = input.dim();
int dimf = 0;
int dimh = 1;
int dimw = 2;
if (ndim == 4) {
dimf++;
dimh++;
dimw++;
}
TORCH_CHECK(
input.numel() != 0 && (ndim == 3 || ndim == 4),
"non-empty 3D or 4D input tensor expected but got a tensor with size ",
input.sizes());
int64_t input_height = input.size(dimh);
int64_t input_width = input.size(dimw);
int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
(dilation_height * (kernel_height - 1) + 1) + output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
if (output_width < 1 || output_height < 1) {
AT_ERROR(
"Given input size per channel: (",
input_height,
" x ",
input_width,
"). Calculated output spatial size per channel: (",
output_height,
" x ",
output_width,
"). Output size is too small");
}
if (weight.defined()) {
int64_t n_input_plane = weight.size(0);
check_dim_size(input, ndim, dimf, n_input_plane);
}
if (grad_output.defined()) {
if (weight.defined()) {
int64_t n_output_plane = weight.size(1);
check_dim_size(grad_output, ndim, dimf, n_output_plane);
} else if (bias.defined()) {
int64_t n_output_plane = bias.size(0);
check_dim_size(grad_output, ndim, dimf, n_output_plane);
}
check_dim_size(grad_output, ndim, dimh, output_height);
check_dim_size(grad_output, ndim, dimw, output_width);
}
}
void slow_conv_transpose2d_out_cuda_template(
Tensor& output,
const Tensor& input_,
const Tensor& weight_,
IntArrayRef kernel_size,
const Tensor& bias_,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
Tensor& columns_,
Tensor& ones_) {
TORCH_CHECK(
kernel_size.size() == 2,
"It is expected kernel_size equals to 2, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 2,
"It is expected dilation equals to 2, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 2,
"It is expected padding equals to 2, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 2,
"It is expected stride equals to 2, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 2,
"It is expected stride equals to 2, but got size ",
output_padding.size());
TensorArg input_arg{input_, "input", 1}, output_arg{output, "output", 2},
weight_arg{weight_, "weight", 3}, bias_arg{bias_, "bias", 4},
columns_arg{columns_, "columns", 5}, ones_arg{ones_, "ones", 6};
checkAllSameGPU(
"slow_conv_transpose2d_out_cuda",
{input_arg, output_arg, weight_arg, bias_arg, columns_arg, ones_arg});
int n_input_plane = weight_.size(0);
int n_output_plane = weight_.size(1);
Tensor columns = columns_;
Tensor ones = ones_;
int64_t kernel_height = kernel_size[0];
int64_t kernel_width = kernel_size[1];
int64_t dilation_height = dilation[0];
int64_t dilation_width = dilation[1];
int64_t pad_height = padding[0];
int64_t pad_width = padding[1];
int64_t stride_height = stride[0];
int64_t stride_width = stride[1];
int64_t output_padding_height = output_padding[0];
int64_t output_padding_width = output_padding[1];
slow_conv_transpose2d_shape_check(
input_,
Tensor(),
weight_,
bias_,
kernel_height,
kernel_width,
stride_height,
stride_width,
pad_height,
pad_width,
output_padding_height,
output_padding_width,
dilation_height,
dilation_width,
false);
Tensor input = input_.contiguous();
Tensor weight = weight_.contiguous();
Tensor bias = Tensor();
if (bias_.defined()) {
bias = bias_.contiguous();
TORCH_CHECK(ones.is_contiguous(), "ones needs to be contiguous");
}
bool is_batch = false;
if (input.dim() == 3) {
// Force batch
is_batch = true;
input.resize_({1, input.size(0), input.size(1), input.size(2)});
}
int64_t input_height = input.size(2);
int64_t input_width = input.size(3);
int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
(dilation_height * (kernel_height - 1) + 1) + output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Resize output
output.resize_({batch_size, n_output_plane, output_height, output_width});
// Resize temporary columns
columns.resize_({n_output_plane * kernel_width * kernel_height,
input_height * input_width});
// Define a buffer of ones, for bias accumulation
// Note: this buffer can be shared with other modules, it only ever gets
// increased, and always contains ones.
if (ones.dim() != 2 ||
ones.size(0) * ones.size(1) < output_height * output_width) {
// Resize plane and fill with ones...
ones.resize_({output_height, output_width});
ones.fill_(1);
}
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "slow_conv_transpose2d_out_cuda", [&] {
using accscalar_t = at::acc_type<scalar_t, true>;
// Helpers
Tensor input_n;
Tensor output_n;
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
          // Matrix multiply per output:
input_n = input.select(0, elt);
output_n = output.select(0, elt);
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = weight.size(1) * weight.size(2) * weight.size(3);
int64_t n = columns.size(1);
int64_t k = weight.size(0);
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
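          // In row-major terms this computes:
          //   columns (n_output_plane*kh*kw x in_h*in_w) = weight^T * input_n
          // which col2im below scatters into the output plane.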
at::cuda::blas::gemm<scalar_t>(
at::cuda::getCurrentCUDAStream(),
'n',
't',
n,
m,
k,
1,
input_n.data_ptr<scalar_t>(),
n,
weight.data_ptr<scalar_t>(),
m,
0,
columns.data_ptr<scalar_t>(),
n);
// Unpack columns back into input:
col2im<scalar_t, accscalar_t>(
at::cuda::getCurrentCUDAStream(),
columns.data_ptr<scalar_t>(),
n_output_plane,
output_height,
output_width,
input_height,
input_width,
kernel_height,
kernel_width,
pad_height,
pad_width,
stride_height,
stride_width,
dilation_height,
dilation_width,
output_n.data_ptr<scalar_t>());
// Do Bias after:
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m_ = n_output_plane;
int64_t n_ = output_height * output_width;
int64_t k_ = 1;
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
if (bias.defined()) {
at::cuda::blas::gemm<scalar_t>(
at::cuda::getCurrentCUDAStream(),
't',
'n',
n_,
m_,
k_,
1,
ones.data_ptr<scalar_t>(),
k_,
bias.data_ptr<scalar_t>(),
k_,
1,
output_n.data_ptr<scalar_t>(),
n_);
}
}
// Resize output
if (is_batch) {
output.resize_({n_output_plane, output_height, output_width});
input.resize_({n_input_plane, input_height, input_width});
}
}); // end of dispatch
}
static void slow_conv_transpose2d_backward_out_cuda_template(
const Tensor& input_,
const Tensor& grad_output_,
Tensor& grad_input,
const Tensor& weight_,
const Tensor& grad_columns_,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
TORCH_CHECK(
kernel_size.size() == 2,
"It is expected kernel_size equals to 2, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 2,
"It is expected dilation equals to 2, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 2,
"It is expected padding equals to 2, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 2,
"It is expected stride equals to 2, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 2,
"It is expected stride equals to 2, but got size ",
output_padding.size());
TensorArg input_arg{input_, "input", 1},
grad_output_arg{grad_output_, "grad_output", 2},
weight_arg{weight_, "weight", 3},
grad_columns_arg{grad_columns_, "grad_columns", 4},
grad_input_arg{grad_input, "grad_input", 5};
checkAllSameGPU(
"slow_conv_transpose2d_backward_out_cuda",
{input_arg,
grad_output_arg,
weight_arg,
grad_columns_arg,
grad_input_arg});
int n_input_plane = weight_.size(0);
int n_output_plane = weight_.size(1);
int64_t kernel_height = kernel_size[0];
int64_t kernel_width = kernel_size[1];
int64_t dilation_height = dilation[0];
int64_t dilation_width = dilation[1];
int64_t pad_height = padding[0];
int64_t pad_width = padding[1];
int64_t stride_height = stride[0];
int64_t stride_width = stride[1];
int64_t output_padding_height = output_padding[0];
int64_t output_padding_width = output_padding[1];
Tensor grad_columns = grad_columns_;
slow_conv_transpose2d_shape_check(
input_,
grad_output_,
weight_,
Tensor(),
kernel_height,
kernel_width,
stride_height,
stride_width,
pad_height,
pad_width,
output_padding_height,
output_padding_width,
dilation_height,
dilation_width,
false);
Tensor input = input_.contiguous();
Tensor grad_output = grad_output_.contiguous();
Tensor weight = weight_.contiguous();
bool is_batch = false;
if (input.dim() == 3) {
// Force batch
is_batch = true;
input.resize_({1, input.size(0), input.size(1), input.size(2)});
grad_output.resize_(
{1, grad_output.size(0), grad_output.size(1), grad_output.size(2)});
}
int64_t input_width = input.size(3);
int64_t input_height = input.size(2);
int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
(dilation_height * (kernel_height - 1) + 1) + output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Resize output
grad_input.resize_({batch_size, n_input_plane, input_height, input_width});
// Resize temporary columns
grad_columns.resize_({n_output_plane * kernel_width * kernel_height,
input_height * input_width});
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
grad_output.scalar_type(), "slow_conv_transpose2d_backward_out_cuda", [&] {
// Helpers
Tensor grad_input_n = Tensor();
Tensor grad_output_n = Tensor();
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
          // Matrix multiply per sample:
grad_input_n = grad_input.select(0, elt);
grad_output_n = grad_output.select(0, elt);
// Extract columns:
im2col<scalar_t>(
at::cuda::getCurrentCUDAStream(),
grad_output_n.data_ptr<scalar_t>(),
n_output_plane,
output_height,
output_width,
input_height,
input_width,
kernel_height,
kernel_width,
pad_height,
pad_width,
stride_height,
stride_width,
dilation_height,
dilation_width,
grad_columns.data_ptr<scalar_t>());
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m = weight.size(0);
int64_t n = grad_columns.size(1);
int64_t k = weight.size(1) * weight.size(2) * weight.size(3);
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
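          // In row-major terms this computes:
          //   grad_input_n (n_input_plane x in_h*in_w) = weight * grad_columns
          // i.e. the weight applied to the im2col'd grad_output.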
at::cuda::blas::gemm<scalar_t>(
at::cuda::getCurrentCUDAStream(),
'n',
'n',
n,
m,
k,
1,
grad_columns.data_ptr<scalar_t>(),
n,
weight.data_ptr<scalar_t>(),
k,
0,
grad_input_n.data_ptr<scalar_t>(),
n);
}
// Resize output
if (is_batch) {
grad_output.resize_({n_output_plane, output_height, output_width});
input.resize_({n_input_plane, input_height, input_width});
grad_input.resize_({n_input_plane, input_height, input_width});
}
}); // end of dispatch
}
void slow_conv_transpose2d_acc_grad_parameters_cuda_template(
const Tensor& input_,
const Tensor& grad_output_,
Tensor& grad_weight,
Tensor& grad_bias,
const Tensor& columns_,
const Tensor& ones_,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
int scale_) {
TORCH_CHECK(
kernel_size.size() == 2,
"It is expected kernel_size equals to 2, but got size ",
kernel_size.size());
TORCH_CHECK(
dilation.size() == 2,
"It is expected dilation equals to 2, but got size ",
dilation.size());
TORCH_CHECK(
padding.size() == 2,
"It is expected padding equals to 2, but got size ",
padding.size());
TORCH_CHECK(
stride.size() == 2,
"It is expected stride equals to 2, but got size ",
stride.size());
TORCH_CHECK(
output_padding.size() == 2,
"It is expected stride equals to 2, but got size ",
output_padding.size());
TensorArg input_arg{input_, "input", 1},
grad_output_arg{grad_output_, "grad_output", 2},
grad_weight_arg{grad_weight, "grad_weight", 3},
grad_bias_arg{grad_bias, "grad_bias", 4},
columns_arg{columns_, "columns", 5}, ones_arg{ones_, "ones", 6};
checkAllSameGPU(
"slow_conv_transpose2d_acc_grad_parameters_cuda",
{input_arg,
grad_output_arg,
grad_weight_arg,
grad_bias_arg,
columns_arg,
ones_arg});
int64_t kernel_height = kernel_size[0];
int64_t kernel_width = kernel_size[1];
int64_t dilation_height = dilation[0];
int64_t dilation_width = dilation[1];
int64_t pad_height = padding[0];
int64_t pad_width = padding[1];
int64_t stride_height = stride[0];
int64_t stride_width = stride[1];
int64_t output_padding_height = output_padding[0];
int64_t output_padding_width = output_padding[1];
Tensor columns = columns_;
Tensor ones = ones_;
slow_conv_transpose2d_shape_check(
input_,
grad_output_,
grad_weight,
grad_bias,
kernel_height,
kernel_width,
stride_height,
stride_width,
pad_height,
pad_width,
output_padding_height,
output_padding_width,
dilation_height,
dilation_width,
true);
Tensor input = input_.contiguous();
Tensor grad_output = grad_output_.contiguous();
int64_t n_output_plane;
if (grad_weight.defined()) {
n_output_plane = grad_weight.size(1);
} else if (grad_bias.defined()) {
n_output_plane = grad_bias.size(0);
} else {
return;
}
if (grad_weight.defined()) {
TORCH_CHECK(
grad_weight.is_contiguous(), "grad_weight needs to be contiguous");
}
TORCH_CHECK(columns.is_contiguous(), "columns needs to be contiguous");
if (grad_bias.defined()) {
TORCH_CHECK(grad_bias.is_contiguous(), "grad_bias needs to be contiguous");
TORCH_CHECK(ones.is_contiguous(), "ones needs to be contiguous");
}
bool is_batch = false;
if (input.dim() == 3) {
// Force batch
is_batch = true;
input.resize_({1, input.size(0), input.size(1), input.size(2)});
grad_output.resize_(
{1, grad_output.size(0), grad_output.size(1), grad_output.size(2)});
}
int64_t input_width = input.size(3);
int64_t input_height = input.size(2);
int64_t output_height = (input_height - 1) * stride_height - 2 * pad_height +
(dilation_height * (kernel_height - 1) + 1) + output_padding_height;
int64_t output_width = (input_width - 1) * stride_width - 2 * pad_width +
(dilation_width * (kernel_width - 1) + 1) + output_padding_width;
// Batch size + input planes
int64_t batch_size = input.size(0);
// Define a buffer of ones, for bias accumulation
if (ones.dim() != 2 ||
ones.size(0) * ones.size(1) < output_height * output_width) {
// Resize plane and fill with ones...
ones.resize_({output_height, output_width});
ones.fill_(1); // or static_cast<scalar_t>(1)
}
// Resize temporary columns
columns.resize_({n_output_plane * kernel_width * kernel_height,
input_height * input_width});
AT_DISPATCH_FLOATING_TYPES_AND_HALF(
input.scalar_type(), "slow_conv_transpose2d_acc_grad_parameters_cuda", [&] {
// Helpers
Tensor input_n = Tensor();
Tensor grad_output_n = Tensor();
scalar_t scale = static_cast<scalar_t>(scale_);
// For each elt in batch, do:
for (int elt = 0; elt < batch_size; elt++) {
          // Matrix multiply per output:
grad_output_n = grad_output.select(0, elt);
// Do Weight:
if (grad_weight.defined()) {
            // Matrix multiply per output:
input_n = input.select(0, elt);
// Extract columns:
im2col<scalar_t>(
at::cuda::getCurrentCUDAStream(),
grad_output_n.data_ptr<scalar_t>(),
n_output_plane,
output_height,
output_width,
input_height,
input_width,
kernel_height,
kernel_width,
pad_height,
pad_width,
stride_height,
stride_width,
dilation_height,
dilation_width,
columns.data_ptr<scalar_t>());
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t n = columns.size(0); // n_output_plane * kh * kw
int64_t m = input_n.size(0); // n_input_plane
int64_t k = columns.size(1); // input_height * input_width
// Do GEMM (note: this is a bit confusing because gemm assumes
// column-major matrices)
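            // In row-major terms this accumulates:
            //   grad_weight (n_input_plane x n_output_plane*kh*kw) += scale * input_n * columns^T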
at::cuda::blas::gemm<scalar_t>(
at::cuda::getCurrentCUDAStream(),
't',
'n',
n,
m,
k,
scale,
columns.data_ptr<scalar_t>(),
k,
input_n.data_ptr<scalar_t>(),
k,
1,
grad_weight.data_ptr<scalar_t>(),
n);
}
// Do Bias:
if (grad_bias.defined()) {
// M,N,K are dims of matrix A and B
// (see http://docs.nvidia.com/cuda/cublas/#cublas-lt-t-gt-gemm)
int64_t m_ = n_output_plane;
int64_t k_ = output_height * output_width;
// Do GEMV (note: this is a bit confusing because gemv assumes
// column-major matrices)
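            // In row-major terms this accumulates:
            //   grad_bias (n_output_plane) += scale * grad_output_n (n_output_plane x out_h*out_w) * ones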
at::cuda::blas::gemv<scalar_t>(
at::cuda::getCurrentCUDAStream(),
't',
k_,
m_,
scale,
grad_output_n.data_ptr<scalar_t>(),
k_,
ones.data_ptr<scalar_t>(),
1,
1,
grad_bias.data_ptr<scalar_t>(),
1);
}
}
// Resize
if (is_batch) {
grad_output.resize_({n_output_plane, output_height, output_width});
input.resize_({input.size(1), input_height, input_width});
}
}); // end of dispatch
}
} // namespace
Tensor& slow_conv_transpose2d_out_cuda(
Tensor& output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
const Tensor& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
Tensor columns = at::empty_like(input, at::MemoryFormat::Contiguous);
Tensor ones = at::empty_like(input, at::MemoryFormat::Contiguous);
slow_conv_transpose2d_out_cuda_template(
output,
input,
weight,
kernel_size,
bias,
stride,
padding,
output_padding,
dilation,
columns,
ones);
return output;
}
Tensor slow_conv_transpose2d_cuda(
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
const Tensor& bias,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation) {
Tensor output = at::empty_like(input, at::MemoryFormat::Contiguous);
Tensor columns = at::empty_like(input, at::MemoryFormat::Contiguous);
Tensor ones = at::empty_like(input, at::MemoryFormat::Contiguous);
slow_conv_transpose2d_out_cuda_template(
output,
input,
weight,
kernel_size,
bias,
stride,
padding,
output_padding,
dilation,
columns,
ones);
return output;
}
std::tuple<Tensor&, Tensor&, Tensor&> slow_conv_transpose2d_backward_out_cuda(
Tensor& grad_input,
Tensor& grad_weight,
Tensor& grad_bias,
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
const Tensor& columns,
const Tensor& ones) {
if (grad_input.defined()) {
slow_conv_transpose2d_backward_out_cuda_template(
input,
grad_output,
grad_input,
weight,
columns,
kernel_size,
stride,
padding,
output_padding,
dilation);
}
if (grad_weight.defined()) {
grad_weight.resize_(weight.sizes());
grad_weight.zero_();
}
if (grad_bias.defined()) {
grad_bias.resize_({weight.size(1)});
grad_bias.zero_();
}
if (grad_weight.defined() || grad_bias.defined()) {
slow_conv_transpose2d_acc_grad_parameters_cuda_template(
input,
grad_output,
grad_weight,
grad_bias,
columns,
ones,
kernel_size,
stride,
padding,
output_padding,
dilation,
1);
}
return std::tuple<Tensor&, Tensor&, Tensor&>(
grad_input, grad_weight, grad_bias);
}
std::tuple<Tensor, Tensor, Tensor> slow_conv_transpose2d_backward_cuda(
const Tensor& grad_output,
const Tensor& input,
const Tensor& weight,
IntArrayRef kernel_size,
IntArrayRef stride,
IntArrayRef padding,
IntArrayRef output_padding,
IntArrayRef dilation,
const Tensor& columns,
const Tensor& ones,
std::array<bool, 3> output_mask) {
Tensor grad_input;
Tensor grad_weight;
Tensor grad_bias;
if (output_mask[0]) {
grad_input = at::empty({0}, grad_output.options());
} else {
grad_input = Tensor();
}
if (output_mask[1]) {
grad_weight = at::empty({0}, grad_output.options());
} else {
grad_weight = Tensor();
}
if (output_mask[2]) {
grad_bias = at::empty({0}, grad_output.options());
} else {
grad_bias = Tensor();
}
if (grad_input.defined()) {
slow_conv_transpose2d_backward_out_cuda_template(
input,
grad_output,
grad_input,
weight,
columns,
kernel_size,
stride,
padding,
output_padding,
dilation);
}
if (grad_weight.defined()) {
grad_weight.resize_(weight.sizes());
grad_weight.zero_();
}
if (grad_bias.defined()) {
grad_bias.resize_({weight.size(1)});
grad_bias.zero_();
}
if (grad_weight.defined() || grad_bias.defined()) {
slow_conv_transpose2d_acc_grad_parameters_cuda_template(
input,
grad_output,
grad_weight,
grad_bias,
columns,
ones,
kernel_size,
stride,
padding,
output_padding,
dilation,
1);
}
return std::tuple<Tensor, Tensor, Tensor>(grad_input, grad_weight, grad_bias);
}
} // namespace native
} // namespace at
|
36eff55287bc13fa99e1161cb5d6c060c10376c9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <sys/time.h>
__global__ void square( int * d_in,int n){
    // shared so that thread 0's init and the atomicAdd below act on one per-block counter
    // (CUDA atomics are not defined for per-thread local variables)
    __shared__ int totalSum;
if (threadIdx.x == 0) totalSum = 0;
__syncthreads();
int localVal = d_in[threadIdx.x];
for(int i=0;i<n;i++)
atomicAdd(&totalSum, 1);
__syncthreads();
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
int h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = i;
}
int * d_in;
hipMalloc((void**) &d_in, ARRAY_BYTES);
// hipMalloc((void*) &totalSum, sizeof(float));
hipMemcpy(d_in, h_in, ARRAY_BYTES, hipMemcpyHostToDevice);
for(int i=100;i<1000;i+=10){
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
for(int j=0;j<1000000;j++)
hipLaunchKernelGGL(( square), dim3(1), dim3(64), 0, 0, d_in,i);
gettimeofday(&tv2, NULL);
printf ("%d\t%f\n",i,
(double) (tv2.tv_usec - tv1.tv_usec) / 1000000 +
(double) (tv2.tv_sec - tv1.tv_sec));
}
// hipMemcpy(ans, totalSum, sizeof(float), hipMemcpyDeviceToHost);
// printf("%f\n",ans);
hipFree(d_in);
return 0;
}
|
36eff55287bc13fa99e1161cb5d6c060c10376c9.cu
|
#include <stdio.h>
#include <sys/time.h>
__global__ void square( int * d_in,int n){
    // shared so that thread 0's init and the atomicAdd below act on one per-block counter
    // (CUDA atomics are not defined for per-thread local variables)
    __shared__ int totalSum;
if (threadIdx.x == 0) totalSum = 0;
__syncthreads();
int localVal = d_in[threadIdx.x];
for(int i=0;i<n;i++)
atomicAdd(&totalSum, 1);
__syncthreads();
}
int main(int argc, char ** argv) {
const int ARRAY_SIZE = 64;
const int ARRAY_BYTES = ARRAY_SIZE * sizeof(float);
// generate the input array on the host
int h_in[ARRAY_SIZE];
for (int i = 0; i < ARRAY_SIZE; i++) {
h_in[i] = i;
}
int * d_in;
cudaMalloc((void**) &d_in, ARRAY_BYTES);
// cudaMalloc((void*) &totalSum, sizeof(float));
cudaMemcpy(d_in, h_in, ARRAY_BYTES, cudaMemcpyHostToDevice);
for(int i=100;i<1000;i+=10){
struct timeval tv1, tv2;
gettimeofday(&tv1, NULL);
for(int j=0;j<1000000;j++)
square<<<1, 64>>>(d_in,i);
gettimeofday(&tv2, NULL);
printf ("%d\t%f\n",i,
(double) (tv2.tv_usec - tv1.tv_usec) / 1000000 +
(double) (tv2.tv_sec - tv1.tv_sec));
}
// cudaMemcpy(ans, totalSum, sizeof(float), cudaMemcpyDeviceToHost);
// printf("%f\n",ans);
cudaFree(d_in);
return 0;
}
|
4135f57cc4f04e04b5ec7245cfa99101631c018b.hip
|
// !!! This is a file automatically generated by hipify!!!
/***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
/*
* C code for creating the Q data structure for fast convolution-based
* Hessian multiplication for arbitrary k-space trajectories.
*
* Inputs:
* kx - VECTOR of kx values, same length as ky and kz
* ky - VECTOR of ky values, same length as kx and kz
* kz - VECTOR of kz values, same length as kx and ky
* x - VECTOR of x values, same length as y and z
* y - VECTOR of y values, same length as x and z
* z - VECTOR of z values, same length as x and y
* phi - VECTOR of the Fourier transform of the spatial basis
* function, evaluated at [kx, ky, kz]. Same length as kx, ky, and kz.
*
* recommended g++ options:
* -O3 -lm -ffast-math -funroll-all-loops
*/
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <sys/time.h>
#include <malloc.h>
#include <parboil.h>
#include "file.h"
#include "computeQ.hip"
static void
setupMemoryGPU(int num, int size, float*& dev_ptr, float*& host_ptr)
{
hipMalloc ((void **) &dev_ptr, num * size);
CUDA_ERRCK;
hipMemcpy (dev_ptr, host_ptr, num * size, hipMemcpyHostToDevice);
CUDA_ERRCK;
}
static void
cleanupMemoryGPU(int num, int size, float *& dev_ptr, float * host_ptr)
{
hipMemcpy (host_ptr, dev_ptr, num * size, hipMemcpyDeviceToHost);
CUDA_ERRCK;
hipFree(dev_ptr);
CUDA_ERRCK;
}
int
main (int argc, char *argv[]) {
int numX, numK; /* Number of X and K values */
int original_numK; /* Number of K values in input file */
float *kx, *ky, *kz; /* K trajectory (3D vectors) */
float *x, *y, *z; /* X coordinates (3D vectors) */
float *phiR, *phiI; /* Phi values (complex) */
float *phiMag; /* Magnitude of Phi */
float *Qr, *Qi; /* Q signal (complex) */
struct kValues* kVals;
struct pb_Parameters *params;
struct pb_TimerSet timers;
pb_InitializeTimerSet(&timers);
/* Read command line */
params = pb_ReadParameters(&argc, argv);
if ((params->inpFiles[0] == NULL) || (params->inpFiles[1] != NULL))
{
fprintf(stderr, "Expecting one input filename\n");
exit(-1);
}
/* Read in data */
pb_SwitchToTimer(&timers, pb_TimerID_IO);
inputData(params->inpFiles[0],
&original_numK, &numX,
&kx, &ky, &kz,
&x, &y, &z,
&phiR, &phiI);
/* Reduce the number of k-space samples if a number is given
* on the command line */
if (argc < 2)
numK = original_numK;
else
{
int inputK;
char *end;
inputK = strtol(argv[1], &end, 10);
if (end == argv[1])
{
fprintf(stderr, "Expecting an integer parameter\n");
exit(-1);
}
numK = MIN(inputK, original_numK);
}
printf("%d pixels in output; %d samples in trajectory; using %d samples\n",
numX, original_numK, numK);
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
/* Create CPU data structures */
createDataStructsCPU(numK, numX, &phiMag, &Qr, &Qi);
/* GPU section 1 (precompute PhiMag) */
{
/* Mirror several data structures on the device */
float *phiR_d, *phiI_d;
float *phiMag_d;
pb_SwitchToTimer(&timers, pb_TimerID_COPY);
setupMemoryGPU(numK, sizeof(float), phiR_d, phiR);
setupMemoryGPU(numK, sizeof(float), phiI_d, phiI);
hipMalloc((void **)&phiMag_d, numK * sizeof(float));
CUDA_ERRCK;
hipDeviceSynchronize();
pb_SwitchToTimer(&timers, pb_TimerID_KERNEL);
computePhiMag_GPU(numK, phiR_d, phiI_d, phiMag_d);
hipDeviceSynchronize();
pb_SwitchToTimer(&timers, pb_TimerID_COPY);
cleanupMemoryGPU(numK, sizeof(float), phiMag_d, phiMag);
hipFree(phiR_d);
hipFree(phiI_d);
}
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
kVals = (struct kValues*)calloc(numK, sizeof (struct kValues));
for (int k = 0; k < numK; k++) {
kVals[k].Kx = kx[k];
kVals[k].Ky = ky[k];
kVals[k].Kz = kz[k];
kVals[k].PhiMag = phiMag[k];
}
free(phiMag);
/* GPU section 2 */
{
float *x_d, *y_d, *z_d;
float *Qr_d, *Qi_d;
pb_SwitchToTimer(&timers, pb_TimerID_COPY);
setupMemoryGPU(numX, sizeof(float), x_d, x);
setupMemoryGPU(numX, sizeof(float), y_d, y);
setupMemoryGPU(numX, sizeof(float), z_d, z);
hipMalloc((void **)&Qr_d, numX * sizeof(float));
CUDA_ERRCK;
hipMemset((void *)Qr_d, 0, numX * sizeof(float));
hipMalloc((void **)&Qi_d, numX * sizeof(float));
CUDA_ERRCK;
hipMemset((void *)Qi_d, 0, numX * sizeof(float));
hipDeviceSynchronize();
pb_SwitchToTimer(&timers, pb_TimerID_KERNEL);
computeQ_GPU(numK, numX, x_d, y_d, z_d, kVals, Qr_d, Qi_d);
hipDeviceSynchronize();
pb_SwitchToTimer(&timers, pb_TimerID_COPY);
hipFree(x_d);
hipFree(y_d);
hipFree(z_d);
cleanupMemoryGPU(numX, sizeof(float), Qr_d, Qr);
cleanupMemoryGPU(numX, sizeof(float), Qi_d, Qi);
}
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
if (params->outFile)
{
/* Write Q to file */
pb_SwitchToTimer(&timers, pb_TimerID_IO);
outputData(params->outFile, Qr, Qi, numX);
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
}
free (kx);
free (ky);
free (kz);
free (x);
free (y);
free (z);
free (phiR);
free (phiI);
free (kVals);
free (Qr);
free (Qi);
pb_SwitchToTimer(&timers, pb_TimerID_NONE);
pb_PrintTimerSet(&timers);
pb_FreeParameters(params);
return 0;
}
|
4135f57cc4f04e04b5ec7245cfa99101631c018b.cu
|
/***************************************************************************
*cr
*cr (C) Copyright 2007 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
/*
* C code for creating the Q data structure for fast convolution-based
* Hessian multiplication for arbitrary k-space trajectories.
*
* Inputs:
* kx - VECTOR of kx values, same length as ky and kz
* ky - VECTOR of ky values, same length as kx and kz
* kz - VECTOR of kz values, same length as kx and ky
* x - VECTOR of x values, same length as y and z
* y - VECTOR of y values, same length as x and z
* z - VECTOR of z values, same length as x and y
* phi - VECTOR of the Fourier transform of the spatial basis
* function, evaluated at [kx, ky, kz]. Same length as kx, ky, and kz.
*
* recommended g++ options:
* -O3 -lm -ffast-math -funroll-all-loops
*/
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <sys/time.h>
#include <malloc.h>
#include <parboil.h>
#include "file.h"
#include "computeQ.cu"
static void
setupMemoryGPU(int num, int size, float*& dev_ptr, float*& host_ptr)
{
cudaMalloc ((void **) &dev_ptr, num * size);
CUDA_ERRCK;
cudaMemcpy (dev_ptr, host_ptr, num * size, cudaMemcpyHostToDevice);
CUDA_ERRCK;
}
static void
cleanupMemoryGPU(int num, int size, float *& dev_ptr, float * host_ptr)
{
cudaMemcpy (host_ptr, dev_ptr, num * size, cudaMemcpyDeviceToHost);
CUDA_ERRCK;
cudaFree(dev_ptr);
CUDA_ERRCK;
}
int
main (int argc, char *argv[]) {
int numX, numK; /* Number of X and K values */
int original_numK; /* Number of K values in input file */
float *kx, *ky, *kz; /* K trajectory (3D vectors) */
float *x, *y, *z; /* X coordinates (3D vectors) */
float *phiR, *phiI; /* Phi values (complex) */
float *phiMag; /* Magnitude of Phi */
float *Qr, *Qi; /* Q signal (complex) */
struct kValues* kVals;
struct pb_Parameters *params;
struct pb_TimerSet timers;
pb_InitializeTimerSet(&timers);
/* Read command line */
params = pb_ReadParameters(&argc, argv);
if ((params->inpFiles[0] == NULL) || (params->inpFiles[1] != NULL))
{
fprintf(stderr, "Expecting one input filename\n");
exit(-1);
}
/* Read in data */
pb_SwitchToTimer(&timers, pb_TimerID_IO);
inputData(params->inpFiles[0],
&original_numK, &numX,
&kx, &ky, &kz,
&x, &y, &z,
&phiR, &phiI);
/* Reduce the number of k-space samples if a number is given
* on the command line */
if (argc < 2)
numK = original_numK;
else
{
int inputK;
char *end;
inputK = strtol(argv[1], &end, 10);
if (end == argv[1])
{
fprintf(stderr, "Expecting an integer parameter\n");
exit(-1);
}
numK = MIN(inputK, original_numK);
}
printf("%d pixels in output; %d samples in trajectory; using %d samples\n",
numX, original_numK, numK);
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
/* Create CPU data structures */
createDataStructsCPU(numK, numX, &phiMag, &Qr, &Qi);
/* GPU section 1 (precompute PhiMag) */
{
/* Mirror several data structures on the device */
float *phiR_d, *phiI_d;
float *phiMag_d;
pb_SwitchToTimer(&timers, pb_TimerID_COPY);
setupMemoryGPU(numK, sizeof(float), phiR_d, phiR);
setupMemoryGPU(numK, sizeof(float), phiI_d, phiI);
cudaMalloc((void **)&phiMag_d, numK * sizeof(float));
CUDA_ERRCK;
cudaThreadSynchronize();
pb_SwitchToTimer(&timers, pb_TimerID_KERNEL);
computePhiMag_GPU(numK, phiR_d, phiI_d, phiMag_d);
cudaThreadSynchronize();
pb_SwitchToTimer(&timers, pb_TimerID_COPY);
cleanupMemoryGPU(numK, sizeof(float), phiMag_d, phiMag);
cudaFree(phiR_d);
cudaFree(phiI_d);
}
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
kVals = (struct kValues*)calloc(numK, sizeof (struct kValues));
for (int k = 0; k < numK; k++) {
kVals[k].Kx = kx[k];
kVals[k].Ky = ky[k];
kVals[k].Kz = kz[k];
kVals[k].PhiMag = phiMag[k];
}
free(phiMag);
/* GPU section 2 */
{
float *x_d, *y_d, *z_d;
float *Qr_d, *Qi_d;
pb_SwitchToTimer(&timers, pb_TimerID_COPY);
setupMemoryGPU(numX, sizeof(float), x_d, x);
setupMemoryGPU(numX, sizeof(float), y_d, y);
setupMemoryGPU(numX, sizeof(float), z_d, z);
cudaMalloc((void **)&Qr_d, numX * sizeof(float));
CUDA_ERRCK;
cudaMemset((void *)Qr_d, 0, numX * sizeof(float));
cudaMalloc((void **)&Qi_d, numX * sizeof(float));
CUDA_ERRCK;
cudaMemset((void *)Qi_d, 0, numX * sizeof(float));
cudaThreadSynchronize();
pb_SwitchToTimer(&timers, pb_TimerID_KERNEL);
computeQ_GPU(numK, numX, x_d, y_d, z_d, kVals, Qr_d, Qi_d);
cudaThreadSynchronize();
pb_SwitchToTimer(&timers, pb_TimerID_COPY);
cudaFree(x_d);
cudaFree(y_d);
cudaFree(z_d);
cleanupMemoryGPU(numX, sizeof(float), Qr_d, Qr);
cleanupMemoryGPU(numX, sizeof(float), Qi_d, Qi);
}
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
if (params->outFile)
{
/* Write Q to file */
pb_SwitchToTimer(&timers, pb_TimerID_IO);
outputData(params->outFile, Qr, Qi, numX);
pb_SwitchToTimer(&timers, pb_TimerID_COMPUTE);
}
free (kx);
free (ky);
free (kz);
free (x);
free (y);
free (z);
free (phiR);
free (phiI);
free (kVals);
free (Qr);
free (Qi);
pb_SwitchToTimer(&timers, pb_TimerID_NONE);
pb_PrintTimerSet(&timers);
pb_FreeParameters(params);
return 0;
}
|
f20f65943321de47d419ec7bd015227bf999efad.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "CUDAContextScheduler.cuh"
namespace HC
{
__global__ void k_ComputeSurfacePixels(ComputeVertex* buffer, int nPixels, int surfaceW, int surfaceH) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
	if (tid >= nPixels) { // >= so tid == nPixels does not write past the end of buffer
return;
}
int pixelY = tid / surfaceW;
int pixelX = tid - pixelY * surfaceW;
	float r = (float)pixelX / (float)surfaceW;
float g = (float)pixelY / (float)surfaceH;
float b = 0.2f;
float4 vColor { r,g,b,1.0 };
	buffer[tid].position = float4 { (float)pixelX, (float)pixelY, 0.5f, 1.0f }; // explicit casts avoid narrowing-conversion errors in list-initialization
buffer[tid].color = vColor;
}
//Compute GFX Test
__global__ void k_ComputeBasicTriangle(ComputeVertex* buffer) {
buffer[0].position = float4{ 0.0f, 0.0f, 1.0f, 1.0f };
buffer[0].color = float4{ 1.0f, 0.0f, 0.0f, 1.0f };
buffer[1].position = float4{ 1000.0f, 0.0f, 1.0f, 1.0f };
buffer[1].color = float4{ 0.0f, 1.0f, 0.0f, 1.0f };
buffer[2].position = float4{ 0.0f, 0.0005f, 1.0f, 1.0f };
buffer[2].color = float4{ 0.0f, 0.0f, 1.0f, 1.0f };
}
//Pending compute concurrency implementation through CUDA streams (local async engines = 6)
__host__ void InvokeCBTKernel(ComputeVertex** buffer) {
k_ComputeBasicTriangle << <1, 1 >> > (*buffer);
}
__host__ void InvokeCSPKernel(ComputeVertex** surfaceBuffer, size_t* bufferSize, int surfaceW, int surfaceH) {
int nPixels = surfaceW * surfaceH;
int CTASize = 64;
int gridSize = nPixels / CTASize + 1;
//ComputeVertex* h_surfaceBuffer = (ComputeVertex*)malloc(*bufferSize);
k_ComputeSurfacePixels << <gridSize, CTASize, 0, 0 >> >
(*surfaceBuffer, nPixels, surfaceW, surfaceH);
//ProfileCUDA(hipMemcpy(h_surfaceBuffer, *surfaceBuffer, *bufferSize, hipMemcpyDeviceToHost));
//GenPPMFile("GfxExp", h_surfaceBuffer, surfaceW, surfaceH);
//free(h_surfaceBuffer);
}
__host__ __device__ void CheckError(hipError_t result, char const* const func, const char* const file, int const line) {
#if defined(_DEBUG)
if (result) {
unsigned int errId = static_cast<unsigned int>(result);
const char* errName = hipGetErrorName(result);
const char* errDesc = hipGetErrorString(result);
std::string errStr =
std::string("CUDA Error: ") + std::to_string(errId) + "\n" +
std::string(errName) + ": " + std::string(errDesc) +
std::string("\nFile: ") + file +
std::string("\nLine: ") + std::to_string(line);
hipError_t resetErr = hipDeviceReset();
if (resetErr) {
std::string resetErrStr =
std::string("CUDA Reset Error: ") + std::to_string(errId) + "\n" +
std::string(errName) + ": " + std::string(errDesc) +
std::string("\nFile: ") + file +
std::string("\nLine: ") + std::to_string(line);
errStr.append(resetErrStr);
}
StreamOutputToConsole(errStr.c_str(), 3000, stderr);
exit(99);
}
#endif
}
__host__ hipDeviceProp_t QueryDeviceProperties(int dIndex) {
hipDeviceProp_t dProps;
ProfileCUDA(hipGetDeviceProperties(&dProps, dIndex));
return dProps;
}
__host__ float ComputeSPEffectiveBandwith(int actThr, float kExecMs)
{
return (actThr * sizeof(float) * 3 / kExecMs / 1e6);
}
__host__ float ComputeComputationalThroughput(int nFlops, int actThr, float kExecS)
{
return (nFlops * actThr / (kExecS * 1e9));
}
__host__ float ComputeHostToDeviceBandwith(unsigned int bytes, float elpsdMs)
{
return (bytes * 1e6 / elpsdMs);
}
__host__ float ComputeDeviceToHostBandwith(unsigned int bytes, float elpsdMs)
{
return (bytes * 1e6 / elpsdMs);
}
__host__ std::string GetPerformanceMetrics(
float* kExecMs,
float* efBw,
float* compThr,
float* htdBw,
float* dthBw,
unsigned int conSleepMs)
{
std::string perfStr;
if (kExecMs) {
std::string kExecStr = "Kernel Execution Speed (MS): " + std::to_string(*kExecMs);
perfStr.append(kExecStr);
}
if (efBw) {
std::string efBwStr = "\nEffective Bandwith (GB/s): " + std::to_string(*efBw);
perfStr.append(efBwStr);
}
if (compThr) {
std::string compThrStr = "\nComputation Throughput (FLOPS/s): " + std::to_string(*compThr);
perfStr.append(compThrStr);
}
if (htdBw) {
std::string htdBwStr = "\nHost to Device bandwith (GB/s): " + std::to_string(*htdBw);
perfStr.append(htdBwStr);
}
if (dthBw) {
std::string dthBwStr = "\nDevice to Host bandwith (GB/s): " + std::to_string(*dthBw);
perfStr.append(dthBwStr);
}
StreamOutputToConsole(perfStr.c_str(), conSleepMs);
return perfStr;
}
__host__ void GenPPMFile(const char* fileName, HC::ComputeVertex* buffer, const int imgW, const int imgH) {
std::string fn = std::string("./") + fileName + ".ppm";
std::ofstream ofsGpu(fn.c_str(), std::ios::out | std::ios::binary);
ofsGpu << "P6\n" << imgW << " " << imgH << "\n255\n";
int nPixels = imgW * imgH;
for (int i = 0; i < nPixels; ++i) {
float4 v = buffer[i].color;
int r = int(255.99f * v.x * v.w);
int g = int(255.99f * v.y * v.w);
int b = int(255.99f * v.z * v.w);
ofsGpu << (unsigned char)r << (unsigned char)g << (unsigned char)b;
}
ofsGpu.close();
}
}
|
f20f65943321de47d419ec7bd015227bf999efad.cu
|
#include "CUDAContextScheduler.cuh"
namespace HC
{
__global__ void k_ComputeSurfacePixels(ComputeVertex* buffer, int nPixels, int surfaceW, int surfaceH) {
int tid = threadIdx.x + blockIdx.x * blockDim.x;
	if (tid >= nPixels) { // >= so tid == nPixels does not write past the end of buffer
return;
}
int pixelY = tid / surfaceW;
int pixelX = tid - pixelY * surfaceW;
	float r = (float)pixelX / (float)surfaceW;
float g = (float)pixelY / (float)surfaceH;
float b = 0.2f;
float4 vColor { r,g,b,1.0 };
	buffer[tid].position = float4 { (float)pixelX, (float)pixelY, 0.5f, 1.0f }; // explicit casts avoid narrowing-conversion errors in list-initialization
buffer[tid].color = vColor;
}
//Compute GFX Test
__global__ void k_ComputeBasicTriangle(ComputeVertex* buffer) {
buffer[0].position = float4{ 0.0f, 0.0f, 1.0f, 1.0f };
buffer[0].color = float4{ 1.0f, 0.0f, 0.0f, 1.0f };
buffer[1].position = float4{ 1000.0f, 0.0f, 1.0f, 1.0f };
buffer[1].color = float4{ 0.0f, 1.0f, 0.0f, 1.0f };
buffer[2].position = float4{ 0.0f, 0.0005f, 1.0f, 1.0f };
buffer[2].color = float4{ 0.0f, 0.0f, 1.0f, 1.0f };
}
//Pending compute concurrency implementation through CUDA streams (local async engines = 6)
__host__ void InvokeCBTKernel(ComputeVertex** buffer) {
k_ComputeBasicTriangle << <1, 1 >> > (*buffer);
}
__host__ void InvokeCSPKernel(ComputeVertex** surfaceBuffer, size_t* bufferSize, int surfaceW, int surfaceH) {
int nPixels = surfaceW * surfaceH;
int CTASize = 64;
int gridSize = nPixels / CTASize + 1;
//ComputeVertex* h_surfaceBuffer = (ComputeVertex*)malloc(*bufferSize);
k_ComputeSurfacePixels << <gridSize, CTASize, 0, 0 >> >
(*surfaceBuffer, nPixels, surfaceW, surfaceH);
//ProfileCUDA(cudaMemcpy(h_surfaceBuffer, *surfaceBuffer, *bufferSize, cudaMemcpyDeviceToHost));
//GenPPMFile("GfxExp", h_surfaceBuffer, surfaceW, surfaceH);
//free(h_surfaceBuffer);
}
__host__ __device__ void CheckError(cudaError_t result, char const* const func, const char* const file, int const line) {
#if defined(_DEBUG)
if (result) {
unsigned int errId = static_cast<unsigned int>(result);
const char* errName = cudaGetErrorName(result);
const char* errDesc = cudaGetErrorString(result);
std::string errStr =
std::string("CUDA Error: ") + std::to_string(errId) + "\n" +
std::string(errName) + ": " + std::string(errDesc) +
std::string("\nFile: ") + file +
std::string("\nLine: ") + std::to_string(line);
cudaError_t resetErr = cudaDeviceReset();
if (resetErr) {
std::string resetErrStr =
std::string("CUDA Reset Error: ") + std::to_string(errId) + "\n" +
std::string(errName) + ": " + std::string(errDesc) +
std::string("\nFile: ") + file +
std::string("\nLine: ") + std::to_string(line);
errStr.append(resetErrStr);
}
StreamOutputToConsole(errStr.c_str(), 3000, stderr);
exit(99);
}
#endif
}
__host__ cudaDeviceProp QueryDeviceProperties(int dIndex) {
cudaDeviceProp dProps;
ProfileCUDA(cudaGetDeviceProperties(&dProps, dIndex));
return dProps;
}
__host__ float ComputeSPEffectiveBandwith(int actThr, float kExecMs)
{
return (actThr * sizeof(float) * 3 / kExecMs / 1e6);
}
__host__ float ComputeComputationalThroughput(int nFlops, int actThr, float kExecS)
{
return (nFlops * actThr / (kExecS * 1e9));
}
__host__ float ComputeHostToDeviceBandwith(unsigned int bytes, float elpsdMs)
{
return (bytes * 1e6 / elpsdMs);
}
__host__ float ComputeDeviceToHostBandwith(unsigned int bytes, float elpsdMs)
{
return (bytes * 1e6 / elpsdMs);
}
__host__ std::string GetPerformanceMetrics(
float* kExecMs,
float* efBw,
float* compThr,
float* htdBw,
float* dthBw,
unsigned int conSleepMs)
{
std::string perfStr;
if (kExecMs) {
std::string kExecStr = "Kernel Execution Speed (MS): " + std::to_string(*kExecMs);
perfStr.append(kExecStr);
}
if (efBw) {
std::string efBwStr = "\nEffective Bandwith (GB/s): " + std::to_string(*efBw);
perfStr.append(efBwStr);
}
if (compThr) {
std::string compThrStr = "\nComputation Throughput (FLOPS/s): " + std::to_string(*compThr);
perfStr.append(compThrStr);
}
if (htdBw) {
std::string htdBwStr = "\nHost to Device bandwith (GB/s): " + std::to_string(*htdBw);
perfStr.append(htdBwStr);
}
if (dthBw) {
std::string dthBwStr = "\nDevice to Host bandwith (GB/s): " + std::to_string(*dthBw);
perfStr.append(dthBwStr);
}
StreamOutputToConsole(perfStr.c_str(), conSleepMs);
return perfStr;
}
__host__ void GenPPMFile(const char* fileName, HC::ComputeVertex* buffer, const int imgW, const int imgH) {
std::string fn = std::string("./") + fileName + ".ppm";
std::ofstream ofsGpu(fn.c_str(), std::ios::out | std::ios::binary);
ofsGpu << "P6\n" << imgW << " " << imgH << "\n255\n";
int nPixels = imgW * imgH;
for (int i = 0; i < nPixels; ++i) {
float4 v = buffer[i].color;
int r = int(255.99f * v.x * v.w);
int g = int(255.99f * v.y * v.w);
int b = int(255.99f * v.z * v.w);
ofsGpu << (unsigned char)r << (unsigned char)g << (unsigned char)b;
}
ofsGpu.close();
}
}
|
a6f4023a74ae04cbb0f977360c8ab457af056bec.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void splitNodes(int* octree, int* numNodes, int poolSize, int startNode) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
//Don't do anything if its out of bounds
if (index < poolSize) {
int node = octree[2 * (index+startNode)];
//Split the node if its flagged
if (node & 0x40000000) {
//Get a new node tile
int newNode = atomicAdd(numNodes, 8);
//Point this node at the new tile
octree[2 * (index+startNode)] = (octree[2 * (index+startNode)] & 0xC0000000) | (newNode & 0x3FFFFFFF);
//Initialize new child nodes to 0's
for (int off = 0; off < 8; off++) {
octree[2 * (newNode + off)] = 0;
octree[2 * (newNode + off) + 1] = 0;
}
}
}
}
|
a6f4023a74ae04cbb0f977360c8ab457af056bec.cu
|
#include "includes.h"
__global__ void splitNodes(int* octree, int* numNodes, int poolSize, int startNode) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
//Don't do anything if its out of bounds
if (index < poolSize) {
int node = octree[2 * (index+startNode)];
//Split the node if its flagged
if (node & 0x40000000) {
//Get a new node tile
int newNode = atomicAdd(numNodes, 8);
//Point this node at the new tile
octree[2 * (index+startNode)] = (octree[2 * (index+startNode)] & 0xC0000000) | (newNode & 0x3FFFFFFF);
//Initialize new child nodes to 0's
for (int off = 0; off < 8; off++) {
octree[2 * (newNode + off)] = 0;
octree[2 * (newNode + off) + 1] = 0;
}
}
}
}
|
d02d3930d4d126ec052e7db1548b6a8d420f0a59.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void div_scalar_float(int n,int idx, float dx,float *dy,int incy,float *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = dy[i] / dx;
}
}
|
d02d3930d4d126ec052e7db1548b6a8d420f0a59.cu
|
#include "includes.h"
__global__ void div_scalar_float(int n,int idx, float dx,float *dy,int incy,float *result) {
for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; i += blockDim.x * gridDim.x) {
if(i >= idx && i % incy == 0)
result[i] = dy[i] / dx;
}
}
|
655d6ef4aedf009f47486c741fd1dedc8e1663a1.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
#include <string>
#include <cstdlib>
#include <limits>
#include <algorithm>
using namespace std;
const int BLOCK_SIZE = 512;
#define idx(i,j,lda) ( (j) + ((i)*(lda)) )
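// Fixed-capacity (4000 vertices) membership set of still-unvisited vertices,
// kept in per-thread local memory inside the kernel.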
class mySet
{
private:
int size = 4000;
bool N[4000];
int cnt = 4000;
public:
__device__ mySet(){}
__device__ void init(int s)
{
this->cnt = s;
for (int i = 0; i < s; i++)
{
N[i] = true;
}
}
__device__ bool contains(int x)
{
return N[x];
}
__device__ void insert(int x)
{
if (N[x] == true)
return;
N[x] = true;
cnt++;
}
__device__ void erase(int x)
{
if (N[x] == true)
{
N[x] = false;
cnt--;
}
}
__device__ bool empty()
{
return (cnt == 0);
}
__device__ int getCount()
{
return cnt;
}
};
__device__ int getGlobalIdx_1D_1D()
{
return blockIdx.x *blockDim.x + threadIdx.x;
}
__device__ int getGlobalIdx_1D_2D()
{
return blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
}
__device__ int getGlobalIdx_2D_2D()
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int threadId = blockId * (blockDim.x * blockDim.y)
+ (threadIdx.y * blockDim.x) + threadIdx.x;
return threadId;
}
__device__ int getGlobalIdx_2D_1D()
{
int blockId = blockIdx.y * gridDim.x + blockIdx.x;
int threadId = blockId * blockDim.x + threadIdx.x;
return threadId;
}
__device__ int getGlobalIdx_3D_3D()
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x
+ gridDim.x * gridDim.y * blockIdx.z;
int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
+ (threadIdx.z * (blockDim.x * blockDim.y))
+ (threadIdx.y * blockDim.x) + threadIdx.x;
return threadId;
}
//source: http://cs.calvin.edu/courses/cs/374/CUDA/CUDA-Thread-Indexing-Cheatsheet.pdf
__global__ void prepareArray(int vertexCnt, int* d)
{
int threads = gridDim.x * gridDim.y * gridDim.z * blockDim.x * blockDim.y * blockDim.z;
int cycleCnt = (vertexCnt / threads > 0 ? vertexCnt / threads : 1);
for (int cycle = 0; cycle < cycleCnt; cycle++)
{
		int s = (blockIdx.x * blockDim.x + threadIdx.x) + threads * cycle; // position within the row
if(s >= vertexCnt)
return;
for (int i = 0; i < vertexCnt; i++)
{
d[vertexCnt *i+s] = INT_MAX / 2;
}
}
}
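// Each thread s runs Dijkstra from source vertex s over the adjacency matrix;
// d[i*vertexCnt + s] holds the current best distance from source s to vertex i.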
__global__ void dijsktra( int* __restrict__ edgeMatrix, int vertexCnt, int* d)
{
int threads = gridDim.x * gridDim.y * gridDim.z * blockDim.x * blockDim.y * blockDim.z;
int cycleCnt = (vertexCnt / threads > 0 ? vertexCnt / threads : 1);
for (int cycle = 0; cycle < cycleCnt; cycle++)
{
		int s = (blockIdx.x * blockDim.x + threadIdx.x) + threads * cycle; // position within the row
if(s >= vertexCnt)
return;
mySet N;
N.init(vertexCnt);
d[s*vertexCnt + s] = 0;
while (!N.empty())
{
int localMin = INT_MAX;
int cnt = N.getCount();
int u = 0;
int j = 0;
for (int i = 0; i < vertexCnt && j < cnt; i++)
{
if (!N.contains(i)) continue;
if (localMin > d[vertexCnt *i+s])
{
localMin = d[vertexCnt *i+s];
u = i;
}
j++;
}
N.erase(u);
for (int i = 0; i < vertexCnt; i++)
{
if (i == u || !N.contains(i)) continue;
if (edgeMatrix[u + i*vertexCnt] > 0)
{
int alt = d[vertexCnt *u+s] + edgeMatrix[u + i*vertexCnt];
atomicMin((d + vertexCnt * i + s), alt);
}
}
}
}
}
int *readMatrix(const char *path, int &n)
{
ifstream iFile;
iFile.open(path);
if (!iFile.is_open())
return NULL;
string line;
getline(iFile, line);
n = atoi(line.c_str());
int *matrix = new int[n * n];
int i;
for (i = 0; getline(iFile, line); i++)
{
size_t endpos = line.find_last_not_of(" \t\r\n");
if (string::npos != endpos)
line = line.substr(0, endpos + 1);
for (int j = 0; j < n; j++)
{
size_t pos = line.find_first_of(" ");
matrix[idx(i,j,n)] = stoi(line, &pos, 10);
line = line.substr(pos);
}
}
iFile.close();
return matrix;
}
void writeMatrix(const char *path, int n, int *matrix)
{
ofstream oFile;
oFile.open(path, fstream::trunc | fstream::out);
if (oFile.is_open())
{
for (int i = 0; i < n; i++)
{
for (int j = 0; j < n; j++)
{
oFile << matrix[idx(i, j, n)] << (j + 1 < n ? " " : "\n");
}
}
}
oFile.close();
}
int main(int argc, const char* argv[])
{
if (argc < 3 || argc > 4)
{
cout << "Program takes 2 or 3 parameters (matrix and thread count and optional output file)!\n";
return 1;
}
int threadCnt = atoi(argv[2]);
int stc = 0;
int *matrix = readMatrix(argv[1], stc);
// reading input file
if (matrix == NULL){
cout << "File doesn't exists" << endl;
cout << argv[1] << endl;
return 1;
}
hipEvent_t start, stop;
float elapsedTime;
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
int *cumatrix;
int *d;
hipMalloc((void **)&cumatrix, stc * stc * sizeof(int));
hipMemcpy(cumatrix, matrix, stc * stc * sizeof(int), hipMemcpyHostToDevice);
hipMalloc((void **)&d, stc * stc * sizeof(int));
hipLaunchKernelGGL(( prepareArray), dim3(stc / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, stc, d);
hipError_t code = hipDeviceSynchronize();
if (code != hipSuccess)
{
fprintf(stdout, "GPUassert: %s \n", hipGetErrorString(code));
}
hipLaunchKernelGGL(( dijsktra), dim3(stc / BLOCK_SIZE), dim3(BLOCK_SIZE), 0, 0, cumatrix, stc, d);
code = hipDeviceSynchronize();
if (code != hipSuccess)
{
fprintf(stdout, "GPUassert: %s \n", hipGetErrorString(code));
}
int *outM = new int[stc*stc];
hipMemcpy(outM, d, stc * stc * sizeof(int), hipMemcpyDeviceToHost);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
cout << "Time: " << elapsedTime << endl;
if (argc == 4)
{
cout << "Writing results...\n";
writeMatrix(argv[3], stc, outM);
}
hipFree(cumatrix);
hipFree(d);
delete [] matrix;
delete [] outM;
cout << "\nDone\n";
return 0;
}
|
655d6ef4aedf009f47486c741fd1dedc8e1663a1.cu
|
#include <iostream>
#include <fstream>
#include <string>
#include <cstdlib>
#include <limits>
#include <algorithm>
using namespace std;
const int BLOCK_SIZE = 512;
#define idx(i,j,lda) ( (j) + ((i)*(lda)) )
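// Fixed-capacity (4000 vertices) membership set of still-unvisited vertices,
// kept in per-thread local memory inside the kernel.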
class mySet
{
private:
int size = 4000;
bool N[4000];
int cnt = 4000;
public:
__device__ mySet(){}
__device__ void init(int s)
{
this->cnt = s;
for (int i = 0; i < s; i++)
{
N[i] = true;
}
}
__device__ bool contains(int x)
{
return N[x];
}
__device__ void insert(int x)
{
if (N[x] == true)
return;
N[x] = true;
cnt++;
}
__device__ void erase(int x)
{
if (N[x] == true)
{
N[x] = false;
cnt--;
}
}
__device__ bool empty()
{
return (cnt == 0);
}
__device__ int getCount()
{
return cnt;
}
};
__device__ int getGlobalIdx_1D_1D()
{
return blockIdx.x *blockDim.x + threadIdx.x;
}
__device__ int getGlobalIdx_1D_2D()
{
return blockIdx.x * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
}
__device__ int getGlobalIdx_2D_2D()
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x;
int threadId = blockId * (blockDim.x * blockDim.y)
+ (threadIdx.y * blockDim.x) + threadIdx.x;
return threadId;
}
__device__ int getGlobalIdx_2D_1D()
{
int blockId = blockIdx.y * gridDim.x + blockIdx.x;
int threadId = blockId * blockDim.x + threadIdx.x;
return threadId;
}
__device__ int getGlobalIdx_3D_3D()
{
int blockId = blockIdx.x + blockIdx.y * gridDim.x
+ gridDim.x * gridDim.y * blockIdx.z;
int threadId = blockId * (blockDim.x * blockDim.y * blockDim.z)
+ (threadIdx.z * (blockDim.x * blockDim.y))
+ (threadIdx.y * blockDim.x) + threadIdx.x;
return threadId;
}
// source: http://cs.calvin.edu/courses/cs/374/CUDA/CUDA-Thread-Indexing-Cheatsheet.pdf
__global__ void prepareArray(int vertexCnt, int* d)
{
int threads = gridDim.x * gridDim.y * gridDim.z * blockDim.x * blockDim.y * blockDim.z;
int cycleCnt = (vertexCnt / threads > 0 ? vertexCnt / threads : 1);
for (int cycle = 0; cycle < cycleCnt; cycle++)
{
        int s = (blockIdx.x * blockDim.x + threadIdx.x) + threads * cycle; // position within the row
if(s >= vertexCnt)
return;
for (int i = 0; i < vertexCnt; i++)
{
d[vertexCnt *i+s] = INT_MAX / 2;
}
}
}
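// One thread per source vertex s: each thread runs a complete Dijkstra pass.
// d is a vertexCnt x vertexCnt distance table in which d[i*vertexCnt + s]
// holds the best known distance from source s to vertex i; entries start at
// INT_MAX/2 (see prepareArray) so relaxations cannot overflow an int.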
__global__ void dijsktra( int* __restrict__ edgeMatrix, int vertexCnt, int* d)
{
int threads = gridDim.x * gridDim.y * gridDim.z * blockDim.x * blockDim.y * blockDim.z;
int cycleCnt = (vertexCnt / threads > 0 ? vertexCnt / threads : 1);
for (int cycle = 0; cycle < cycleCnt; cycle++)
{
        int s = (blockIdx.x * blockDim.x + threadIdx.x) + threads * cycle; // position within the row
if(s >= vertexCnt)
return;
mySet N;
N.init(vertexCnt);
d[s*vertexCnt + s] = 0;
while (!N.empty())
{
int localMin = INT_MAX;
int cnt = N.getCount();
int u = 0;
int j = 0;
for (int i = 0; i < vertexCnt && j < cnt; i++)
{
if (!N.contains(i)) continue;
if (localMin > d[vertexCnt *i+s])
{
localMin = d[vertexCnt *i+s];
u = i;
}
j++;
}
N.erase(u);
for (int i = 0; i < vertexCnt; i++)
{
if (i == u || !N.contains(i)) continue;
if (edgeMatrix[u + i*vertexCnt] > 0)
{
int alt = d[vertexCnt *u+s] + edgeMatrix[u + i*vertexCnt];
atomicMin((d + vertexCnt * i + s), alt);
}
}
}
}
}
int *readMatrix(const char *path, int &n)
{
ifstream iFile;
iFile.open(path);
if (!iFile.is_open())
return NULL;
string line;
getline(iFile, line);
n = atoi(line.c_str());
int *matrix = new int[n * n];
int i;
for (i = 0; getline(iFile, line); i++)
{
size_t endpos = line.find_last_not_of(" \t\r\n");
if (string::npos != endpos)
line = line.substr(0, endpos + 1);
for (int j = 0; j < n; j++)
{
size_t pos = line.find_first_of(" ");
matrix[idx(i,j,n)] = stoi(line, &pos, 10);
line = line.substr(pos);
}
}
iFile.close();
return matrix;
}
void writeMatrix(const char *path, int n, int *matrix)
{
ofstream oFile;
oFile.open(path, fstream::trunc | fstream::out);
if (oFile.is_open())
{
for (int i = 0; i < n; i++)
{
for (int j = 0; j < n; j++)
{
oFile << matrix[idx(i, j, n)] << (j + 1 < n ? " " : "\n");
}
}
}
oFile.close();
}
int main(int argc, const char* argv[])
{
if (argc < 3 || argc > 4)
{
cout << "Program takes 2 or 3 parameters (matrix and thread count and optional output file)!\n";
return 1;
}
int threadCnt = atoi(argv[2]);
int stc = 0;
int *matrix = readMatrix(argv[1], stc);
// reading input file
if (matrix == NULL){
cout << "File doesn't exists" << endl;
cout << argv[1] << endl;
return 1;
}
cudaEvent_t start, stop;
float elapsedTime;
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
int *cumatrix;
int *d;
cudaMalloc((void **)&cumatrix, stc * stc * sizeof(int));
cudaMemcpy(cumatrix, matrix, stc * stc * sizeof(int), cudaMemcpyHostToDevice);
cudaMalloc((void **)&d, stc * stc * sizeof(int));
    prepareArray<<<(stc + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(stc, d); // round the grid up so every source vertex gets a thread
cudaError_t code = cudaThreadSynchronize();
if (code != cudaSuccess)
{
fprintf(stdout, "GPUassert: %s \n", cudaGetErrorString(code));
}
    dijsktra<<<(stc + BLOCK_SIZE - 1) / BLOCK_SIZE, BLOCK_SIZE>>>(cumatrix, stc, d);
code = cudaThreadSynchronize();
if (code != cudaSuccess)
{
fprintf(stdout, "GPUassert: %s \n", cudaGetErrorString(code));
}
int *outM = new int[stc*stc];
cudaMemcpy(outM, d, stc * stc * sizeof(int), cudaMemcpyDeviceToHost);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
cout << "Time: " << elapsedTime << endl;
if (argc == 4)
{
cout << "Writing results...\n";
writeMatrix(argv[3], stc, outM);
}
cudaFree(cumatrix);
cudaFree(d);
delete [] matrix;
delete [] outM;
cout << "\nDone\n";
return 0;
}
|
9c87931cd22a76124da2329a4d758f9c0b136ce9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@precisions normal z -> c d s
*/
#include "common_magma.h"
#include "common_magmasparse.h"
#define BLOCK_SIZE 256
// axpy kernel for matrices stored in the MAGMA format
__global__ void
zgedensereimsplit_kernel(
int num_rows,
int num_cols,
magma_index_t* rowidx,
magmaDoubleComplex * A,
magmaDoubleComplex * ReA,
magmaDoubleComplex * ImA )
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if( row<num_rows ){
for( j=0; j<num_cols; j++ ){
ReA[ j ] = MAGMA_Z_MAKE( MAGMA_Z_REAL( A[ j ] ), 0.0 );
ImA[ j ] = MAGMA_Z_MAKE( MAGMA_Z_IMAG( A[ j ] ), 0.0 );
}
}
}
/**
Purpose
-------
This routine takes an input matrix A in DENSE format and located on the GPU
    and splits it into two matrices ReA and ImA containing the real and the
imaginary contributions of A.
The output matrices are allocated within the routine.
Arguments
---------
@param[in]
A magma_z_matrix
input matrix A.
@param[out]
ReA magma_z_matrix*
                output matrix containing real contributions.
@param[out]
ImA magma_z_matrix*
                output matrix containing imaginary contributions.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C"
magma_int_t
magma_zgedensereimsplit(
magma_z_matrix A,
magma_z_matrix *ReA,
magma_z_matrix *ImA,
magma_queue_t queue )
{
magma_zmtransfer( A, ReA, Magma_DEV, Magma_DEV, queue );
magma_zmtransfer( A, ImA, Magma_DEV, Magma_DEV, queue );
int m = A.num_rows;
int n = A.num_cols;
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
hipLaunchKernelGGL(( zgedensereimsplit_kernel), dim3(grid), dim3(threads), 0, queue ,
m, n, A.row, A.dval, ReA->dval, ImA->dval );
return MAGMA_SUCCESS;
}
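// Usage sketch (illustrative only; assumes MAGMA-sparse conventions for a
// dense matrix A already resident on the device and a valid queue):
//
//     magma_z_matrix ReA, ImA;
//     magma_zgedensereimsplit( A, &ReA, &ImA, queue );
//     // ... operate on ReA.dval and ImA.dval ...
//     magma_zmfree( &ReA, queue );
//     magma_zmfree( &ImA, queue );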
|
9c87931cd22a76124da2329a4d758f9c0b136ce9.cu
|
/*
-- MAGMA (version 1.7.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date September 2015
@precisions normal z -> c d s
*/
#include "common_magma.h"
#include "common_magmasparse.h"
#define BLOCK_SIZE 256
// axpy kernel for matrices stored in the MAGMA format
__global__ void
zgedensereimsplit_kernel(
int num_rows,
int num_cols,
magma_index_t* rowidx,
magmaDoubleComplex * A,
magmaDoubleComplex * ReA,
magmaDoubleComplex * ImA )
{
int row = blockIdx.x*blockDim.x+threadIdx.x;
int j;
if( row<num_rows ){
for( j=0; j<num_cols; j++ ){
ReA[ j ] = MAGMA_Z_MAKE( MAGMA_Z_REAL( A[ j ] ), 0.0 );
ImA[ j ] = MAGMA_Z_MAKE( MAGMA_Z_IMAG( A[ j ] ), 0.0 );
}
}
}
/**
Purpose
-------
This routine takes an input matrix A in DENSE format and located on the GPU
    and splits it into two matrices ReA and ImA containing the real and the
imaginary contributions of A.
The output matrices are allocated within the routine.
Arguments
---------
@param[in]
A magma_z_matrix
input matrix A.
@param[out]
ReA magma_z_matrix*
                output matrix containing real contributions.
@param[out]
ImA magma_z_matrix*
                output matrix containing imaginary contributions.
@param[in]
queue magma_queue_t
Queue to execute in.
@ingroup magmasparse_zblas
********************************************************************/
extern "C"
magma_int_t
magma_zgedensereimsplit(
magma_z_matrix A,
magma_z_matrix *ReA,
magma_z_matrix *ImA,
magma_queue_t queue )
{
magma_zmtransfer( A, ReA, Magma_DEV, Magma_DEV, queue );
magma_zmtransfer( A, ImA, Magma_DEV, Magma_DEV, queue );
int m = A.num_rows;
int n = A.num_cols;
dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
magma_int_t threads = BLOCK_SIZE;
zgedensereimsplit_kernel<<< grid, threads, 0, queue >>>
( m, n, A.row, A.dval, ReA->dval, ImA->dval );
return MAGMA_SUCCESS;
}
|
2c2d84c80c182ffbf8d3385c0257c57ee05a7fae.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// TODO: reduce the apparent redundancy of all the code below.
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/pool_op.h"
namespace caffe2 {
namespace {
struct LpPoolFunctor {
explicit LpPoolFunctor(const OperatorBase& /* op */) {}
};
} // namespace
namespace {
using c10::hip::compat::abs;
using c10::hip::compat::pow;
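// Lp pooling computes each output as ( sum_{window} |x|^p )^(1/p).
// Differentiating that expression gives the gradient used by the backward
// kernels below: dX = dY * x * |x|^(p-2) / y^(p-1), where y is the pooled
// output whose window covers x.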
template <typename T>
__global__ void LpPoolForwardNCHW(
const int nthreads,
const T *const bottom_data,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int kernel_h,
const int kernel_w,
const int stride_h,
const int stride_w,
const int pad_t,
const int pad_l,
T *const top_data,
const T p) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index;
int pw = n % pooled_width;
n /= pooled_width;
int ph = n % pooled_height;
n /= pooled_height;
int c = n % channels;
n /= channels;
int hstart = ph * stride_h - pad_t;
int wstart = pw * stride_w - pad_l;
int hend = min(hstart + kernel_h, height);
int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
top_data[index] = 0;
int bottom_offset = (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
top_data[index] +=
pow(abs(bottom_data[bottom_offset + h * width + w]), p);
}
}
top_data[index] = pow(top_data[index], static_cast<T>(1.0) / p);
}
}
template <typename T>
__global__ void LpPoolForwardNHWC(
const int nthreads,
const T *const bottom_data,
const int height,
const int width,
const int channels,
const int pooled_height,
const int pooled_width,
const int kernel_h,
const int kernel_w,
const int stride_h,
const int stride_w,
const int pad_t,
const int pad_l,
T *const top_data,
const T p) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int c = index % channels;
int pw = (index / channels) % pooled_width;
int ph = (index / channels / pooled_width) % pooled_height;
int n = index / channels / pooled_width / pooled_height;
int hstart = ph * stride_h - pad_t;
int wstart = pw * stride_w - pad_l;
int hend = min(hstart + kernel_h, height);
int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
T output = 0;
int bottom_offset = n * height * width * channels + c;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
output += pow(
abs(bottom_data[bottom_offset + (h * width + w) * channels]), p);
}
}
top_data[index] = pow(output, static_cast<T>(1.0) / p);
}
}
template <typename T>
__global__ void LpPoolBackwardNCHW(
const int nthreads,
const T* const top_diff,
const T* const top_data,
const T* const bottom_data,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int kernel_h,
const int kernel_w,
const int stride_h,
const int stride_w,
const int pad_t,
const int pad_l,
T* const bottom_diff,
const int p) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width + pad_l;
const int h = (index / width) % height + pad_t;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
T gradient = 0;
const T* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
const T* const top_data_slice =
top_data + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride_h - pad_t;
int wstart = pw * stride_w - pad_l;
hstart = max(hstart, 0);
wstart = max(wstart, 0);
gradient += top_diff_slice[ph * pooled_width + pw] *
bottom_data[index] * pow(abs(bottom_data[index]), p - 2) /
pow(top_data_slice[ph * pooled_width + pw], p - 1);
}
}
bottom_diff[index] = gradient;
}
}
template <typename T>
__global__ void LpPoolBackwardNHWC(
const int nthreads,
const T* const top_diff,
const T* const top_data,
const T* const bottom_data,
const int height,
const int width,
const int channels,
const int pooled_height,
const int pooled_width,
const int kernel_h,
const int kernel_w,
const int stride_h,
const int stride_w,
const int pad_t,
const int pad_l,
T* const bottom_diff,
const T p) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int c = index % channels;
const int w = index / channels % width + pad_l;
const int h = (index / channels / width) % height + pad_t;
const int n = index / channels / width / height;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
T gradient = 0;
const T* const top_diff_slice =
top_diff + n * pooled_height * pooled_width * channels + c;
const T* const top_data_slice =
top_data + n * pooled_height * pooled_width * channels + c;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
gradient += top_diff_slice[(ph * pooled_width + pw) * channels] *
bottom_data[index] * pow(abs(bottom_data[index]), p - 2) /
pow(top_data_slice[(ph * pooled_width + pw) * channels], p - 1);
}
}
bottom_diff[index] = gradient;
}
}
} // namespace
template <>
bool PoolOp<float, CUDAContext, LpPoolFunctor>::RunOnDeviceWithOrderNCHW() {
auto& X = Input(0);
auto* Y = Output(0);
ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, X.dim32(1));
int output_size = Y->numel();
hipLaunchKernelGGL(( LpPoolForwardNCHW<float>)
, dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
X.data<float>(),
X.dim32(1),
X.dim32(2),
X.dim32(3),
Y->dim32(2),
Y->dim32(3),
kernel_h(),
kernel_w(),
stride_h(),
stride_w(),
pad_t(),
pad_l(),
Y->template mutable_data<float>(),
OperatorBase::GetSingleArgument<float>("p", 2.0));
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
bool PoolOp<float, CUDAContext, LpPoolFunctor>::RunOnDeviceWithOrderNHWC() {
auto& X = Input(0);
auto* Y = Output(0);
ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, X.dim32(3));
int output_size = Y->numel();
hipLaunchKernelGGL(( LpPoolForwardNHWC<float>)
, dim3(CAFFE_GET_BLOCKS(output_size)),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
output_size,
X.data<float>(),
X.dim32(1),
X.dim32(2),
X.dim32(3),
Y->dim32(1),
Y->dim32(2),
kernel_h(),
kernel_w(),
stride_h(),
stride_w(),
pad_t(),
pad_l(),
Y->template mutable_data<float>(),
OperatorBase::GetSingleArgument<float>("p", 2.0));
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
bool PoolGradientOp<float, CUDAContext, LpPoolFunctor>::
RunOnDeviceWithOrderNCHW() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
CAFFE_ENFORCE_EQ(dY.dim(), 4);
auto* dX = Output(0, X.sizes(), at::dtype<float>());
ConvPoolOpBase<CUDAContext>::ComputePads({X.dim32(2), X.dim32(3)});
hipLaunchKernelGGL(( LpPoolBackwardNCHW<float>)
, dim3(CAFFE_GET_BLOCKS(X.numel())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
X.numel(),
dY.data<float>(),
Y.data<float>(),
X.data<float>(),
X.dim32(1),
X.dim32(2),
X.dim32(3),
dY.dim32(2),
dY.dim32(3),
kernel_h(),
kernel_w(),
stride_h(),
stride_w(),
pad_t(),
pad_l(),
dX->template mutable_data<float>(),
OperatorBase::GetSingleArgument<float>("p", 2.0));
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
bool PoolGradientOp<float, CUDAContext, LpPoolFunctor>::
RunOnDeviceWithOrderNHWC() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
CAFFE_ENFORCE_EQ(dY.dim(), 4);
auto* dX = Output(0, X.sizes(), at::dtype<float>());
ConvPoolOpBase<CUDAContext>::ComputePads({X.dim32(1), X.dim32(2)});
hipLaunchKernelGGL(( LpPoolBackwardNHWC<float>)
, dim3(CAFFE_GET_BLOCKS(X.numel())),
dim3(CAFFE_CUDA_NUM_THREADS),
0,
context_.cuda_stream(),
X.numel(),
dY.data<float>(),
Y.data<float>(),
X.data<float>(),
X.dim32(1),
X.dim32(2),
X.dim32(3),
dY.dim32(1),
dY.dim32(2),
kernel_h(),
kernel_w(),
stride_h(),
stride_w(),
pad_t(),
pad_l(),
dX->template mutable_data<float>(),
OperatorBase::GetSingleArgument<float>("p", 2.0));
C10_HIP_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(LpPool, PoolOp<float, CUDAContext, LpPoolFunctor>);
REGISTER_CUDA_OPERATOR(
LpPoolGradient,
PoolGradientOp<float, CUDAContext, LpPoolFunctor>);
}
|
2c2d84c80c182ffbf8d3385c0257c57ee05a7fae.cu
|
// TODO: reduce the apparent redundancy of all the code below.
#include <cfloat>
#include "caffe2/core/context_gpu.h"
#include "caffe2/operators/pool_op.h"
namespace caffe2 {
namespace {
struct LpPoolFunctor {
explicit LpPoolFunctor(const OperatorBase& /* op */) {}
};
} // namespace
namespace {
using c10::cuda::compat::abs;
using c10::cuda::compat::pow;
template <typename T>
__global__ void LpPoolForwardNCHW(
const int nthreads,
const T *const bottom_data,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int kernel_h,
const int kernel_w,
const int stride_h,
const int stride_w,
const int pad_t,
const int pad_l,
T *const top_data,
const T p) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int n = index;
int pw = n % pooled_width;
n /= pooled_width;
int ph = n % pooled_height;
n /= pooled_height;
int c = n % channels;
n /= channels;
int hstart = ph * stride_h - pad_t;
int wstart = pw * stride_w - pad_l;
int hend = min(hstart + kernel_h, height);
int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
top_data[index] = 0;
int bottom_offset = (n * channels + c) * height * width;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
top_data[index] +=
pow(abs(bottom_data[bottom_offset + h * width + w]), p);
}
}
top_data[index] = pow(top_data[index], static_cast<T>(1.0) / p);
}
}
template <typename T>
__global__ void LpPoolForwardNHWC(
const int nthreads,
const T *const bottom_data,
const int height,
const int width,
const int channels,
const int pooled_height,
const int pooled_width,
const int kernel_h,
const int kernel_w,
const int stride_h,
const int stride_w,
const int pad_t,
const int pad_l,
T *const top_data,
const T p) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int c = index % channels;
int pw = (index / channels) % pooled_width;
int ph = (index / channels / pooled_width) % pooled_height;
int n = index / channels / pooled_width / pooled_height;
int hstart = ph * stride_h - pad_t;
int wstart = pw * stride_w - pad_l;
int hend = min(hstart + kernel_h, height);
int wend = min(wstart + kernel_w, width);
hstart = max(hstart, 0);
wstart = max(wstart, 0);
T output = 0;
int bottom_offset = n * height * width * channels + c;
for (int h = hstart; h < hend; ++h) {
for (int w = wstart; w < wend; ++w) {
output += pow(
abs(bottom_data[bottom_offset + (h * width + w) * channels]), p);
}
}
top_data[index] = pow(output, static_cast<T>(1.0) / p);
}
}
template <typename T>
__global__ void LpPoolBackwardNCHW(
const int nthreads,
const T* const top_diff,
const T* const top_data,
const T* const bottom_data,
const int channels,
const int height,
const int width,
const int pooled_height,
const int pooled_width,
const int kernel_h,
const int kernel_w,
const int stride_h,
const int stride_w,
const int pad_t,
const int pad_l,
T* const bottom_diff,
const int p) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int w = index % width + pad_l;
const int h = (index / width) % height + pad_t;
const int c = (index / width / height) % channels;
const int n = index / width / height / channels;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
T gradient = 0;
const T* const top_diff_slice =
top_diff + (n * channels + c) * pooled_height * pooled_width;
const T* const top_data_slice =
top_data + (n * channels + c) * pooled_height * pooled_width;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
int hstart = ph * stride_h - pad_t;
int wstart = pw * stride_w - pad_l;
hstart = max(hstart, 0);
wstart = max(wstart, 0);
gradient += top_diff_slice[ph * pooled_width + pw] *
bottom_data[index] * pow(abs(bottom_data[index]), p - 2) /
pow(top_data_slice[ph * pooled_width + pw], p - 1);
}
}
bottom_diff[index] = gradient;
}
}
template <typename T>
__global__ void LpPoolBackwardNHWC(
const int nthreads,
const T* const top_diff,
const T* const top_data,
const T* const bottom_data,
const int height,
const int width,
const int channels,
const int pooled_height,
const int pooled_width,
const int kernel_h,
const int kernel_w,
const int stride_h,
const int stride_w,
const int pad_t,
const int pad_l,
T* const bottom_diff,
const T p) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
// find out the local index
// find out the local offset
const int c = index % channels;
const int w = index / channels % width + pad_l;
const int h = (index / channels / width) % height + pad_t;
const int n = index / channels / width / height;
const int phstart = (h < kernel_h) ? 0 : (h - kernel_h) / stride_h + 1;
const int phend = min(h / stride_h + 1, pooled_height);
const int pwstart = (w < kernel_w) ? 0 : (w - kernel_w) / stride_w + 1;
const int pwend = min(w / stride_w + 1, pooled_width);
T gradient = 0;
const T* const top_diff_slice =
top_diff + n * pooled_height * pooled_width * channels + c;
const T* const top_data_slice =
top_data + n * pooled_height * pooled_width * channels + c;
for (int ph = phstart; ph < phend; ++ph) {
for (int pw = pwstart; pw < pwend; ++pw) {
// figure out the pooling size
gradient += top_diff_slice[(ph * pooled_width + pw) * channels] *
bottom_data[index] * pow(abs(bottom_data[index]), p - 2) /
pow(top_data_slice[(ph * pooled_width + pw) * channels], p - 1);
}
}
bottom_diff[index] = gradient;
}
}
} // namespace
template <>
bool PoolOp<float, CUDAContext, LpPoolFunctor>::RunOnDeviceWithOrderNCHW() {
auto& X = Input(0);
auto* Y = Output(0);
ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, X.dim32(1));
int output_size = Y->numel();
LpPoolForwardNCHW<float>
<<<CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
X.data<float>(),
X.dim32(1),
X.dim32(2),
X.dim32(3),
Y->dim32(2),
Y->dim32(3),
kernel_h(),
kernel_w(),
stride_h(),
stride_w(),
pad_t(),
pad_l(),
Y->template mutable_data<float>(),
OperatorBase::GetSingleArgument<float>("p", 2.0));
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
bool PoolOp<float, CUDAContext, LpPoolFunctor>::RunOnDeviceWithOrderNHWC() {
auto& X = Input(0);
auto* Y = Output(0);
ConvPoolOpBase<CUDAContext>::SetOutputSize(X, Y, X.dim32(3));
int output_size = Y->numel();
LpPoolForwardNHWC<float>
<<<CAFFE_GET_BLOCKS(output_size),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
output_size,
X.data<float>(),
X.dim32(1),
X.dim32(2),
X.dim32(3),
Y->dim32(1),
Y->dim32(2),
kernel_h(),
kernel_w(),
stride_h(),
stride_w(),
pad_t(),
pad_l(),
Y->template mutable_data<float>(),
OperatorBase::GetSingleArgument<float>("p", 2.0));
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
bool PoolGradientOp<float, CUDAContext, LpPoolFunctor>::
RunOnDeviceWithOrderNCHW() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
CAFFE_ENFORCE_EQ(dY.dim(), 4);
auto* dX = Output(0, X.sizes(), at::dtype<float>());
ConvPoolOpBase<CUDAContext>::ComputePads({X.dim32(2), X.dim32(3)});
LpPoolBackwardNCHW<float>
<<<CAFFE_GET_BLOCKS(X.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
X.numel(),
dY.data<float>(),
Y.data<float>(),
X.data<float>(),
X.dim32(1),
X.dim32(2),
X.dim32(3),
dY.dim32(2),
dY.dim32(3),
kernel_h(),
kernel_w(),
stride_h(),
stride_w(),
pad_t(),
pad_l(),
dX->template mutable_data<float>(),
OperatorBase::GetSingleArgument<float>("p", 2.0));
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
template <>
bool PoolGradientOp<float, CUDAContext, LpPoolFunctor>::
RunOnDeviceWithOrderNHWC() {
auto& X = Input(0);
auto& Y = Input(1);
auto& dY = Input(2);
CAFFE_ENFORCE_EQ(dY.dim(), 4);
auto* dX = Output(0, X.sizes(), at::dtype<float>());
ConvPoolOpBase<CUDAContext>::ComputePads({X.dim32(1), X.dim32(2)});
LpPoolBackwardNHWC<float>
<<<CAFFE_GET_BLOCKS(X.numel()),
CAFFE_CUDA_NUM_THREADS,
0,
context_.cuda_stream()>>>(
X.numel(),
dY.data<float>(),
Y.data<float>(),
X.data<float>(),
X.dim32(1),
X.dim32(2),
X.dim32(3),
dY.dim32(1),
dY.dim32(2),
kernel_h(),
kernel_w(),
stride_h(),
stride_w(),
pad_t(),
pad_l(),
dX->template mutable_data<float>(),
OperatorBase::GetSingleArgument<float>("p", 2.0));
C10_CUDA_KERNEL_LAUNCH_CHECK();
return true;
}
REGISTER_CUDA_OPERATOR(LpPool, PoolOp<float, CUDAContext, LpPoolFunctor>);
REGISTER_CUDA_OPERATOR(
LpPoolGradient,
PoolGradientOp<float, CUDAContext, LpPoolFunctor>);
}
|
1861d6d67c8f80fd04eca15cefe6c28eebc7a360.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2019 Xilinx Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <math.h>
#include <algorithm>
#include "../../include/cuda/nndct_fix_kernels.cuh"
#include "../../include/cuda/nndct_cu_utils.h"
#include "../../include/cuda/nndct_cuda_math.h"
#ifdef __HIP_PLATFORM_AMD__
#define CUDART_INF_F __int_as_float(0x7f800000)
#define CUDART_INF __longlong_as_double(0x7ff0000000000000ULL)
#else
#include <math_constants.h>
#endif
template<typename Dtype>
__global__ static void _set(const int N,
Dtype* data,
Dtype val){
NNDCT_KERNEL_LOOP(index, N){
data[index] = val;
}
}
template<typename Dtype>
__global__ static void _scale_inplace(const int N,
Dtype* data,
Dtype scale){
NNDCT_KERNEL_LOOP(index, N){
data[index] *= scale;
}
}
template<typename Dtype>
__global__ static void _scale(const int N,
const Dtype* src,
Dtype* dst,
Dtype scale){
NNDCT_KERNEL_LOOP(index, N){
dst[index] = scale * src[index];
}
}
template<typename Dtype>
__global__ static void _sub(const int N,
const Dtype* src,
Dtype* dst){
NNDCT_KERNEL_LOOP(index, N){
dst[index] = src[index] - dst[index];
}
}
template<typename Dtype>
__global__ static void _pow(const int N,
Dtype* data,
Dtype power){
NNDCT_KERNEL_LOOP(index, N){
data[index] = pow(data[index], power);
}
}
//from kaldi, reduction without device handle
enum EnumTransformReduce {
SUMAB, SUM, MAX, MIN, LINFNORM, L2NORM, L1NORM, L0NORM, LPNORM
};
template<EnumTransformReduce TransReduceType, typename Dtype>
struct TransReduceOp {
__forceinline__
__device__ Dtype InitValue() const {
return Dtype(0);
}
__forceinline__
__device__ Dtype Transform(const Dtype& x) const {
return Dtype(0);
}
__forceinline__
__device__ Dtype Reduce(const Dtype& a, const Dtype& b) const {
return Dtype(0);
}
__forceinline__
__device__ Dtype PostReduce(const Dtype& x, const Dtype& output) const {
return Dtype(0);
}
};
template<typename Dtype>
struct TransReduceOp<SUM, Dtype> {
__forceinline__
__device__ Dtype InitValue() const {
return Dtype(0);
}
__forceinline__
__device__ Dtype Transform(const Dtype& x) const {
return x;
}
__forceinline__
__device__ Dtype Reduce(const Dtype& a, const Dtype& b) const {
return a + b;
}
__forceinline__
__device__ Dtype PostReduce(const Dtype& x, const Dtype& output) const {
return x;
}
};
template<typename Dtype>
struct TransReduceOp<MAX, Dtype> {
__forceinline__
__device__ Dtype InitValue() const {
return sizeof(Dtype) == sizeof(float) ? -CUDART_INF_F : -CUDART_INF;
}
__forceinline__
__device__ Dtype Transform(const Dtype& x) const {
return x;
}
__forceinline__
__device__ Dtype Reduce(const Dtype& a, const Dtype& b) const {
return fmax(a, b);
}
__forceinline__
__device__ Dtype PostReduce(const Dtype& x, const Dtype& output) const {
return x;
}
};
template<typename Dtype>
struct TransReduceOp<MIN, Dtype> {
__forceinline__
__device__ Dtype InitValue() const {
return sizeof(Dtype) == sizeof(float) ? CUDART_INF_F : CUDART_INF;
}
__forceinline__
__device__ Dtype Transform(const Dtype& x) const {
return x;
}
__forceinline__
__device__ Dtype Reduce(const Dtype& a, const Dtype& b) const {
return min(a, b);
}
__forceinline__
__device__ Dtype PostReduce(const Dtype& x, const Dtype& output) const {
return x;
}
};
template<EnumTransformReduce TransReduceType, typename Dtype>
__global__
static void _vec_transform_reduce(const int dim,const Dtype* src, Dtype* dst,
const TransReduceOp<TransReduceType, Dtype> op) {
__shared__ Dtype sdata[CU1DBLOCK];
Dtype tdata = op.InitValue();
const int tid = threadIdx.x;
const int vec_len = dim;
const int grid_stride = gridDim.x * blockDim.x;
int i = (blockIdx.x * blockDim.x + tid);
// Grid reduce. Loop over the whole vector v.
for (; i < vec_len; i += grid_stride) {
tdata = op.Reduce(tdata, op.Transform(src[i]));
}
sdata[tid] = tdata;
__syncthreads();
// Tree reduce
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]);
}
__syncthreads();
}
// Reduce last warp.
if (tid < warpSize) {
for (int shift = warpSize; shift > 0; shift >>= 1) {
sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]);
#ifdef __HIP_PLATFORM_AMD__
__threadfence_block();
#endif
}
}
// Output to vector dst.
if (tid == 0)
dst[blockIdx.x] = op.PostReduce(sdata[0], dst[blockIdx.x]);
}
template<EnumTransformReduce TransReduceType, typename Dtype>
__global__
static void _vec_transform_reduce_inplace(const int dim,Dtype* data,
const TransReduceOp<TransReduceType, Dtype> op) {
__shared__ Dtype sdata[CU1DBLOCK];
Dtype tdata = op.InitValue();
const int tid = threadIdx.x;
const int vec_len = dim;
const int grid_stride = gridDim.x * blockDim.x;
int i = (blockIdx.x * blockDim.x + tid);
// Grid reduce. Loop over the whole vector v.
for (; i < vec_len; i += grid_stride) {
tdata = op.Reduce(tdata, op.Transform(data[i]));
data[i]=0;
}
sdata[tid] = tdata;
__syncthreads();
// Tree reduce
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]);
}
__syncthreads();
}
// Reduce last warp.
if (tid < warpSize) {
for (int shift = warpSize; shift > 0; shift >>= 1) {
sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]);
#ifdef __HIP_PLATFORM_AMD__
__threadfence_block();
#endif
}
}
// Output to vector dst.
if (tid == 0){
data[blockIdx.x] = op.PostReduce(sdata[0], data[blockIdx.x]);
}
}
template<EnumTransformReduce TransReduceType, typename Dtype>
__global__ static void _single_reduce(const int dim, Dtype* dst,
const TransReduceOp<TransReduceType, Dtype> op){
for(int i = 1; i < dim; i++){
dst[0] = op.Reduce(dst[0], dst[i]);
dst[i] = 0;
}
}
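// The host wrappers below launch the reduction in two stages: the grid-wide
// kernel leaves one partial result per block in dst[blockIdx.x] (or in place
// in data[blockIdx.x]), and _single_reduce<<<1, 1>>> then folds those
// per-block partials into element 0.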
template<typename Dtype>
void cuda_set(const int N, Dtype* data, Dtype val){
hipLaunchKernelGGL(( _set), dim3(NNDCT_GET_BLOCKS(N)), dim3(NNDCT_CUDA_NUM_THREADS), 0, 0,
N, data, val);
}
template
void cuda_set<float>(const int N, float* data, float val);
template
void cuda_set<double>(const int N, double* data, double val);
template<typename Dtype>
void cuda_scale_inplace(const int N, Dtype* data, Dtype scale){
hipLaunchKernelGGL(( _scale_inplace), dim3(NNDCT_GET_BLOCKS(N)), dim3(NNDCT_CUDA_NUM_THREADS), 0, 0,
N, data, scale);
}
template
void cuda_scale_inplace<float>(const int N, float* data, float scale);
template
void cuda_scale_inplace<double>(const int N, double* data, double scale);
template<typename Dtype>
void cuda_scale(const int N, const Dtype* src, Dtype* dst, Dtype scale){
hipLaunchKernelGGL(( _scale), dim3(NNDCT_GET_BLOCKS(N)), dim3(NNDCT_CUDA_NUM_THREADS), 0, 0,
N, src, dst, scale);
}
template
void cuda_scale<float>(const int N, const float* src, float* dst, float scale);
template
void cuda_scale<double>(const int N, const double* src, double* dst, double scale);
template<typename Dtype>
void cuda_pow(const int N, Dtype* data, Dtype pow){
hipLaunchKernelGGL(( _pow), dim3(NNDCT_GET_BLOCKS(N)), dim3(NNDCT_CUDA_NUM_THREADS), 0, 0,
N, data, pow);
}
template
void cuda_pow<float>(const int N, float* data, float pow);
template
void cuda_pow<double>(const int N, double* data, double pow);
template<typename Dtype>
void cuda_max(const int N, const Dtype* src, Dtype* dst){
int dimGrid=NNDCT_GET_BLOCKS1D(N);
hipLaunchKernelGGL(( _vec_transform_reduce), dim3(dimGrid), dim3(CU1DBLOCK), 0, 0,
N, src, dst, TransReduceOp<MAX, Dtype>());
hipLaunchKernelGGL(( _single_reduce), dim3(1), dim3(1), 0, 0,
dimGrid, dst, TransReduceOp<MAX, Dtype>());
}
template
void cuda_max<float>(const int N, const float* src, float* dst);
template
void cuda_max<double>(const int N, const double* src, double* dst);
template<typename Dtype>
void cuda_min(const int N, const Dtype* src, Dtype* dst){
int dimGrid=NNDCT_GET_BLOCKS1D(N);
hipLaunchKernelGGL(( _vec_transform_reduce), dim3(dimGrid), dim3(CU1DBLOCK), 0, 0,
N, src, dst, TransReduceOp<MIN, Dtype>());
hipLaunchKernelGGL(( _single_reduce), dim3(1), dim3(1), 0, 0,
dimGrid, dst, TransReduceOp<MIN, Dtype>());
}
template
void cuda_min<float>(const int N, const float* src, float* dst);
template
void cuda_min<double>(const int N, const double* src, double* dst);
template<typename Dtype>
void cuda_sum(const int N, const Dtype* src, Dtype* dst){
int dimGrid=NNDCT_GET_BLOCKS1D(N);
hipLaunchKernelGGL(( _vec_transform_reduce), dim3(dimGrid),dim3(CU1DBLOCK), 0, 0,
N, src, dst, TransReduceOp<SUM, Dtype>());
hipLaunchKernelGGL(( _single_reduce), dim3(1), dim3(1), 0, 0,
dimGrid, dst, TransReduceOp<SUM, Dtype>());
}
template
void cuda_sum<float>(const int N, const float* src, float* dst);
template
void cuda_sum<double>(const int N, const double* src, double* dst);
template<typename Dtype>
void cuda_sum_inplace(const int N, Dtype* data){
int dimGrid = NNDCT_GET_BLOCKS1D(N);
hipLaunchKernelGGL(( _vec_transform_reduce_inplace), dim3(dimGrid), dim3(CU1DBLOCK), 0, 0,
N, data, TransReduceOp<SUM, Dtype>());
hipLaunchKernelGGL(( _single_reduce), dim3(1), dim3(1), 0, 0,
dimGrid, data, TransReduceOp<SUM, Dtype>());
}
template
void cuda_sum_inplace<float>(const int N, float* data);
template
void cuda_sum_inplace<double>(const int N, double* data);
template<typename Dtype>
void cuda_sub(const int N, const Dtype* src, Dtype* dst){
hipLaunchKernelGGL(( _sub), dim3(NNDCT_GET_BLOCKS(N)), dim3(NNDCT_CUDA_NUM_THREADS), 0, 0,
N, src, dst);
}
template
void cuda_sub<float>(const int N, const float* src, float* dst);
template
void cuda_sub<double>(const int N, const double* src, double* dst);
|
1861d6d67c8f80fd04eca15cefe6c28eebc7a360.cu
|
/*
* Copyright 2019 Xilinx Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <math.h>
#include <algorithm>
#include "../../include/cuda/nndct_fix_kernels.cuh"
#include "../../include/cuda/nndct_cu_utils.h"
#include "../../include/cuda/nndct_cuda_math.h"
#ifdef __HIP_PLATFORM_AMD__
#define CUDART_INF_F __int_as_float(0x7f800000)
#define CUDART_INF __longlong_as_double(0x7ff0000000000000ULL)
#else
#include <math_constants.h>
#endif
template<typename Dtype>
__global__ static void _set(const int N,
Dtype* data,
Dtype val){
NNDCT_KERNEL_LOOP(index, N){
data[index] = val;
}
}
template<typename Dtype>
__global__ static void _scale_inplace(const int N,
Dtype* data,
Dtype scale){
NNDCT_KERNEL_LOOP(index, N){
data[index] *= scale;
}
}
template<typename Dtype>
__global__ static void _scale(const int N,
const Dtype* src,
Dtype* dst,
Dtype scale){
NNDCT_KERNEL_LOOP(index, N){
dst[index] = scale * src[index];
}
}
template<typename Dtype>
__global__ static void _sub(const int N,
const Dtype* src,
Dtype* dst){
NNDCT_KERNEL_LOOP(index, N){
dst[index] = src[index] - dst[index];
}
}
template<typename Dtype>
__global__ static void _pow(const int N,
Dtype* data,
Dtype power){
NNDCT_KERNEL_LOOP(index, N){
data[index] = pow(data[index], power);
}
}
//from kaldi, reduction without device handle
enum EnumTransformReduce {
SUMAB, SUM, MAX, MIN, LINFNORM, L2NORM, L1NORM, L0NORM, LPNORM
};
template<EnumTransformReduce TransReduceType, typename Dtype>
struct TransReduceOp {
__forceinline__
__device__ Dtype InitValue() const {
return Dtype(0);
}
__forceinline__
__device__ Dtype Transform(const Dtype& x) const {
return Dtype(0);
}
__forceinline__
__device__ Dtype Reduce(const Dtype& a, const Dtype& b) const {
return Dtype(0);
}
__forceinline__
__device__ Dtype PostReduce(const Dtype& x, const Dtype& output) const {
return Dtype(0);
}
};
template<typename Dtype>
struct TransReduceOp<SUM, Dtype> {
__forceinline__
__device__ Dtype InitValue() const {
return Dtype(0);
}
__forceinline__
__device__ Dtype Transform(const Dtype& x) const {
return x;
}
__forceinline__
__device__ Dtype Reduce(const Dtype& a, const Dtype& b) const {
return a + b;
}
__forceinline__
__device__ Dtype PostReduce(const Dtype& x, const Dtype& output) const {
return x;
}
};
template<typename Dtype>
struct TransReduceOp<MAX, Dtype> {
__forceinline__
__device__ Dtype InitValue() const {
return sizeof(Dtype) == sizeof(float) ? -CUDART_INF_F : -CUDART_INF;
}
__forceinline__
__device__ Dtype Transform(const Dtype& x) const {
return x;
}
__forceinline__
__device__ Dtype Reduce(const Dtype& a, const Dtype& b) const {
return fmax(a, b);
}
__forceinline__
__device__ Dtype PostReduce(const Dtype& x, const Dtype& output) const {
return x;
}
};
template<typename Dtype>
struct TransReduceOp<MIN, Dtype> {
__forceinline__
__device__ Dtype InitValue() const {
return sizeof(Dtype) == sizeof(float) ? CUDART_INF_F : CUDART_INF;
}
__forceinline__
__device__ Dtype Transform(const Dtype& x) const {
return x;
}
__forceinline__
__device__ Dtype Reduce(const Dtype& a, const Dtype& b) const {
return min(a, b);
}
__forceinline__
__device__ Dtype PostReduce(const Dtype& x, const Dtype& output) const {
return x;
}
};
template<EnumTransformReduce TransReduceType, typename Dtype>
__global__
static void _vec_transform_reduce(const int dim,const Dtype* src, Dtype* dst,
const TransReduceOp<TransReduceType, Dtype> op) {
__shared__ Dtype sdata[CU1DBLOCK];
Dtype tdata = op.InitValue();
const int tid = threadIdx.x;
const int vec_len = dim;
const int grid_stride = gridDim.x * blockDim.x;
int i = (blockIdx.x * blockDim.x + tid);
// Grid reduce. Loop over the whole vector v.
for (; i < vec_len; i += grid_stride) {
tdata = op.Reduce(tdata, op.Transform(src[i]));
}
sdata[tid] = tdata;
__syncthreads();
// Tree reduce
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]);
}
__syncthreads();
}
// Reduce last warp.
if (tid < warpSize) {
for (int shift = warpSize; shift > 0; shift >>= 1) {
sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]);
#ifdef __HIP_PLATFORM_AMD__
__threadfence_block();
#endif
}
}
// Output to vector dst.
if (tid == 0)
dst[blockIdx.x] = op.PostReduce(sdata[0], dst[blockIdx.x]);
}
template<EnumTransformReduce TransReduceType, typename Dtype>
__global__
static void _vec_transform_reduce_inplace(const int dim,Dtype* data,
const TransReduceOp<TransReduceType, Dtype> op) {
__shared__ Dtype sdata[CU1DBLOCK];
Dtype tdata = op.InitValue();
const int tid = threadIdx.x;
const int vec_len = dim;
const int grid_stride = gridDim.x * blockDim.x;
int i = (blockIdx.x * blockDim.x + tid);
// Grid reduce. Loop over the whole vector v.
for (; i < vec_len; i += grid_stride) {
tdata = op.Reduce(tdata, op.Transform(data[i]));
data[i]=0;
}
sdata[tid] = tdata;
__syncthreads();
// Tree reduce
# pragma unroll
for (int shift = CU1DBLOCK / 2; shift > warpSize; shift >>= 1) {
if (tid < shift) {
sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]);
}
__syncthreads();
}
// Reduce last warp.
if (tid < warpSize) {
for (int shift = warpSize; shift > 0; shift >>= 1) {
sdata[tid] = op.Reduce(sdata[tid], sdata[tid + shift]);
#ifdef __HIP_PLATFORM_AMD__
__threadfence_block();
#endif
}
}
// Output to vector dst.
if (tid == 0){
data[blockIdx.x] = op.PostReduce(sdata[0], data[blockIdx.x]);
}
}
template<EnumTransformReduce TransReduceType, typename Dtype>
__global__ static void _single_reduce(const int dim, Dtype* dst,
const TransReduceOp<TransReduceType, Dtype> op){
for(int i = 1; i < dim; i++){
dst[0] = op.Reduce(dst[0], dst[i]);
dst[i] = 0;
}
}
template<typename Dtype>
void cuda_set(const int N, Dtype* data, Dtype val){
_set<<<NNDCT_GET_BLOCKS(N), NNDCT_CUDA_NUM_THREADS>>>(
N, data, val);
}
template
void cuda_set<float>(const int N, float* data, float val);
template
void cuda_set<double>(const int N, double* data, double val);
template<typename Dtype>
void cuda_scale_inplace(const int N, Dtype* data, Dtype scale){
_scale_inplace<<<NNDCT_GET_BLOCKS(N), NNDCT_CUDA_NUM_THREADS>>>(
N, data, scale);
}
template
void cuda_scale_inplace<float>(const int N, float* data, float scale);
template
void cuda_scale_inplace<double>(const int N, double* data, double scale);
template<typename Dtype>
void cuda_scale(const int N, const Dtype* src, Dtype* dst, Dtype scale){
_scale<<<NNDCT_GET_BLOCKS(N), NNDCT_CUDA_NUM_THREADS>>>(
N, src, dst, scale);
}
template
void cuda_scale<float>(const int N, const float* src, float* dst, float scale);
template
void cuda_scale<double>(const int N, const double* src, double* dst, double scale);
template<typename Dtype>
void cuda_pow(const int N, Dtype* data, Dtype pow){
_pow<<<NNDCT_GET_BLOCKS(N), NNDCT_CUDA_NUM_THREADS>>>(
N, data, pow);
}
template
void cuda_pow<float>(const int N, float* data, float pow);
template
void cuda_pow<double>(const int N, double* data, double pow);
template<typename Dtype>
void cuda_max(const int N, const Dtype* src, Dtype* dst){
int dimGrid=NNDCT_GET_BLOCKS1D(N);
_vec_transform_reduce<<<dimGrid, CU1DBLOCK>>>(
N, src, dst, TransReduceOp<MAX, Dtype>());
_single_reduce<<<1, 1>>>(
dimGrid, dst, TransReduceOp<MAX, Dtype>());
}
template
void cuda_max<float>(const int N, const float* src, float* dst);
template
void cuda_max<double>(const int N, const double* src, double* dst);
template<typename Dtype>
void cuda_min(const int N, const Dtype* src, Dtype* dst){
int dimGrid=NNDCT_GET_BLOCKS1D(N);
_vec_transform_reduce<<<dimGrid, CU1DBLOCK>>>(
N, src, dst, TransReduceOp<MIN, Dtype>());
_single_reduce<<<1, 1>>>(
dimGrid, dst, TransReduceOp<MIN, Dtype>());
}
template
void cuda_min<float>(const int N, const float* src, float* dst);
template
void cuda_min<double>(const int N, const double* src, double* dst);
template<typename Dtype>
void cuda_sum(const int N, const Dtype* src, Dtype* dst){
int dimGrid=NNDCT_GET_BLOCKS1D(N);
_vec_transform_reduce<<<dimGrid,CU1DBLOCK>>>(
N, src, dst, TransReduceOp<SUM, Dtype>());
_single_reduce<<<1, 1>>>(
dimGrid, dst, TransReduceOp<SUM, Dtype>());
}
template
void cuda_sum<float>(const int N, const float* src, float* dst);
template
void cuda_sum<double>(const int N, const double* src, double* dst);
template<typename Dtype>
void cuda_sum_inplace(const int N, Dtype* data){
int dimGrid = NNDCT_GET_BLOCKS1D(N);
_vec_transform_reduce_inplace<<<dimGrid, CU1DBLOCK>>>(
N, data, TransReduceOp<SUM, Dtype>());
_single_reduce<<<1, 1>>>(
dimGrid, data, TransReduceOp<SUM, Dtype>());
}
template
void cuda_sum_inplace<float>(const int N, float* data);
template
void cuda_sum_inplace<double>(const int N, double* data);
template<typename Dtype>
void cuda_sub(const int N, const Dtype* src, Dtype* dst){
_sub<<<NNDCT_GET_BLOCKS(N), NNDCT_CUDA_NUM_THREADS>>>(
N, src, dst);
}
template
void cuda_sub<float>(const int N, const float* src, float* dst);
template
void cuda_sub<double>(const int N, const double* src, double* dst);
|
00189fc96e206d68c3d6e9442c9cbbf2c2898eba.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Zhenyuan Shen created 2015/11/09
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <fstream>
#include <iostream>
#include <cassert>
#include <fstream>
#include <cstring>
#include <ccv_image.h>
#define HISTOGRAM_LENGTH 256
#define BLOCK_SIZE 256
// convert data from float to unsigned char
__global__ void cvtFltToUchar(float* inputImg, unsigned char* outputImg, int len)
{
int t = blockIdx.x*blockDim.x+threadIdx.x;
if(t<len)
{
outputImg[t] = (unsigned char)(255*inputImg[t]);
}
/* test code 1
if (t/3<10)
printf("%d-th thread: %d\n", t, outputImg[t]);
*/
}
// convert color image to gray image
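// (the weights approximate the BT.709 luma coefficients: 0.21 R + 0.71 G + 0.07 B)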
__global__ void cvtClrToGray(unsigned char* outputImg, unsigned char* inputImg, int width, int height, int channels)
{
int col = blockIdx.x*blockDim.x+threadIdx.x;
int row = blockIdx.y*blockDim.y+threadIdx.y;
int idx = row*width+col;
if(row < height && col < width && channels == 3)
{
outputImg[idx] = 0.21*inputImg[idx*3] + 0.71*inputImg[idx*3+1] + 0.07*inputImg[idx*3+2];
}
/* test code 2
if(idx<10)
{
printf("=%d\n",outputImg[idx]);
}
*/
}
// compute the histogram
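// Each block first accumulates into a privatized shared-memory histogram and
// then merges it into the global histogram with one atomicAdd per bin, which
// keeps contention on global memory low.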
__global__ void histo_kernel(unsigned char* buffer, long size, unsigned int* histo)
{
__shared__ unsigned int histo_private[256];
if(threadIdx.x < 256)
histo_private[threadIdx.x] = 0;
__syncthreads();
int i = threadIdx.x + blockIdx.x * blockDim.x;
// stride is total number of threads
int stride = blockDim.x * gridDim.x;
while(i < size)
{
atomicAdd(&(histo_private[buffer[i]]), 1);
i += stride;
}
// wait for all other threads in the block to finish
__syncthreads();
if(threadIdx.x < 256)
{
atomicAdd(&(histo[threadIdx.x]), histo_private[threadIdx.x]);
}
}
//
__device__ float p(unsigned int x, int width, int height)
{
return float(x) / (width * height);
}
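// Work-efficient (Brent-Kung) inclusive scan over the 256-bin histogram;
// p() normalises each prefix count by the pixel count, so the output is the
// cumulative distribution function of the grey levels.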
__global__ void scan(unsigned int * input, float * output, int len, int width, int height)
{
__shared__ int XY[2*BLOCK_SIZE];
unsigned int t = threadIdx.x;
unsigned int start = 2*blockDim.x*blockIdx.x;
assert(BLOCK_SIZE == blockDim.x);
XY[t] = (start+t < len)? input[start + t] : 0;
XY[t+blockDim.x] = (start+blockDim.x+t < len)? input[start + blockDim.x + t] : 0;
__syncthreads();
// Reduction Phase
for (int stride = 1; stride <= BLOCK_SIZE; stride *= 2) {
int index = (threadIdx.x+1)*stride*2 - 1;
assert(index >= stride); // SZY add
if(index < 2*BLOCK_SIZE)
XY[index] += XY[index-stride];
__syncthreads();
}
// Post Reduction Phase
for (int stride = BLOCK_SIZE/2; stride > 0; stride /= 2) {
__syncthreads();
int index = (threadIdx.x+1)*stride*2 - 1;
if(index+stride < 2*BLOCK_SIZE)
{
XY[index + stride] += XY[index];
}
}
__syncthreads();
//if (i < len) output[i] = XY[threadIdx.x];
// Recording the values to output
if(start < len){
output[start+t] = p(XY[t], width, height);
}
if((start + BLOCK_SIZE) < len){
output[start + BLOCK_SIZE + t] = p(XY[t + BLOCK_SIZE], width, height);
}
}
/* Redundant for this case, since the minimum of CDF(monotonically increasing) is the first element input[0] */
/*
__global__ void minInCDF(float * input, float * output, int len) {
__shared__ int partialMin[2*BLOCK_SIZE];
unsigned int t = threadIdx.x;
unsigned int start = 2*blockDim.x*blockIdx.x;
assert(BLOCK_SIZE == blockDim.x);
partialMin[t] = (start+t < len)? input[start + t] : 0;
partialMin[t+blockDim.x] = (start+blockDim.x+t < len)? input[start + blockDim.x + t] : 0;
for (unsigned int stride = blockDim.x; stride > 0; stride /= 2)
{
__syncthreads();
if (t < stride)
partialMin[t] = (partialMin[t] < partialMin[t+stride]) ? partialMin[t] : partialMin[t+stride];
}
output[blockIdx.x] = partialMin[0];
}
*/
__device__ float clamp(float x, float start, float end)
{
return min(max(x, start), end);
}
__global__ void histEqual(float * outputCDF, int len)
{
int t = blockIdx.x*blockDim.x+threadIdx.x;
float minCDF = outputCDF[0]; // compute the minimum of outputCDF
if(t<len)
{
//printf("Before: %d-th thread: %f\n", t, outputCDF[t]);
outputCDF[t] = clamp(255*(outputCDF[t]-minCDF)/(1-minCDF), 0.0, 255.0);
//printf("After: %d-th thread: %f\n", t, outputCDF[t]);
}
__syncthreads();
outputCDF[0] = 0.0f;
// test code 6
/*
if (t<len)
printf("%d-th thread: %f\n", t, outputCDF[t]);
printf("The minimum of CDF is %f\n", minCDF);
*/
}
__global__ void applyEqualAndCastBack(float* outputImg, unsigned char* inputImg, float* inputCDF, int len)
{
int t = blockIdx.x*blockDim.x+threadIdx.x;
if(t<len)
{
assert(inputImg[t] < HISTOGRAM_LENGTH);
outputImg[t] = (float)(inputCDF[inputImg[t]]/255.0);
}
}
//------------------------------------------------------------------------------------
// Main function
//------------------------------------------------------------------------------------
int main(int argc, char ** argv) {
    wbArg_t args; /* filled in by wbArg_read below */
int imageWidth;
int imageHeight;
int imageChannels;
ccvImage inputImage;
ccvImage outputImage;
float * hostInputImageData;
float * hostOutputImageData;
const char * inputImageFile;
args = wbArg_read(argc, argv); /* parse the input arguments */
inputImageFile = wbArg_getInputFile(args, 0);
wbTime_start(Generic, "Importing data and creating memory on host");
inputImage = wbImport(inputImageFile);
imageWidth = wbImage_getWidth(inputImage);
imageHeight = wbImage_getHeight(inputImage);
imageChannels = wbImage_getChannels(inputImage);
outputImage = wbImage_new(imageWidth, imageHeight, imageChannels);
wbTime_stop(Generic, "Importing data and creating memory on host");
//@@ insert code here
hostInputImageData = wbImage_getData(inputImage);
hostOutputImageData = wbImage_getData(outputImage);
for(int i=0; i<imageWidth; i++)
for(int j=0; j<imageHeight; j++)
for(int c=0; c<imageChannels; c++)
{
if ((i*imageWidth+j)<10)
wbLog(TRACE, "The value of C at i= ",i,", j= ",j, " is: ",hostInputImageData[(i*imageWidth+j)*3+c]);
}
// 1. Cast the image from float to unsigned char
dim3 dimBlock(BLOCK_SIZE,1);
dim3 dimGrid((imageWidth*imageHeight*imageChannels-1)/BLOCK_SIZE+1,1);
float * deviceInputImageData;
unsigned char * deviceOutputImageData;
hipMalloc((void **) &deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float));
hipMalloc((void **) &deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(unsigned char));
hipMemcpy(deviceInputImageData,
hostInputImageData,
imageWidth * imageHeight * imageChannels * sizeof(float),
hipMemcpyHostToDevice);
hipLaunchKernelGGL(( cvtFltToUchar), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceInputImageData, deviceOutputImageData, imageWidth*imageHeight*imageChannels);
hipDeviceSynchronize();
/*
hipMemcpy(hostOutputImageData,
deviceOutputImageData,
imageWidth * imageHeight * imageChannels * sizeof(unsigned char),
hipMemcpyDeviceToHost);
*/
/*
// test code 1
unsigned char *testOutput1 = new unsigned char[imageWidth * imageHeight * imageChannels];
hipMemcpy(testOutput1,
deviceOutputImageData,
imageWidth * imageHeight * imageChannels * sizeof(unsigned char),
hipMemcpyDeviceToHost);
for(int i=0; i<imageWidth; i++)
for(int j=0; j<imageHeight; j++)
for(int c=0; c<imageChannels; c++)
{
if ((i*imageWidth+j)<10)
printf("The value of C at i= %d, j= %d is: %d\n",i,j,testOutput1[(i*imageWidth+j)*3+c]);
}
delete[] testOutput1;
// end of test code 1
*/
//--------------------------------------------------------------------------------
// 2. Convert the image from RGB to GrayScale
    // use a 16x16 tile so the 2D block stays within the 1024-thread limit
    dimBlock = dim3(16,16);
    dimGrid = dim3((imageWidth-1)/16+1,(imageHeight-1)/16+1);
unsigned char * deviceOutputImgGray;
hipMalloc((void **) &deviceOutputImgGray, imageWidth * imageHeight * sizeof(unsigned char));
hipLaunchKernelGGL(( cvtClrToGray), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceOutputImgGray, deviceOutputImageData, imageWidth, imageHeight, imageChannels);
hipDeviceSynchronize();
/*
// test code 2
unsigned char *testOutput2 = new unsigned char[imageWidth * imageHeight];
hipMemcpy(testOutput2,
deviceOutputImgGray,
imageWidth * imageHeight * sizeof(unsigned char),
hipMemcpyDeviceToHost);
for(int i=0; i<imageWidth; i++)
for(int j=0; j<imageHeight; j++)
{
if ((i*imageWidth+j)<10)
printf("The value of pixel at i= %d, j= %d is: %d\n",i,j,testOutput2[i*imageWidth+j]);
}
delete[] testOutput2;
// end of test code 2
*/
//--------------------------------------------------------------------------------
// 3. Compute the histogram of grayImage
unsigned int* deviceHist;
hipMalloc((void **) &deviceHist, HISTOGRAM_LENGTH*sizeof(unsigned int));
dimBlock = dim3(HISTOGRAM_LENGTH);
dimGrid = dim3((imageWidth*imageHeight-1)/HISTOGRAM_LENGTH+1);
hipLaunchKernelGGL(( histo_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceOutputImgGray, imageWidth*imageHeight, deviceHist);
hipDeviceSynchronize();
/*
// test code 3
unsigned int hostHist[HISTOGRAM_LENGTH];
hipMemcpy(hostHist,
deviceHist,
HISTOGRAM_LENGTH * sizeof(unsigned int),
hipMemcpyDeviceToHost);
for(int i=0; i<HISTOGRAM_LENGTH; i++)
printf("The %d-th value of the hist is: %d\n", i, hostHist[i]);
// end of test code 3
*/
//--------------------------------------------------------------------------------
// 4. Compute the Cumulative Distribution Function of histogram
dimBlock = dim3(BLOCK_SIZE,1);
dimGrid = dim3((HISTOGRAM_LENGTH-1)/(2*BLOCK_SIZE)+1,1); // dimGrid = (1,1)
// Thus, no need to combine the scan results of each thread block (covering 2*BLOCK_SIZE outputs)
float * deviceCDF;
hipMalloc((void **) &deviceCDF, HISTOGRAM_LENGTH*sizeof(float));
hipLaunchKernelGGL(( scan), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceHist, deviceCDF, HISTOGRAM_LENGTH, imageWidth, imageHeight);
hipDeviceSynchronize();
/*
// test code 4
float hostCDF[HISTOGRAM_LENGTH];
hipMemcpy(hostCDF,
deviceCDF,
HISTOGRAM_LENGTH * sizeof(float),
hipMemcpyDeviceToHost);
for(int i=0; i<HISTOGRAM_LENGTH; i++)
printf("The %d-th value of the hist is: %f\n", i, hostCDF[i]);
// end of test code 4
*/
//--------------------------------------------------------------------------------
// 5. Compute the minimum value of the CDF
/* Redundant but ok */
/*
int numInputElements = HISTOGRAM_LENGTH;
int numOutputElements = numInputElements / (BLOCK_SIZE<<1);
if (numInputElements % (BLOCK_SIZE<<1)) {
numOutputElements++;
}
dimBlock = dim3(BLOCK_SIZE,1);
dimGrid = dim3(numOutputElements,1);
float * devMinCDF;
float * hostMinCDF;
hipMalloc((void **) &devMinCDF, numOutputElements*sizeof(float));
hostMinCDF = (float*) malloc(numOutputElements * sizeof(float));
minInCDF<<<dimGrid, dimBlock>>>(deviceCDF, devMinCDF, numInputElements);
hipDeviceSynchronize();
hipMemcpy(hostMinCDF, devMinCDF, numOutputElements*sizeof(float), hipMemcpyDeviceToHost);
for (int ii = 1; ii < numOutputElements; ii++) {
if (hostMinCDF[0] == 0.0f)
break;
hostMinCDF[0] = (hostMinCDF[0]<hostMinCDF[ii]) ? hostMinCDF[0] : hostMinCDF[ii];
}
// test code 5
printf("The minimum of CDF is %f\n", hostMinCDF[0]);
// end of test code 5
*/
//--------------------------------------------------------------------------------
// 6. Define the histogram equalization function
dimBlock = dim3(HISTOGRAM_LENGTH,1);
dimGrid = dim3(1,1);
hipLaunchKernelGGL(( histEqual), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceCDF, HISTOGRAM_LENGTH);
hipDeviceSynchronize();
//--------------------------------------------------------------------------------
// 7. Apply the histogram equalization function and Cast back to float
dimBlock = dim3(BLOCK_SIZE,1);
dimGrid = dim3((imageWidth*imageHeight*imageChannels-1)/BLOCK_SIZE+1,1);
float * deviceOutputImgClr;
hipMalloc((void **) &deviceOutputImgClr, imageWidth*imageHeight*imageChannels*sizeof(float));
hipLaunchKernelGGL(( applyEqualAndCastBack), dim3(dimGrid), dim3(dimBlock), 0, 0, deviceOutputImgClr, deviceOutputImageData, deviceCDF, imageWidth*imageHeight*imageChannels);
hipDeviceSynchronize();
hipMemcpy(hostOutputImageData,
deviceOutputImgClr,
imageWidth * imageHeight * imageChannels * sizeof(float),
hipMemcpyDeviceToHost);
wbSolution(args, outputImage);
hipFree(deviceInputImageData);
hipFree(deviceOutputImageData);
hipFree(deviceOutputImgGray);
hipFree(deviceHist);
hipFree(deviceCDF);
hipFree(deviceOutputImgClr);
return 0;
}
|
00189fc96e206d68c3d6e9442c9cbbf2c2898eba.cu
|
/*
* Zhenyuan Shen created 2015/11/09
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <float.h>
#include <fstream>
#include <iostream>
#include <cassert>
#include <cstring>
#include <ccv_image.h>
#include <wb.h> // assumed libwb header for the wbArg_*, wbImport, wbImage_*, wbLog, wbTime_*, wbSolution helpers used below
#define HISTOGRAM_LENGTH 256
#define BLOCK_SIZE 256
// convert data from float to unsigned char
__global__ void cvtFltToUchar(float* inputImg, unsigned char* outputImg, int len)
{
int t = blockIdx.x*blockDim.x+threadIdx.x;
if(t<len)
{
outputImg[t] = (unsigned char)(255*inputImg[t]);
}
/* test code 1
if (t/3<10)
printf("%d-th thread: %d\n", t, outputImg[t]);
*/
}
// convert color image to gray image
__global__ void cvtClrToGray(unsigned char* outputImg, unsigned char* inputImg, int width, int height, int channels)
{
int col = blockIdx.x*blockDim.x+threadIdx.x;
int row = blockIdx.y*blockDim.y+threadIdx.y;
int idx = row*width+col;
if(row < height && col < width && channels == 3)
{
outputImg[idx] = 0.21*inputImg[idx*3] + 0.71*inputImg[idx*3+1] + 0.07*inputImg[idx*3+2];
}
/* test code 2
if(idx<10)
{
printf("=%d\n",outputImg[idx]);
}
*/
}
// compute the histogram
__global__ void histo_kernel(unsigned char* buffer, long size, unsigned int* histo)
{
__shared__ unsigned int histo_private[256];
if(threadIdx.x < 256)
histo_private[threadIdx.x] = 0;
__syncthreads();
int i = threadIdx.x + blockIdx.x * blockDim.x;
// stride is total number of threads
int stride = blockDim.x * gridDim.x;
while(i < size)
{
atomicAdd(&(histo_private[buffer[i]]), 1);
i += stride;
}
// wait for all other threads in the block to finish
__syncthreads();
if(threadIdx.x < 256)
{
atomicAdd(&(histo[threadIdx.x]), histo_private[threadIdx.x]);
}
}
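// The kernel above uses the privatized-histogram pattern: each block accumulates into a
// shared-memory copy of the 256 bins (one atomicAdd per pixel within the block), then merges
// its private bins into the global histogram with a single atomicAdd per bin, which reduces
// contention on global memory.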
// p(): normalize one histogram count to a probability, i.e. count / (total number of pixels)
__device__ float p(unsigned int x, int width, int height)
{
return float(x) / (width * height);
}
__global__ void scan(unsigned int * input, float * output, int len, int width, int height)
{
__shared__ int XY[2*BLOCK_SIZE];
unsigned int t = threadIdx.x;
unsigned int start = 2*blockDim.x*blockIdx.x;
assert(BLOCK_SIZE == blockDim.x);
XY[t] = (start+t < len)? input[start + t] : 0;
XY[t+blockDim.x] = (start+blockDim.x+t < len)? input[start + blockDim.x + t] : 0;
__syncthreads();
// Reduction Phase
for (int stride = 1; stride <= BLOCK_SIZE; stride *= 2) {
int index = (threadIdx.x+1)*stride*2 - 1;
assert(index >= stride); // SZY add
if(index < 2*BLOCK_SIZE)
XY[index] += XY[index-stride];
__syncthreads();
}
// Post Reduction Phase
for (int stride = BLOCK_SIZE/2; stride > 0; stride /= 2) {
__syncthreads();
int index = (threadIdx.x+1)*stride*2 - 1;
if(index+stride < 2*BLOCK_SIZE)
{
XY[index + stride] += XY[index];
}
}
__syncthreads();
//if (i < len) output[i] = XY[threadIdx.x];
// Recording the values to output
if(start < len){
output[start+t] = p(XY[t], width, height);
}
if((start + BLOCK_SIZE) < len){
output[start + BLOCK_SIZE + t] = p(XY[t + BLOCK_SIZE], width, height);
}
}
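// The scan above is a Brent-Kung style work-efficient inclusive scan over up to 2*BLOCK_SIZE
// elements. Since HISTOGRAM_LENGTH (256) <= 2*BLOCK_SIZE (512), a single block covers the whole
// histogram, and each running sum is normalized by p(), so the output array is the cumulative
// distribution function (CDF) of pixel intensities.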
/* Redundant for this case, since the minimum of CDF(monotonically increasing) is the first element input[0] */
/*
__global__ void minInCDF(float * input, float * output, int len) {
__shared__ int partialMin[2*BLOCK_SIZE];
unsigned int t = threadIdx.x;
unsigned int start = 2*blockDim.x*blockIdx.x;
assert(BLOCK_SIZE == blockDim.x);
partialMin[t] = (start+t < len)? input[start + t] : 0;
partialMin[t+blockDim.x] = (start+blockDim.x+t < len)? input[start + blockDim.x + t] : 0;
for (unsigned int stride = blockDim.x; stride > 0; stride /= 2)
{
__syncthreads();
if (t < stride)
partialMin[t] = (partialMin[t] < partialMin[t+stride]) ? partialMin[t] : partialMin[t+stride];
}
output[blockIdx.x] = partialMin[0];
}
*/
__device__ float clamp(float x, float start, float end)
{
return min(max(x, start), end);
}
__global__ void histEqual(float * outputCDF, int len)
{
int t = blockIdx.x*blockDim.x+threadIdx.x;
float minCDF = outputCDF[0]; // compute the minimum of outputCDF
if(t<len)
{
//printf("Before: %d-th thread: %f\n", t, outputCDF[t]);
outputCDF[t] = clamp(255*(outputCDF[t]-minCDF)/(1-minCDF), 0.0, 255.0);
//printf("After: %d-th thread: %f\n", t, outputCDF[t]);
}
__syncthreads();
outputCDF[0] = 0.0f;
// test code 6
/*
if (t<len)
printf("%d-th thread: %f\n", t, outputCDF[t]);
printf("The minimum of CDF is %f\n", minCDF);
*/
}
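// histEqual rewrites the CDF in place into the equalization lookup table:
//   newValue(v) = clamp(255 * (cdf[v] - cdf[0]) / (1 - cdf[0]), 0.0, 255.0)
// cdf[0] serves as the minimum because the CDF is non-decreasing; entry 0 is then reset to 0.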
__global__ void applyEqualAndCastBack(float* outputImg, unsigned char* inputImg, float* inputCDF, int len)
{
int t = blockIdx.x*blockDim.x+threadIdx.x;
if(t<len)
{
assert(inputImg[t] < HISTOGRAM_LENGTH);
outputImg[t] = (float)(inputCDF[inputImg[t]]/255.0);
}
}
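// applyEqualAndCastBack looks up each unsigned-char channel value in the equalized CDF table
// and rescales the result back to a float in [0,1], producing the final equalized image data.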
//------------------------------------------------------------------------------------
// Main function
//------------------------------------------------------------------------------------
int main(int argc, char ** argv) {
	wbArg_t args;
int imageWidth;
int imageHeight;
int imageChannels;
ccvImage inputImage;
ccvImage outputImage;
float * hostInputImageData;
float * hostOutputImageData;
const char * inputImageFile;
args = wbArg_read(argc, argv); /* parse the input arguments */
inputImageFile = wbArg_getInputFile(args, 0);
wbTime_start(Generic, "Importing data and creating memory on host");
inputImage = wbImport(inputImageFile);
imageWidth = wbImage_getWidth(inputImage);
imageHeight = wbImage_getHeight(inputImage);
imageChannels = wbImage_getChannels(inputImage);
outputImage = wbImage_new(imageWidth, imageHeight, imageChannels);
wbTime_stop(Generic, "Importing data and creating memory on host");
//@@ insert code here
hostInputImageData = wbImage_getData(inputImage);
hostOutputImageData = wbImage_getData(outputImage);
for(int i=0; i<imageWidth; i++)
for(int j=0; j<imageHeight; j++)
for(int c=0; c<imageChannels; c++)
{
if ((i*imageWidth+j)<10)
wbLog(TRACE, "The value of C at i= ",i,", j= ",j, " is: ",hostInputImageData[(i*imageWidth+j)*3+c]);
}
// 1. Cast the image from float to unsigned char
dim3 dimBlock(BLOCK_SIZE,1);
dim3 dimGrid((imageWidth*imageHeight*imageChannels-1)/BLOCK_SIZE+1,1);
float * deviceInputImageData;
unsigned char * deviceOutputImageData;
cudaMalloc((void **) &deviceInputImageData, imageWidth * imageHeight * imageChannels * sizeof(float));
cudaMalloc((void **) &deviceOutputImageData, imageWidth * imageHeight * imageChannels * sizeof(unsigned char));
cudaMemcpy(deviceInputImageData,
hostInputImageData,
imageWidth * imageHeight * imageChannels * sizeof(float),
cudaMemcpyHostToDevice);
cvtFltToUchar<<<dimGrid, dimBlock>>>(deviceInputImageData, deviceOutputImageData, imageWidth*imageHeight*imageChannels);
cudaDeviceSynchronize();
/*
cudaMemcpy(hostOutputImageData,
deviceOutputImageData,
imageWidth * imageHeight * imageChannels * sizeof(unsigned char),
cudaMemcpyDeviceToHost);
*/
/*
// test code 1
unsigned char *testOutput1 = new unsigned char[imageWidth * imageHeight * imageChannels];
cudaMemcpy(testOutput1,
deviceOutputImageData,
imageWidth * imageHeight * imageChannels * sizeof(unsigned char),
cudaMemcpyDeviceToHost);
for(int i=0; i<imageWidth; i++)
for(int j=0; j<imageHeight; j++)
for(int c=0; c<imageChannels; c++)
{
if ((i*imageWidth+j)<10)
printf("The value of C at i= %d, j= %d is: %d\n",i,j,testOutput1[(i*imageWidth+j)*3+c]);
}
delete[] testOutput1;
// end of test code 1
*/
//--------------------------------------------------------------------------------
// 2. Convert the image from RGB to GrayScale
	// BLOCK_SIZE x BLOCK_SIZE (256x256) threads would exceed the 1024-threads-per-block limit,
	// so the 2D grayscale kernel uses a 16x16 tile instead.
	dimBlock = dim3(16,16);
	dimGrid = dim3((imageWidth-1)/16+1,(imageHeight-1)/16+1);
unsigned char * deviceOutputImgGray;
cudaMalloc((void **) &deviceOutputImgGray, imageWidth * imageHeight * sizeof(unsigned char));
cvtClrToGray<<<dimGrid, dimBlock>>>(deviceOutputImgGray, deviceOutputImageData, imageWidth, imageHeight, imageChannels);
cudaDeviceSynchronize();
/*
// test code 2
unsigned char *testOutput2 = new unsigned char[imageWidth * imageHeight];
cudaMemcpy(testOutput2,
deviceOutputImgGray,
imageWidth * imageHeight * sizeof(unsigned char),
cudaMemcpyDeviceToHost);
for(int i=0; i<imageWidth; i++)
for(int j=0; j<imageHeight; j++)
{
if ((i*imageWidth+j)<10)
printf("The value of pixel at i= %d, j= %d is: %d\n",i,j,testOutput2[i*imageWidth+j]);
}
delete[] testOutput2;
// end of test code 2
*/
//--------------------------------------------------------------------------------
// 3. Compute the histogram of grayImage
unsigned int* deviceHist;
cudaMalloc((void **) &deviceHist, HISTOGRAM_LENGTH*sizeof(unsigned int));
dimBlock = dim3(HISTOGRAM_LENGTH);
dimGrid = dim3((imageWidth*imageHeight-1)/HISTOGRAM_LENGTH+1);
histo_kernel<<<dimGrid, dimBlock>>>(deviceOutputImgGray, imageWidth*imageHeight, deviceHist);
cudaDeviceSynchronize();
/*
// test code 3
unsigned int hostHist[HISTOGRAM_LENGTH];
cudaMemcpy(hostHist,
deviceHist,
HISTOGRAM_LENGTH * sizeof(unsigned int),
cudaMemcpyDeviceToHost);
for(int i=0; i<HISTOGRAM_LENGTH; i++)
printf("The %d-th value of the hist is: %d\n", i, hostHist[i]);
// end of test code 3
*/
//--------------------------------------------------------------------------------
// 4. Compute the Cumulative Distribution Function of histogram
dimBlock = dim3(BLOCK_SIZE,1);
dimGrid = dim3((HISTOGRAM_LENGTH-1)/(2*BLOCK_SIZE)+1,1); // dimGrid = (1,1)
// Thus, no need to combine the scan results of each thread block (covering 2*BLOCK_SIZE outputs)
float * deviceCDF;
cudaMalloc((void **) &deviceCDF, HISTOGRAM_LENGTH*sizeof(float));
scan<<<dimGrid, dimBlock>>>(deviceHist, deviceCDF, HISTOGRAM_LENGTH, imageWidth, imageHeight);
cudaDeviceSynchronize();
/*
// test code 4
float hostCDF[HISTOGRAM_LENGTH];
cudaMemcpy(hostCDF,
deviceCDF,
HISTOGRAM_LENGTH * sizeof(float),
cudaMemcpyDeviceToHost);
for(int i=0; i<HISTOGRAM_LENGTH; i++)
printf("The %d-th value of the hist is: %f\n", i, hostCDF[i]);
// end of test code 4
*/
//--------------------------------------------------------------------------------
// 5. Compute the minimum value of the CDF
/* Redundant but ok */
/*
int numInputElements = HISTOGRAM_LENGTH;
int numOutputElements = numInputElements / (BLOCK_SIZE<<1);
if (numInputElements % (BLOCK_SIZE<<1)) {
numOutputElements++;
}
dimBlock = dim3(BLOCK_SIZE,1);
dimGrid = dim3(numOutputElements,1);
float * devMinCDF;
float * hostMinCDF;
cudaMalloc((void **) &devMinCDF, numOutputElements*sizeof(float));
hostMinCDF = (float*) malloc(numOutputElements * sizeof(float));
minInCDF<<<dimGrid, dimBlock>>>(deviceCDF, devMinCDF, numInputElements);
cudaDeviceSynchronize();
cudaMemcpy(hostMinCDF, devMinCDF, numOutputElements*sizeof(float), cudaMemcpyDeviceToHost);
for (int ii = 1; ii < numOutputElements; ii++) {
if (hostMinCDF[0] == 0.0f)
break;
hostMinCDF[0] = (hostMinCDF[0]<hostMinCDF[ii]) ? hostMinCDF[0] : hostMinCDF[ii];
}
// test code 5
printf("The minimum of CDF is %f\n", hostMinCDF[0]);
// end of test code 5
*/
//--------------------------------------------------------------------------------
// 6. Define the histogram equalization function
dimBlock = dim3(HISTOGRAM_LENGTH,1);
dimGrid = dim3(1,1);
histEqual<<<dimGrid, dimBlock>>>(deviceCDF, HISTOGRAM_LENGTH);
cudaDeviceSynchronize();
//--------------------------------------------------------------------------------
// 7. Apply the histogram equalization function and Cast back to float
dimBlock = dim3(BLOCK_SIZE,1);
dimGrid = dim3((imageWidth*imageHeight*imageChannels-1)/BLOCK_SIZE+1,1);
float * deviceOutputImgClr;
cudaMalloc((void **) &deviceOutputImgClr, imageWidth*imageHeight*imageChannels*sizeof(float));
applyEqualAndCastBack<<<dimGrid, dimBlock>>>(deviceOutputImgClr, deviceOutputImageData, deviceCDF, imageWidth*imageHeight*imageChannels);
cudaDeviceSynchronize();
cudaMemcpy(hostOutputImageData,
deviceOutputImgClr,
imageWidth * imageHeight * imageChannels * sizeof(float),
cudaMemcpyDeviceToHost);
wbSolution(args, outputImage);
cudaFree(deviceInputImageData);
cudaFree(deviceOutputImageData);
cudaFree(deviceOutputImgGray);
cudaFree(deviceHist);
cudaFree(deviceCDF);
cudaFree(deviceOutputImgClr);
return 0;
}
|
2dcf74ec022166b9f1f294ad1db7362763f49638.hip
|
// !!! This is a file automatically generated by hipify!!!
#include<iostream>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include "../inc/WeightedGraph.cuh"
WeightedGraph::WeightedGraph(int s,int p){
size=s;
hipMallocManaged(&adjmat,size*sizeof(float*));
srand(time(NULL));
for(int i=0;i<size;i++){
hipMallocManaged(&adjmat[i],size*sizeof(float));
for(int j=0;j<size;j++){
//Graph is supposed to be undirected
if(j<i){ adjmat[i][j]=adjmat[j][i]; }
else{
if(rand()%100<p){ adjmat[i][j]=(float)rand()/(float)RAND_MAX; }
else{ adjmat[i][j]=0; }
}
}
}
}
float **WeightedGraph::getAdjmat(){ return adjmat;}
int WeightedGraph::getSize(){ return size;}
void WeightedGraph::print(){
for(int i=0;i<size;i++){
for(int j=0;j<size;j++)
std::cout<<adjmat[i][j]<<" ";
std::cout<<"\n";
}
}
|
2dcf74ec022166b9f1f294ad1db7362763f49638.cu
|
#include<iostream>
#include <stdlib.h>
#include <time.h>
#include <math.h>
#include "../inc/WeightedGraph.cuh"
WeightedGraph::WeightedGraph(int s,int p){
size=s;
cudaMallocManaged(&adjmat,size*sizeof(float*));
srand(time(NULL));
for(int i=0;i<size;i++){
cudaMallocManaged(&adjmat[i],size*sizeof(float));
for(int j=0;j<size;j++){
//Graph is supposed to be undirected
if(j<i){ adjmat[i][j]=adjmat[j][i]; }
else{
if(rand()%100<p){ adjmat[i][j]=(float)rand()/(float)RAND_MAX; }
else{ adjmat[i][j]=0; }
}
}
}
}
float **WeightedGraph::getAdjmat(){ return adjmat;}
int WeightedGraph::getSize(){ return size;}
void WeightedGraph::print(){
for(int i=0;i<size;i++){
for(int j=0;j<size;j++)
std::cout<<adjmat[i][j]<<" ";
std::cout<<"\n";
}
}
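// Minimal usage sketch (hypothetical driver, not part of the original file):
//   WeightedGraph g(512, 30);   // 512 vertices, ~30% chance of an edge between any pair
//   g.print();                  // adjacency matrix lives in unified (managed) memory,
//                               // so it is readable from both host code and kernels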
|
c8adb3440129f456c50efdb2d05a6a4b8c4495e9.hip
|
// !!! This is a file automatically generated by hipify!!!
#include<stdio.h>
#include <time.h>
#include <hip/hip_runtime.h>
// Forward Declarations
#define BLOCKSIZE 1024
#ifndef Nsize
#define Nsize 1024
#endif
void printArray(int k);
__global__ void add(int d_a[], int *d_answer);
int* a;
int answer;
int main(){
hipError_t err;
int deviceCount;
err = hipGetDeviceCount(&deviceCount);
printf("Device count: %s\n",hipGetErrorString(err));
printf("There are %d devices\n", deviceCount);
err = hipSetDevice(0);
printf("Device selection: %s\n",hipGetErrorString(err));
a = (int*)malloc(Nsize * sizeof(int));
// Fill the array
int i; /* counter */
time_t t;
//srand((unsigned) time(&t));
for(i = 0; i < Nsize; i++)
a[i] = rand() % 23;
printArray(Nsize);
// Allocate space on the GPU
int* d_Array; /* d_ means "device" */
int* d_answer;
err = hipMalloc(&d_Array, Nsize * sizeof(int));
printf("Malloc device rules: %s\n",hipGetErrorString(err));
err = hipMalloc(&d_answer, sizeof(long));
printf("Malloc device rules: %s\n",hipGetErrorString(err));
// Copy the array to the card
// destination, then source
err = hipMemcpy(d_Array, a, Nsize * sizeof(int), hipMemcpyHostToDevice);
printf("cuda memory error: %s\n",hipGetErrorString(err));
err = hipMemcpy(d_answer, &answer, sizeof(int), hipMemcpyHostToDevice);
printf("cuda memory error: %s\n",hipGetErrorString(err));
// Set up the kernel
int blockSize = BLOCKSIZE;
int numBlocks = 1;
dim3 dimGrid(numBlocks);
dim3 dimBlock(blockSize);
// Launch the kernel
hipLaunchKernelGGL(( add) , dim3(dimGrid), dim3(dimBlock) , 0, 0, d_Array, d_answer);
// Retrieve the results from the card
err = hipMemcpy(&answer, d_answer, sizeof(int), hipMemcpyDeviceToHost);
printf("cuda memory error: %s\n",hipGetErrorString(err));
err = hipMemcpy(a, d_Array, Nsize*sizeof(int), hipMemcpyDeviceToHost);
printf("cuda memory error: %s\n",hipGetErrorString(err));
// Inspect the results.
printf("%i\n", answer);
printArray(20);
}
void printArray(int k){
int i;
for(i = 0; i < k; i++)
printf("%d ", a[i]);
printf("\n");
}
__global__ void add(int d_a[], int *d_answer){
int idx = threadIdx.x;
if(idx >= Nsize){
return;
}
__shared__ int a[BLOCKSIZE];
a[idx] = d_a[idx];
__syncthreads();
for (int i = 0; i < (log2f(BLOCKSIZE)); i++){
int neighbor = idx ^ (1<<i);
int his = 0;
if(neighbor >= Nsize){
his = 0;
}
else{
his = a[neighbor];
}
int my = a[idx];
int holder = my + his;
__syncthreads();
a[idx] = holder;
__syncthreads();
}
*d_answer = a[idx];
}
|
c8adb3440129f456c50efdb2d05a6a4b8c4495e9.cu
|
#include<stdio.h>
#include <time.h>
#include <cuda.h>
// Forward Declarations
#define BLOCKSIZE 1024
#ifndef Nsize
#define Nsize 1024
#endif
void printArray(int k);
__global__ void add(int d_a[], int *d_answer);
int* a;
int answer;
int main(){
cudaError_t err;
int deviceCount;
err = cudaGetDeviceCount(&deviceCount);
printf("Device count: %s\n",cudaGetErrorString(err));
printf("There are %d devices\n", deviceCount);
err = cudaSetDevice(0);
printf("Device selection: %s\n",cudaGetErrorString(err));
a = (int*)malloc(Nsize * sizeof(int));
// Fill the array
int i; /* counter */
time_t t;
//srand((unsigned) time(&t));
for(i = 0; i < Nsize; i++)
a[i] = rand() % 23;
printArray(Nsize);
// Allocate space on the GPU
int* d_Array; /* d_ means "device" */
int* d_answer;
err = cudaMalloc(&d_Array, Nsize * sizeof(int));
printf("Malloc device rules: %s\n",cudaGetErrorString(err));
err = cudaMalloc(&d_answer, sizeof(long));
printf("Malloc device rules: %s\n",cudaGetErrorString(err));
// Copy the array to the card
// destination, then source
err = cudaMemcpy(d_Array, a, Nsize * sizeof(int), cudaMemcpyHostToDevice);
printf("cuda memory error: %s\n",cudaGetErrorString(err));
err = cudaMemcpy(d_answer, &answer, sizeof(int), cudaMemcpyHostToDevice);
printf("cuda memory error: %s\n",cudaGetErrorString(err));
// Set up the kernel
int blockSize = BLOCKSIZE;
int numBlocks = 1;
dim3 dimGrid(numBlocks);
dim3 dimBlock(blockSize);
// Launch the kernel
add <<< dimGrid, dimBlock >>> (d_Array, d_answer);
// Retrieve the results from the card
err = cudaMemcpy(&answer, d_answer, sizeof(int), cudaMemcpyDeviceToHost);
printf("cuda memory error: %s\n",cudaGetErrorString(err));
err = cudaMemcpy(a, d_Array, Nsize*sizeof(int), cudaMemcpyDeviceToHost);
printf("cuda memory error: %s\n",cudaGetErrorString(err));
// Inspect the results.
printf("%i\n", answer);
printArray(20);
}
void printArray(int k){
int i;
for(i = 0; i < k; i++)
printf("%d ", a[i]);
printf("\n");
}
__global__ void add(int d_a[], int *d_answer){
int idx = threadIdx.x;
if(idx >= Nsize){
return;
}
__shared__ int a[BLOCKSIZE];
a[idx] = d_a[idx];
__syncthreads();
for (int i = 0; i < (log2f(BLOCKSIZE)); i++){
int neighbor = idx ^ (1<<i);
int his = 0;
if(neighbor >= Nsize){
his = 0;
}
else{
his = a[neighbor];
}
int my = a[idx];
int holder = my + his;
__syncthreads();
a[idx] = holder;
__syncthreads();
}
*d_answer = a[idx];
}
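// The kernel above is a butterfly (XOR) all-reduce in shared memory: on step i each thread adds
// the value held by its partner at index idx ^ (1<<i). After log2(BLOCKSIZE) steps every slot of
// a[] holds the total sum, so any thread can write *d_answer. It reduces a single block's data;
// with the default Nsize == BLOCKSIZE (1024) every thread participates in every __syncthreads().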
|
4f63a58a03d2d2cbab1062d7fb39ac971d993275.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "util.cuh"
#include "cutens.h"
#include "sign.cuh"
void cusign(cuftens *a)
{
const int BS=32;
const int len = cuftens_len(a);
hipLaunchKernelGGL(( ker_sign) , dim3(CEIL(len, BS)), dim3(BS), 0, 0, a->data, len);
}
|
4f63a58a03d2d2cbab1062d7fb39ac971d993275.cu
|
#include "util.cuh"
#include "cutens.h"
#include "sign.cuh"
void cusign(cuftens *a)
{
const int BS=32;
const int len = cuftens_len(a);
ker_sign <<<CEIL(len, BS), BS>>> (a->data, len);
}
|
1fa086059bd1e3dc4f5869fad5ef849cfb356f18.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/**
 *This project evaluates whether the GPU is a viable platform for DynaMIT.
 *It also supports the paper "Mesoscopic Traffic Simulation on GPU".
*/
#include "../components_on_cpu/network/network.h"
#include "../components_on_cpu/demand/od_pair.h"
#include "../components_on_cpu/demand/od_path.h"
#include "../components_on_cpu/demand/vehicle.h"
#include "../components_on_cpu/util/time_tools.h"
#include "../components_on_cpu/util/string_tools.h"
#include "../components_on_cpu/util/simulation_results.h"
#include "../components_on_cpu/util/shared_cpu_include.h"
#include "../components_on_gpu/on_GPU_kernal_safe.cuh"
#include "../components_on_gpu/supply/on_GPU_memory.h"
#include "../components_on_gpu/supply/on_GPU_vehicle.h"
#include "../components_on_gpu/supply/on_GPU_new_lane_vehicles.h"
#include "../components_on_gpu/util/shared_gpu_include.h"
#include "../components_on_gpu/util/on_gpu_configuration.h"
#include "../components_on_gpu/on_GPU_Macro.h"
#include <math.h>
using namespace std;
/**
* CUDA Execution Configuration
*/
int lane_blocks;
const int lane_threads_in_a_block = 128;
int node_blocks;
const int node_threads_in_a_block = 128;
int segment_blocks;
const int segment_threads_in_a_block = 128;
/*
* Demand
*/
Network* the_network;
std::vector<ODPair*> all_od_pairs;
std::vector<ODPairPATH*> all_od_paths;
std::vector<Vehicle*> all_vehicles;
/*
* Path Input Config
*/
std::string network_file_path = "data_inputs/New_NW_SG/NewNetWork2.dat";
std::string demand_file_path = "data_inputs/New_NW_SG/newDemand.dat";
std::string od_pair_paths_file_path = "data_inputs/New_NW_SG/paths.dat";
/*
* All data in GPU
*/
GPUMemory* gpu_data;
GPUMemory* data_local;
GPUSharedParameter* parameter_setting_on_gpu;
#ifdef ENABLE_CONSTANT_MEMORY
__constant__ GPUSharedParameter data_setting_gpu_constant;
#endif
//A large memory space is pre-defined in order to copy to GPU
GPUVehicle *vpool_cpu;
GPUVehicle *vpool_gpu;
//int *vpool_cpu_index;
//int *vpool_gpu_index;
/**
* Simulation Results
*/
std::string simulation_output_file_path = "output/test3.txt";
std::map<int, SimulationResults*> simulation_results_pool;
ofstream simulation_results_output_file;
//buffer is only used when kGPUToCPUSimulationResultsCopyBufferSize > 1
SimulationResults* simulation_results_buffer_on_gpu;
//Used for buffer at CPU side
SimulationResults* one_buffer = NULL;
/*
* GPU Streams
* stream1: GPU Supply Simulation
*/
hipStream_t stream_gpu_supply;
hipStream_t stream_gpu_io;
hipEvent_t gpu_supply_one_tick_simulation_done_trigger_event;
/*
* Time Management
*/
long simulation_start_time;
long simulation_end_time;
long simulation_time_step;
/*
 * to_simulate_time is the last time step that has already finished;
 * to_simulate_time + 1 may be the time step currently being simulated on the GPU
*/
long to_simulate_time;
/*
 * to_output_simulation_result_time is the last time step whose results have already been written out;
 * to_output_simulation_result_time + 1 may be the time step currently being output on the CPU
*/
long to_output_simulation_result_time;
/*
*/
//std::map<int, int> link_ID_to_link_Index;
//std::map<int, int> link_Index_to_link_ID;
//std::map<int, int> node_ID_to_node_Index;
//std::map<int, int> node_index_to_node_ID;
/*
* Define Major Functions
*/
bool InitParams(int argc, char* argv[]);
bool LoadInNetwork();
bool LoadInDemand();
bool InitilizeCPU();
bool InitilizeGPU();
bool InitGPUParameterSetting(GPUSharedParameter* data_setting_gpu);
bool InitGPUData(GPUMemory* data_local);
bool StartSimulation();
bool StartSimulationOptimizeWarp();
bool StartSimulationVP();
bool StartSimulationSynch();
bool DestroyResources();
/*
* Define Helper Functions
*/
StringTools* str_tools;
bool CopySimulatedResultsToCPU(int time_step);
bool CopyBufferSimulatedResultsToCPU(int time_step);
bool OutputSimulatedResults(int time_step);
bool OutputBufferedSimulatedResults(int time_step);
inline int TimestepToArrayIndex(int time_step) {
return (time_step - kStartTimeSteps) / kUnitTimeStep;
}
/*
* MAIN
*/
int main(int argc, char* argv[]) {
hipDeviceSetCacheConfig(hipFuncCachePreferL1);
cout << "GPU Program Starts" << endl;
if (InitParams(argc, argv) == false) {
cout << "InitParams fails" << endl;
return 0;
}
if (LoadInNetwork() == false) {
cout << "Loading network fails" << endl;
return 0;
}
if (LoadInDemand() == false) {
cout << "Loading demand fails" << endl;
return 0;
}
cout<<"Finished loading input files"<<endl;
if (InitilizeCPU() == false) {
cout << "InitilizeCPU fails" << endl;
return 0;
}
cout<<"Finished initializing CPU"<<endl;
if (InitilizeGPU() == false) {
cout << "InitilizeGPU fails" << endl;
return 0;
}
cout << "Finished initializing GPU"<<endl;
//create streams
//hipStreamCreate(&stream_gpu_supply);
//hipStreamCreate(&stream_gpu_io);
//create a event
//hipEventCreate(&gpu_supply_one_tick_simulation_done_trigger_event);
std::cout << "Simulation Starts" << std::endl;
//TimeTools profile;
//profile.start_profiling();
//Start Simulation (ETSF implemented inside)
if (StartSimulationOptimizeWarp() == false) {
cout << "Simulation Fails" << endl;
DestroyResources();
return 0;
}
//profile.end_profiling();
//profile.output();
DestroyResources();
cout << "Simulation Succeed!" << endl;
#ifdef _WIN32
system("pause");
#endif
return 0;
}
/**
*
*/
bool InitParams(int argc, char* argv[]) {
if (argc == 4) {
network_file_path = argv[1];
demand_file_path = argv[2];
od_pair_paths_file_path = argv[3];
std::cout << "network_file_path: "<<network_file_path<< std::endl;
std::cout << "demand_file_path: "<<demand_file_path<< std::endl;
std::cout << "od_pair_paths_file_path: "<<od_pair_paths_file_path<< std::endl;
}
return true;
}
bool LoadInNetwork() {
the_network = new Network();
return Network::load_network(*the_network, network_file_path);
}
bool LoadInDemand() {
//if (ODPair::load_in_all_ODs(all_od_pairs, od_pair_file_path) == false) {
// return false;
//}
if (ODPairPATH::load_in_all_OD_Paths(all_od_paths, od_pair_paths_file_path) == false) {
return false;
}
if (Vehicle::load_in_all_vehicles(all_vehicles, demand_file_path) == false) {
return false;
}
return true;
}
bool InitilizeCPU() {
simulation_start_time = 0;
simulation_end_time = kEndTimeSteps-kStartTimeSteps; // 1 hour
simulation_time_step = kUnitTimeStep;
assert(simulation_time_step == 1);
to_simulate_time = 0;
to_output_simulation_result_time = 0;
lane_blocks = kLaneSize / lane_threads_in_a_block + 1;
node_blocks = kNodeSize / node_threads_in_a_block + 1;
segment_blocks = kSegmentSize / segment_threads_in_a_block + 1;
simulation_results_pool.clear();
simulation_results_output_file.open(simulation_output_file_path.c_str());
simulation_results_output_file << "##TIME STEP" << ":Segment ID:" << ":(" << "COUNTS" << ":" << "flow" << ":" << "density" << ":" << "speed" << ":" << "queue_length" << ")" << endl;
str_tools = new StringTools();
return true;
}
bool InitilizeGPU() {
gpu_data = NULL;
parameter_setting_on_gpu = NULL;
data_local = new GPUMemory();
InitGPUData(data_local);
GPUSharedParameter* data_setting_gpu = new GPUSharedParameter();
InitGPUParameterSetting(data_setting_gpu);
#ifdef ENABLE_CONSTANT_MEMORY
GPUSharedParameter data_setting_cpu_constant;
InitGPUParameterSetting(&data_setting_cpu_constant);
#endif
//apply memory on GPU
size_t memory_space_for_vehicles = all_vehicles.size() * sizeof(GPUVehicle);
if (hipMalloc((void**) &vpool_gpu, memory_space_for_vehicles) != hipSuccess) {
cerr << "hipMalloc((void**) &vpool_gpu, memory_space_for_vehicles) failed" << endl;
}
// size_t memory_space_for_rebuild_index = kTotalTimeSteps * kLaneSize * kLaneInputCapacityPerTimeStep * sizeof(int);
// if (hipMalloc((void**) &vpool_gpu_index, memory_space_for_rebuild_index) != hipSuccess) {
// cerr << "hipMalloc((void**) &vpool_gpu_index, memory_space_for_rebuild_index) failed" << endl;
// }
if (hipMalloc((void**) &gpu_data, data_local->total_size()) != hipSuccess) {
cerr << "hipMalloc(&gpu_data, sizeof(GPUMemory)) failed" << endl;
}
if (hipMalloc((void**) ¶meter_setting_on_gpu, sizeof(GPUSharedParameter)) != hipSuccess) {
cerr << "hipMalloc(&GPUSharedParameter, sizeof(GPUSharedParameter)) failed" << endl;
}
#ifdef ENABLE_CONSTANT_MEMORY
hipMemcpyToSymbol(data_setting_gpu_constant, &data_setting_cpu_constant, sizeof(GPUSharedParameter));
#endif
//apply a buffer space for GPU outputs
if (kGPUToCPUSimulationResultsCopyBufferSize > 1) {
size_t memory_space_for_buffer_outputs = sizeof(SimulationResults) * kGPUToCPUSimulationResultsCopyBufferSize;
if (hipMalloc((void**) &simulation_results_buffer_on_gpu, memory_space_for_buffer_outputs) != hipSuccess) {
cerr << "hipMalloc((void**) &simulation_results_buffer_on_gpu, memory_space_for_buffer_outputs) failed" << endl;
}
}
hipMemcpy(vpool_gpu, vpool_cpu, memory_space_for_vehicles, hipMemcpyHostToDevice);
// hipMemcpy(vpool_gpu_index, vpool_cpu_index, memory_space_for_rebuild_index, hipMemcpyHostToDevice);
hipMemcpy(gpu_data, data_local, data_local->total_size(), hipMemcpyHostToDevice);
hipMemcpy(parameter_setting_on_gpu, data_setting_gpu, sizeof(GPUSharedParameter), hipMemcpyHostToDevice);
// int GRID_SIZE = 1;
// int BLOCK_SIZE = kTotalTimeSteps;
//
// LinkGPUData<<<GRID_SIZE, BLOCK_SIZE>>>(gpu_data, kTotalTimeSteps, vpool_gpu, vpool_gpu_index, parameter_seeting_on_gpu);
//
// //wait for all CUDA related operations to finish;
// std::cout << "LinkGPUData begins" << std::endl;
// hipDeviceSynchronize();
// std::cout << "LinkGPUData ends" << std::endl;
#ifdef ENABLE_OUTPUT_GPU_BUFFER
hipHostMalloc((void **) &one_buffer, sizeof(SimulationResults) * kGPUToCPUSimulationResultsCopyBufferSize);
#endif
return true;
}
/*
* Copy the parameter setting to GPU memory
*/
bool InitGPUParameterSetting(GPUSharedParameter* data_setting_gpu) {
data_setting_gpu->kOnGPULaneSize = kLaneSize;
data_setting_gpu->kOnGPUNodeSize = kNodeSize;
data_setting_gpu->kOnGPUSegmentSize = kSegmentSize;
data_setting_gpu->kOnGPUEndTimeStep = kEndTimeSteps;
data_setting_gpu->kOnGPUStartTimeStep = kStartTimeSteps;
data_setting_gpu->kOnGPUTotalTimeSteps = kTotalTimeSteps;
data_setting_gpu->kOnGPUUnitTimeStep = kUnitTimeStep;
data_setting_gpu->kOnGPUVehicleLength = kVehicleLength;
data_setting_gpu->kOnGPUMaxRouteLength = kMaxRouteLength;
data_setting_gpu->kOnGPUGPUToCPUSimulationResultsCopyBufferSize = kGPUToCPUSimulationResultsCopyBufferSize;
data_setting_gpu->kOnGPUTotalVehicleSpace = kTotalVehicleSpace;
return true;
}
/*
* Build a GPU data from the network data
*/
bool InitGPUData(GPUMemory* data_local) {
data_local->num_processed_blocks = 0;
/**
* First Part: Lane
*/
for (int i = 0; i < the_network->lane_size; i++) {
Lane* one_lane = the_network->all_lanes[i];
//
assert(one_lane->lane_id == i);
data_local->lane_pool.lane_ID[i] = one_lane->lane_id;
data_local->lane_pool.Seg_ID[i] = one_lane->seg_id;
data_local->lane_pool.Tp[i] = simulation_start_time - simulation_time_step;
data_local->lane_pool.Tq[i] = simulation_start_time - simulation_time_step;
data_local->lane_pool.accumulated_offset[i] = 0;
//data_local->lane_pool.flow[i] = 0;
//data_local->lane_pool.density[i] = 0;
//data_local->lane_pool.speed[i] = 0;
//data_local->lane_pool.queue_length[i] = 0;
/*
* for density calculation
*/
data_local->lane_pool.max_vehicles[i] = one_lane->max_vehs; //number of vehicles
/*
* for speed calculation
*/
data_local->lane_pool.vehicle_counts[i] = 0;
data_local->lane_pool.vehicle_passed_to_the_lane_counts[i] = 0;
data_local->lane_pool.leaving_vehicle_counts[i] = 0;
data_local->lane_pool.vehicle_start_index[i] = one_lane->veh_start_index;
//data_local->lane_pool.first_veh_index[i] = 0;
data_local->lane_pool.buffered_first_veh_index[i] = 0;
data_local->lane_pool.buffered_vehicle_counts[i] = 0;
data_local->lane_pool.ring_buffer_size[i] = one_lane->max_vehs + kMaxSegmentInputCapacityPerTimeStep;
data_local->lane_pool.queue_length[i] = 0;
}
std::cout << "Lane Pool size: "<<sizeof(data_local->lane_pool) << std::endl;
/*
 * Second Part: Segment
*/
for(int i=0; i<the_network->seg_size; i++){
Segment* one_seg = the_network->all_segs[i];
data_local->seg_pool.seg_ID[i] = one_seg->seg_id;
data_local->seg_pool.lane_start_index[i] = one_seg->lane_start_index;
data_local->seg_pool.num_lanes[i] = one_seg->num_lanes;
data_local->seg_pool.lane_end_index[i] = one_seg->lane_start_index + one_seg->num_lanes - 1;
data_local->seg_pool.alpha[i] = one_seg->alpha;
data_local->seg_pool.beta[i] = one_seg->beta;
data_local->seg_pool.min_density[i] = one_seg->min_density;
data_local->seg_pool.max_density[i] = one_seg->max_density;
data_local->seg_pool.MIN_speed[i] = one_seg->MIN_speed;
data_local->seg_pool.MAX_speed[i] = one_seg->MAX_speed;
data_local->seg_pool.input_capacity[i] = one_seg->capacity;
data_local->seg_pool.output_capacity[i] = one_seg->capacity;
data_local->seg_pool.capacity[i] = one_seg->capacity;
data_local->seg_pool.seg_length[i] = one_seg->length;
data_local->seg_pool.density[i] = 0;
data_local->seg_pool.speed[i] = 0;
data_local->seg_pool.queue_length[i] = 0;
data_local->seg_pool.flow[i] = 0;
data_local->seg_pool.empty_space[i] = (int)(one_seg->length/kVehicleLength)*one_seg->num_lanes;
data_local->seg_pool.veh_counts[i] = 0;
data_local->seg_pool.max_vehicles[i] = one_seg->num_lanes*(int)(one_seg->length/kVehicleLength);
data_local->seg_pool.processed[i] = one_seg->num_lanes;
for (int j = 0; j < kTotalTimeSteps; j++) {
data_local->seg_pool.speed_history[j][i] = -1;
}
//it is assumed that QUEUE_LENGTH_HISTORY = 4;
// assert(kQueueLengthHistory == 4);
// float weight[kQueueLengthHistory];
// weight[0] = 1.0;
// weight[1] = 0;
// weight[2] = 0;
// weight[3] = 0;
//
// for (int j = 0; j < kQueueLengthHistory; j++) {
// data_local->seg_pool.his_queue_length[j][i] = -1;
// data_local->seg_pool.his_queue_length_weighting[j][i] = weight[j];
// }
//
// data_local->seg_pool.predicted_empty_space[i] = 0;
// data_local->seg_pool.predicted_queue_length[i] = 0;
// data_local->seg_pool.last_time_empty_space[i] = 0;
}
std::cout << "Segment Pool size: "<<sizeof(data_local->seg_pool) << std::endl;
/**
* Third Part: Node
*/
for (int i = 0; i < the_network->node_size; i++) {
Node* one_node = the_network->all_nodes[i];
data_local->node_pool.node_ID[i] = one_node->node_id;
data_local->node_pool.upstream_seg_start_index[i] = one_node->up_seg_start_index;
data_local->node_pool.upstream_seg_end_index[i] = one_node->up_seg_end_index;
data_local->node_pool.vnode[i] = one_node->vnode;
//data_local->node_pool.enode[i] = one_node->enode;
if(one_node->up_seg_start_index>=0){
data_local->node_pool.upstream_start_lane_index[i] = data_local->seg_pool.lane_start_index[one_node->up_seg_start_index];
data_local->node_pool.upstream_end_lane_index[i] = data_local->seg_pool.lane_end_index[one_node->up_seg_end_index];
}
}
std::cout << "Node Pool Size: "<<sizeof(data_local->node_pool)<<std::endl;
/**
 * Fourth Part: Vehicles
*/
//Init VehiclePool
for (int i = kStartTimeSteps; i < kEndTimeSteps; i += kUnitTimeStep) {
for (int j = 0; j < kSegmentSize; j++) {
data_local->new_vehicles_every_time_step[i-kStartTimeSteps].new_vehicle_size[j] = 0;
data_local->new_vehicles_every_time_step[i-kStartTimeSteps].seg_ID[j] = -1;
}
}
//init host vehicle pool data /*xiaosong*/
int memory_space_for_vehicles = all_vehicles.size() * sizeof(GPUVehicle);
vpool_cpu = (GPUVehicle*) malloc(memory_space_for_vehicles);
if (vpool_cpu == NULL)
exit(1);
for (int i = kStartTimeSteps; i < kEndTimeSteps; i += kUnitTimeStep) {
for (int j = 0; j < kSegmentSize; j++) {
for (int z = 0; z < kMaxSegmentInputCapacityPerTimeStep; z++) {
//init as no vehicle
data_local->new_vehicles_every_time_step[i-kStartTimeSteps].new_vehicles[j][z] = -1;
}
}
}
// int nVehiclePerTick = kLaneInputCapacityPerTimeStep * kLaneSize;
// std::cout << "init all_vehicles" << std::endl;
int total_inserted_vehicles = 0;
//Insert Vehicles
for (int i = 0; i < all_vehicles.size(); i++) {
Vehicle* one_vehicle = all_vehicles[i];
int time_index = one_vehicle->entry_time;
int time_index_convert = TimestepToArrayIndex(time_index);
//assert(time_index == time_index_convert);
//try to load vehicles beyond the simulation border
if (time_index_convert >= kTotalTimeSteps || time_index_convert<0)
continue;
int seg_ID = all_od_paths[one_vehicle->path_id]->seg_ids[0];
int seg_Index = seg_ID; //the same for the SG Expressway case
if (data_local->new_vehicles_every_time_step[time_index_convert].new_vehicle_size[seg_Index] < data_local->seg_pool.capacity[seg_Index]) {
int last_vehicle_index = data_local->new_vehicles_every_time_step[time_index_convert].new_vehicle_size[seg_Index];
vpool_cpu[total_inserted_vehicles].vehicle_ID = one_vehicle->vehicle_id;
vpool_cpu[total_inserted_vehicles].entry_time = time_index_convert;
//vpool_cpu[i].current_seg_ID = seg_Index;
//assert(kMaxRouteLength > all_od_paths[one_vehicle->path_id]->seg_ids.size());
int max_copy_length = kMaxRouteLength > all_od_paths[one_vehicle->path_id]->seg_ids.size() ? all_od_paths[one_vehicle->path_id]->seg_ids.size() : kMaxRouteLength;
for (int p = 0; p < max_copy_length; p++) {
vpool_cpu[total_inserted_vehicles].path_code[p] = all_od_paths[one_vehicle->path_id]->seg_ids[p];
}
			//next_path_index starts at 1 (ready for the next segment); once next_path_index == whole_path_length there is no further segment on the route and the vehicle can exit
vpool_cpu[total_inserted_vehicles].next_path_index = 1;
vpool_cpu[total_inserted_vehicles].whole_path_length = all_od_paths[one_vehicle->path_id]->seg_ids.size();
//will be re-writen by GPU
//insert new vehicle
data_local->new_vehicles_every_time_step[time_index_convert].new_vehicles[seg_Index][last_vehicle_index] = total_inserted_vehicles;//vpool_cpu[i].vehicle_ID;
data_local->new_vehicles_every_time_step[time_index_convert].new_vehicle_size[seg_Index]++;
total_inserted_vehicles++;
} else {
// std::cout << "Loading Vehicles Exceeds The Loading Capacity: Time:" << time_index_covert << ", Lane_ID:" << lane_ID << ",i:" << i << ",ID:" << one_vehicle->vehicle_id << std::endl;
}
}
std::cout << "init all_vehicles:" << total_inserted_vehicles << std::endl;
std::cout << "vpool.size():" << total_inserted_vehicles * sizeof(GPUVehicle)<< std::endl;
std::cout << "total global mem: "<< data_local->total_size()<<std::endl;
return true;
}
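/*
 * Vehicle loading above buckets each demand record by its converted entry time step and the
 * first segment of its path; a given (time step, segment) pair accepts at most
 * seg_pool.capacity vehicles, entries outside the simulated horizon are skipped, and route
 * codes longer than kMaxRouteLength are truncated.
 */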
bool DestroyResources() {
simulation_results_output_file.flush();
simulation_results_output_file.close();
if (vpool_cpu != NULL)
		free(vpool_cpu); //vpool_cpu was allocated with malloc() in InitGPUData
if (str_tools != NULL)
delete str_tools;
hipDeviceReset();
return true;
}
bool StartSimulation() {
TimeTools profile;
//profile.start_profiling();
while (to_simulate_time < 1800) {
hipLaunchKernelGGL(( SupplySimulationPreVehiclePassing), dim3(segment_blocks), dim3(segment_threads_in_a_block), 0, stream_gpu_supply, gpu_data, to_simulate_time, kSegmentSize, parameter_setting_on_gpu, vpool_gpu);
//SupplySimulationVehiclePassingVNode<<<node_blocks, node_threads_in_a_block, 0, stream_gpu_supply>>>(gpu_data, to_simulate_time, kNodeSize, parameter_setting_on_gpu, vpool_gpu);
bool num_processed_nodes = true;
hipLaunchKernelGGL(( SupplySimulationVehiclePassingFirst), dim3(node_blocks), dim3(node_threads_in_a_block), 0, stream_gpu_supply, gpu_data, to_simulate_time, kNodeSize, parameter_setting_on_gpu, vpool_gpu);
hipMemcpy(&num_processed_nodes, &gpu_data->num_processed_blocks, sizeof(bool), hipMemcpyDeviceToHost);
//int n = 1;
while(num_processed_nodes){
num_processed_nodes = false;
hipMemcpy(&gpu_data->num_processed_blocks, &num_processed_nodes, sizeof(bool), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( SupplySimulationVehiclePassing), dim3(node_blocks), dim3(node_threads_in_a_block), 0, stream_gpu_supply, gpu_data, to_simulate_time, kNodeSize, parameter_setting_on_gpu, vpool_gpu);
hipMemcpy(&num_processed_nodes, &gpu_data->num_processed_blocks, sizeof(bool), hipMemcpyDeviceToHost);
//hipMemcpy(nodes_processed, gpu_data->seg_pool.processed, sizeof(bool)*kSegmentSize, hipMemcpyDeviceToHost);
// for(int i=0; i<kSegmentSize; i++){
// if(nodes_processed[i]>0){
// std::cout<<"time "<<to_simulate_time<<" iter "<<n<<" node "<<i<<"\n";
// }
// }
//std::cout<<"Interval "<<n<<' '<<num_processed_nodes<<'\n';
//n++;
}
//std::cout<<n<<'\n';
//SupplySimulationAfterVehiclePassing<<<lane_blocks, lane_threads_in_a_block, 0, stream_gpu_supply>>>(gpu_data, to_simulate_time, kLaneSize, parameter_setting_on_gpu);
to_simulate_time += simulation_time_step;
}
//profile.end_profiling();
//profile.output();
profile.start_profiling();
while (to_simulate_time < simulation_end_time) {
hipLaunchKernelGGL(( SupplySimulationPreVehiclePassing), dim3(segment_blocks), dim3(segment_threads_in_a_block), 0, stream_gpu_supply, gpu_data, to_simulate_time, kSegmentSize, parameter_setting_on_gpu, vpool_gpu);
//SupplySimulationVehiclePassingVNode<<<node_blocks, node_threads_in_a_block, 0, stream_gpu_supply>>>(gpu_data, to_simulate_time, kNodeSize, parameter_setting_on_gpu, vpool_gpu);
bool num_processed_nodes = true;
hipLaunchKernelGGL(( SupplySimulationVehiclePassingFirst), dim3(node_blocks), dim3(node_threads_in_a_block), 0, stream_gpu_supply, gpu_data, to_simulate_time, kNodeSize, parameter_setting_on_gpu, vpool_gpu);
hipMemcpy(&num_processed_nodes, &gpu_data->num_processed_blocks, sizeof(bool), hipMemcpyDeviceToHost);
//int n = 1;
while(num_processed_nodes){
num_processed_nodes = false;
hipMemcpy(&gpu_data->num_processed_blocks, &num_processed_nodes, sizeof(bool), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( SupplySimulationVehiclePassing), dim3(node_blocks), dim3(node_threads_in_a_block), 0, stream_gpu_supply, gpu_data, to_simulate_time, kNodeSize, parameter_setting_on_gpu, vpool_gpu);
hipMemcpy(&num_processed_nodes, &gpu_data->num_processed_blocks, sizeof(bool), hipMemcpyDeviceToHost);
//hipMemcpy(nodes_processed, gpu_data->seg_pool.processed, sizeof(bool)*kSegmentSize, hipMemcpyDeviceToHost);
// for(int i=0; i<kSegmentSize; i++){
// if(nodes_processed[i]>0){
// std::cout<<"time "<<to_simulate_time<<" iter "<<n<<" node "<<i<<"\n";
// }
// }
//std::cout<<"Interval "<<n<<' '<<num_processed_nodes<<'\n';
//n++;
}
//std::cout<<n<<'\n';
//SupplySimulationAfterVehiclePassing<<<lane_blocks, lane_threads_in_a_block, 0, stream_gpu_supply>>>(gpu_data, to_simulate_time, kLaneSize, parameter_setting_on_gpu);
to_simulate_time += simulation_time_step;
}
profile.end_profiling();
profile.output();
return true;
}
bool StartSimulationOptimizeWarp() {
TimeTools profile;
int num_unprocessed_nodes;
int updated_count;
while (to_simulate_time < 1800) {
//std::cout<<to_simulate_time<<"\n";
hipLaunchKernelGGL(( SupplySimulationPreVehiclePassing), dim3(segment_blocks), dim3(segment_threads_in_a_block), 0, stream_gpu_supply, gpu_data, to_simulate_time, kSegmentSize, parameter_setting_on_gpu, vpool_gpu);
num_unprocessed_nodes = kNodeSize;
hipLaunchKernelGGL(( SupplySimulationVehiclePassingFirstOptimizeWarp), dim3(node_blocks), dim3(node_threads_in_a_block), 0, stream_gpu_supply, gpu_data, to_simulate_time, kNodeSize, parameter_setting_on_gpu, vpool_gpu);
hipMemcpy(data_local->node_status, gpu_data->node_status, sizeof(int)*num_unprocessed_nodes, hipMemcpyDeviceToHost);
updated_count = 0;
for(int i=0; i<num_unprocessed_nodes; i++){
if(data_local->node_status[i]>=0){
data_local->node_status[updated_count] = data_local->node_status[i];
updated_count++;
}
}
num_unprocessed_nodes = updated_count;
//std::cout<<"After: "<<num_unprocessed_nodes<<"\n";
	hipMemcpy(gpu_data->node_status, data_local->node_status, sizeof(int)*num_unprocessed_nodes, hipMemcpyHostToDevice);
//int n = 1;
while(num_unprocessed_nodes>0){
//num_processed_nodes = false;
		hipMemcpy(gpu_data->node_status, data_local->node_status, sizeof(int)*num_unprocessed_nodes, hipMemcpyHostToDevice);
		hipLaunchKernelGGL(( SupplySimulationVehiclePassingOptimizeWarp), dim3((num_unprocessed_nodes+node_threads_in_a_block-1)/node_threads_in_a_block), dim3(node_threads_in_a_block), 0, stream_gpu_supply, gpu_data, to_simulate_time, num_unprocessed_nodes, parameter_setting_on_gpu, vpool_gpu);
hipMemcpy(data_local->node_status, gpu_data->node_status, sizeof(int)*num_unprocessed_nodes, hipMemcpyDeviceToHost);
//rearrange status array
updated_count = 0;
for(int i=0; i<num_unprocessed_nodes; i++){
if(data_local->node_status[i]>=0){
data_local->node_status[updated_count] = data_local->node_status[i];
updated_count++;
}
}
num_unprocessed_nodes = updated_count;
//std::cout<<to_simulate_time<<" "<<num_unprocessed_nodes<<'\n';
}
to_simulate_time += simulation_time_step;
}
profile.start_profiling();
while (to_simulate_time < simulation_end_time) {
hipLaunchKernelGGL(( SupplySimulationPreVehiclePassing), dim3(segment_blocks), dim3(segment_threads_in_a_block), 0, stream_gpu_supply, gpu_data, to_simulate_time, kSegmentSize, parameter_setting_on_gpu, vpool_gpu);
num_unprocessed_nodes = kNodeSize;
hipLaunchKernelGGL(( SupplySimulationVehiclePassingFirstOptimizeWarp), dim3(node_blocks), dim3(node_threads_in_a_block), 0, stream_gpu_supply, gpu_data, to_simulate_time, kNodeSize, parameter_setting_on_gpu, vpool_gpu);
hipMemcpy(data_local->node_status, gpu_data->node_status, sizeof(int)*num_unprocessed_nodes, hipMemcpyDeviceToHost);
updated_count = 0;
for(int i=0; i<num_unprocessed_nodes; i++){
if(data_local->node_status[i]>=0){
data_local->node_status[updated_count] = data_local->node_status[i];
updated_count++;
}
}
num_unprocessed_nodes = updated_count;
	hipMemcpy(gpu_data->node_status, data_local->node_status, sizeof(int)*num_unprocessed_nodes, hipMemcpyHostToDevice);
//int n = 1;
while(num_unprocessed_nodes>0){
//num_processed_nodes = false;
		hipMemcpy(gpu_data->node_status, data_local->node_status, sizeof(int)*num_unprocessed_nodes, hipMemcpyHostToDevice);
		hipLaunchKernelGGL(( SupplySimulationVehiclePassingOptimizeWarp), dim3((num_unprocessed_nodes+node_threads_in_a_block-1)/node_threads_in_a_block), dim3(node_threads_in_a_block), 0, stream_gpu_supply, gpu_data, to_simulate_time, num_unprocessed_nodes, parameter_setting_on_gpu, vpool_gpu);
hipMemcpy(data_local->node_status, gpu_data->node_status, sizeof(int)*num_unprocessed_nodes, hipMemcpyDeviceToHost);
//rearrange status array
updated_count = 0;
for(int i=0; i<num_unprocessed_nodes; i++){
if(data_local->node_status[i]>=0){
data_local->node_status[updated_count] = data_local->node_status[i];
updated_count++;
}
}
num_unprocessed_nodes = updated_count;
}
to_simulate_time += simulation_time_step;
}
profile.end_profiling();
profile.output();
return true;
}
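/*
 * StartSimulationOptimizeWarp keeps a compact worklist of unfinished nodes: after each
 * vehicle-passing pass the per-node status array is copied back to the host, compacted so that
 * only indices of still-unprocessed nodes remain, copied back to the GPU, and the kernel is
 * relaunched over just that shrinking list until it is empty. Later iterations therefore keep
 * warps busy with real work instead of early-exiting threads.
 */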
bool StartSimulationVP() {
TimeTools profile;
//profile.start_profiling();
while (to_simulate_time < 1800) {
hipLaunchKernelGGL(( SupplySimulationPreVehiclePassing), dim3(segment_blocks), dim3(segment_threads_in_a_block), 0, stream_gpu_supply, gpu_data, to_simulate_time, kSegmentSize, parameter_setting_on_gpu, vpool_gpu);
hipLaunchKernelGGL(( SupplySimulationVehiclePassingVNode), dim3(node_blocks), dim3(node_threads_in_a_block), 0, stream_gpu_supply, gpu_data, to_simulate_time, kNodeSize, parameter_setting_on_gpu, vpool_gpu);
to_simulate_time += simulation_time_step;
}
//profile.end_profiling();
//profile.output();
profile.start_profiling();
while (to_simulate_time < simulation_end_time) {
hipLaunchKernelGGL(( SupplySimulationPreVehiclePassing), dim3(segment_blocks), dim3(segment_threads_in_a_block), 0, stream_gpu_supply, gpu_data, to_simulate_time, kSegmentSize, parameter_setting_on_gpu, vpool_gpu);
hipLaunchKernelGGL(( SupplySimulationVehiclePassingVNode), dim3(node_blocks), dim3(node_threads_in_a_block), 0, stream_gpu_supply, gpu_data, to_simulate_time, kNodeSize, parameter_setting_on_gpu, vpool_gpu);
to_simulate_time += simulation_time_step;
}
profile.end_profiling();
profile.output();
return true;
}
bool StartSimulationSynch() {
TimeTools profile;
//profile.start_profiling();
while (to_simulate_time < 1800) {
hipLaunchKernelGGL(( SupplySimulationPreVehiclePassing), dim3(segment_blocks), dim3(segment_threads_in_a_block), 0, stream_gpu_supply, gpu_data, to_simulate_time, kSegmentSize, parameter_setting_on_gpu, vpool_gpu);
hipLaunchKernelGGL(( SupplySimulationVehiclePassingSynch), dim3(node_blocks), dim3(node_threads_in_a_block), 0, stream_gpu_supply, gpu_data, to_simulate_time, kNodeSize, parameter_setting_on_gpu, vpool_gpu);
to_simulate_time += simulation_time_step;
}
//profile.end_profiling();
//profile.output();
profile.start_profiling();
while (to_simulate_time < simulation_end_time) {
hipLaunchKernelGGL(( SupplySimulationPreVehiclePassing), dim3(segment_blocks), dim3(segment_threads_in_a_block), 0, stream_gpu_supply, gpu_data, to_simulate_time, kSegmentSize, parameter_setting_on_gpu, vpool_gpu);
hipLaunchKernelGGL(( SupplySimulationVehiclePassingSynch), dim3(node_blocks), dim3(node_threads_in_a_block), 0, stream_gpu_supply, gpu_data, to_simulate_time, kNodeSize, parameter_setting_on_gpu, vpool_gpu);
to_simulate_time += simulation_time_step;
}
profile.end_profiling();
profile.output();
return true;
}
/**
* Minor Functions
*/
bool CopySimulatedResultsToCPU(int time_step) {
int index = TimestepToArrayIndex(time_step);
SimulationResults* one = new SimulationResults();
hipMemcpy(one->flow, gpu_data->seg_pool.flow, sizeof(float) * kSegmentSize, hipMemcpyDeviceToHost);
hipMemcpy(one->density, gpu_data->seg_pool.density, sizeof(float) * kSegmentSize, hipMemcpyDeviceToHost);
hipMemcpy(one->speed, gpu_data->seg_pool.speed, sizeof(float) * kSegmentSize, hipMemcpyDeviceToHost);
hipMemcpy(one->queue_length, gpu_data->seg_pool.queue_length, sizeof(float) * kSegmentSize, hipMemcpyDeviceToHost);
hipMemcpy(one->counts, gpu_data->seg_pool.veh_counts, sizeof(int) * kSegmentSize, hipMemcpyDeviceToHost);
simulation_results_pool[index] = one;
return true;
}
bool CopyBufferSimulatedResultsToCPU(int time_step) {
hipMemcpyAsync(one_buffer, simulation_results_buffer_on_gpu, sizeof(SimulationResults) * kGPUToCPUSimulationResultsCopyBufferSize, hipMemcpyDeviceToHost, stream_gpu_io);
for (int i = 0; i < kGPUToCPUSimulationResultsCopyBufferSize; i++) {
int time_index = time_step - (kGPUToCPUSimulationResultsCopyBufferSize - 1) + i;
simulation_results_pool[time_index] = &one_buffer[i];
}
return true;
}
bool OutputSimulatedResults(int time_step) {
//output every a minute
//if(time_step % 60 != 0) return true;
if (simulation_results_pool.find(time_step) == simulation_results_pool.end()) {
std::cerr << "System Error, Try to output time " << time_step << ", while it is not ready!" << std::endl;
return false;
}
int index = time_step;
SimulationResults* one = simulation_results_pool[index];
assert(one != NULL);
if(time_step>=3599){
for (int i = 0; i < kNodeSize; i++) {
if(one->states[i]>0){
std::cout<<"VIRTUAL_NODES:"<<i<<'\n';
}
// int lane_ID = i;
// int lane_Index = lane_ID;
// if(one->counts[lane_Index]>0)
// std::cout << time_step << ":Segment:" << lane_ID << ":(" << one->counts[lane_Index] << ":" << one->flow[lane_Index] << ":" << one->density[lane_Index]
//// << ":" << gpu_data->lane_pool.speed[i] << ":" << gpu_data->lane_pool.queue_length[i] << ":" << gpu_data->lane_pool.empty_space[i] << ")" << endl;
// << ":" << one->speed[lane_Index] << ":" << one->queue_length[lane_Index] << ")" << endl;
}
}
return true;
}
bool OutputBufferedSimulatedResults(int time_step) {
//std::cout << "OutputBufferedSimulatedResults AT time " << time_step << std::endl;
for (int i = 0; i < kGPUToCPUSimulationResultsCopyBufferSize; i++) {
OutputSimulatedResults(time_step + i);
}
return true;
}
|
1fa086059bd1e3dc4f5869fad5ef849cfb356f18.cu
|
/**
 *This project evaluates whether the GPU is a viable platform for DynaMIT.
 *It also supports the paper "Mesoscopic Traffic Simulation on GPU".
*/
#include "../components_on_cpu/network/network.h"
#include "../components_on_cpu/demand/od_pair.h"
#include "../components_on_cpu/demand/od_path.h"
#include "../components_on_cpu/demand/vehicle.h"
#include "../components_on_cpu/util/time_tools.h"
#include "../components_on_cpu/util/string_tools.h"
#include "../components_on_cpu/util/simulation_results.h"
#include "../components_on_cpu/util/shared_cpu_include.h"
#include "../components_on_gpu/on_GPU_kernal_safe.cuh"
#include "../components_on_gpu/supply/on_GPU_memory.h"
#include "../components_on_gpu/supply/on_GPU_vehicle.h"
#include "../components_on_gpu/supply/on_GPU_new_lane_vehicles.h"
#include "../components_on_gpu/util/shared_gpu_include.h"
#include "../components_on_gpu/util/on_gpu_configuration.h"
#include "../components_on_gpu/on_GPU_Macro.h"
#include <math.h>
using namespace std;
/**
* CUDA Execution Configuration
*/
int lane_blocks;
const int lane_threads_in_a_block = 128;
int node_blocks;
const int node_threads_in_a_block = 128;
int segment_blocks;
const int segment_threads_in_a_block = 128;
/*
* Demand
*/
Network* the_network;
std::vector<ODPair*> all_od_pairs;
std::vector<ODPairPATH*> all_od_paths;
std::vector<Vehicle*> all_vehicles;
/*
* Path Input Config
*/
std::string network_file_path = "data_inputs/New_NW_SG/NewNetWork2.dat";
std::string demand_file_path = "data_inputs/New_NW_SG/newDemand.dat";
std::string od_pair_paths_file_path = "data_inputs/New_NW_SG/paths.dat";
/*
* All data in GPU
*/
GPUMemory* gpu_data;
GPUMemory* data_local;
GPUSharedParameter* parameter_setting_on_gpu;
#ifdef ENABLE_CONSTANT_MEMORY
__constant__ GPUSharedParameter data_setting_gpu_constant;
#endif
//A large memory space is pre-defined in order to copy to GPU
GPUVehicle *vpool_cpu;
GPUVehicle *vpool_gpu;
//int *vpool_cpu_index;
//int *vpool_gpu_index;
/**
* Simulation Results
*/
std::string simulation_output_file_path = "output/test3.txt";
std::map<int, SimulationResults*> simulation_results_pool;
ofstream simulation_results_output_file;
//buffer is only used when kGPUToCPUSimulationResultsCopyBufferSize > 1
SimulationResults* simulation_results_buffer_on_gpu;
//Used for buffer at CPU side
SimulationResults* one_buffer = NULL;
/*
* GPU Streams
* stream1: GPU Supply Simulation
*/
cudaStream_t stream_gpu_supply;
cudaStream_t stream_gpu_io;
cudaEvent_t gpu_supply_one_tick_simulation_done_trigger_event;
/*
* Time Management
*/
long simulation_start_time;
long simulation_end_time;
long simulation_time_step;
/*
 * to_simulate_time is the last time step that has already finished;
 * to_simulate_time + 1 may be the time step currently being simulated on the GPU
*/
long to_simulate_time;
/*
 * to_output_simulation_result_time is the last time step whose results have already been written out;
 * to_output_simulation_result_time + 1 may be the time step currently being output on the CPU
*/
long to_output_simulation_result_time;
/*
*/
//std::map<int, int> link_ID_to_link_Index;
//std::map<int, int> link_Index_to_link_ID;
//std::map<int, int> node_ID_to_node_Index;
//std::map<int, int> node_index_to_node_ID;
/*
* Define Major Functions
*/
bool InitParams(int argc, char* argv[]);
bool LoadInNetwork();
bool LoadInDemand();
bool InitilizeCPU();
bool InitilizeGPU();
bool InitGPUParameterSetting(GPUSharedParameter* data_setting_gpu);
bool InitGPUData(GPUMemory* data_local);
bool StartSimulation();
bool StartSimulationOptimizeWarp();
bool StartSimulationVP();
bool StartSimulationSynch();
bool DestroyResources();
/*
* Define Helper Functions
*/
StringTools* str_tools;
bool CopySimulatedResultsToCPU(int time_step);
bool CopyBufferSimulatedResultsToCPU(int time_step);
bool OutputSimulatedResults(int time_step);
bool OutputBufferedSimulatedResults(int time_step);
inline int TimestepToArrayIndex(int time_step) {
return (time_step - kStartTimeSteps) / kUnitTimeStep;
}
/*
* MAIN
*/
int main(int argc, char* argv[]) {
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
cout << "GPU Program Starts" << endl;
if (InitParams(argc, argv) == false) {
cout << "InitParams fails" << endl;
return 0;
}
if (LoadInNetwork() == false) {
cout << "Loading network fails" << endl;
return 0;
}
if (LoadInDemand() == false) {
cout << "Loading demand fails" << endl;
return 0;
}
cout<<"Finished loading input files"<<endl;
if (InitilizeCPU() == false) {
cout << "InitilizeCPU fails" << endl;
return 0;
}
cout<<"Finished initializing CPU"<<endl;
if (InitilizeGPU() == false) {
cout << "InitilizeGPU fails" << endl;
return 0;
}
cout << "Finished initializing GPU"<<endl;
//create streams
//cudaStreamCreate(&stream_gpu_supply);
//cudaStreamCreate(&stream_gpu_io);
//create a event
//cudaEventCreate(&gpu_supply_one_tick_simulation_done_trigger_event);
std::cout << "Simulation Starts" << std::endl;
//TimeTools profile;
//profile.start_profiling();
//Start Simulation (ETSF implemented inside)
if (StartSimulationOptimizeWarp() == false) {
cout << "Simulation Fails" << endl;
DestroyResources();
return 0;
}
//profile.end_profiling();
//profile.output();
DestroyResources();
cout << "Simulation Succeed!" << endl;
#ifdef _WIN32
system("pause");
#endif
return 0;
}
/**
*
*/
bool InitParams(int argc, char* argv[]) {
if (argc == 4) {
network_file_path = argv[1];
demand_file_path = argv[2];
od_pair_paths_file_path = argv[3];
std::cout << "network_file_path: "<<network_file_path<< std::endl;
std::cout << "demand_file_path: "<<demand_file_path<< std::endl;
std::cout << "od_pair_paths_file_path: "<<od_pair_paths_file_path<< std::endl;
}
return true;
}
bool LoadInNetwork() {
the_network = new Network();
return Network::load_network(*the_network, network_file_path);
}
bool LoadInDemand() {
//if (ODPair::load_in_all_ODs(all_od_pairs, od_pair_file_path) == false) {
// return false;
//}
if (ODPairPATH::load_in_all_OD_Paths(all_od_paths, od_pair_paths_file_path) == false) {
return false;
}
if (Vehicle::load_in_all_vehicles(all_vehicles, demand_file_path) == false) {
return false;
}
return true;
}
bool InitilizeCPU() {
simulation_start_time = 0;
simulation_end_time = kEndTimeSteps-kStartTimeSteps; // 1 hour
simulation_time_step = kUnitTimeStep;
assert(simulation_time_step == 1);
to_simulate_time = 0;
to_output_simulation_result_time = 0;
lane_blocks = kLaneSize / lane_threads_in_a_block + 1;
node_blocks = kNodeSize / node_threads_in_a_block + 1;
segment_blocks = kSegmentSize / segment_threads_in_a_block + 1;
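//Note: the "size / threads + 1" form always rounds the block count up, at the cost of one
//extra (idle) block when the size is an exact multiple; an equivalent sketch using the usual
//ceil-division idiom (assuming the same integer counters) would be:
// lane_blocks = (kLaneSize + lane_threads_in_a_block - 1) / lane_threads_in_a_block;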
simulation_results_pool.clear();
simulation_results_output_file.open(simulation_output_file_path.c_str());
simulation_results_output_file << "##TIME STEP" << ":Segment ID:" << ":(" << "COUNTS" << ":" << "flow" << ":" << "density" << ":" << "speed" << ":" << "queue_length" << ")" << endl;
str_tools = new StringTools();
return true;
}
bool InitilizeGPU() {
gpu_data = NULL;
parameter_setting_on_gpu = NULL;
data_local = new GPUMemory();
InitGPUData(data_local);
GPUSharedParameter* data_setting_gpu = new GPUSharedParameter();
InitGPUParameterSetting(data_setting_gpu);
#ifdef ENABLE_CONSTANT_MEMORY
GPUSharedParameter data_setting_cpu_constant;
InitGPUParameterSetting(&data_setting_cpu_constant);
#endif
//apply memory on GPU
size_t memory_space_for_vehicles = all_vehicles.size() * sizeof(GPUVehicle);
if (cudaMalloc((void**) &vpool_gpu, memory_space_for_vehicles) != cudaSuccess) {
cerr << "cudaMalloc((void**) &vpool_gpu, memory_space_for_vehicles) failed" << endl;
}
// size_t memory_space_for_rebuild_index = kTotalTimeSteps * kLaneSize * kLaneInputCapacityPerTimeStep * sizeof(int);
// if (cudaMalloc((void**) &vpool_gpu_index, memory_space_for_rebuild_index) != cudaSuccess) {
// cerr << "cudaMalloc((void**) &vpool_gpu_index, memory_space_for_rebuild_index) failed" << endl;
// }
if (cudaMalloc((void**) &gpu_data, data_local->total_size()) != cudaSuccess) {
cerr << "cudaMalloc(&gpu_data, sizeof(GPUMemory)) failed" << endl;
}
if (cudaMalloc((void**) &parameter_setting_on_gpu, sizeof(GPUSharedParameter)) != cudaSuccess) {
cerr << "cudaMalloc((void**) &parameter_setting_on_gpu, sizeof(GPUSharedParameter)) failed" << endl;
}
#ifdef ENABLE_CONSTANT_MEMORY
cudaMemcpyToSymbol(data_setting_gpu_constant, &data_setting_cpu_constant, sizeof(GPUSharedParameter));
#endif
//apply a buffer space for GPU outputs
if (kGPUToCPUSimulationResultsCopyBufferSize > 1) {
size_t memory_space_for_buffer_outputs = sizeof(SimulationResults) * kGPUToCPUSimulationResultsCopyBufferSize;
if (cudaMalloc((void**) &simulation_results_buffer_on_gpu, memory_space_for_buffer_outputs) != cudaSuccess) {
cerr << "cudaMalloc((void**) &simulation_results_buffer_on_gpu, memory_space_for_buffer_outputs) failed" << endl;
}
}
cudaMemcpy(vpool_gpu, vpool_cpu, memory_space_for_vehicles, cudaMemcpyHostToDevice);
// cudaMemcpy(vpool_gpu_index, vpool_cpu_index, memory_space_for_rebuild_index, cudaMemcpyHostToDevice);
cudaMemcpy(gpu_data, data_local, data_local->total_size(), cudaMemcpyHostToDevice);
cudaMemcpy(parameter_setting_on_gpu, data_setting_gpu, sizeof(GPUSharedParameter), cudaMemcpyHostToDevice);
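//A minimal sketch (reusing the globals above) of how the unchecked cudaMemcpy calls could
//report failures in the same style as the cudaMalloc calls above:
//if (cudaMemcpy(gpu_data, data_local, data_local->total_size(), cudaMemcpyHostToDevice) != cudaSuccess) {
//	cerr << "cudaMemcpy(gpu_data, data_local, ...) failed" << endl;
//}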
// int GRID_SIZE = 1;
// int BLOCK_SIZE = kTotalTimeSteps;
//
// LinkGPUData<<<GRID_SIZE, BLOCK_SIZE>>>(gpu_data, kTotalTimeSteps, vpool_gpu, vpool_gpu_index, parameter_seeting_on_gpu);
//
// //wait for all CUDA related operations to finish;
// std::cout << "LinkGPUData begins" << std::endl;
// cudaDeviceSynchronize();
// std::cout << "LinkGPUData ends" << std::endl;
#ifdef ENABLE_OUTPUT_GPU_BUFFER
cudaMallocHost((void **) &one_buffer, sizeof(SimulationResults) * kGPUToCPUSimulationResultsCopyBufferSize);
#endif
return true;
}
/*
* Fill in the parameter settings that are later copied to GPU memory
*/
bool InitGPUParameterSetting(GPUSharedParameter* data_setting_gpu) {
data_setting_gpu->kOnGPULaneSize = kLaneSize;
data_setting_gpu->kOnGPUNodeSize = kNodeSize;
data_setting_gpu->kOnGPUSegmentSize = kSegmentSize;
data_setting_gpu->kOnGPUEndTimeStep = kEndTimeSteps;
data_setting_gpu->kOnGPUStartTimeStep = kStartTimeSteps;
data_setting_gpu->kOnGPUTotalTimeSteps = kTotalTimeSteps;
data_setting_gpu->kOnGPUUnitTimeStep = kUnitTimeStep;
data_setting_gpu->kOnGPUVehicleLength = kVehicleLength;
data_setting_gpu->kOnGPUMaxRouteLength = kMaxRouteLength;
data_setting_gpu->kOnGPUGPUToCPUSimulationResultsCopyBufferSize = kGPUToCPUSimulationResultsCopyBufferSize;
data_setting_gpu->kOnGPUTotalVehicleSpace = kTotalVehicleSpace;
return true;
}
/*
* Build the GPU data structures from the network data
*/
bool InitGPUData(GPUMemory* data_local) {
data_local->num_processed_blocks = 0;
/**
* First Part: Lane
*/
for (int i = 0; i < the_network->lane_size; i++) {
Lane* one_lane = the_network->all_lanes[i];
//
assert(one_lane->lane_id == i);
data_local->lane_pool.lane_ID[i] = one_lane->lane_id;
data_local->lane_pool.Seg_ID[i] = one_lane->seg_id;
data_local->lane_pool.Tp[i] = simulation_start_time - simulation_time_step;
data_local->lane_pool.Tq[i] = simulation_start_time - simulation_time_step;
data_local->lane_pool.accumulated_offset[i] = 0;
//data_local->lane_pool.flow[i] = 0;
//data_local->lane_pool.density[i] = 0;
//data_local->lane_pool.speed[i] = 0;
//data_local->lane_pool.queue_length[i] = 0;
/*
* for density calculation
*/
data_local->lane_pool.max_vehicles[i] = one_lane->max_vehs; //number of vehicles
/*
* for speed calculation
*/
data_local->lane_pool.vehicle_counts[i] = 0;
data_local->lane_pool.vehicle_passed_to_the_lane_counts[i] = 0;
data_local->lane_pool.leaving_vehicle_counts[i] = 0;
data_local->lane_pool.vehicle_start_index[i] = one_lane->veh_start_index;
//data_local->lane_pool.first_veh_index[i] = 0;
data_local->lane_pool.buffered_first_veh_index[i] = 0;
data_local->lane_pool.buffered_vehicle_counts[i] = 0;
data_local->lane_pool.ring_buffer_size[i] = one_lane->max_vehs + kMaxSegmentInputCapacityPerTimeStep;
data_local->lane_pool.queue_length[i] = 0;
}
std::cout << "Lane Pool size: "<<sizeof(data_local->lane_pool) << std::endl;
/*
* Second Part: Segment
*/
for(int i=0; i<the_network->seg_size; i++){
Segment* one_seg = the_network->all_segs[i];
data_local->seg_pool.seg_ID[i] = one_seg->seg_id;
data_local->seg_pool.lane_start_index[i] = one_seg->lane_start_index;
data_local->seg_pool.num_lanes[i] = one_seg->num_lanes;
data_local->seg_pool.lane_end_index[i] = one_seg->lane_start_index + one_seg->num_lanes - 1;
data_local->seg_pool.alpha[i] = one_seg->alpha;
data_local->seg_pool.beta[i] = one_seg->beta;
data_local->seg_pool.min_density[i] = one_seg->min_density;
data_local->seg_pool.max_density[i] = one_seg->max_density;
data_local->seg_pool.MIN_speed[i] = one_seg->MIN_speed;
data_local->seg_pool.MAX_speed[i] = one_seg->MAX_speed;
data_local->seg_pool.input_capacity[i] = one_seg->capacity;
data_local->seg_pool.output_capacity[i] = one_seg->capacity;
data_local->seg_pool.capacity[i] = one_seg->capacity;
data_local->seg_pool.seg_length[i] = one_seg->length;
data_local->seg_pool.density[i] = 0;
data_local->seg_pool.speed[i] = 0;
data_local->seg_pool.queue_length[i] = 0;
data_local->seg_pool.flow[i] = 0;
data_local->seg_pool.empty_space[i] = (int)(one_seg->length/kVehicleLength)*one_seg->num_lanes;
data_local->seg_pool.veh_counts[i] = 0;
data_local->seg_pool.max_vehicles[i] = one_seg->num_lanes*(int)(one_seg->length/kVehicleLength);
data_local->seg_pool.processed[i] = one_seg->num_lanes;
for (int j = 0; j < kTotalTimeSteps; j++) {
data_local->seg_pool.speed_history[j][i] = -1;
}
//it is assumed that QUEUE_LENGTH_HISTORY = 4;
// assert(kQueueLengthHistory == 4);
// float weight[kQueueLengthHistory];
// weight[0] = 1.0;
// weight[1] = 0;
// weight[2] = 0;
// weight[3] = 0;
//
// for (int j = 0; j < kQueueLengthHistory; j++) {
// data_local->seg_pool.his_queue_length[j][i] = -1;
// data_local->seg_pool.his_queue_length_weighting[j][i] = weight[j];
// }
//
// data_local->seg_pool.predicted_empty_space[i] = 0;
// data_local->seg_pool.predicted_queue_length[i] = 0;
// data_local->seg_pool.last_time_empty_space[i] = 0;
}
std::cout << "Segment Pool size: "<<sizeof(data_local->seg_pool) << std::endl;
/**
* Third Part: Node
*/
for (int i = 0; i < the_network->node_size; i++) {
Node* one_node = the_network->all_nodes[i];
data_local->node_pool.node_ID[i] = one_node->node_id;
data_local->node_pool.upstream_seg_start_index[i] = one_node->up_seg_start_index;
data_local->node_pool.upstream_seg_end_index[i] = one_node->up_seg_end_index;
data_local->node_pool.vnode[i] = one_node->vnode;
//data_local->node_pool.enode[i] = one_node->enode;
if(one_node->up_seg_start_index>=0){
data_local->node_pool.upstream_start_lane_index[i] = data_local->seg_pool.lane_start_index[one_node->up_seg_start_index];
data_local->node_pool.upstream_end_lane_index[i] = data_local->seg_pool.lane_end_index[one_node->up_seg_end_index];
}
}
std::cout << "Node Pool Size: "<<sizeof(data_local->node_pool)<<std::endl;
/**
* Fourth Part: Vehicles
*/
//Init VehiclePool
for (int i = kStartTimeSteps; i < kEndTimeSteps; i += kUnitTimeStep) {
for (int j = 0; j < kSegmentSize; j++) {
data_local->new_vehicles_every_time_step[i-kStartTimeSteps].new_vehicle_size[j] = 0;
data_local->new_vehicles_every_time_step[i-kStartTimeSteps].seg_ID[j] = -1;
}
}
//init host vehicle pool data /*xiaosong*/
size_t memory_space_for_vehicles = all_vehicles.size() * sizeof(GPUVehicle);
vpool_cpu = (GPUVehicle*) malloc(memory_space_for_vehicles);
if (vpool_cpu == NULL)
exit(1);
for (int i = kStartTimeSteps; i < kEndTimeSteps; i += kUnitTimeStep) {
for (int j = 0; j < kSegmentSize; j++) {
for (int z = 0; z < kMaxSegmentInputCapacityPerTimeStep; z++) {
//init as no vehicle
data_local->new_vehicles_every_time_step[i-kStartTimeSteps].new_vehicles[j][z] = -1;
}
}
}
// int nVehiclePerTick = kLaneInputCapacityPerTimeStep * kLaneSize;
// std::cout << "init all_vehicles" << std::endl;
int total_inserted_vehicles = 0;
//Insert Vehicles
for (int i = 0; i < all_vehicles.size(); i++) {
Vehicle* one_vehicle = all_vehicles[i];
int time_index = one_vehicle->entry_time;
int time_index_convert = TimestepToArrayIndex(time_index);
//assert(time_index == time_index_convert);
//skip vehicles that would enter beyond the simulation horizon
if (time_index_convert >= kTotalTimeSteps || time_index_convert<0)
continue;
int seg_ID = all_od_paths[one_vehicle->path_id]->seg_ids[0];
int seg_Index = seg_ID; //the same for the SG Expressway case
if (data_local->new_vehicles_every_time_step[time_index_convert].new_vehicle_size[seg_Index] < data_local->seg_pool.capacity[seg_Index]) {
int last_vehicle_index = data_local->new_vehicles_every_time_step[time_index_convert].new_vehicle_size[seg_Index];
vpool_cpu[total_inserted_vehicles].vehicle_ID = one_vehicle->vehicle_id;
vpool_cpu[total_inserted_vehicles].entry_time = time_index_convert;
//vpool_cpu[i].current_seg_ID = seg_Index;
//assert(kMaxRouteLength > all_od_paths[one_vehicle->path_id]->seg_ids.size());
int max_copy_length = kMaxRouteLength > all_od_paths[one_vehicle->path_id]->seg_ids.size() ? all_od_paths[one_vehicle->path_id]->seg_ids.size() : kMaxRouteLength;
for (int p = 0; p < max_copy_length; p++) {
vpool_cpu[total_inserted_vehicles].path_code[p] = all_od_paths[one_vehicle->path_id]->seg_ids[p];
}
//next_path_index starts at 1 (the next segment on the route); once next_path_index == whole_path_length there are no more segments and the vehicle can exit
vpool_cpu[total_inserted_vehicles].next_path_index = 1;
vpool_cpu[total_inserted_vehicles].whole_path_length = all_od_paths[one_vehicle->path_id]->seg_ids.size();
//will be re-written by the GPU
//insert new vehicle
data_local->new_vehicles_every_time_step[time_index_convert].new_vehicles[seg_Index][last_vehicle_index] = total_inserted_vehicles;//vpool_cpu[i].vehicle_ID;
data_local->new_vehicles_every_time_step[time_index_convert].new_vehicle_size[seg_Index]++;
total_inserted_vehicles++;
} else {
// std::cout << "Loading Vehicles Exceeds The Loading Capacity: Time:" << time_index_covert << ", Lane_ID:" << lane_ID << ",i:" << i << ",ID:" << one_vehicle->vehicle_id << std::endl;
}
}
std::cout << "init all_vehicles:" << total_inserted_vehicles << std::endl;
std::cout << "vpool.size():" << total_inserted_vehicles * sizeof(GPUVehicle)<< std::endl;
std::cout << "total global mem: "<< data_local->total_size()<<std::endl;
return true;
}
bool DestroyResources() {
simulation_results_output_file.flush();
simulation_results_output_file.close();
if (vpool_cpu != NULL)
free(vpool_cpu); //allocated with malloc in InitGPUData
if (str_tools != NULL)
delete str_tools;
cudaDeviceReset();
return true;
}
bool StartSimulation() {
TimeTools profile;
//profile.start_profiling();
while (to_simulate_time < 1800) {
SupplySimulationPreVehiclePassing<<<segment_blocks, segment_threads_in_a_block, 0, stream_gpu_supply>>>(gpu_data, to_simulate_time, kSegmentSize, parameter_setting_on_gpu, vpool_gpu);
//SupplySimulationVehiclePassingVNode<<<node_blocks, node_threads_in_a_block, 0, stream_gpu_supply>>>(gpu_data, to_simulate_time, kNodeSize, parameter_setting_on_gpu, vpool_gpu);
bool num_processed_nodes = true;
SupplySimulationVehiclePassingFirst<<<node_blocks, node_threads_in_a_block, 0, stream_gpu_supply>>>(gpu_data, to_simulate_time, kNodeSize, parameter_setting_on_gpu, vpool_gpu);
cudaMemcpy(&num_processed_nodes, &gpu_data->num_processed_blocks, sizeof(bool), cudaMemcpyDeviceToHost);
//int n = 1;
while(num_processed_nodes){
num_processed_nodes = false;
cudaMemcpy(&gpu_data->num_processed_blocks, &num_processed_nodes, sizeof(bool), cudaMemcpyHostToDevice);
SupplySimulationVehiclePassing<<<node_blocks, node_threads_in_a_block, 0, stream_gpu_supply>>>(gpu_data, to_simulate_time, kNodeSize, parameter_setting_on_gpu, vpool_gpu);
cudaMemcpy(&num_processed_nodes, &gpu_data->num_processed_blocks, sizeof(bool), cudaMemcpyDeviceToHost);
//cudaMemcpy(nodes_processed, gpu_data->seg_pool.processed, sizeof(bool)*kSegmentSize, cudaMemcpyDeviceToHost);
// for(int i=0; i<kSegmentSize; i++){
// if(nodes_processed[i]>0){
// std::cout<<"time "<<to_simulate_time<<" iter "<<n<<" node "<<i<<"\n";
// }
// }
//std::cout<<"Interval "<<n<<' '<<num_processed_nodes<<'\n';
//n++;
}
//std::cout<<n<<'\n';
//SupplySimulationAfterVehiclePassing<<<lane_blocks, lane_threads_in_a_block, 0, stream_gpu_supply>>>(gpu_data, to_simulate_time, kLaneSize, parameter_setting_on_gpu);
to_simulate_time += simulation_time_step;
}
//profile.end_profiling();
//profile.output();
profile.start_profiling();
while (to_simulate_time < simulation_end_time) {
SupplySimulationPreVehiclePassing<<<segment_blocks, segment_threads_in_a_block, 0, stream_gpu_supply>>>(gpu_data, to_simulate_time, kSegmentSize, parameter_setting_on_gpu, vpool_gpu);
//SupplySimulationVehiclePassingVNode<<<node_blocks, node_threads_in_a_block, 0, stream_gpu_supply>>>(gpu_data, to_simulate_time, kNodeSize, parameter_setting_on_gpu, vpool_gpu);
bool num_processed_nodes = true;
SupplySimulationVehiclePassingFirst<<<node_blocks, node_threads_in_a_block, 0, stream_gpu_supply>>>(gpu_data, to_simulate_time, kNodeSize, parameter_setting_on_gpu, vpool_gpu);
cudaMemcpy(&num_processed_nodes, &gpu_data->num_processed_blocks, sizeof(bool), cudaMemcpyDeviceToHost);
//int n = 1;
while(num_processed_nodes){
num_processed_nodes = false;
cudaMemcpy(&gpu_data->num_processed_blocks, &num_processed_nodes, sizeof(bool), cudaMemcpyHostToDevice);
SupplySimulationVehiclePassing<<<node_blocks, node_threads_in_a_block, 0, stream_gpu_supply>>>(gpu_data, to_simulate_time, kNodeSize, parameter_setting_on_gpu, vpool_gpu);
cudaMemcpy(&num_processed_nodes, &gpu_data->num_processed_blocks, sizeof(bool), cudaMemcpyDeviceToHost);
//cudaMemcpy(nodes_processed, gpu_data->seg_pool.processed, sizeof(bool)*kSegmentSize, cudaMemcpyDeviceToHost);
// for(int i=0; i<kSegmentSize; i++){
// if(nodes_processed[i]>0){
// std::cout<<"time "<<to_simulate_time<<" iter "<<n<<" node "<<i<<"\n";
// }
// }
//std::cout<<"Interval "<<n<<' '<<num_processed_nodes<<'\n';
//n++;
}
//std::cout<<n<<'\n';
//SupplySimulationAfterVehiclePassing<<<lane_blocks, lane_threads_in_a_block, 0, stream_gpu_supply>>>(gpu_data, to_simulate_time, kLaneSize, parameter_setting_on_gpu);
to_simulate_time += simulation_time_step;
}
profile.end_profiling();
profile.output();
return true;
}
bool StartSimulationOptimizeWarp() {
TimeTools profile;
int num_unprocessed_nodes;
int updated_count;
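//The loops below keep a compacted list of still-unprocessed node indices in node_status:
//after each kernel pass the array is copied back to the host, finished entries (marked < 0
//by the kernel) are dropped, the compacted list is copied back to the GPU, and the passing
//kernel is relaunched over only the remaining nodes until none are left.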
while (to_simulate_time < 1800) {
//std::cout<<to_simulate_time<<"\n";
SupplySimulationPreVehiclePassing<<<segment_blocks, segment_threads_in_a_block, 0, stream_gpu_supply>>>(gpu_data, to_simulate_time, kSegmentSize, parameter_setting_on_gpu, vpool_gpu);
num_unprocessed_nodes = kNodeSize;
SupplySimulationVehiclePassingFirstOptimizeWarp<<<node_blocks, node_threads_in_a_block, 0, stream_gpu_supply>>>(gpu_data, to_simulate_time, kNodeSize, parameter_setting_on_gpu, vpool_gpu);
cudaMemcpy(data_local->node_status, gpu_data->node_status, sizeof(int)*num_unprocessed_nodes, cudaMemcpyDeviceToHost);
updated_count = 0;
for(int i=0; i<num_unprocessed_nodes; i++){
if(data_local->node_status[i]>=0){
data_local->node_status[updated_count] = data_local->node_status[i];
updated_count++;
}
}
num_unprocessed_nodes = updated_count;
//std::cout<<"After: "<<num_unprocessed_nodes<<"\n";
cudaMemcpy(gpu_data->node_status, data_local->node_status, sizeof(int)*num_unprocessed_nodes, cudaMemcpyHostToDevice);
//int n = 1;
while(num_unprocessed_nodes>0){
//num_processed_nodes = false;
cudaMemcpy(gpu_data->node_status, data_local->node_status, sizeof(int)*num_unprocessed_nodes, cudaMemcpyHostToDevice);
SupplySimulationVehiclePassingOptimizeWarp<<<(num_unprocessed_nodes + node_threads_in_a_block - 1) / node_threads_in_a_block, node_threads_in_a_block, 0, stream_gpu_supply>>>(gpu_data, to_simulate_time, num_unprocessed_nodes, parameter_setting_on_gpu, vpool_gpu);
cudaMemcpy(data_local->node_status, gpu_data->node_status, sizeof(int)*num_unprocessed_nodes, cudaMemcpyDeviceToHost);
//rearrange status array
updated_count = 0;
for(int i=0; i<num_unprocessed_nodes; i++){
if(data_local->node_status[i]>=0){
data_local->node_status[updated_count] = data_local->node_status[i];
updated_count++;
}
}
num_unprocessed_nodes = updated_count;
//std::cout<<to_simulate_time<<" "<<num_unprocessed_nodes<<'\n';
}
to_simulate_time += simulation_time_step;
}
profile.start_profiling();
while (to_simulate_time < simulation_end_time) {
SupplySimulationPreVehiclePassing<<<segment_blocks, segment_threads_in_a_block, 0, stream_gpu_supply>>>(gpu_data, to_simulate_time, kSegmentSize, parameter_setting_on_gpu, vpool_gpu);
num_unprocessed_nodes = kNodeSize;
SupplySimulationVehiclePassingFirstOptimizeWarp<<<node_blocks, node_threads_in_a_block, 0, stream_gpu_supply>>>(gpu_data, to_simulate_time, kNodeSize, parameter_setting_on_gpu, vpool_gpu);
cudaMemcpy(data_local->node_status, gpu_data->node_status, sizeof(int)*num_unprocessed_nodes, cudaMemcpyDeviceToHost);
updated_count = 0;
for(int i=0; i<num_unprocessed_nodes; i++){
if(data_local->node_status[i]>=0){
data_local->node_status[updated_count] = data_local->node_status[i];
updated_count++;
}
}
num_unprocessed_nodes = updated_count;
cudaMemcpy(gpu_data->node_status, data_local->node_status, sizeof(int)*num_unprocessed_nodes, cudaMemcpyHostToDevice);
//int n = 1;
while(num_unprocessed_nodes>0){
//num_processed_nodes = false;
cudaMemcpy(gpu_data->node_status, data_local->node_status, sizeof(int)*num_unprocessed_nodes, cudaMemcpyHostToDevice);
SupplySimulationVehiclePassingOptimizeWarp<<<(num_unprocessed_nodes + node_threads_in_a_block - 1) / node_threads_in_a_block, node_threads_in_a_block, 0, stream_gpu_supply>>>(gpu_data, to_simulate_time, num_unprocessed_nodes, parameter_setting_on_gpu, vpool_gpu);
cudaMemcpy(data_local->node_status, gpu_data->node_status, sizeof(int)*num_unprocessed_nodes, cudaMemcpyDeviceToHost);
//rearrange status array
updated_count = 0;
for(int i=0; i<num_unprocessed_nodes; i++){
if(data_local->node_status[i]>=0){
data_local->node_status[updated_count] = data_local->node_status[i];
updated_count++;
}
}
num_unprocessed_nodes = updated_count;
}
to_simulate_time += simulation_time_step;
}
profile.end_profiling();
profile.output();
return true;
}
bool StartSimulationVP() {
TimeTools profile;
//profile.start_profiling();
while (to_simulate_time < 1800) {
SupplySimulationPreVehiclePassing<<<segment_blocks, segment_threads_in_a_block, 0, stream_gpu_supply>>>(gpu_data, to_simulate_time, kSegmentSize, parameter_setting_on_gpu, vpool_gpu);
SupplySimulationVehiclePassingVNode<<<node_blocks, node_threads_in_a_block, 0, stream_gpu_supply>>>(gpu_data, to_simulate_time, kNodeSize, parameter_setting_on_gpu, vpool_gpu);
to_simulate_time += simulation_time_step;
}
//profile.end_profiling();
//profile.output();
profile.start_profiling();
while (to_simulate_time < simulation_end_time) {
SupplySimulationPreVehiclePassing<<<segment_blocks, segment_threads_in_a_block, 0, stream_gpu_supply>>>(gpu_data, to_simulate_time, kSegmentSize, parameter_setting_on_gpu, vpool_gpu);
SupplySimulationVehiclePassingVNode<<<node_blocks, node_threads_in_a_block, 0, stream_gpu_supply>>>(gpu_data, to_simulate_time, kNodeSize, parameter_setting_on_gpu, vpool_gpu);
to_simulate_time += simulation_time_step;
}
profile.end_profiling();
profile.output();
return true;
}
bool StartSimulationSynch() {
TimeTools profile;
//profile.start_profiling();
while (to_simulate_time < 1800) {
SupplySimulationPreVehiclePassing<<<segment_blocks, segment_threads_in_a_block, 0, stream_gpu_supply>>>(gpu_data, to_simulate_time, kSegmentSize, parameter_setting_on_gpu, vpool_gpu);
SupplySimulationVehiclePassingSynch<<<node_blocks, node_threads_in_a_block, 0, stream_gpu_supply>>>(gpu_data, to_simulate_time, kNodeSize, parameter_setting_on_gpu, vpool_gpu);
to_simulate_time += simulation_time_step;
}
//profile.end_profiling();
//profile.output();
profile.start_profiling();
while (to_simulate_time < simulation_end_time) {
SupplySimulationPreVehiclePassing<<<segment_blocks, segment_threads_in_a_block, 0, stream_gpu_supply>>>(gpu_data, to_simulate_time, kSegmentSize, parameter_setting_on_gpu, vpool_gpu);
SupplySimulationVehiclePassingSynch<<<node_blocks, node_threads_in_a_block, 0, stream_gpu_supply>>>(gpu_data, to_simulate_time, kNodeSize, parameter_setting_on_gpu, vpool_gpu);
to_simulate_time += simulation_time_step;
}
profile.end_profiling();
profile.output();
return true;
}
/**
* Minor Functions
*/
bool CopySimulatedResultsToCPU(int time_step) {
int index = TimestepToArrayIndex(time_step);
SimulationResults* one = new SimulationResults();
cudaMemcpy(one->flow, gpu_data->seg_pool.flow, sizeof(float) * kSegmentSize, cudaMemcpyDeviceToHost);
cudaMemcpy(one->density, gpu_data->seg_pool.density, sizeof(float) * kSegmentSize, cudaMemcpyDeviceToHost);
cudaMemcpy(one->speed, gpu_data->seg_pool.speed, sizeof(float) * kSegmentSize, cudaMemcpyDeviceToHost);
cudaMemcpy(one->queue_length, gpu_data->seg_pool.queue_length, sizeof(float) * kSegmentSize, cudaMemcpyDeviceToHost);
cudaMemcpy(one->counts, gpu_data->seg_pool.veh_counts, sizeof(int) * kSegmentSize, cudaMemcpyDeviceToHost);
simulation_results_pool[index] = one;
return true;
}
bool CopyBufferSimulatedResultsToCPU(int time_step) {
cudaMemcpyAsync(one_buffer, simulation_results_buffer_on_gpu, sizeof(SimulationResults) * kGPUToCPUSimulationResultsCopyBufferSize, cudaMemcpyDeviceToHost, stream_gpu_io);
for (int i = 0; i < kGPUToCPUSimulationResultsCopyBufferSize; i++) {
int time_index = time_step - (kGPUToCPUSimulationResultsCopyBufferSize - 1) + i;
simulation_results_pool[time_index] = &one_buffer[i];
}
return true;
}
bool OutputSimulatedResults(int time_step) {
//output once every minute
//if(time_step % 60 != 0) return true;
if (simulation_results_pool.find(time_step) == simulation_results_pool.end()) {
std::cerr << "System Error, Try to output time " << time_step << ", while it is not ready!" << std::endl;
return false;
}
int index = time_step;
SimulationResults* one = simulation_results_pool[index];
assert(one != NULL);
if(time_step>=3599){
for (int i = 0; i < kNodeSize; i++) {
if(one->states[i]>0){
std::cout<<"VIRTUAL_NODES:"<<i<<'\n';
}
// int lane_ID = i;
// int lane_Index = lane_ID;
// if(one->counts[lane_Index]>0)
// std::cout << time_step << ":Segment:" << lane_ID << ":(" << one->counts[lane_Index] << ":" << one->flow[lane_Index] << ":" << one->density[lane_Index]
//// << ":" << gpu_data->lane_pool.speed[i] << ":" << gpu_data->lane_pool.queue_length[i] << ":" << gpu_data->lane_pool.empty_space[i] << ")" << endl;
// << ":" << one->speed[lane_Index] << ":" << one->queue_length[lane_Index] << ")" << endl;
}
}
return true;
}
bool OutputBufferedSimulatedResults(int time_step) {
//std::cout << "OutputBufferedSimulatedResults AT time " << time_step << std::endl;
for (int i = 0; i < kGPUToCPUSimulationResultsCopyBufferSize; i++) {
OutputSimulatedResults(time_step + i);
}
return true;
}
|
1cccfb2b6895a5c3dc90fa08df461760c0304e22.hip
|
// !!! This is a file automatically generated by hipify!!!
/*************************************************************
*
* kpp_integrate_cuda_prototype.cu
* Prototype file for kpp CUDA kernel
*
* Copyright 2016 The Cyprus Institute
*
* Developers: Michail Alvanos - [email protected]
* Giannis Ashiotis
* Theodoros Christoudias - [email protected]
*
********************************************************************/
#include <stdio.h>
#include <unistd.h>
#include "hip/hip_runtime.h"
#define NSPEC 142
#define NVAR 139
#define NFIX 3
#define NREACT 310
#define LU_NONZERO 1486
#define NBSIZE 523
#define BLOCKSIZE 64
//#define MAX_VL_GLO 12288 /* elements that will pass in each call */
#define REDUCTION_SIZE_1 64
#define REDUCTION_SIZE_2 32
#define R_gas 8.3144621
#define N_A 6.02214129e+23
#define atm2Pa 101325.0
#define ip_O2 0
#define ip_O3P 1
#define ip_O1D 2
#define ip_H2O2 3
#define ip_NO2 4
#define ip_NO2O 5
#define ip_NOO2 6
#define ip_N2O5 7
#define ip_HNO3 8
#define ip_HNO4 9
#define ip_PAN 10
#define ip_HONO 11
#define ip_CH3OOH 12
#define ip_COH2 13
#define ip_CHOH 14
#define ip_CH3CO3H 15
#define ip_CH3CHO 16
#define ip_CH3COCH3 17
#define ip_MGLYOX 18
#define ip_HOCl 19
#define ip_OClO 20
#define ip_Cl2O2 21
#define ip_ClNO3 22
#define ip_ClNO2 23
#define ip_Cl2 24
#define ip_BrO 25
#define ip_HOBr 26
#define ip_BrCl 27
#define ip_BrNO3 28
#define ip_BrNO2 29
#define ip_Br2 30
#define ip_CCl4 31
#define ip_CH3Cl 32
#define ip_CH3CCl3 33
#define ip_CFCl3 34
#define ip_CF2Cl2 35
#define ip_CH3Br 36
#define ip_CF2ClBr 37
#define ip_CF3Br 38
#define ip_CH3I 39
#define ip_C3H7I 40
#define ip_CH2ClI 41
#define ip_CH2I2 42
#define ip_IO 43
#define ip_HOI 44
#define ip_I2 45
#define ip_ICl 46
#define ip_IBr 47
#define ip_INO2 48
#define ip_INO3 49
#define ip_SO2 50
#define ip_SO3 51
#define ip_OCS 52
#define ip_CS2 53
#define ip_H2O 54
#define ip_N2O 55
#define ip_NO 56
#define ip_CO2 57
#define ip_HCl 58
#define ip_CHCl2Br 59
#define ip_CHClBr2 60
#define ip_CH2ClBr 61
#define ip_CH2Br2 62
#define ip_CHBr3 63
#define ip_SF6 64
#define ip_NO3NOO 65
#define ip_ClONO2 66
#define ip_MACR 67
#define ip_MVK 68
#define ip_GLYOX 69
#define ip_HOCH2CHO 70
#define ip_CH4 71
#define ip_O2_b1b2 72
#define ip_O2_b1 73
#define ip_O2_b2 74
#define ip_O3PO1D 75
#define ip_O3Pp 76
#define ip_H2O1D 77
#define ip_N2 78
#define ip_N2_b1 79
#define ip_N2_b2 80
#define ip_N2_b3 81
#define ip_NN2D 82
#define ip_NOp 83
#define ip_Op_em 84
#define ip_O2p_em 85
#define ip_Op_O_em 86
#define ip_N2p_em 87
#define ip_Np_N_em 88
#define ip_Np_N2D_em 89
#define ip_N_N2D_em 90
#define ip_Op_em_b 91
#define ip_se_O2_b1 92
#define ip_se_O2_b2 93
#define ip_se_N2_b1 94
#define ip_se_N2_b2 95
#define ip_se_N2_b3 96
#define ip_se_N2_b4 97
#define ip_se_Op_em 98
#define ip_O2_aurq 99
#define ip_N2_aurq 100
#define ip_H2SO4 101
#define ip_C3O2 102
#define ip_CH3NO3 103
#define ip_CH3O2NO2 104
#define ip_CH3ONO 105
#define ip_CH3O2 106
#define ip_HCOOH 107
#define ip_HO2NO2 108
#define ip_OHNO3 109
#define ip_qqqdummy 110
#define ip_CH3OCl 111
#define ip_MEO2NO2 112
#define ip_CHF2Cl 113
#define ip_F113 114
#define ip_C2H5NO3 115
#define ip_NOA 116
#define ip_MEKNO3 117
#define ip_BENZAL 118
#define ip_HOPh3Me2NO2 119
#define ip_HOC6H4NO2 120
#define ip_CH3CHO2VINY 121
#define ip_CH3COCO2H 122
#define ip_IPRCHO2HCO 123
#define ip_C2H5CHO2HCO 124
#define ip_C2H5CHO2ENOL 125
#define ip_C3H7CHO2HCO 126
#define ip_C3H7CHO2VINY 127
#define ip_PeDIONE24 128
#define ip_PINAL2HCO 129
#define ip_PINAL2ENOL 130
#define ip_CF2ClCFCl2 131
#define ip_CH3CFCl2 132
#define ip_CF3CF2Cl 133
#define ip_CF2ClCF2Cl 134
#define ip_CHCl3 135
#define ip_CH2Cl2 136
#define ip_HO2 137
#define ip_ClO 138
#define ind_BrNO2 0
#define ind_CF2ClBr 1
#define ind_CF3Br 2
#define ind_CH3I 3
#define ind_O3s 4
#define ind_CF2ClBr_c 5
#define ind_CF3Br_c 6
#define ind_LCARBON 7
#define ind_LFLUORINE 8
#define ind_LCHLORINE 9
#define ind_CH3SO3H 10
#define ind_H2SO4 11
#define ind_NO3m_cs 12
#define ind_Hp_cs 13
#define ind_Dummy 14
#define ind_CFCl3_c 15
#define ind_CF2Cl2_c 16
#define ind_N2O_c 17
#define ind_CH3CCl3_c 18
#define ind_LO3s 19
#define ind_LossHO2 20
#define ind_LossO1D 21
#define ind_LossO3 22
#define ind_LossO3Br 23
#define ind_LossO3Cl 24
#define ind_LossO3H 25
#define ind_LossO3N 26
#define ind_LossO3O 27
#define ind_LossO3R 28
#define ind_LossOH 29
#define ind_ProdHO2 30
#define ind_ProdLBr 31
#define ind_ProdLCl 32
#define ind_ProdMeO2 33
#define ind_ProdO3 34
#define ind_ProdRO2 35
#define ind_ProdSBr 36
#define ind_ProdSCl 37
#define ind_BIACET 38
#define ind_Cl2O2 39
#define ind_NC4H10 40
#define ind_CCl4 41
#define ind_CF2Cl2 42
#define ind_CFCl3 43
#define ind_CH2Br2 44
#define ind_CHBr3 45
#define ind_CH3SO3 46
#define ind_NH3 47
#define ind_C2H6 48
#define ind_C3H8 49
#define ind_ClNO2 50
#define ind_OClO 51
#define ind_CH2ClBr 52
#define ind_CH3Br 53
#define ind_CHCl2Br 54
#define ind_CHClBr2 55
#define ind_SO2 56
#define ind_CH3CCl3 57
#define ind_NACA 58
#define ind_N 59
#define ind_N2O 60
#define ind_NH2OH 61
#define ind_IC3H7NO3 62
#define ind_CH3CO3H 63
#define ind_MPAN 64
#define ind_DMSO 65
#define ind_ISOOH 66
#define ind_LHOC3H6OOH 67
#define ind_LMEKOOH 68
#define ind_IC3H7OOH 69
#define ind_NHOH 70
#define ind_C2H5OOH 71
#define ind_HYPERACET 72
#define ind_HNO4 73
#define ind_CH3CO2H 74
#define ind_CH3Cl 75
#define ind_HONO 76
#define ind_PAN 77
#define ind_HCOOH 78
#define ind_LC4H9OOH 79
#define ind_Cl2 80
#define ind_CH3SO2 81
#define ind_MVKOOH 82
#define ind_N2O5 83
#define ind_NH2O 84
#define ind_MEK 85
#define ind_CH3COCH3 86
#define ind_HNO 87
#define ind_H2O2 88
#define ind_CH3OH 89
#define ind_BrCl 90
#define ind_ISON 91
#define ind_NH2 92
#define ind_IC3H7O2 93
#define ind_CH3COCH2O2 94
#define ind_CO 95
#define ind_MGLYOX 96
#define ind_H2 97
#define ind_CH4 98
#define ind_LMEKO2 99
#define ind_Br2 100
#define ind_HNO3 101
#define ind_LC4H9O2 102
#define ind_C2H4 103
#define ind_CH3OOH 104
#define ind_BrNO3 105
#define ind_C5H8 106
#define ind_C3H6 107
#define ind_ACETOL 108
#define ind_ISO2 109
#define ind_MVK 110
#define ind_LC4H9NO3 111
#define ind_HOCl 112
#define ind_MVKO2 113
#define ind_DMS 114
#define ind_LHOC3H6O2 115
#define ind_ClNO3 116
#define ind_C2H5O2 117
#define ind_HOBr 118
#define ind_CH3CHO 119
#define ind_O1D 120
#define ind_CH3CO3 121
#define ind_H 122
#define ind_HBr 123
#define ind_O3 124
#define ind_CH3O2 125
#define ind_OH 126
#define ind_Cl 127
#define ind_H2O 128
#define ind_Br 129
#define ind_HCHO 130
#define ind_O3P 131
#define ind_BrO 132
#define ind_NO 133
#define ind_ClO 134
#define ind_NO2 135
#define ind_NO3 136
#define ind_HO2 137
#define ind_HCl 138
#define ind_O2 139
#define ind_N2 140
#define ind_CO2 141
#define ind_H2OH2O -1
#define ind_N2D -1
#define ind_LNITROGEN -1
#define ind_CH2OO -1
#define ind_CH2OOA -1
#define ind_CH3 -1
#define ind_CH3O -1
#define ind_HOCH2O2 -1
#define ind_HOCH2OH -1
#define ind_HOCH2OOH -1
#define ind_CH3NO3 -1
#define ind_CH3O2NO2 -1
#define ind_CH3ONO -1
#define ind_CN -1
#define ind_HCN -1
#define ind_HOCH2O2NO2 -1
#define ind_NCO -1
#define ind_C2H2 -1
#define ind_C2H5OH -1
#define ind_CH2CHOH -1
#define ind_CH2CO -1
#define ind_CH3CHOHO2 -1
#define ind_CH3CHOHOOH -1
#define ind_CH3CO -1
#define ind_ETHGLY -1
#define ind_GLYOX -1
#define ind_HCOCH2O2 -1
#define ind_HCOCO -1
#define ind_HCOCO2H -1
#define ind_HCOCO3 -1
#define ind_HCOCO3H -1
#define ind_HOCH2CH2O -1
#define ind_HOCH2CH2O2 -1
#define ind_HOCH2CHO -1
#define ind_HOCH2CO -1
#define ind_HOCH2CO2H -1
#define ind_HOCH2CO3 -1
#define ind_HOCH2CO3H -1
#define ind_HOCHCHO -1
#define ind_HOOCH2CHO -1
#define ind_HOOCH2CO2H -1
#define ind_HOOCH2CO3 -1
#define ind_HOOCH2CO3H -1
#define ind_HYETHO2H -1
#define ind_C2H5NO3 -1
#define ind_C2H5O2NO2 -1
#define ind_CH3CN -1
#define ind_ETHOHNO3 -1
#define ind_NCCH2O2 -1
#define ind_NO3CH2CHO -1
#define ind_NO3CH2CO3 -1
#define ind_NO3CH2PAN -1
#define ind_PHAN -1
#define ind_ALCOCH2OOH -1
#define ind_C2H5CHO -1
#define ind_C2H5CO2H -1
#define ind_C2H5CO3 -1
#define ind_C2H5CO3H -1
#define ind_C33CO -1
#define ind_CH3CHCO -1
#define ind_CH3COCO2H -1
#define ind_CH3COCO3 -1
#define ind_CH3COCO3H -1
#define ind_CHOCOCH2O2 -1
#define ind_HCOCH2CHO -1
#define ind_HCOCH2CO2H -1
#define ind_HCOCH2CO3 -1
#define ind_HCOCH2CO3H -1
#define ind_HCOCOCH2OOH -1
#define ind_HOC2H4CO2H -1
#define ind_HOC2H4CO3 -1
#define ind_HOC2H4CO3H -1
#define ind_HOCH2COCH2O2 -1
#define ind_HOCH2COCH2OOH -1
#define ind_HOCH2COCHO -1
#define ind_HYPROPO2 -1
#define ind_HYPROPO2H -1
#define ind_IPROPOL -1
#define ind_NC3H7O2 -1
#define ind_NC3H7OOH -1
#define ind_NPROPOL -1
#define ind_PROPENOL -1
#define ind_C32OH13CO -1
#define ind_C3DIALO2 -1
#define ind_C3DIALOOH -1
#define ind_HCOCOHCO3 -1
#define ind_HCOCOHCO3H -1
#define ind_METACETHO -1
#define ind_C3PAN1 -1
#define ind_C3PAN2 -1
#define ind_CH3COCH2O2NO2 -1
#define ind_NC3H7NO3 -1
#define ind_NOA -1
#define ind_PPN -1
#define ind_PR2O2HNO3 -1
#define ind_PRONO3BO2 -1
#define ind_PROPOLNO3 -1
#define ind_HCOCOHPAN -1
#define ind_BIACETO2 -1
#define ind_BIACETOH -1
#define ind_BIACETOOH -1
#define ind_BUT1ENE -1
#define ind_BUT2OLO -1
#define ind_BUT2OLO2 -1
#define ind_BUT2OLOOH -1
#define ind_BUTENOL -1
#define ind_C312COCO3 -1
#define ind_C312COCO3H -1
#define ind_C3H7CHO -1
#define ind_C413COOOH -1
#define ind_C44O2 -1
#define ind_C44OOH -1
#define ind_C4CODIAL -1
#define ind_CBUT2ENE -1
#define ind_CH3COCHCO -1
#define ind_CH3COCHO2CHO -1
#define ind_CH3COCOCO2H -1
#define ind_CH3COOHCHCHO -1
#define ind_CHOC3COO2 -1
#define ind_CO23C3CHO -1
#define ind_CO2C3CHO -1
#define ind_CO2H3CHO -1
#define ind_CO2H3CO2H -1
#define ind_CO2H3CO3 -1
#define ind_CO2H3CO3H -1
#define ind_EZCH3CO2CHCHO -1
#define ind_EZCHOCCH3CHO2 -1
#define ind_HCOCCH3CHOOH -1
#define ind_HCOCCH3CO -1
#define ind_HCOCO2CH3CHO -1
#define ind_HMAC -1
#define ind_HO12CO3C4 -1
#define ind_HVMK -1
#define ind_IBUTALOH -1
#define ind_IBUTDIAL -1
#define ind_IBUTOLBO2 -1
#define ind_IBUTOLBOOH -1
#define ind_IC4H10 -1
#define ind_IC4H9O2 -1
#define ind_IC4H9OOH -1
#define ind_IPRCHO -1
#define ind_IPRCO3 -1
#define ind_IPRHOCO2H -1
#define ind_IPRHOCO3 -1
#define ind_IPRHOCO3H -1
#define ind_MACO2 -1
#define ind_MACO2H -1
#define ind_MACO3 -1
#define ind_MACO3H -1
#define ind_MACR -1
#define ind_MACRO -1
#define ind_MACRO2 -1
#define ind_MACROH -1
#define ind_MACROOH -1
#define ind_MBOOO -1
#define ind_MEPROPENE -1
#define ind_MPROPENOL -1
#define ind_PERIBUACID -1
#define ind_TBUT2ENE -1
#define ind_TC4H9O2 -1
#define ind_TC4H9OOH -1
#define ind_BZFUCO -1
#define ind_BZFUO2 -1
#define ind_BZFUONE -1
#define ind_BZFUOOH -1
#define ind_CO14O3CHO -1
#define ind_CO14O3CO2H -1
#define ind_CO2C4DIAL -1
#define ind_EPXC4DIAL -1
#define ind_EPXDLCO2H -1
#define ind_EPXDLCO3 -1
#define ind_EPXDLCO3H -1
#define ind_HOCOC4DIAL -1
#define ind_MALANHY -1
#define ind_MALANHYO2 -1
#define ind_MALANHYOOH -1
#define ind_MALDALCO2H -1
#define ind_MALDALCO3H -1
#define ind_MALDIAL -1
#define ind_MALDIALCO3 -1
#define ind_MALDIALO2 -1
#define ind_MALDIALOOH -1
#define ind_MALNHYOHCO -1
#define ind_MECOACEOOH -1
#define ind_MECOACETO2 -1
#define ind_BUT2OLNO3 -1
#define ind_C312COPAN -1
#define ind_C4PAN5 -1
#define ind_IBUTOLBNO3 -1
#define ind_IC4H9NO3 -1
#define ind_MACRN -1
#define ind_MVKNO3 -1
#define ind_PIPN -1
#define ind_TC4H9NO3 -1
#define ind_EPXDLPAN -1
#define ind_MALDIALPAN -1
#define ind_NBZFUO2 -1
#define ind_NBZFUONE -1
#define ind_NBZFUOOH -1
#define ind_NC4DCO2H -1
#define ind_LBUT1ENO2 -1
#define ind_LBUT1ENOOH -1
#define ind_LHMVKABO2 -1
#define ind_LHMVKABOOH -1
#define ind_LBUT1ENNO3 -1
#define ind_LMEKNO3 -1
#define ind_C1ODC2O2C4OD -1
#define ind_C1ODC2O2C4OOH -1
#define ind_C1ODC2OOHC4OD -1
#define ind_C1ODC3O2C4OOH -1
#define ind_C1OOHC2O2C4OD -1
#define ind_C1OOHC2OOHC4OD -1
#define ind_C1OOHC3O2C4OD -1
#define ind_C4MDIAL -1
#define ind_C511O2 -1
#define ind_C511OOH -1
#define ind_C512O2 -1
#define ind_C512OOH -1
#define ind_C513CO -1
#define ind_C513O2 -1
#define ind_C513OOH -1
#define ind_C514O2 -1
#define ind_C514OOH -1
#define ind_C59O2 -1
#define ind_C59OOH -1
#define ind_CHOC3COCO3 -1
#define ind_CHOC3COOOH -1
#define ind_CO13C4CHO -1
#define ind_CO23C4CHO -1
#define ind_CO23C4CO3 -1
#define ind_CO23C4CO3H -1
#define ind_DB1O -1
#define ind_DB1O2 -1
#define ind_DB1OOH -1
#define ind_DB2O2 -1
#define ind_DB2OOH -1
#define ind_HCOC5 -1
#define ind_ISOPAB -1
#define ind_ISOPAOH -1
#define ind_ISOPBO2 -1
#define ind_ISOPBOH -1
#define ind_ISOPBOOH -1
#define ind_ISOPCD -1
#define ind_ISOPDO2 -1
#define ind_ISOPDOH -1
#define ind_ISOPDOOH -1
#define ind_MBO -1
#define ind_MBOACO -1
#define ind_MBOCOCO -1
#define ind_ME3FURAN -1
#define ind_ZCO3C23DBCOD -1
#define ind_ZCODC23DBCOOH -1
#define ind_ACCOMECHO -1
#define ind_ACCOMECO3 -1
#define ind_ACCOMECO3H -1
#define ind_C24O3CCO2H -1
#define ind_C4CO2DBCO3 -1
#define ind_C4CO2DCO3H -1
#define ind_C5134CO2OH -1
#define ind_C54CO -1
#define ind_C5CO14O2 -1
#define ind_C5CO14OH -1
#define ind_C5CO14OOH -1
#define ind_C5DIALCO -1
#define ind_C5DIALO2 -1
#define ind_C5DIALOOH -1
#define ind_C5DICARB -1
#define ind_C5DICARBO2 -1
#define ind_C5DICAROOH -1
#define ind_MC3ODBCO2H -1
#define ind_MMALANHY -1
#define ind_MMALANHYO2 -1
#define ind_MMALNHYOOH -1
#define ind_TLFUO2 -1
#define ind_TLFUONE -1
#define ind_TLFUOOH -1
#define ind_C4MCONO3OH -1
#define ind_C514NO3 -1
#define ind_C5PAN9 -1
#define ind_CHOC3COPAN -1
#define ind_DB1NO3 -1
#define ind_ISOPBDNO3O2 -1
#define ind_ISOPBNO3 -1
#define ind_ISOPDNO3 -1
#define ind_NC4CHO -1
#define ind_NC4OHCO3 -1
#define ind_NC4OHCO3H -1
#define ind_NC4OHCPAN -1
#define ind_NISOPO2 -1
#define ind_NISOPOOH -1
#define ind_NMBOBCO -1
#define ind_ZCPANC23DBCOD -1
#define ind_ACCOMEPAN -1
#define ind_C4CO2DBPAN -1
#define ind_C5COO2NO2 -1
#define ind_NC4MDCO2H -1
#define ind_NTLFUO2 -1
#define ind_NTLFUOOH -1
#define ind_LC578O2 -1
#define ind_LC578OOH -1
#define ind_LDISOPACO -1
#define ind_LDISOPACO2 -1
#define ind_LHC4ACCHO -1
#define ind_LHC4ACCO2H -1
#define ind_LHC4ACCO3 -1
#define ind_LHC4ACCO3H -1
#define ind_LIEPOX -1
#define ind_LISOPACO -1
#define ind_LISOPACO2 -1
#define ind_LISOPACOOH -1
#define ind_LISOPEFO -1
#define ind_LISOPEFO2 -1
#define ind_LMBOABO2 -1
#define ind_LMBOABOOH -1
#define ind_LME3FURANO2 -1
#define ind_LZCO3HC23DBCOD -1
#define ind_LC5PAN1719 -1
#define ind_LISOPACNO3 -1
#define ind_LISOPACNO3O2 -1
#define ind_LMBOABNO3 -1
#define ind_LNISO3 -1
#define ind_LNISOOH -1
#define ind_LNMBOABO2 -1
#define ind_LNMBOABOOH -1
#define ind_C614CO -1
#define ind_C614O2 -1
#define ind_C614OOH -1
#define ind_CO235C5CHO -1
#define ind_CO235C6O2 -1
#define ind_CO235C6OOH -1
#define ind_BENZENE -1
#define ind_BZBIPERO2 -1
#define ind_BZBIPEROOH -1
#define ind_BZEMUCCO -1
#define ind_BZEMUCCO2H -1
#define ind_BZEMUCCO3 -1
#define ind_BZEMUCCO3H -1
#define ind_BZEMUCO2 -1
#define ind_BZEMUCOOH -1
#define ind_BZEPOXMUC -1
#define ind_BZOBIPEROH -1
#define ind_C5CO2DBCO3 -1
#define ind_C5CO2DCO3H -1
#define ind_C5CO2OHCO3 -1
#define ind_C5COOHCO3H -1
#define ind_C6125CO -1
#define ind_C615CO2O2 -1
#define ind_C615CO2OOH -1
#define ind_C6CO4DB -1
#define ind_C6H5O -1
#define ind_C6H5O2 -1
#define ind_C6H5OOH -1
#define ind_CATEC1O -1
#define ind_CATEC1O2 -1
#define ind_CATEC1OOH -1
#define ind_CATECHOL -1
#define ind_CPDKETENE -1
#define ind_PBZQCO -1
#define ind_PBZQO2 -1
#define ind_PBZQONE -1
#define ind_PBZQOOH -1
#define ind_PHENO2 -1
#define ind_PHENOL -1
#define ind_PHENOOH -1
#define ind_C614NO3 -1
#define ind_BZBIPERNO3 -1
#define ind_BZEMUCNO3 -1
#define ind_BZEMUCPAN -1
#define ind_C5CO2DBPAN -1
#define ind_C5CO2OHPAN -1
#define ind_DNPHEN -1
#define ind_DNPHENO2 -1
#define ind_DNPHENOOH -1
#define ind_HOC6H4NO2 -1
#define ind_NBZQO2 -1
#define ind_NBZQOOH -1
#define ind_NCATECHOL -1
#define ind_NCATECO2 -1
#define ind_NCATECOOH -1
#define ind_NCPDKETENE -1
#define ind_NDNPHENO2 -1
#define ind_NDNPHENOOH -1
#define ind_NNCATECO2 -1
#define ind_NNCATECOOH -1
#define ind_NPHEN1O -1
#define ind_NPHEN1O2 -1
#define ind_NPHEN1OOH -1
#define ind_NPHENO2 -1
#define ind_NPHENOOH -1
#define ind_C235C6CO3H -1
#define ind_C716O2 -1
#define ind_C716OOH -1
#define ind_C721O2 -1
#define ind_C721OOH -1
#define ind_C722O2 -1
#define ind_C722OOH -1
#define ind_CO235C6CHO -1
#define ind_CO235C6CO3 -1
#define ind_MCPDKETENE -1
#define ind_ROO6R3O -1
#define ind_ROO6R3O2 -1
#define ind_ROO6R5O2 -1
#define ind_BENZAL -1
#define ind_C6CO2OHCO3 -1
#define ind_C6COOHCO3H -1
#define ind_C6H5CH2O2 -1
#define ind_C6H5CH2OOH -1
#define ind_C6H5CO3 -1
#define ind_C6H5CO3H -1
#define ind_C7CO4DB -1
#define ind_CRESO2 -1
#define ind_CRESOL -1
#define ind_CRESOOH -1
#define ind_MCATEC1O -1
#define ind_MCATEC1O2 -1
#define ind_MCATEC1OOH -1
#define ind_MCATECHOL -1
#define ind_OXYL1O2 -1
#define ind_OXYL1OOH -1
#define ind_PHCOOH -1
#define ind_PTLQCO -1
#define ind_PTLQO2 -1
#define ind_PTLQONE -1
#define ind_PTLQOOH -1
#define ind_TLBIPERO2 -1
#define ind_TLBIPEROOH -1
#define ind_TLEMUCCO -1
#define ind_TLEMUCCO2H -1
#define ind_TLEMUCCO3 -1
#define ind_TLEMUCCO3H -1
#define ind_TLEMUCO2 -1
#define ind_TLEMUCOOH -1
#define ind_TLEPOXMUC -1
#define ind_TLOBIPEROH -1
#define ind_TOL1O -1
#define ind_TOLUENE -1
#define ind_C7PAN3 -1
#define ind_C6CO2OHPAN -1
#define ind_C6H5CH2NO3 -1
#define ind_DNCRES -1
#define ind_DNCRESO2 -1
#define ind_DNCRESOOH -1
#define ind_MNCATECH -1
#define ind_MNCATECO2 -1
#define ind_MNCATECOOH -1
#define ind_MNCPDKETENE -1
#define ind_MNNCATCOOH -1
#define ind_MNNCATECO2 -1
#define ind_NCRES1O -1
#define ind_NCRES1O2 -1
#define ind_NCRES1OOH -1
#define ind_NCRESO2 -1
#define ind_NCRESOOH -1
#define ind_NDNCRESO2 -1
#define ind_NDNCRESOOH -1
#define ind_NPTLQO2 -1
#define ind_NPTLQOOH -1
#define ind_PBZN -1
#define ind_TLBIPERNO3 -1
#define ind_TLEMUCNO3 -1
#define ind_TLEMUCPAN -1
#define ind_TOL1OHNO2 -1
#define ind_C721CHO -1
#define ind_C721CO3 -1
#define ind_C721CO3H -1
#define ind_C810O2 -1
#define ind_C810OOH -1
#define ind_C811O2 -1
#define ind_C812O2 -1
#define ind_C812OOH -1
#define ind_C813O2 -1
#define ind_C813OOH -1
#define ind_C85O2 -1
#define ind_C85OOH -1
#define ind_C86O2 -1
#define ind_C86OOH -1
#define ind_C89O2 -1
#define ind_C89OOH -1
#define ind_C8BC -1
#define ind_C8BCCO -1
#define ind_C8BCO2 -1
#define ind_C8BCOOH -1
#define ind_NORPINIC -1
#define ind_EBENZ -1
#define ind_LXYL -1
#define ind_STYRENE -1
#define ind_STYRENO2 -1
#define ind_STYRENOOH -1
#define ind_C721PAN -1
#define ind_C810NO3 -1
#define ind_C89NO3 -1
#define ind_C8BCNO3 -1
#define ind_NSTYRENO2 -1
#define ind_NSTYRENOOH -1
#define ind_C811CO3 -1
#define ind_C811CO3H -1
#define ind_C85CO3 -1
#define ind_C85CO3H -1
#define ind_C89CO2H -1
#define ind_C89CO3 -1
#define ind_C89CO3H -1
#define ind_C96O2 -1
#define ind_C96OOH -1
#define ind_C97O2 -1
#define ind_C97OOH -1
#define ind_C98O2 -1
#define ind_C98OOH -1
#define ind_NOPINDCO -1
#define ind_NOPINDO2 -1
#define ind_NOPINDOOH -1
#define ind_NOPINONE -1
#define ind_NOPINOO -1
#define ind_NORPINAL -1
#define ind_NORPINENOL -1
#define ind_PINIC -1
#define ind_RO6R3P -1
#define ind_C811PAN -1
#define ind_C89PAN -1
#define ind_C96NO3 -1
#define ind_C9PAN2 -1
#define ind_LTMB -1
#define ind_APINAOO -1
#define ind_APINBOO -1
#define ind_APINENE -1
#define ind_BPINAO2 -1
#define ind_BPINAOOH -1
#define ind_BPINENE -1
#define ind_C106O2 -1
#define ind_C106OOH -1
#define ind_C109CO -1
#define ind_C109O2 -1
#define ind_C109OOH -1
#define ind_C96CO3 -1
#define ind_CAMPHENE -1
#define ind_CARENE -1
#define ind_MENTHEN6ONE -1
#define ind_OH2MENTHEN6ONE -1
#define ind_OHMENTHEN6ONEO2 -1
#define ind_PERPINONIC -1
#define ind_PINAL -1
#define ind_PINALO2 -1
#define ind_PINALOOH -1
#define ind_PINENOL -1
#define ind_PINONIC -1
#define ind_RO6R1O2 -1
#define ind_RO6R3O2 -1
#define ind_RO6R3OOH -1
#define ind_ROO6R1O2 -1
#define ind_SABINENE -1
#define ind_BPINANO3 -1
#define ind_C106NO3 -1
#define ind_C10PAN2 -1
#define ind_PINALNO3 -1
#define ind_RO6R1NO3 -1
#define ind_RO6R3NO3 -1
#define ind_ROO6R1NO3 -1
#define ind_LAPINABNO3 -1
#define ind_LAPINABO2 -1
#define ind_LAPINABOOH -1
#define ind_LNAPINABO2 -1
#define ind_LNAPINABOOH -1
#define ind_LNBPINABO2 -1
#define ind_LNBPINABOOH -1
#define ind_LHAROM -1
#define ind_CHF3 -1
#define ind_CHF2CF3 -1
#define ind_CH3CF3 -1
#define ind_CH2F2 -1
#define ind_CH3CHF2 -1
#define ind_CF2ClCF2Cl -1
#define ind_CF2ClCFCl2 -1
#define ind_CF3CF2Cl -1
#define ind_CH2Cl2 -1
#define ind_CH2FCF3 -1
#define ind_CH3CFCl2 -1
#define ind_CHCl3 -1
#define ind_CHF2Cl -1
#define ind_LBROMINE -1
#define ind_C3H7I -1
#define ind_CH2ClI -1
#define ind_CH2I2 -1
#define ind_HI -1
#define ind_HIO3 -1
#define ind_HOI -1
#define ind_I -1
#define ind_I2 -1
#define ind_I2O2 -1
#define ind_IBr -1
#define ind_ICl -1
#define ind_INO2 -1
#define ind_INO3 -1
#define ind_IO -1
#define ind_IPART -1
#define ind_OIO -1
#define ind_OCS -1
#define ind_S -1
#define ind_SF6 -1
#define ind_SH -1
#define ind_SO -1
#define ind_SO3 -1
#define ind_LSULFUR -1
#define ind_Hg -1
#define ind_HgO -1
#define ind_HgCl -1
#define ind_HgCl2 -1
#define ind_HgBr -1
#define ind_HgBr2 -1
#define ind_ClHgBr -1
#define ind_BrHgOBr -1
#define ind_ClHgOBr -1
#define ind_RGM_cs -1
#define ind_PRODUCTS -1
#define ind_M -1
#define ind_Op -1
#define ind_O2p -1
#define ind_Np -1
#define ind_N2p -1
#define ind_NOp -1
#define ind_em -1
#define ind_kJmol -1
#define ind_O4Sp -1
#define ind_O2Dp -1
#define ind_O2Pp -1
#define ind_LTERP -1
#define ind_LALK4 -1
#define ind_LALK5 -1
#define ind_LARO1 -1
#define ind_LARO2 -1
#define ind_LOLE1 -1
#define ind_LOLE2 -1
#define ind_LfPOG02 -1
#define ind_LfPOG03 -1
#define ind_LfPOG04 -1
#define ind_LfPOG05 -1
#define ind_LbbPOG02 -1
#define ind_LbbPOG03 -1
#define ind_LbbPOG04 -1
#define ind_LfSOGsv01 -1
#define ind_LfSOGsv02 -1
#define ind_LbbSOGsv01 -1
#define ind_LbbSOGsv02 -1
#define ind_LfSOGiv01 -1
#define ind_LfSOGiv02 -1
#define ind_LfSOGiv03 -1
#define ind_LfSOGiv04 -1
#define ind_LbbSOGiv01 -1
#define ind_LbbSOGiv02 -1
#define ind_LbbSOGiv03 -1
#define ind_LbSOGv01 -1
#define ind_LbSOGv02 -1
#define ind_LbSOGv03 -1
#define ind_LbSOGv04 -1
#define ind_LbOSOGv01 -1
#define ind_LbOSOGv02 -1
#define ind_LbOSOGv03 -1
#define ind_LaSOGv01 -1
#define ind_LaSOGv02 -1
#define ind_LaSOGv03 -1
#define ind_LaSOGv04 -1
#define ind_LaOSOGv01 -1
#define ind_LaOSOGv02 -1
#define ind_LaOSOGv03 -1
#define ind_ACBZO2 -1
#define ind_ALKNO3 -1
#define ind_ALKO2 -1
#define ind_ALKOH -1
#define ind_ALKOOH -1
#define ind_BCARY -1
#define ind_BENZO2 -1
#define ind_BENZOOH -1
#define ind_BEPOMUC -1
#define ind_BIGALD1 -1
#define ind_BIGALD2 -1
#define ind_BIGALD3 -1
#define ind_BIGALD4 -1
#define ind_BIGALKANE -1
#define ind_BIGENE -1
#define ind_BrONO -1
#define ind_BZALD -1
#define ind_BZOO -1
#define ind_BZOOH -1
#define ind_C3H7O2 -1
#define ind_C3H7OOH -1
#define ind_CFC113 -1
#define ind_CFC114 -1
#define ind_CFC115 -1
#define ind_COF2 -1
#define ind_COFCL -1
#define ind_DICARBO2 -1
#define ind_ELVOC -1
#define ind_ENEO2 -1
#define ind_EOOH -1
#define ind_F -1
#define ind_H1202 -1
#define ind_H2402 -1
#define ind_HCFC141B -1
#define ind_HCFC142B -1
#define ind_HCFC22 -1
#define ind_HF -1
#define ind_HOCH2OO -1
#define ind_HPALD -1
#define ind_IEC1O2 -1
#define ind_LIECHO -1
#define ind_LIECO3 -1
#define ind_LIECO3H -1
#define ind_LIMON -1
#define ind_LISOPNO3NO3 -1
#define ind_LISOPNO3O2 -1
#define ind_LISOPNO3OOH -1
#define ind_LISOPOOHO2 -1
#define ind_LISOPOOHOOH -1
#define ind_MALO2 -1
#define ind_MBONO3O2 -1
#define ind_MBOO2 -1
#define ind_MBOOOH -1
#define ind_MDIALO2 -1
#define ind_MEKNO3 -1
#define ind_MVKN -1
#define ind_MYRC -1
#define ind_NTERPNO3 -1
#define ind_NTERPO2 -1
#define ind_PACALD -1
#define ind_PBZNIT -1
#define ind_TEPOMUC -1
#define ind_TERP2O2 -1
#define ind_TERP2OOH -1
#define ind_TERPNO3 -1
#define ind_TERPO2 -1
#define ind_TERPOOH -1
#define ind_TERPROD1 -1
#define ind_TERPROD2 -1
#define ind_TOLO2 -1
#define ind_TOLOOH -1
#define ind_XYLENO2 -1
#define ind_XYLENOOH -1
#define ind_XYLOL -1
#define ind_XYLOLO2 -1
#define ind_XYLOLOOH -1
#define ind_O2_1D -1
#define ind_O2_1S -1
#define ind_ONIT -1
#define ind_C4H8 -1
#define ind_C4H9O3 -1
#define ind_C5H12 -1
#define ind_C5H11O2 -1
#define ind_C5H6O2 -1
#define ind_HYDRALD -1
#define ind_ISOPO2 -1
#define ind_C5H9O3 -1
#define ind_ISOPOOH -1
#define ind_C5H12O2 -1
#define ind_ONITR -1
#define ind_C5H10O4 -1
#define ind_ROO6R5P -1
#define ind_NH4 -1
#define ind_SO4 -1
#define ind_HCO -1
#define ind_ISPD -1
#define ind_ClOO -1
#define ind_Rn -1
#define ind_Pb -1
#define ind_XO2 -1
#define ind_XO2N -1
#define ind_ROOH -1
#define ind_OLE -1
#define ind_ROR -1
#define ind_ORGNTR -1
#define ind_ACO2 -1
#define ind_PAR -1
#define ind_RXPAR -1
#define ind_OHv0 -1
#define ind_OHv1 -1
#define ind_OHv2 -1
#define ind_OHv3 -1
#define ind_OHv4 -1
#define ind_OHv5 -1
#define ind_OHv6 -1
#define ind_OHv7 -1
#define ind_OHv8 -1
#define ind_OHv9 -1
#define ind_O1S -1
#define ind_O21d -1
#define ind_O2b1s -1
#define ind_O2c1s -1
#define ind_O2x -1
#define ind_O2A3D -1
#define ind_O2A3S -1
#define ind_O25P -1
#define ind_O2_a01 -1
#define ind_O3_a01 -1
#define ind_OH_a01 -1
#define ind_HO2_a01 -1
#define ind_H2O_a01 -1
#define ind_H2O2_a01 -1
#define ind_NH3_a01 -1
#define ind_NO_a01 -1
#define ind_NO2_a01 -1
#define ind_NO3_a01 -1
#define ind_HONO_a01 -1
#define ind_HNO3_a01 -1
#define ind_HNO4_a01 -1
#define ind_CH3OH_a01 -1
#define ind_HCOOH_a01 -1
#define ind_HCHO_a01 -1
#define ind_CH3O2_a01 -1
#define ind_CH3OOH_a01 -1
#define ind_CO2_a01 -1
#define ind_CH3CO2H_a01 -1
#define ind_PAN_a01 -1
#define ind_CH3CHO_a01 -1
#define ind_CH3COCH3_a01 -1
#define ind_Cl_a01 -1
#define ind_Cl2_a01 -1
#define ind_HCl_a01 -1
#define ind_HOCl_a01 -1
#define ind_Br_a01 -1
#define ind_Br2_a01 -1
#define ind_HBr_a01 -1
#define ind_HOBr_a01 -1
#define ind_BrCl_a01 -1
#define ind_I2_a01 -1
#define ind_IO_a01 -1
#define ind_HOI_a01 -1
#define ind_ICl_a01 -1
#define ind_IBr_a01 -1
#define ind_SO2_a01 -1
#define ind_H2SO4_a01 -1
#define ind_DMS_a01 -1
#define ind_DMSO_a01 -1
#define ind_Hg_a01 -1
#define ind_HgO_a01 -1
#define ind_HgOHOH_a01 -1
#define ind_HgOHCl_a01 -1
#define ind_HgCl2_a01 -1
#define ind_HgBr2_a01 -1
#define ind_HgSO3_a01 -1
#define ind_ClHgBr_a01 -1
#define ind_BrHgOBr_a01 -1
#define ind_ClHgOBr_a01 -1
#define ind_FeOH3_a01 -1
#define ind_FeCl3_a01 -1
#define ind_FeF3_a01 -1
#define ind_O2m_a01 -1
#define ind_OHm_a01 -1
#define ind_HO2m_a01 -1
#define ind_O2mm_a01 -1
#define ind_Hp_a01 -1
#define ind_NH4p_a01 -1
#define ind_NO2m_a01 -1
#define ind_NO3m_a01 -1
#define ind_NO4m_a01 -1
#define ind_CO3m_a01 -1
#define ind_HCOOm_a01 -1
#define ind_HCO3m_a01 -1
#define ind_CH3COOm_a01 -1
#define ind_Clm_a01 -1
#define ind_Cl2m_a01 -1
#define ind_ClOm_a01 -1
#define ind_ClOHm_a01 -1
#define ind_Brm_a01 -1
#define ind_Br2m_a01 -1
#define ind_BrOm_a01 -1
#define ind_BrOHm_a01 -1
#define ind_BrCl2m_a01 -1
#define ind_Br2Clm_a01 -1
#define ind_Im_a01 -1
#define ind_IO2m_a01 -1
#define ind_IO3m_a01 -1
#define ind_ICl2m_a01 -1
#define ind_IBr2m_a01 -1
#define ind_SO3m_a01 -1
#define ind_SO3mm_a01 -1
#define ind_SO4m_a01 -1
#define ind_SO4mm_a01 -1
#define ind_SO5m_a01 -1
#define ind_HSO3m_a01 -1
#define ind_HSO4m_a01 -1
#define ind_HSO5m_a01 -1
#define ind_CH3SO3m_a01 -1
#define ind_CH2OHSO3m_a01 -1
#define ind_Hgp_a01 -1
#define ind_Hgpp_a01 -1
#define ind_HgOHp_a01 -1
#define ind_HgClp_a01 -1
#define ind_HgBrp_a01 -1
#define ind_HgSO32mm_a01 -1
#define ind_Fepp_a01 -1
#define ind_FeOpp_a01 -1
#define ind_FeOHp_a01 -1
#define ind_FeOH2p_a01 -1
#define ind_FeClp_a01 -1
#define ind_Feppp_a01 -1
#define ind_FeHOpp_a01 -1
#define ind_FeHO2pp_a01 -1
#define ind_FeOHpp_a01 -1
#define ind_FeOH4m_a01 -1
#define ind_FeOHHO2p_a01 -1
#define ind_FeClpp_a01 -1
#define ind_FeCl2p_a01 -1
#define ind_FeBrpp_a01 -1
#define ind_FeBr2p_a01 -1
#define ind_FeFpp_a01 -1
#define ind_FeF2p_a01 -1
#define ind_FeSO3p_a01 -1
#define ind_FeSO4p_a01 -1
#define ind_FeSO42m_a01 -1
#define ind_FeOH2Fepppp_a01 -1
#define ind_D1O_a01 -1
#define ind_Nap_a01 -1
#define ind_LossO3Su -1
#define ihs_N2O5_H2O 0
#define ihs_HOCl_HCl 1
#define ihs_ClNO3_HCl 2
#define ihs_ClNO3_H2O 3
#define ihs_N2O5_HCl 4
#define ihs_ClNO3_HBr 5
#define ihs_BrNO3_HCl 6
#define ihs_HOCl_HBr 7
#define ihs_HOBr_HCl 8
#define ihs_HOBr_HBr 9
#define ihs_BrNO3_H2O 10
#define ihs_Hg 11
#define ihs_RGM 12
#define iht_N2O5 0
#define iht_HNO3 1
#define iht_Hg 2
#define iht_RGM 3
#define k_C6H5O_NO2 (2.08E-12)
#define k_C6H5O_O3 (2.86E-13)
#define k_adsecprim (3.0E-11)
#define k_adtertprim (5.7E-11 )
#define f_soh (3.44)
#define f_toh (2.68)
#define f_sooh (7.)
#define f_tooh (7.)
#define f_ono2 (0.04 )
#define f_ch2ono2 (0.2)
#define f_cpan (.25)
#define f_allyl (3.6)
#define f_alk (1.23)
#define f_cho (0.55)
#define f_co2h (1.67)
#define f_co (0.73)
#define f_o (8.15)
#define f_pch2oh (1.29)
#define f_tch2oh (0.53)
#define a_pan (0.56 )
#define a_cho (0.31 )
#define a_coch3 (0.76 )
#define a_ch2ono2 (0.64 )
#define a_ch2oh (1.7 )
#define a_ch2ooh (1.7 )
#define a_coh (2.2 )
#define a_cooh (2.2 )
#define a_co2h (0.25)
#define ifun 0
#define ijac 1
#define istp 2
#define iacc 3
#define irej 4
#define idec 5
#define isol 6
#define isng 7
#define itexit 0
#define ihexit 1
#define ZERO 0.0
#define ONE 1.0
#define HALF 0.5
/*
* Fortran to C macros
* GPU-friendly array definition
* i:VL_GLO, j:NVAR
*
*/
#define conc(i,j) conc[(j)*VL_GLO+(i)]
#define khet_st(i,j) khet_st[(j)*VL_GLO+(i)]
#define khet_tr(i,j) khet_tr[(j)*VL_GLO+(i)]
#define jx(i,j) jx[j*VL_GLO+i]
#define istatus(i,j) istatus[(j)*(VL_GLO)+(i)]
#define rstatus(i,j) rstatus[(j)*(VL_GLO)+(i)]
#define ROUND128(X) (X + (128 - 1)) & ~(128 - 1)
//#define rconst(i,j) rconst[(j)]
//3968 should be VL_GLO
#define rconst(i,j) rconst[(j)*3968 + (i)]
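/*
* Layout note: element (i,j) of rconst lives at j*VL_GLO + i (VL_GLO hard-coded as 3968 here,
* as the comment above points out). Since the grid-cell index i varies fastest, consecutive
* threads in a warp (consecutive i for a fixed rate index j) read consecutive addresses,
* keeping global-memory accesses coalesced.
*/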
/* Temporary arrays allocated in stack */
// #define var(i,j) var[(j)]
// #define fix(i,j) fix[(j)]
// #define jcb(i,j) jcb[(j)]
// #define varDot(i,j) varDot[j]
// #define varNew(i,j) varNew[(j)]
// #define Fcn0(i,j) Fcn0[(j)]
// #define Fcn(i,j) Fcn[(j)]
// #define Fcn(i,j) Fcn[(j)]
// #define dFdT(i,j) dFdT[(j)]
// #define varErr(i,j) varErr[(j)]
// #define K(i,j,k) K[(j)*(NVAR)+(k)]
// #define jac0(i,j) jac0[(j)]
// #define Ghimj(i,j) Ghimj[(j)]
//3968 should be VL_GLO
#define var(i,j) var[(j)* 3968 + (i)]
#define fix(i,j) fix[(j)* 3968 + (i)]
#define jcb(i,j) jcb[(j)* 3968 + (i)]
#define varDot(i,j) varDot[j* 3968 + (i)]
#define varNew(i,j) varNew[(j)* 3968 + (i)]
#define Fcn0(i,j) Fcn0[(j)* 3968 + (i)]
#define Fcn(i,j) Fcn[(j)* 3968 + (i)]
#define Fcn(i,j) Fcn[(j)* 3968 + (i)]
#define dFdT(i,j) dFdT[(j)* 3968 + (i)]
#define varErr(i,j) varErr[(j)* 3968 + (i)]
#define K(i,j,k) K[((j)*(NVAR)+(k)) * 3968 + (i)]
#define jac0(i,j) jac0[(j)* 3968 + (i)]
#define Ghimj(i,j) Ghimj[(j)* 3968 + (i)]
/* Enable debug flags for GPU */
//#define DEBUG
#ifdef DEBUG
#define GPU_DEBUG()\
gpuErrchk( hipPeekAtLastError() ); \
gpuErrchk( hipDeviceSynchronize() );
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
static inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#else
/* If debug flags are disabled */
#define GPU_DEBUG()
#define gpuErrchk(ans) ans
#endif
/** prefetches into L1 cache */
__device__ inline void prefetch_gl1(const void *p) {
#if __CUDA_ARCH__ <= 300
asm("prefetch.global.L1 [%0];": :"l"(p));
#endif
}
__device__ inline void prefetch_ll1(const void *p) {
#if __CUDA_ARCH__ <= 300
asm("prefetch.local.L1 [%0];": :"l"(p));
#endif
}
/** prefetches into L2 cache */
__device__ inline void prefetch_gl2(const void *p) {
#if __CUDA_ARCH__ <= 300
asm("prefetch.global.L2 [%0];": :"l"(p));
#endif
}
__device__ inline void prefetch_ll2(const void *p) {
#if __CUDA_ARCH__ <= 300
asm("prefetch.local.L2 [%0];": :"l"(p));
#endif
}
__device__ void update_rconst(const double * __restrict__ var,
const double * __restrict__ khet_st, const double * __restrict__ khet_tr,
const double * __restrict__ jx, double * __restrict__ rconst,
const double * __restrict__ temp_gpu,
const double * __restrict__ press_gpu,
const double * __restrict__ cair_gpu,
const int VL_GLO);
/* This runs on CPU */
double machine_eps_flt()
{
double machEps = 1.0f;
do
{
machEps /= 2.0f;
// If next epsilon yields 1, then break, because current
// epsilon is the machine epsilon.
}
while ((double)(1.0 + (machEps/2.0)) != 1.0);
return machEps;
}
/* This runs on GPU */
__device__ double machine_eps_flt_cuda()
{
typedef union
{
long i64;
double f64;
} flt_64;
flt_64 s;
s.f64 = 1.;
s.i64++;
return (s.f64 - 1.);
}
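/*
 * Note (added): both helpers return the double-precision machine epsilon,
 * 2^-52 ~= 2.22e-16. The host version halves a trial value until 1 + eps/2
 * rounds back to 1; the device version increments the integer image of 1.0 by
 * one ULP, which assumes "long" is 64 bits wide (true for the LP64 Linux
 * targets this code is built for).
 */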
__device__ static double alpha_AN(const int n, const int ro2type, const double temp, const double cair){
double alpha=2.E-22, beta=1.0, Yinf_298K=0.43, F=0.41, m0=0., minf=8.0;
double Y0_298K, Y0_298K_tp, Yinf_298K_t, zeta, k_ratio, alpha_a;
/* IF (ro2type = 1) THEN m = 0.4 ! primary RO2
ELSE IF (ro2type = 2) THEN m = 1. ! secondary RO2
ELSE IF (ro2type = 3) THEN m = 0.3 ! tertiary RO2
ELSE m = 1.
*/
double m = 1.;
Y0_298K = alpha*exp(beta*n);
Y0_298K_tp = Y0_298K *cair *pow((temp/298.),(- m0));
Yinf_298K_t = Yinf_298K * pow((temp/298.),(- minf));
zeta = 1/(1+ pow(log10(Y0_298K_tp/Yinf_298K_t),2));
k_ratio = (Y0_298K_tp/(1+ Y0_298K_tp/Yinf_298K_t))*pow(F,zeta);
alpha_a = k_ratio/(1+ k_ratio) *m;
return alpha_a;
}
__device__ static double alpha_AN(const int n, const int ro2type, const int bcarb, const int gcarb, const int abic, const double temp, const double cair){
double alpha=2.E-22, beta=1.0, Yinf_298K=0.43, F=0.41, m0=0., minf=8.0;
double Y0_298K, Y0_298K_tp, Yinf_298K_t, zeta, k_ratio, alpha_a;
double bcf=1., gcf=1., abf=1.;
double m = 1.; //According to Teng, ref3189
if (bcarb == 1) { bcf = 0.19; }// derived from Praske, ref3190: alpha_AN = 0.03 for the secondary HMKO2 relative to alpha_AN for 6C RO2 (0.16)
if (gcarb == 1) {gcf = 0.44; }// derived from Praske, ref3190: alpha_AN = 0.07 for the primary HMKO2 relative to alpha_AN for 6C RO2 (0.16)
if (abic == 1) { abf = 0.24; }// derived from the ratio of AN- yield for toluene from Elrod et al. (ref3180), 5.5 % at 293 K &
 // 200 torr, and this SAR for linear alkyl RO2 with 9 heavy atoms, 23.3%
Y0_298K = alpha*exp(beta*n);
Y0_298K_tp = Y0_298K *cair *pow((temp/298.),(- m0));
Yinf_298K_t = Yinf_298K * pow((temp/298.),(- minf));
zeta = 1/(1+ pow(log10(Y0_298K_tp/Yinf_298K_t),2));
k_ratio = (Y0_298K_tp/(1+ Y0_298K_tp/Yinf_298K_t))*pow(F,zeta);
alpha_a = k_ratio/(1+ k_ratio) *m*bcf*gcf*abf;
return alpha_a;
}
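/*
 * Example call (hypothetical values, illustration only): the organic-nitrate
 * branching ratio for an RO2 with 6 heavy atoms at 298 K would be obtained as
 *
 *   double a = alpha_AN(6, 2, 298., cair);   // n=6, ro2type=2 (m is fixed to 1 above)
 *
 * where cair is the air number density supplied by the caller; the bcarb/gcarb/abic
 * flags of the second overload additionally scale the result by bcf, gcf and abf.
 */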
__device__ static double k_RO2_HO2(const double temp, const int nC){
return 2.91e-13*exp(1300./temp)*(1.-exp(-0.245*nC)); // ref1630
}
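/*
 * ros_ErrorNorm computes the scaled root-mean-square norm used for Rosenbrock
 * step-size control:
 *   err = sqrt( (1/NVAR) * sum_i ( varErr_i / (absTol_i + relTol_i*max(|var_i|,|varNew_i|)) )^2 )
 * With vectorTol == 0 the scalar tolerances absTol[0]/relTol[0] are applied to
 * every component; the prefetch loops are only a latency hint.
 */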
__device__ double ros_ErrorNorm(double * __restrict__ var, double * __restrict__ varNew, double * __restrict__ varErr,
const double * __restrict__ absTol, const double * __restrict__ relTol,
const int vectorTol )
{
double err, scale, varMax;
int index = blockIdx.x*blockDim.x+threadIdx.x;
err = ZERO;
if (vectorTol){
for (int i=0;i<NVAR - 16;i+=16){
prefetch_ll1(&varErr(index,i));
prefetch_ll1(&absTol[i]);
prefetch_ll1(&relTol[i]);
prefetch_ll1(&var(index,i));
prefetch_ll1(&varNew(index,i));
}
for (int i=0; i<NVAR; i++)
{
varMax = fmax(fabs(var(index,i)),fabs(varNew(index,i)));
scale = absTol[i]+ relTol[i]*varMax;
err += pow((double)varErr(index,i)/scale,2.0);
}
err = sqrt((double) err/NVAR);
}else{
for (int i=0;i<NVAR - 16;i+=16){
prefetch_ll1(&varErr(index,i));
prefetch_ll1(&var(index,i));
prefetch_ll1(&varNew(index,i));
}
for (int i=0; i<NVAR; i++)
{
varMax = fmax(fabs(var(index,i)),fabs(varNew(index,i)));
scale = absTol[0]+ relTol[0]*varMax;
err += pow((double)varErr(index,i)/scale,2.0);
}
err = sqrt((double) err/NVAR);
}
return err;
}
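/*
 * kppSolve: KPP-generated sparse triangular solve. Ghimj holds the LU-decomposed
 * iteration matrix in KPP's sparse indexing; the statements below are the unrolled
 * forward substitution (subtracting already-known K components), followed by the
 * backward substitution that divides by the diagonal entries, updating the stage
 * vector K(:,istage,:) in place.
 */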
__device__ void kppSolve(const double * __restrict__ Ghimj, double * __restrict__ K,
const int istage, const int ros_S ){
int index = blockIdx.x*blockDim.x+threadIdx.x;
//K = &K[istage*NVAR];
K(index,istage,7) = K(index,istage,7)- Ghimj(index,7)*K(index,istage,1)- Ghimj(index,8)*K(index,istage,2);
K(index,istage,8) = K(index,istage,8)- Ghimj(index,23)*K(index,istage,1)- Ghimj(index,24)*K(index,istage,2);
K(index,istage,14) = K(index,istage,14)- Ghimj(index,50)*K(index,istage,5)- Ghimj(index,51)*K(index,istage,6);
K(index,istage,19) = K(index,istage,19)- Ghimj(index,67)*K(index,istage,4);
K(index,istage,31) = K(index,istage,31)- Ghimj(index,188)*K(index,istage,1)- Ghimj(index,189)*K(index,istage,2);
K(index,istage,32) = K(index,istage,32)- Ghimj(index,193)*K(index,istage,1);
K(index,istage,34) = K(index,istage,34)- Ghimj(index,205)*K(index,istage,0);
K(index,istage,60) = K(index,istage,60)- Ghimj(index,309)*K(index,istage,59);
K(index,istage,70) = K(index,istage,70)- Ghimj(index,351)*K(index,istage,61);
K(index,istage,85) = K(index,istage,85)- Ghimj(index,426)*K(index,istage,79);
K(index,istage,86) = K(index,istage,86)- Ghimj(index,434)*K(index,istage,62)- Ghimj(index,435)*K(index,istage,69);
K(index,istage,87) = K(index,istage,87)- Ghimj(index,442)*K(index,istage,70)- Ghimj(index,443)*K(index,istage,84);
K(index,istage,90) = K(index,istage,90)- Ghimj(index,468)*K(index,istage,80);
K(index,istage,92) = K(index,istage,92)- Ghimj(index,487)*K(index,istage,47)- Ghimj(index,488)*K(index,istage,84);
K(index,istage,93) = K(index,istage,93)- Ghimj(index,495)*K(index,istage,49)- Ghimj(index,496)*K(index,istage,69);
K(index,istage,94) = K(index,istage,94)- Ghimj(index,502)*K(index,istage,72)- Ghimj(index,503)*K(index,istage,86)- Ghimj(index,504)*K(index,istage,93);
K(index,istage,95) = K(index,istage,95)- Ghimj(index,510)*K(index,istage,58)- Ghimj(index,511)*K(index,istage,77)- Ghimj(index,512)*K(index,istage,82)- Ghimj(index,513)*K(index,istage,91);
K(index,istage,96) = K(index,istage,96)- Ghimj(index,535)*K(index,istage,72)- Ghimj(index,536)*K(index,istage,82)- Ghimj(index,537)*K(index,istage,94);
K(index,istage,99) = K(index,istage,99)- Ghimj(index,563)*K(index,istage,68)- Ghimj(index,564)*K(index,istage,85);
K(index,istage,100) = K(index,istage,100)- Ghimj(index,572)*K(index,istage,90);
K(index,istage,101) = K(index,istage,101)- Ghimj(index,585)*K(index,istage,83);
K(index,istage,102) = K(index,istage,102)- Ghimj(index,598)*K(index,istage,40)- Ghimj(index,599)*K(index,istage,79);
K(index,istage,108) = K(index,istage,108)- Ghimj(index,630)*K(index,istage,64)- Ghimj(index,631)*K(index,istage,67)- Ghimj(index,632)*K(index,istage,82)- Ghimj(index,633)*K(index,istage,91)- Ghimj(index,634)*K(index,istage,94)- Ghimj(index,635)*K(index,istage,106);
K(index,istage,109) = K(index,istage,109)- Ghimj(index,647)*K(index,istage,106);
K(index,istage,110) = K(index,istage,110)- Ghimj(index,655)*K(index,istage,66)- Ghimj(index,656)*K(index,istage,91)- Ghimj(index,657)*K(index,istage,106)- Ghimj(index,658)*K(index,istage,109);
K(index,istage,111) = K(index,istage,111)- Ghimj(index,666)*K(index,istage,99)- Ghimj(index,667)*K(index,istage,102)- Ghimj(index,668)*K(index,istage,107);
K(index,istage,113) = K(index,istage,113)- Ghimj(index,685)*K(index,istage,64)- Ghimj(index,686)*K(index,istage,82)- Ghimj(index,687)*K(index,istage,106)- Ghimj(index,688)*K(index,istage,110);
K(index,istage,115) = K(index,istage,115)- Ghimj(index,703)*K(index,istage,67)- Ghimj(index,704)*K(index,istage,103)- Ghimj(index,705)*K(index,istage,107);
K(index,istage,117) = K(index,istage,117)- Ghimj(index,722)*K(index,istage,48)- Ghimj(index,723)*K(index,istage,49)- Ghimj(index,724)*K(index,istage,71)- Ghimj(index,725)*K(index,istage,79)- Ghimj(index,726)*K(index,istage,85)- Ghimj(index,727)*K(index,istage,102)- Ghimj(index,728) *K(index,istage,107)- Ghimj(index,729)*K(index,istage,111)- Ghimj(index,730)*K(index,istage,115);
K(index,istage,118) = K(index,istage,118)- Ghimj(index,741)*K(index,istage,100)- Ghimj(index,742)*K(index,istage,105)- Ghimj(index,743)*K(index,istage,112)- Ghimj(index,744)*K(index,istage,116);
K(index,istage,119) = K(index,istage,119)- Ghimj(index,758)*K(index,istage,68)- Ghimj(index,759)*K(index,istage,71)- Ghimj(index,760)*K(index,istage,79)- Ghimj(index,761)*K(index,istage,99)- Ghimj(index,762)*K(index,istage,102)- Ghimj(index,763)*K(index,istage,107)- Ghimj(index,764) *K(index,istage,111)- Ghimj(index,765)*K(index,istage,115)- Ghimj(index,766)*K(index,istage,117);
K(index,istage,120) = K(index,istage,120)- Ghimj(index,777)*K(index,istage,41)- Ghimj(index,778)*K(index,istage,42)- Ghimj(index,779)*K(index,istage,43)- Ghimj(index,780)*K(index,istage,57)- Ghimj(index,781)*K(index,istage,60)- Ghimj(index,782)*K(index,istage,75)- Ghimj(index,783) *K(index,istage,92)- Ghimj(index,784)*K(index,istage,97)- Ghimj(index,785)*K(index,istage,98)- Ghimj(index,786)*K(index,istage,107);
K(index,istage,121) = K(index,istage,121)- Ghimj(index,798)*K(index,istage,38)- Ghimj(index,799)*K(index,istage,63)- Ghimj(index,800)*K(index,istage,68)- Ghimj(index,801)*K(index,istage,72)- Ghimj(index,802)*K(index,istage,77)- Ghimj(index,803)*K(index,istage,82)- Ghimj(index,804) *K(index,istage,85)- Ghimj(index,805)*K(index,istage,86)- Ghimj(index,806)*K(index,istage,93)- Ghimj(index,807)*K(index,istage,94)- Ghimj(index,808)*K(index,istage,96)- Ghimj(index,809)*K(index,istage,99)- Ghimj(index,810)*K(index,istage,102)- Ghimj(index,811) *K(index,istage,106)- Ghimj(index,812)*K(index,istage,107)- Ghimj(index,813)*K(index,istage,108)- Ghimj(index,814)*K(index,istage,109)- Ghimj(index,815)*K(index,istage,110)- Ghimj(index,816)*K(index,istage,111)- Ghimj(index,817)*K(index,istage,113) - Ghimj(index,818)*K(index,istage,115)- Ghimj(index,819)*K(index,istage,117)- Ghimj(index,820)*K(index,istage,119);
K(index,istage,122) = K(index,istage,122)- Ghimj(index,831)*K(index,istage,75)- Ghimj(index,832)*K(index,istage,95)- Ghimj(index,833)*K(index,istage,96)- Ghimj(index,834)*K(index,istage,97)- Ghimj(index,835)*K(index,istage,98)- Ghimj(index,836)*K(index,istage,103)- Ghimj(index,837) *K(index,istage,106)- Ghimj(index,838)*K(index,istage,107)- Ghimj(index,839)*K(index,istage,108)- Ghimj(index,840)*K(index,istage,109)- Ghimj(index,841)*K(index,istage,110)- Ghimj(index,842)*K(index,istage,113)- Ghimj(index,843)*K(index,istage,115) - Ghimj(index,844)*K(index,istage,119)- Ghimj(index,845)*K(index,istage,120)- Ghimj(index,846)*K(index,istage,121);
K(index,istage,123) = K(index,istage,123)- Ghimj(index,861)*K(index,istage,103)- Ghimj(index,862)*K(index,istage,104)- Ghimj(index,863)*K(index,istage,112)- Ghimj(index,864)*K(index,istage,114)- Ghimj(index,865)*K(index,istage,116)- Ghimj(index,866)*K(index,istage,118) - Ghimj(index,867)*K(index,istage,119)- Ghimj(index,868)*K(index,istage,121);
K(index,istage,124) = K(index,istage,124)- Ghimj(index,885)*K(index,istage,81)- Ghimj(index,886)*K(index,istage,84)- Ghimj(index,887)*K(index,istage,92)- Ghimj(index,888)*K(index,istage,103)- Ghimj(index,889)*K(index,istage,106)- Ghimj(index,890)*K(index,istage,107)- Ghimj(index,891) *K(index,istage,110)- Ghimj(index,892)*K(index,istage,114)- Ghimj(index,893)*K(index,istage,120)- Ghimj(index,894)*K(index,istage,121)- Ghimj(index,895)*K(index,istage,122);
K(index,istage,125) = K(index,istage,125)- Ghimj(index,910)*K(index,istage,3)- Ghimj(index,911)*K(index,istage,53)- Ghimj(index,912)*K(index,istage,63)- Ghimj(index,913)*K(index,istage,65)- Ghimj(index,914)*K(index,istage,74)- Ghimj(index,915)*K(index,istage,75)- Ghimj(index,916) *K(index,istage,81)- Ghimj(index,917)*K(index,istage,86)- Ghimj(index,918)*K(index,istage,93)- Ghimj(index,919)*K(index,istage,94)- Ghimj(index,920)*K(index,istage,98)- Ghimj(index,921)*K(index,istage,102)- Ghimj(index,922)*K(index,istage,104)- Ghimj(index,923) *K(index,istage,106)- Ghimj(index,924)*K(index,istage,107)- Ghimj(index,925)*K(index,istage,109)- Ghimj(index,926)*K(index,istage,113)- Ghimj(index,927)*K(index,istage,114)- Ghimj(index,928)*K(index,istage,117)- Ghimj(index,929)*K(index,istage,119) - Ghimj(index,930)*K(index,istage,120)- Ghimj(index,931)*K(index,istage,121)- Ghimj(index,932)*K(index,istage,122)- Ghimj(index,933)*K(index,istage,124);
K(index,istage,126) = K(index,istage,126)- Ghimj(index,948)*K(index,istage,40)- Ghimj(index,949)*K(index,istage,44)- Ghimj(index,950)*K(index,istage,45)- Ghimj(index,951)*K(index,istage,47)- Ghimj(index,952)*K(index,istage,48)- Ghimj(index,953)*K(index,istage,49)- Ghimj(index,954) *K(index,istage,52)- Ghimj(index,955)*K(index,istage,53)- Ghimj(index,956)*K(index,istage,54)- Ghimj(index,957)*K(index,istage,55)- Ghimj(index,958)*K(index,istage,56)- Ghimj(index,959)*K(index,istage,57)- Ghimj(index,960)*K(index,istage,58)- Ghimj(index,961) *K(index,istage,61)- Ghimj(index,962)*K(index,istage,62)- Ghimj(index,963)*K(index,istage,63)- Ghimj(index,964)*K(index,istage,64)- Ghimj(index,965)*K(index,istage,65)- Ghimj(index,966)*K(index,istage,66)- Ghimj(index,967)*K(index,istage,67)- Ghimj(index,968) *K(index,istage,68)- Ghimj(index,969)*K(index,istage,69)- Ghimj(index,970)*K(index,istage,70)- Ghimj(index,971)*K(index,istage,71)- Ghimj(index,972)*K(index,istage,72)- Ghimj(index,973)*K(index,istage,73)- Ghimj(index,974)*K(index,istage,74)- Ghimj(index,975) *K(index,istage,75)- Ghimj(index,976)*K(index,istage,76)- Ghimj(index,977)*K(index,istage,77)- Ghimj(index,978)*K(index,istage,78)- Ghimj(index,979)*K(index,istage,79)- Ghimj(index,980)*K(index,istage,81)- Ghimj(index,981)*K(index,istage,82)- Ghimj(index,982) *K(index,istage,84)- Ghimj(index,983)*K(index,istage,85)- Ghimj(index,984)*K(index,istage,86)- Ghimj(index,985)*K(index,istage,87)- Ghimj(index,986)*K(index,istage,88)- Ghimj(index,987)*K(index,istage,89)- Ghimj(index,988)*K(index,istage,91)- Ghimj(index,989) *K(index,istage,92)- Ghimj(index,990)*K(index,istage,93)- Ghimj(index,991)*K(index,istage,94)- Ghimj(index,992)*K(index,istage,95)- Ghimj(index,993)*K(index,istage,96)- Ghimj(index,994)*K(index,istage,97)- Ghimj(index,995)*K(index,istage,98)- Ghimj(index,996) *K(index,istage,99)- Ghimj(index,997)*K(index,istage,100)- Ghimj(index,998)*K(index,istage,101)- Ghimj(index,999)*K(index,istage,102)- Ghimj(index,1000)*K(index,istage,103)- Ghimj(index,1001)*K(index,istage,104)- Ghimj(index,1002)*K(index,istage,105) - Ghimj(index,1003)*K(index,istage,106)- Ghimj(index,1004)*K(index,istage,107)- Ghimj(index,1005)*K(index,istage,108)- Ghimj(index,1006)*K(index,istage,109)- Ghimj(index,1007)*K(index,istage,110)- Ghimj(index,1008)*K(index,istage,111) - Ghimj(index,1009)*K(index,istage,112)- Ghimj(index,1010)*K(index,istage,113)- Ghimj(index,1011)*K(index,istage,114)- Ghimj(index,1012)*K(index,istage,115)- Ghimj(index,1013)*K(index,istage,116)- Ghimj(index,1014)*K(index,istage,117) - Ghimj(index,1015)*K(index,istage,118)- Ghimj(index,1016)*K(index,istage,119)- Ghimj(index,1017)*K(index,istage,120)- Ghimj(index,1018)*K(index,istage,121)- Ghimj(index,1019)*K(index,istage,122)- Ghimj(index,1020)*K(index,istage,123) - Ghimj(index,1021)*K(index,istage,124)- Ghimj(index,1022)*K(index,istage,125);
K(index,istage,127) = K(index,istage,127)- Ghimj(index,1036)*K(index,istage,1)- Ghimj(index,1037)*K(index,istage,39)- Ghimj(index,1038)*K(index,istage,41)- Ghimj(index,1039)*K(index,istage,42)- Ghimj(index,1040)*K(index,istage,43)- Ghimj(index,1041)*K(index,istage,50) - Ghimj(index,1042)*K(index,istage,52)- Ghimj(index,1043)*K(index,istage,54)- Ghimj(index,1044)*K(index,istage,55)- Ghimj(index,1045)*K(index,istage,57)- Ghimj(index,1046)*K(index,istage,75)- Ghimj(index,1047)*K(index,istage,80)- Ghimj(index,1048) *K(index,istage,83)- Ghimj(index,1049)*K(index,istage,88)- Ghimj(index,1050)*K(index,istage,90)- Ghimj(index,1051)*K(index,istage,97)- Ghimj(index,1052)*K(index,istage,98)- Ghimj(index,1053)*K(index,istage,100)- Ghimj(index,1054)*K(index,istage,103) - Ghimj(index,1055)*K(index,istage,104)- Ghimj(index,1056)*K(index,istage,105)- Ghimj(index,1057)*K(index,istage,106)- Ghimj(index,1058)*K(index,istage,107)- Ghimj(index,1059)*K(index,istage,112)- Ghimj(index,1060)*K(index,istage,114) - Ghimj(index,1061)*K(index,istage,116)- Ghimj(index,1062)*K(index,istage,118)- Ghimj(index,1063)*K(index,istage,119)- Ghimj(index,1064)*K(index,istage,120)- Ghimj(index,1065)*K(index,istage,121)- Ghimj(index,1066)*K(index,istage,122) - Ghimj(index,1067)*K(index,istage,123)- Ghimj(index,1068)*K(index,istage,124)- Ghimj(index,1069)*K(index,istage,125)- Ghimj(index,1070)*K(index,istage,126);
K(index,istage,128) = K(index,istage,128)- Ghimj(index,1083)*K(index,istage,40)- Ghimj(index,1084)*K(index,istage,44)- Ghimj(index,1085)*K(index,istage,45)- Ghimj(index,1086)*K(index,istage,47)- Ghimj(index,1087)*K(index,istage,48)- Ghimj(index,1088)*K(index,istage,49) - Ghimj(index,1089)*K(index,istage,52)- Ghimj(index,1090)*K(index,istage,53)- Ghimj(index,1091)*K(index,istage,54)- Ghimj(index,1092)*K(index,istage,55)- Ghimj(index,1093)*K(index,istage,57)- Ghimj(index,1094)*K(index,istage,61)- Ghimj(index,1095) *K(index,istage,63)- Ghimj(index,1096)*K(index,istage,67)- Ghimj(index,1097)*K(index,istage,70)- Ghimj(index,1098)*K(index,istage,73)- Ghimj(index,1099)*K(index,istage,74)- Ghimj(index,1100)*K(index,istage,75)- Ghimj(index,1101)*K(index,istage,76) - Ghimj(index,1102)*K(index,istage,77)- Ghimj(index,1103)*K(index,istage,78)- Ghimj(index,1104)*K(index,istage,79)- Ghimj(index,1105)*K(index,istage,83)- Ghimj(index,1106)*K(index,istage,84)- Ghimj(index,1107)*K(index,istage,86)- Ghimj(index,1108) *K(index,istage,87)- Ghimj(index,1109)*K(index,istage,88)- Ghimj(index,1110)*K(index,istage,92)- Ghimj(index,1111)*K(index,istage,93)- Ghimj(index,1112)*K(index,istage,97)- Ghimj(index,1113)*K(index,istage,98)- Ghimj(index,1114)*K(index,istage,101) - Ghimj(index,1115)*K(index,istage,102)- Ghimj(index,1116)*K(index,istage,103)- Ghimj(index,1117)*K(index,istage,104)- Ghimj(index,1118)*K(index,istage,105)- Ghimj(index,1119)*K(index,istage,106)- Ghimj(index,1120)*K(index,istage,107) - Ghimj(index,1121)*K(index,istage,110)- Ghimj(index,1122)*K(index,istage,111)- Ghimj(index,1123)*K(index,istage,112)- Ghimj(index,1124)*K(index,istage,114)- Ghimj(index,1125)*K(index,istage,115)- Ghimj(index,1126)*K(index,istage,116) - Ghimj(index,1127)*K(index,istage,117)- Ghimj(index,1128)*K(index,istage,118)- Ghimj(index,1129)*K(index,istage,119)- Ghimj(index,1130)*K(index,istage,120)- Ghimj(index,1131)*K(index,istage,121)- Ghimj(index,1132)*K(index,istage,122) - Ghimj(index,1133)*K(index,istage,123)- Ghimj(index,1134)*K(index,istage,124)- Ghimj(index,1135)*K(index,istage,125)- Ghimj(index,1136)*K(index,istage,126)- Ghimj(index,1137)*K(index,istage,127);
K(index,istage,129) = K(index,istage,129)- Ghimj(index,1149)*K(index,istage,0)- Ghimj(index,1150)*K(index,istage,1)- Ghimj(index,1151)*K(index,istage,2)- Ghimj(index,1152)*K(index,istage,44)- Ghimj(index,1153)*K(index,istage,45)- Ghimj(index,1154)*K(index,istage,52)- Ghimj(index,1155) *K(index,istage,53)- Ghimj(index,1156)*K(index,istage,54)- Ghimj(index,1157)*K(index,istage,55)- Ghimj(index,1158)*K(index,istage,80)- Ghimj(index,1159)*K(index,istage,90)- Ghimj(index,1160)*K(index,istage,100)- Ghimj(index,1161)*K(index,istage,103) - Ghimj(index,1162)*K(index,istage,104)- Ghimj(index,1163)*K(index,istage,105)- Ghimj(index,1164)*K(index,istage,112)- Ghimj(index,1165)*K(index,istage,114)- Ghimj(index,1166)*K(index,istage,116)- Ghimj(index,1167)*K(index,istage,118) - Ghimj(index,1168)*K(index,istage,119)- Ghimj(index,1169)*K(index,istage,121)- Ghimj(index,1170)*K(index,istage,123)- Ghimj(index,1171)*K(index,istage,124)- Ghimj(index,1172)*K(index,istage,125)- Ghimj(index,1173)*K(index,istage,126) - Ghimj(index,1174)*K(index,istage,127)- Ghimj(index,1175)*K(index,istage,128);
K(index,istage,130) = K(index,istage,130)- Ghimj(index,1186)*K(index,istage,58)- Ghimj(index,1187)*K(index,istage,65)- Ghimj(index,1188)*K(index,istage,66)- Ghimj(index,1189)*K(index,istage,72)- Ghimj(index,1190)*K(index,istage,77)- Ghimj(index,1191)*K(index,istage,82) - Ghimj(index,1192)*K(index,istage,89)- Ghimj(index,1193)*K(index,istage,91)- Ghimj(index,1194)*K(index,istage,93)- Ghimj(index,1195)*K(index,istage,94)- Ghimj(index,1196)*K(index,istage,98)- Ghimj(index,1197)*K(index,istage,102)- Ghimj(index,1198) *K(index,istage,103)- Ghimj(index,1199)*K(index,istage,104)- Ghimj(index,1200)*K(index,istage,106)- Ghimj(index,1201)*K(index,istage,107)- Ghimj(index,1202)*K(index,istage,108)- Ghimj(index,1203)*K(index,istage,109)- Ghimj(index,1204)*K(index,istage,110) - Ghimj(index,1205)*K(index,istage,113)- Ghimj(index,1206)*K(index,istage,114)- Ghimj(index,1207)*K(index,istage,115)- Ghimj(index,1208)*K(index,istage,117)- Ghimj(index,1209)*K(index,istage,120)- Ghimj(index,1210)*K(index,istage,121) - Ghimj(index,1211)*K(index,istage,122)- Ghimj(index,1212)*K(index,istage,124)- Ghimj(index,1213)*K(index,istage,125)- Ghimj(index,1214)*K(index,istage,126)- Ghimj(index,1215)*K(index,istage,127)- Ghimj(index,1216)*K(index,istage,128) - Ghimj(index,1217)*K(index,istage,129);
K(index,istage,131) = K(index,istage,131)- Ghimj(index,1227)*K(index,istage,51)- Ghimj(index,1228)*K(index,istage,59)- Ghimj(index,1229)*K(index,istage,75)- Ghimj(index,1230)*K(index,istage,116)- Ghimj(index,1231)*K(index,istage,118)- Ghimj(index,1232)*K(index,istage,120) - Ghimj(index,1233)*K(index,istage,122)- Ghimj(index,1234)*K(index,istage,123)- Ghimj(index,1235)*K(index,istage,124)- Ghimj(index,1236)*K(index,istage,125)- Ghimj(index,1237)*K(index,istage,126)- Ghimj(index,1238)*K(index,istage,127) - Ghimj(index,1239)*K(index,istage,128)- Ghimj(index,1240)*K(index,istage,129)- Ghimj(index,1241)*K(index,istage,130);
K(index,istage,132) = K(index,istage,132)- Ghimj(index,1250)*K(index,istage,105)- Ghimj(index,1251)*K(index,istage,114)- Ghimj(index,1252)*K(index,istage,118)- Ghimj(index,1253)*K(index,istage,123)- Ghimj(index,1254)*K(index,istage,124)- Ghimj(index,1255)*K(index,istage,125) - Ghimj(index,1256)*K(index,istage,126)- Ghimj(index,1257)*K(index,istage,127)- Ghimj(index,1258)*K(index,istage,128)- Ghimj(index,1259)*K(index,istage,129)- Ghimj(index,1260)*K(index,istage,130)- Ghimj(index,1261)*K(index,istage,131);
K(index,istage,133) = K(index,istage,133)- Ghimj(index,1269)*K(index,istage,59)- Ghimj(index,1270)*K(index,istage,60)- Ghimj(index,1271)*K(index,istage,70)- Ghimj(index,1272)*K(index,istage,76)- Ghimj(index,1273)*K(index,istage,84)- Ghimj(index,1274)*K(index,istage,87) - Ghimj(index,1275)*K(index,istage,92)- Ghimj(index,1276)*K(index,istage,93)- Ghimj(index,1277)*K(index,istage,94)- Ghimj(index,1278)*K(index,istage,99)- Ghimj(index,1279)*K(index,istage,102)- Ghimj(index,1280)*K(index,istage,109)- Ghimj(index,1281) *K(index,istage,111)- Ghimj(index,1282)*K(index,istage,113)- Ghimj(index,1283)*K(index,istage,115)- Ghimj(index,1284)*K(index,istage,117)- Ghimj(index,1285)*K(index,istage,120)- Ghimj(index,1286)*K(index,istage,121)- Ghimj(index,1287)*K(index,istage,122) - Ghimj(index,1288)*K(index,istage,124)- Ghimj(index,1289)*K(index,istage,125)- Ghimj(index,1290)*K(index,istage,126)- Ghimj(index,1291)*K(index,istage,127)- Ghimj(index,1292)*K(index,istage,128)- Ghimj(index,1293)*K(index,istage,129) - Ghimj(index,1294)*K(index,istage,130)- Ghimj(index,1295)*K(index,istage,131)- Ghimj(index,1296)*K(index,istage,132);
K(index,istage,134) = K(index,istage,134)- Ghimj(index,1303)*K(index,istage,39)- Ghimj(index,1304)*K(index,istage,41)- Ghimj(index,1305)*K(index,istage,42)- Ghimj(index,1306)*K(index,istage,43)- Ghimj(index,1307)*K(index,istage,51)- Ghimj(index,1308)*K(index,istage,75) - Ghimj(index,1309)*K(index,istage,112)- Ghimj(index,1310)*K(index,istage,116)- Ghimj(index,1311)*K(index,istage,120)- Ghimj(index,1312)*K(index,istage,122)- Ghimj(index,1313)*K(index,istage,123)- Ghimj(index,1314)*K(index,istage,124) - Ghimj(index,1315)*K(index,istage,125)- Ghimj(index,1316)*K(index,istage,126)- Ghimj(index,1317)*K(index,istage,127)- Ghimj(index,1318)*K(index,istage,128)- Ghimj(index,1319)*K(index,istage,129)- Ghimj(index,1320)*K(index,istage,130) - Ghimj(index,1321)*K(index,istage,131)- Ghimj(index,1322)*K(index,istage,132)- Ghimj(index,1323)*K(index,istage,133);
K(index,istage,135) = K(index,istage,135)- Ghimj(index,1329)*K(index,istage,0)- Ghimj(index,1330)*K(index,istage,50)- Ghimj(index,1331)*K(index,istage,58)- Ghimj(index,1332)*K(index,istage,59)- Ghimj(index,1333)*K(index,istage,62)- Ghimj(index,1334)*K(index,istage,64) - Ghimj(index,1335)*K(index,istage,73)- Ghimj(index,1336)*K(index,istage,76)- Ghimj(index,1337)*K(index,istage,77)- Ghimj(index,1338)*K(index,istage,83)- Ghimj(index,1339)*K(index,istage,87)- Ghimj(index,1340)*K(index,istage,91)- Ghimj(index,1341) *K(index,istage,92)- Ghimj(index,1342)*K(index,istage,93)- Ghimj(index,1343)*K(index,istage,94)- Ghimj(index,1344)*K(index,istage,99)- Ghimj(index,1345)*K(index,istage,101)- Ghimj(index,1346)*K(index,istage,102)- Ghimj(index,1347)*K(index,istage,105) - Ghimj(index,1348)*K(index,istage,106)- Ghimj(index,1349)*K(index,istage,109)- Ghimj(index,1350)*K(index,istage,111)- Ghimj(index,1351)*K(index,istage,113)- Ghimj(index,1352)*K(index,istage,114)- Ghimj(index,1353)*K(index,istage,115) - Ghimj(index,1354)*K(index,istage,116)- Ghimj(index,1355)*K(index,istage,117)- Ghimj(index,1356)*K(index,istage,119)- Ghimj(index,1357)*K(index,istage,121)- Ghimj(index,1358)*K(index,istage,123)- Ghimj(index,1359)*K(index,istage,124) - Ghimj(index,1360)*K(index,istage,125)- Ghimj(index,1361)*K(index,istage,126)- Ghimj(index,1362)*K(index,istage,127)- Ghimj(index,1363)*K(index,istage,128)- Ghimj(index,1364)*K(index,istage,129)- Ghimj(index,1365)*K(index,istage,130) - Ghimj(index,1366)*K(index,istage,131)- Ghimj(index,1367)*K(index,istage,132)- Ghimj(index,1368)*K(index,istage,133)- Ghimj(index,1369)*K(index,istage,134);
K(index,istage,136) = K(index,istage,136)- Ghimj(index,1374)*K(index,istage,73)- Ghimj(index,1375)*K(index,istage,83)- Ghimj(index,1376)*K(index,istage,101)- Ghimj(index,1377)*K(index,istage,105)- Ghimj(index,1378)*K(index,istage,106)- Ghimj(index,1379)*K(index,istage,107) - Ghimj(index,1380)*K(index,istage,114)- Ghimj(index,1381)*K(index,istage,116)- Ghimj(index,1382)*K(index,istage,117)- Ghimj(index,1383)*K(index,istage,119)- Ghimj(index,1384)*K(index,istage,121)- Ghimj(index,1385)*K(index,istage,123) - Ghimj(index,1386)*K(index,istage,124)- Ghimj(index,1387)*K(index,istage,125)- Ghimj(index,1388)*K(index,istage,126)- Ghimj(index,1389)*K(index,istage,127)- Ghimj(index,1390)*K(index,istage,128)- Ghimj(index,1391)*K(index,istage,129) - Ghimj(index,1392)*K(index,istage,130)- Ghimj(index,1393)*K(index,istage,131)- Ghimj(index,1394)*K(index,istage,132)- Ghimj(index,1395)*K(index,istage,133)- Ghimj(index,1396)*K(index,istage,134)- Ghimj(index,1397)*K(index,istage,135);
K(index,istage,137) = K(index,istage,137)- Ghimj(index,1401)*K(index,istage,46)- Ghimj(index,1402)*K(index,istage,56)- Ghimj(index,1403)*K(index,istage,62)- Ghimj(index,1404)*K(index,istage,65)- Ghimj(index,1405)*K(index,istage,66)- Ghimj(index,1406)*K(index,istage,69) - Ghimj(index,1407)*K(index,istage,71)- Ghimj(index,1408)*K(index,istage,73)- Ghimj(index,1409)*K(index,istage,78)- Ghimj(index,1410)*K(index,istage,79)- Ghimj(index,1411)*K(index,istage,81)- Ghimj(index,1412)*K(index,istage,82)- Ghimj(index,1413) *K(index,istage,87)- Ghimj(index,1414)*K(index,istage,88)- Ghimj(index,1415)*K(index,istage,89)- Ghimj(index,1416)*K(index,istage,91)- Ghimj(index,1417)*K(index,istage,92)- Ghimj(index,1418)*K(index,istage,93)- Ghimj(index,1419)*K(index,istage,94) - Ghimj(index,1420)*K(index,istage,96)- Ghimj(index,1421)*K(index,istage,99)- Ghimj(index,1422)*K(index,istage,102)- Ghimj(index,1423)*K(index,istage,103)- Ghimj(index,1424)*K(index,istage,104)- Ghimj(index,1425)*K(index,istage,106) - Ghimj(index,1426)*K(index,istage,107)- Ghimj(index,1427)*K(index,istage,108)- Ghimj(index,1428)*K(index,istage,109)- Ghimj(index,1429)*K(index,istage,110)- Ghimj(index,1430)*K(index,istage,111)- Ghimj(index,1431)*K(index,istage,113) - Ghimj(index,1432)*K(index,istage,114)- Ghimj(index,1433)*K(index,istage,115)- Ghimj(index,1434)*K(index,istage,117)- Ghimj(index,1435)*K(index,istage,119)- Ghimj(index,1436)*K(index,istage,121)- Ghimj(index,1437)*K(index,istage,122) - Ghimj(index,1438)*K(index,istage,124)- Ghimj(index,1439)*K(index,istage,125)- Ghimj(index,1440)*K(index,istage,126)- Ghimj(index,1441)*K(index,istage,127)- Ghimj(index,1442)*K(index,istage,128)- Ghimj(index,1443)*K(index,istage,129) - Ghimj(index,1444)*K(index,istage,130)- Ghimj(index,1445)*K(index,istage,131)- Ghimj(index,1446)*K(index,istage,132)- Ghimj(index,1447)*K(index,istage,133)- Ghimj(index,1448)*K(index,istage,134)- Ghimj(index,1449)*K(index,istage,135) - Ghimj(index,1450)*K(index,istage,136);
K(index,istage,138) = K(index,istage,138)- Ghimj(index,1453)*K(index,istage,83)- Ghimj(index,1454)*K(index,istage,88)- Ghimj(index,1455)*K(index,istage,97)- Ghimj(index,1456)*K(index,istage,98)- Ghimj(index,1457)*K(index,istage,103)- Ghimj(index,1458)*K(index,istage,104) - Ghimj(index,1459)*K(index,istage,105)- Ghimj(index,1460)*K(index,istage,106)- Ghimj(index,1461)*K(index,istage,107)- Ghimj(index,1462)*K(index,istage,112)- Ghimj(index,1463)*K(index,istage,114)- Ghimj(index,1464)*K(index,istage,116) - Ghimj(index,1465)*K(index,istage,118)- Ghimj(index,1466)*K(index,istage,119)- Ghimj(index,1467)*K(index,istage,120)- Ghimj(index,1468)*K(index,istage,121)- Ghimj(index,1469)*K(index,istage,122)- Ghimj(index,1470)*K(index,istage,123) - Ghimj(index,1471)*K(index,istage,124)- Ghimj(index,1472)*K(index,istage,125)- Ghimj(index,1473)*K(index,istage,126)- Ghimj(index,1474)*K(index,istage,127)- Ghimj(index,1475)*K(index,istage,128)- Ghimj(index,1476)*K(index,istage,129) - Ghimj(index,1477)*K(index,istage,130)- Ghimj(index,1478)*K(index,istage,131)- Ghimj(index,1479)*K(index,istage,132)- Ghimj(index,1480)*K(index,istage,133)- Ghimj(index,1481)*K(index,istage,134)- Ghimj(index,1482)*K(index,istage,135) - Ghimj(index,1483)*K(index,istage,136)- Ghimj(index,1484)*K(index,istage,137);
K(index,istage,138) = K(index,istage,138)/ Ghimj(index,1485);
K(index,istage,137) = (K(index,istage,137)- Ghimj(index,1452)*K(index,istage,138))/(Ghimj(index,1451));
K(index,istage,136) = (K(index,istage,136)- Ghimj(index,1399)*K(index,istage,137)- Ghimj(index,1400)*K(index,istage,138))/(Ghimj(index,1398));
K(index,istage,135) = (K(index,istage,135)- Ghimj(index,1371)*K(index,istage,136)- Ghimj(index,1372)*K(index,istage,137)- Ghimj(index,1373)*K(index,istage,138))/(Ghimj(index,1370));
K(index,istage,134) = (K(index,istage,134)- Ghimj(index,1325)*K(index,istage,135)- Ghimj(index,1326)*K(index,istage,136)- Ghimj(index,1327)*K(index,istage,137)- Ghimj(index,1328)*K(index,istage,138))/(Ghimj(index,1324));
K(index,istage,133) = (K(index,istage,133)- Ghimj(index,1298)*K(index,istage,134)- Ghimj(index,1299)*K(index,istage,135)- Ghimj(index,1300)*K(index,istage,136)- Ghimj(index,1301)*K(index,istage,137)- Ghimj(index,1302)*K(index,istage,138))/(Ghimj(index,1297));
K(index,istage,132) = (K(index,istage,132)- Ghimj(index,1263)*K(index,istage,133)- Ghimj(index,1264)*K(index,istage,134)- Ghimj(index,1265)*K(index,istage,135)- Ghimj(index,1266)*K(index,istage,136)- Ghimj(index,1267)*K(index,istage,137)- Ghimj(index,1268) *K(index,istage,138))/(Ghimj(index,1262));
K(index,istage,131) = (K(index,istage,131)- Ghimj(index,1243)*K(index,istage,132)- Ghimj(index,1244)*K(index,istage,133)- Ghimj(index,1245)*K(index,istage,134)- Ghimj(index,1246)*K(index,istage,135)- Ghimj(index,1247)*K(index,istage,136)- Ghimj(index,1248)*K(index,istage,137) - Ghimj(index,1249)*K(index,istage,138))/(Ghimj(index,1242));
K(index,istage,130) = (K(index,istage,130)- Ghimj(index,1219)*K(index,istage,131)- Ghimj(index,1220)*K(index,istage,132)- Ghimj(index,1221)*K(index,istage,133)- Ghimj(index,1222)*K(index,istage,134)- Ghimj(index,1223)*K(index,istage,135)- Ghimj(index,1224)*K(index,istage,136) - Ghimj(index,1225)*K(index,istage,137)- Ghimj(index,1226)*K(index,istage,138))/(Ghimj(index,1218));
K(index,istage,129) = (K(index,istage,129)- Ghimj(index,1177)*K(index,istage,130)- Ghimj(index,1178)*K(index,istage,131)- Ghimj(index,1179)*K(index,istage,132)- Ghimj(index,1180)*K(index,istage,133)- Ghimj(index,1181)*K(index,istage,134)- Ghimj(index,1182)*K(index,istage,135) - Ghimj(index,1183)*K(index,istage,136)- Ghimj(index,1184)*K(index,istage,137)- Ghimj(index,1185)*K(index,istage,138))/(Ghimj(index,1176));
K(index,istage,128) = (K(index,istage,128)- Ghimj(index,1139)*K(index,istage,129)- Ghimj(index,1140)*K(index,istage,130)- Ghimj(index,1141)*K(index,istage,131)- Ghimj(index,1142)*K(index,istage,132)- Ghimj(index,1143)*K(index,istage,133)- Ghimj(index,1144)*K(index,istage,134) - Ghimj(index,1145)*K(index,istage,135)- Ghimj(index,1146)*K(index,istage,136)- Ghimj(index,1147)*K(index,istage,137)- Ghimj(index,1148)*K(index,istage,138))/(Ghimj(index,1138));
K(index,istage,127) = (K(index,istage,127)- Ghimj(index,1072)*K(index,istage,128)- Ghimj(index,1073)*K(index,istage,129)- Ghimj(index,1074)*K(index,istage,130)- Ghimj(index,1075)*K(index,istage,131)- Ghimj(index,1076)*K(index,istage,132)- Ghimj(index,1077)*K(index,istage,133) - Ghimj(index,1078)*K(index,istage,134)- Ghimj(index,1079)*K(index,istage,135)- Ghimj(index,1080)*K(index,istage,136)- Ghimj(index,1081)*K(index,istage,137)- Ghimj(index,1082)*K(index,istage,138))/(Ghimj(index,1071));
K(index,istage,126) = (K(index,istage,126)- Ghimj(index,1024)*K(index,istage,127)- Ghimj(index,1025)*K(index,istage,128)- Ghimj(index,1026)*K(index,istage,129)- Ghimj(index,1027)*K(index,istage,130)- Ghimj(index,1028)*K(index,istage,131)- Ghimj(index,1029)*K(index,istage,132) - Ghimj(index,1030)*K(index,istage,133)- Ghimj(index,1031)*K(index,istage,134)- Ghimj(index,1032)*K(index,istage,135)- Ghimj(index,1033)*K(index,istage,136)- Ghimj(index,1034)*K(index,istage,137)- Ghimj(index,1035)*K(index,istage,138)) /(Ghimj(index,1023));
K(index,istage,125) = (K(index,istage,125)- Ghimj(index,935)*K(index,istage,126)- Ghimj(index,936)*K(index,istage,127)- Ghimj(index,937)*K(index,istage,128)- Ghimj(index,938)*K(index,istage,129)- Ghimj(index,939)*K(index,istage,130)- Ghimj(index,940)*K(index,istage,131) - Ghimj(index,941)*K(index,istage,132)- Ghimj(index,942)*K(index,istage,133)- Ghimj(index,943)*K(index,istage,134)- Ghimj(index,944)*K(index,istage,135)- Ghimj(index,945)*K(index,istage,136)- Ghimj(index,946)*K(index,istage,137)- Ghimj(index,947) *K(index,istage,138))/(Ghimj(index,934));
K(index,istage,124) = (K(index,istage,124)- Ghimj(index,897)*K(index,istage,125)- Ghimj(index,898)*K(index,istage,126)- Ghimj(index,899)*K(index,istage,127)- Ghimj(index,900)*K(index,istage,128)- Ghimj(index,901)*K(index,istage,129)- Ghimj(index,902)*K(index,istage,130) - Ghimj(index,903)*K(index,istage,131)- Ghimj(index,904)*K(index,istage,132)- Ghimj(index,905)*K(index,istage,133)- Ghimj(index,906)*K(index,istage,135)- Ghimj(index,907)*K(index,istage,136)- Ghimj(index,908)*K(index,istage,137)- Ghimj(index,909) *K(index,istage,138))/(Ghimj(index,896));
K(index,istage,123) = (K(index,istage,123)- Ghimj(index,870)*K(index,istage,124)- Ghimj(index,871)*K(index,istage,125)- Ghimj(index,872)*K(index,istage,126)- Ghimj(index,873)*K(index,istage,127)- Ghimj(index,874)*K(index,istage,128)- Ghimj(index,875)*K(index,istage,129) - Ghimj(index,876)*K(index,istage,130)- Ghimj(index,877)*K(index,istage,131)- Ghimj(index,878)*K(index,istage,132)- Ghimj(index,879)*K(index,istage,133)- Ghimj(index,880)*K(index,istage,134)- Ghimj(index,881)*K(index,istage,135)- Ghimj(index,882) *K(index,istage,136)- Ghimj(index,883)*K(index,istage,137)- Ghimj(index,884)*K(index,istage,138))/(Ghimj(index,869));
K(index,istage,122) = (K(index,istage,122)- Ghimj(index,848)*K(index,istage,124)- Ghimj(index,849)*K(index,istage,125)- Ghimj(index,850)*K(index,istage,126)- Ghimj(index,851)*K(index,istage,127)- Ghimj(index,852)*K(index,istage,128)- Ghimj(index,853)*K(index,istage,129) - Ghimj(index,854)*K(index,istage,130)- Ghimj(index,855)*K(index,istage,131)- Ghimj(index,856)*K(index,istage,133)- Ghimj(index,857)*K(index,istage,135)- Ghimj(index,858)*K(index,istage,136)- Ghimj(index,859)*K(index,istage,137)- Ghimj(index,860) *K(index,istage,138))/(Ghimj(index,847));
K(index,istage,121) = (K(index,istage,121)- Ghimj(index,822)*K(index,istage,124)- Ghimj(index,823)*K(index,istage,125)- Ghimj(index,824)*K(index,istage,126)- Ghimj(index,825)*K(index,istage,127)- Ghimj(index,826)*K(index,istage,129)- Ghimj(index,827)*K(index,istage,133) - Ghimj(index,828)*K(index,istage,135)- Ghimj(index,829)*K(index,istage,136)- Ghimj(index,830)*K(index,istage,137))/(Ghimj(index,821));
K(index,istage,120) = (K(index,istage,120)- Ghimj(index,788)*K(index,istage,122)- Ghimj(index,789)*K(index,istage,124)- Ghimj(index,790)*K(index,istage,126)- Ghimj(index,791)*K(index,istage,127)- Ghimj(index,792)*K(index,istage,128)- Ghimj(index,793)*K(index,istage,130) - Ghimj(index,794)*K(index,istage,133)- Ghimj(index,795)*K(index,istage,135)- Ghimj(index,796)*K(index,istage,136)- Ghimj(index,797)*K(index,istage,137))/(Ghimj(index,787));
K(index,istage,119) = (K(index,istage,119)- Ghimj(index,768)*K(index,istage,121)- Ghimj(index,769)*K(index,istage,124)- Ghimj(index,770)*K(index,istage,125)- Ghimj(index,771)*K(index,istage,126)- Ghimj(index,772)*K(index,istage,127)- Ghimj(index,773)*K(index,istage,129) - Ghimj(index,774)*K(index,istage,133)- Ghimj(index,775)*K(index,istage,136)- Ghimj(index,776)*K(index,istage,137))/(Ghimj(index,767));
K(index,istage,118) = (K(index,istage,118)- Ghimj(index,746)*K(index,istage,123)- Ghimj(index,747)*K(index,istage,125)- Ghimj(index,748)*K(index,istage,126)- Ghimj(index,749)*K(index,istage,127)- Ghimj(index,750)*K(index,istage,128)- Ghimj(index,751)*K(index,istage,129) - Ghimj(index,752)*K(index,istage,131)- Ghimj(index,753)*K(index,istage,132)- Ghimj(index,754)*K(index,istage,134)- Ghimj(index,755)*K(index,istage,135)- Ghimj(index,756)*K(index,istage,137)- Ghimj(index,757)*K(index,istage,138))/(Ghimj(index,745));
K(index,istage,117) = (K(index,istage,117)- Ghimj(index,732)*K(index,istage,121)- Ghimj(index,733)*K(index,istage,124)- Ghimj(index,734)*K(index,istage,125)- Ghimj(index,735)*K(index,istage,126)- Ghimj(index,736)*K(index,istage,127)- Ghimj(index,737)*K(index,istage,129) - Ghimj(index,738)*K(index,istage,133)- Ghimj(index,739)*K(index,istage,136)- Ghimj(index,740)*K(index,istage,137))/(Ghimj(index,731));
K(index,istage,116) = (K(index,istage,116)- Ghimj(index,715)*K(index,istage,123)- Ghimj(index,716)*K(index,istage,127)- Ghimj(index,717)*K(index,istage,128)- Ghimj(index,718)*K(index,istage,131)- Ghimj(index,719)*K(index,istage,134)- Ghimj(index,720)*K(index,istage,135) - Ghimj(index,721)*K(index,istage,138))/(Ghimj(index,714));
K(index,istage,115) = (K(index,istage,115)- Ghimj(index,707)*K(index,istage,124)- Ghimj(index,708)*K(index,istage,126)- Ghimj(index,709)*K(index,istage,127)- Ghimj(index,710)*K(index,istage,129)- Ghimj(index,711)*K(index,istage,133)- Ghimj(index,712)*K(index,istage,136) - Ghimj(index,713)*K(index,istage,137))/(Ghimj(index,706));
K(index,istage,114) = (K(index,istage,114)- Ghimj(index,698)*K(index,istage,126)- Ghimj(index,699)*K(index,istage,127)- Ghimj(index,700)*K(index,istage,129)- Ghimj(index,701)*K(index,istage,132)- Ghimj(index,702)*K(index,istage,136))/(Ghimj(index,697));
K(index,istage,113) = (K(index,istage,113)- Ghimj(index,690)*K(index,istage,124)- Ghimj(index,691)*K(index,istage,125)- Ghimj(index,692)*K(index,istage,126)- Ghimj(index,693)*K(index,istage,133)- Ghimj(index,694)*K(index,istage,135)- Ghimj(index,695)*K(index,istage,136) - Ghimj(index,696)*K(index,istage,137))/(Ghimj(index,689));
K(index,istage,112) = (K(index,istage,112)- Ghimj(index,678)*K(index,istage,116)- Ghimj(index,679)*K(index,istage,123)- Ghimj(index,680)*K(index,istage,126)- Ghimj(index,681)*K(index,istage,128)- Ghimj(index,682)*K(index,istage,134)- Ghimj(index,683)*K(index,istage,137) - Ghimj(index,684)*K(index,istage,138))/(Ghimj(index,677));
K(index,istage,111) = (K(index,istage,111)- Ghimj(index,670)*K(index,istage,115)- Ghimj(index,671)*K(index,istage,124)- Ghimj(index,672)*K(index,istage,125)- Ghimj(index,673)*K(index,istage,126)- Ghimj(index,674)*K(index,istage,133)- Ghimj(index,675)*K(index,istage,136) - Ghimj(index,676)*K(index,istage,137))/(Ghimj(index,669));
K(index,istage,110) = (K(index,istage,110)- Ghimj(index,660)*K(index,istage,124)- Ghimj(index,661)*K(index,istage,125)- Ghimj(index,662)*K(index,istage,126)- Ghimj(index,663)*K(index,istage,133)- Ghimj(index,664)*K(index,istage,136)- Ghimj(index,665)*K(index,istage,137)) /(Ghimj(index,659));
K(index,istage,109) = (K(index,istage,109)- Ghimj(index,649)*K(index,istage,124)- Ghimj(index,650)*K(index,istage,125)- Ghimj(index,651)*K(index,istage,126)- Ghimj(index,652)*K(index,istage,133)- Ghimj(index,653)*K(index,istage,136)- Ghimj(index,654)*K(index,istage,137)) /(Ghimj(index,648));
K(index,istage,108) = (K(index,istage,108)- Ghimj(index,637)*K(index,istage,109)- Ghimj(index,638)*K(index,istage,113)- Ghimj(index,639)*K(index,istage,115)- Ghimj(index,640)*K(index,istage,124)- Ghimj(index,641)*K(index,istage,125)- Ghimj(index,642)*K(index,istage,126) - Ghimj(index,643)*K(index,istage,133)- Ghimj(index,644)*K(index,istage,135)- Ghimj(index,645)*K(index,istage,136)- Ghimj(index,646)*K(index,istage,137))/(Ghimj(index,636));
K(index,istage,107) = (K(index,istage,107)- Ghimj(index,627)*K(index,istage,124)- Ghimj(index,628)*K(index,istage,126)- Ghimj(index,629)*K(index,istage,136))/(Ghimj(index,626));
K(index,istage,106) = (K(index,istage,106)- Ghimj(index,623)*K(index,istage,124)- Ghimj(index,624)*K(index,istage,126)- Ghimj(index,625)*K(index,istage,136))/(Ghimj(index,622));
K(index,istage,105) = (K(index,istage,105)- Ghimj(index,617)*K(index,istage,128)- Ghimj(index,618)*K(index,istage,129)- Ghimj(index,619)*K(index,istage,132)- Ghimj(index,620)*K(index,istage,135)- Ghimj(index,621)*K(index,istage,138))/(Ghimj(index,616));
K(index,istage,104) = (K(index,istage,104)- Ghimj(index,611)*K(index,istage,125)- Ghimj(index,612)*K(index,istage,126)- Ghimj(index,613)*K(index,istage,127)- Ghimj(index,614)*K(index,istage,129)- Ghimj(index,615)*K(index,istage,137))/(Ghimj(index,610));
K(index,istage,103) = (K(index,istage,103)- Ghimj(index,606)*K(index,istage,124)- Ghimj(index,607)*K(index,istage,126)- Ghimj(index,608)*K(index,istage,127)- Ghimj(index,609)*K(index,istage,129))/(Ghimj(index,605));
K(index,istage,102) = (K(index,istage,102)- Ghimj(index,601)*K(index,istage,125)- Ghimj(index,602)*K(index,istage,126)- Ghimj(index,603)*K(index,istage,133)- Ghimj(index,604)*K(index,istage,137))/(Ghimj(index,600));
K(index,istage,101) = (K(index,istage,101)- Ghimj(index,587)*K(index,istage,105)- Ghimj(index,588)*K(index,istage,114)- Ghimj(index,589)*K(index,istage,116)- Ghimj(index,590)*K(index,istage,119)- Ghimj(index,591)*K(index,istage,123)- Ghimj(index,592)*K(index,istage,126) - Ghimj(index,593)*K(index,istage,128)- Ghimj(index,594)*K(index,istage,130)- Ghimj(index,595)*K(index,istage,135)- Ghimj(index,596)*K(index,istage,136)- Ghimj(index,597)*K(index,istage,138))/(Ghimj(index,586));
K(index,istage,100) = (K(index,istage,100)- Ghimj(index,574)*K(index,istage,105)- Ghimj(index,575)*K(index,istage,112)- Ghimj(index,576)*K(index,istage,116)- Ghimj(index,577)*K(index,istage,118)- Ghimj(index,578)*K(index,istage,123)- Ghimj(index,579)*K(index,istage,126) - Ghimj(index,580)*K(index,istage,127)- Ghimj(index,581)*K(index,istage,129)- Ghimj(index,582)*K(index,istage,132)- Ghimj(index,583)*K(index,istage,134)- Ghimj(index,584)*K(index,istage,138))/(Ghimj(index,573));
K(index,istage,99) = (K(index,istage,99)- Ghimj(index,566)*K(index,istage,102)- Ghimj(index,567)*K(index,istage,111)- Ghimj(index,568)*K(index,istage,125)- Ghimj(index,569)*K(index,istage,126)- Ghimj(index,570)*K(index,istage,133)- Ghimj(index,571)*K(index,istage,137)) /(Ghimj(index,565));
K(index,istage,98) = (K(index,istage,98)- Ghimj(index,558)*K(index,istage,107)- Ghimj(index,559)*K(index,istage,120)- Ghimj(index,560)*K(index,istage,124)- Ghimj(index,561)*K(index,istage,126)- Ghimj(index,562)*K(index,istage,127))/(Ghimj(index,557));
K(index,istage,97) = (K(index,istage,97)- Ghimj(index,550)*K(index,istage,98)- Ghimj(index,551)*K(index,istage,120)- Ghimj(index,552)*K(index,istage,122)- Ghimj(index,553)*K(index,istage,126)- Ghimj(index,554)*K(index,istage,127)- Ghimj(index,555)*K(index,istage,130)- Ghimj(index,556) *K(index,istage,137))/(Ghimj(index,549));
K(index,istage,96) = (K(index,istage,96)- Ghimj(index,539)*K(index,istage,107)- Ghimj(index,540)*K(index,istage,108)- Ghimj(index,541)*K(index,istage,109)- Ghimj(index,542)*K(index,istage,110)- Ghimj(index,543)*K(index,istage,113)- Ghimj(index,544)*K(index,istage,124) - Ghimj(index,545)*K(index,istage,125)- Ghimj(index,546)*K(index,istage,126)- Ghimj(index,547)*K(index,istage,133)- Ghimj(index,548)*K(index,istage,137))/(Ghimj(index,538));
K(index,istage,95) = (K(index,istage,95)- Ghimj(index,515)*K(index,istage,96)- Ghimj(index,516)*K(index,istage,98)- Ghimj(index,517)*K(index,istage,103)- Ghimj(index,518)*K(index,istage,106)- Ghimj(index,519)*K(index,istage,107)- Ghimj(index,520)*K(index,istage,109)- Ghimj(index,521) *K(index,istage,110)- Ghimj(index,522)*K(index,istage,113)- Ghimj(index,523)*K(index,istage,119)- Ghimj(index,524)*K(index,istage,121)- Ghimj(index,525)*K(index,istage,124)- Ghimj(index,526)*K(index,istage,125)- Ghimj(index,527)*K(index,istage,126) - Ghimj(index,528)*K(index,istage,127)- Ghimj(index,529)*K(index,istage,129)- Ghimj(index,530)*K(index,istage,130)- Ghimj(index,531)*K(index,istage,133)- Ghimj(index,532)*K(index,istage,135)- Ghimj(index,533)*K(index,istage,136)- Ghimj(index,534) *K(index,istage,137))/(Ghimj(index,514));
K(index,istage,94) = (K(index,istage,94)- Ghimj(index,506)*K(index,istage,125)- Ghimj(index,507)*K(index,istage,126)- Ghimj(index,508)*K(index,istage,133)- Ghimj(index,509)*K(index,istage,137))/(Ghimj(index,505));
K(index,istage,93) = (K(index,istage,93)- Ghimj(index,498)*K(index,istage,125)- Ghimj(index,499)*K(index,istage,126)- Ghimj(index,500)*K(index,istage,133)- Ghimj(index,501)*K(index,istage,137))/(Ghimj(index,497));
K(index,istage,92) = (K(index,istage,92)- Ghimj(index,490)*K(index,istage,124)- Ghimj(index,491)*K(index,istage,126)- Ghimj(index,492)*K(index,istage,133)- Ghimj(index,493)*K(index,istage,135)- Ghimj(index,494)*K(index,istage,137))/(Ghimj(index,489));
K(index,istage,91) = (K(index,istage,91)- Ghimj(index,482)*K(index,istage,106)- Ghimj(index,483)*K(index,istage,109)- Ghimj(index,484)*K(index,istage,126)- Ghimj(index,485)*K(index,istage,133)- Ghimj(index,486)*K(index,istage,136))/(Ghimj(index,481));
K(index,istage,90) = (K(index,istage,90)- Ghimj(index,470)*K(index,istage,100)- Ghimj(index,471)*K(index,istage,105)- Ghimj(index,472)*K(index,istage,112)- Ghimj(index,473)*K(index,istage,116)- Ghimj(index,474)*K(index,istage,118)- Ghimj(index,475)*K(index,istage,123) - Ghimj(index,476)*K(index,istage,127)- Ghimj(index,477)*K(index,istage,129)- Ghimj(index,478)*K(index,istage,132)- Ghimj(index,479)*K(index,istage,134)- Ghimj(index,480)*K(index,istage,138))/(Ghimj(index,469));
K(index,istage,89) = (K(index,istage,89)- Ghimj(index,458)*K(index,istage,93)- Ghimj(index,459)*K(index,istage,94)- Ghimj(index,460)*K(index,istage,102)- Ghimj(index,461)*K(index,istage,107)- Ghimj(index,462)*K(index,istage,109)- Ghimj(index,463)*K(index,istage,113)- Ghimj(index,464) *K(index,istage,117)- Ghimj(index,465)*K(index,istage,124)- Ghimj(index,466)*K(index,istage,125)- Ghimj(index,467)*K(index,istage,126))/(Ghimj(index,457));
K(index,istage,88) = (K(index,istage,88)- Ghimj(index,451)*K(index,istage,103)- Ghimj(index,452)*K(index,istage,106)- Ghimj(index,453)*K(index,istage,124)- Ghimj(index,454)*K(index,istage,126)- Ghimj(index,455)*K(index,istage,127)- Ghimj(index,456)*K(index,istage,137)) /(Ghimj(index,450));
K(index,istage,87) = (K(index,istage,87)- Ghimj(index,445)*K(index,istage,92)- Ghimj(index,446)*K(index,istage,124)- Ghimj(index,447)*K(index,istage,126)- Ghimj(index,448)*K(index,istage,135)- Ghimj(index,449)*K(index,istage,137))/(Ghimj(index,444));
K(index,istage,86) = (K(index,istage,86)- Ghimj(index,437)*K(index,istage,93)- Ghimj(index,438)*K(index,istage,125)- Ghimj(index,439)*K(index,istage,126)- Ghimj(index,440)*K(index,istage,133)- Ghimj(index,441)*K(index,istage,137))/(Ghimj(index,436));
K(index,istage,85) = (K(index,istage,85)- Ghimj(index,428)*K(index,istage,102)- Ghimj(index,429)*K(index,istage,111)- Ghimj(index,430)*K(index,istage,125)- Ghimj(index,431)*K(index,istage,126)- Ghimj(index,432)*K(index,istage,133)- Ghimj(index,433)*K(index,istage,137)) /(Ghimj(index,427));
K(index,istage,84) = (K(index,istage,84)- Ghimj(index,422)*K(index,istage,92)- Ghimj(index,423)*K(index,istage,124)- Ghimj(index,424)*K(index,istage,135)- Ghimj(index,425)*K(index,istage,137))/(Ghimj(index,421));
K(index,istage,83) = (K(index,istage,83)- Ghimj(index,417)*K(index,istage,128)- Ghimj(index,418)*K(index,istage,135)- Ghimj(index,419)*K(index,istage,136)- Ghimj(index,420)*K(index,istage,138))/(Ghimj(index,416));
K(index,istage,82) = (K(index,istage,82)- Ghimj(index,413)*K(index,istage,113)- Ghimj(index,414)*K(index,istage,126)- Ghimj(index,415)*K(index,istage,137))/(Ghimj(index,412));
K(index,istage,81) = (K(index,istage,81)- Ghimj(index,406)*K(index,istage,114)- Ghimj(index,407)*K(index,istage,124)- Ghimj(index,408)*K(index,istage,126)- Ghimj(index,409)*K(index,istage,127)- Ghimj(index,410)*K(index,istage,129)- Ghimj(index,411)*K(index,istage,136)) /(Ghimj(index,405));
K(index,istage,80) = (K(index,istage,80)- Ghimj(index,398)*K(index,istage,90)- Ghimj(index,399)*K(index,istage,112)- Ghimj(index,400)*K(index,istage,116)- Ghimj(index,401)*K(index,istage,127)- Ghimj(index,402)*K(index,istage,129)- Ghimj(index,403)*K(index,istage,134)- Ghimj(index,404) *K(index,istage,138))/(Ghimj(index,397));
K(index,istage,79) = (K(index,istage,79)- Ghimj(index,394)*K(index,istage,102)- Ghimj(index,395)*K(index,istage,126)- Ghimj(index,396)*K(index,istage,137))/(Ghimj(index,393));
K(index,istage,78) = (K(index,istage,78)- Ghimj(index,387)*K(index,istage,103)- Ghimj(index,388)*K(index,istage,106)- Ghimj(index,389)*K(index,istage,107)- Ghimj(index,390)*K(index,istage,110)- Ghimj(index,391)*K(index,istage,124)- Ghimj(index,392)*K(index,istage,126)) /(Ghimj(index,386));
K(index,istage,77) = (K(index,istage,77)- Ghimj(index,383)*K(index,istage,121)- Ghimj(index,384)*K(index,istage,126)- Ghimj(index,385)*K(index,istage,135))/(Ghimj(index,382));
K(index,istage,76) = (K(index,istage,76)- Ghimj(index,378)*K(index,istage,87)- Ghimj(index,379)*K(index,istage,126)- Ghimj(index,380)*K(index,istage,133)- Ghimj(index,381)*K(index,istage,135))/(Ghimj(index,377));
K(index,istage,75) = (K(index,istage,75)- Ghimj(index,375)*K(index,istage,120)- Ghimj(index,376)*K(index,istage,126))/(Ghimj(index,374));
K(index,istage,74) = (K(index,istage,74)- Ghimj(index,369)*K(index,istage,117)- Ghimj(index,370)*K(index,istage,121)- Ghimj(index,371)*K(index,istage,125)- Ghimj(index,372)*K(index,istage,126)- Ghimj(index,373)*K(index,istage,137))/(Ghimj(index,368));
K(index,istage,73) = (K(index,istage,73)- Ghimj(index,365)*K(index,istage,126)- Ghimj(index,366)*K(index,istage,135)- Ghimj(index,367)*K(index,istage,137))/(Ghimj(index,364));
K(index,istage,72) = (K(index,istage,72)- Ghimj(index,361)*K(index,istage,94)- Ghimj(index,362)*K(index,istage,126)- Ghimj(index,363)*K(index,istage,137))/(Ghimj(index,360));
K(index,istage,71) = (K(index,istage,71)- Ghimj(index,357)*K(index,istage,117)- Ghimj(index,358)*K(index,istage,126)- Ghimj(index,359)*K(index,istage,137))/(Ghimj(index,356));
K(index,istage,70) = (K(index,istage,70)- Ghimj(index,353)*K(index,istage,84)- Ghimj(index,354)*K(index,istage,87)- Ghimj(index,355)*K(index,istage,126))/(Ghimj(index,352));
K(index,istage,69) = (K(index,istage,69)- Ghimj(index,348)*K(index,istage,93)- Ghimj(index,349)*K(index,istage,126)- Ghimj(index,350)*K(index,istage,137))/(Ghimj(index,347));
K(index,istage,68) = (K(index,istage,68)- Ghimj(index,344)*K(index,istage,99)- Ghimj(index,345)*K(index,istage,126)- Ghimj(index,346)*K(index,istage,137))/(Ghimj(index,343));
K(index,istage,67) = (K(index,istage,67)- Ghimj(index,340)*K(index,istage,115)- Ghimj(index,341)*K(index,istage,126)- Ghimj(index,342)*K(index,istage,137))/(Ghimj(index,339));
K(index,istage,66) = (K(index,istage,66)- Ghimj(index,336)*K(index,istage,109)- Ghimj(index,337)*K(index,istage,126)- Ghimj(index,338)*K(index,istage,137))/(Ghimj(index,335));
K(index,istage,65) = (K(index,istage,65)- Ghimj(index,332)*K(index,istage,114)- Ghimj(index,333)*K(index,istage,126)- Ghimj(index,334)*K(index,istage,132))/(Ghimj(index,331));
K(index,istage,64) = (K(index,istage,64)- Ghimj(index,328)*K(index,istage,113)- Ghimj(index,329)*K(index,istage,126)- Ghimj(index,330)*K(index,istage,135))/(Ghimj(index,327));
K(index,istage,63) = (K(index,istage,63)- Ghimj(index,324)*K(index,istage,121)- Ghimj(index,325)*K(index,istage,126)- Ghimj(index,326)*K(index,istage,137))/(Ghimj(index,323));
K(index,istage,62) = (K(index,istage,62)- Ghimj(index,320)*K(index,istage,93)- Ghimj(index,321)*K(index,istage,126)- Ghimj(index,322)*K(index,istage,133))/(Ghimj(index,319));
K(index,istage,61) = (K(index,istage,61)- Ghimj(index,316)*K(index,istage,70)- Ghimj(index,317)*K(index,istage,87)- Ghimj(index,318)*K(index,istage,126))/(Ghimj(index,315));
K(index,istage,60) = (K(index,istage,60)- Ghimj(index,311)*K(index,istage,92)- Ghimj(index,312)*K(index,istage,120)- Ghimj(index,313)*K(index,istage,133)- Ghimj(index,314)*K(index,istage,135))/(Ghimj(index,310));
K(index,istage,59) = (K(index,istage,59)- Ghimj(index,307)*K(index,istage,133)- Ghimj(index,308)*K(index,istage,135))/(Ghimj(index,306));
K(index,istage,58) = (K(index,istage,58)- Ghimj(index,304)*K(index,istage,91)- Ghimj(index,305)*K(index,istage,126))/(Ghimj(index,303));
K(index,istage,57) = (K(index,istage,57)- Ghimj(index,301)*K(index,istage,120)- Ghimj(index,302)*K(index,istage,126))/(Ghimj(index,300));
K(index,istage,56) = (K(index,istage,56)- Ghimj(index,297)*K(index,istage,65)- Ghimj(index,298)*K(index,istage,81)- Ghimj(index,299)*K(index,istage,126))/(Ghimj(index,296));
K(index,istage,55) = (K(index,istage,55)- Ghimj(index,295)*K(index,istage,126))/(Ghimj(index,294));
K(index,istage,54) = (K(index,istage,54)- Ghimj(index,293)*K(index,istage,126))/(Ghimj(index,292));
K(index,istage,53) = (K(index,istage,53)- Ghimj(index,291)*K(index,istage,126))/(Ghimj(index,290));
K(index,istage,52) = (K(index,istage,52)- Ghimj(index,289)*K(index,istage,126))/(Ghimj(index,288));
K(index,istage,51) = (K(index,istage,51)- Ghimj(index,286)*K(index,istage,132)- Ghimj(index,287)*K(index,istage,134))/(Ghimj(index,285));
K(index,istage,50) = (K(index,istage,50)- Ghimj(index,283)*K(index,istage,83)- Ghimj(index,284)*K(index,istage,138))/(Ghimj(index,282));
K(index,istage,49) = (K(index,istage,49)- Ghimj(index,281)*K(index,istage,126))/(Ghimj(index,280));
K(index,istage,48) = (K(index,istage,48)- Ghimj(index,279)*K(index,istage,126))/(Ghimj(index,278));
K(index,istage,47) = (K(index,istage,47)- Ghimj(index,277)*K(index,istage,126))/(Ghimj(index,276));
K(index,istage,46) = (K(index,istage,46)- Ghimj(index,273)*K(index,istage,81)- Ghimj(index,274)*K(index,istage,124)- Ghimj(index,275)*K(index,istage,137))/(Ghimj(index,272));
K(index,istage,45) = (K(index,istage,45)- Ghimj(index,271)*K(index,istage,126))/(Ghimj(index,270));
K(index,istage,44) = (K(index,istage,44)- Ghimj(index,269)*K(index,istage,126))/(Ghimj(index,268));
K(index,istage,43) = (K(index,istage,43)- Ghimj(index,267)*K(index,istage,120))/(Ghimj(index,266));
K(index,istage,42) = (K(index,istage,42)- Ghimj(index,265)*K(index,istage,120))/(Ghimj(index,264));
K(index,istage,41) = (K(index,istage,41)- Ghimj(index,263)*K(index,istage,120))/(Ghimj(index,262));
K(index,istage,40) = (K(index,istage,40)- Ghimj(index,261)*K(index,istage,126))/(Ghimj(index,260));
K(index,istage,39) = (K(index,istage,39)- Ghimj(index,259)*K(index,istage,134))/(Ghimj(index,258));
K(index,istage,38) = (K(index,istage,38)- Ghimj(index,256)*K(index,istage,68)- Ghimj(index,257)*K(index,istage,126))/(Ghimj(index,255));
K(index,istage,37) = (K(index,istage,37)- Ghimj(index,252)*K(index,istage,52)- Ghimj(index,253)*K(index,istage,54)- Ghimj(index,254)*K(index,istage,55))/(Ghimj(index,251));
K(index,istage,36) = (K(index,istage,36)- Ghimj(index,245)*K(index,istage,44)- Ghimj(index,246)*K(index,istage,45)- Ghimj(index,247)*K(index,istage,52)- Ghimj(index,248)*K(index,istage,54)- Ghimj(index,249)*K(index,istage,55)- Ghimj(index,250)*K(index,istage,126))/(Ghimj(index,244));
K(index,istage,35) = (K(index,istage,35)- Ghimj(index,234)*K(index,istage,93)- Ghimj(index,235)*K(index,istage,94)- Ghimj(index,236)*K(index,istage,99)- Ghimj(index,237)*K(index,istage,102)- Ghimj(index,238)*K(index,istage,109)- Ghimj(index,239)*K(index,istage,113)- Ghimj(index,240) *K(index,istage,115)- Ghimj(index,241)*K(index,istage,117)- Ghimj(index,242)*K(index,istage,121)- Ghimj(index,243)*K(index,istage,133))/(Ghimj(index,233));
K(index,istage,34) = (K(index,istage,34)- Ghimj(index,207)*K(index,istage,50)- Ghimj(index,208)*K(index,istage,51)- Ghimj(index,209)*K(index,istage,59)- Ghimj(index,210)*K(index,istage,60)- Ghimj(index,211)*K(index,istage,65)- Ghimj(index,212)*K(index,istage,73)- Ghimj(index,213) *K(index,istage,76)- Ghimj(index,214)*K(index,istage,93)- Ghimj(index,215)*K(index,istage,94)- Ghimj(index,216)*K(index,istage,99)- Ghimj(index,217)*K(index,istage,100)- Ghimj(index,218)*K(index,istage,101)- Ghimj(index,219)*K(index,istage,102)- Ghimj(index,220) *K(index,istage,109)- Ghimj(index,221)*K(index,istage,113)- Ghimj(index,222)*K(index,istage,114)- Ghimj(index,223)*K(index,istage,115)- Ghimj(index,224)*K(index,istage,117)- Ghimj(index,225)*K(index,istage,121)- Ghimj(index,226)*K(index,istage,122) - Ghimj(index,227)*K(index,istage,125)- Ghimj(index,228)*K(index,istage,126)- Ghimj(index,229)*K(index,istage,127)- Ghimj(index,230)*K(index,istage,129)- Ghimj(index,231)*K(index,istage,133)- Ghimj(index,232)*K(index,istage,137))/(Ghimj(index,206));
K(index,istage,33) = (K(index,istage,33)- Ghimj(index,203)*K(index,istage,125)- Ghimj(index,204)*K(index,istage,133))/(Ghimj(index,202));
K(index,istage,32) = (K(index,istage,32)- Ghimj(index,195)*K(index,istage,41)- Ghimj(index,196)*K(index,istage,42)- Ghimj(index,197)*K(index,istage,43)- Ghimj(index,198)*K(index,istage,57)- Ghimj(index,199)*K(index,istage,75)- Ghimj(index,200)*K(index,istage,120)- Ghimj(index,201) *K(index,istage,126))/(Ghimj(index,194));
K(index,istage,31) = (K(index,istage,31)- Ghimj(index,191)*K(index,istage,53)- Ghimj(index,192)*K(index,istage,126))/(Ghimj(index,190));
K(index,istage,30) = (K(index,istage,30)- Ghimj(index,186)*K(index,istage,133)- Ghimj(index,187)*K(index,istage,137))/(Ghimj(index,185));
K(index,istage,29) = (K(index,istage,29)- Ghimj(index,183)*K(index,istage,124)- Ghimj(index,184)*K(index,istage,126))/(Ghimj(index,182));
K(index,istage,28) = (K(index,istage,28)- Ghimj(index,171)*K(index,istage,103)- Ghimj(index,172)*K(index,istage,106)- Ghimj(index,173)*K(index,istage,107)- Ghimj(index,174)*K(index,istage,110)- Ghimj(index,175)*K(index,istage,117)- Ghimj(index,176)*K(index,istage,119) - Ghimj(index,177)*K(index,istage,121)- Ghimj(index,178)*K(index,istage,124)- Ghimj(index,179)*K(index,istage,125)- Ghimj(index,180)*K(index,istage,130)- Ghimj(index,181)*K(index,istage,136))/(Ghimj(index,170));
K(index,istage,27) = (K(index,istage,27)- Ghimj(index,164)*K(index,istage,60)- Ghimj(index,165)*K(index,istage,98)- Ghimj(index,166)*K(index,istage,120)- Ghimj(index,167)*K(index,istage,124)- Ghimj(index,168)*K(index,istage,128)- Ghimj(index,169)*K(index,istage,131)) /(Ghimj(index,163));
K(index,istage,26) = (K(index,istage,26)- Ghimj(index,149)*K(index,istage,83)- Ghimj(index,150)*K(index,istage,84)- Ghimj(index,151)*K(index,istage,87)- Ghimj(index,152)*K(index,istage,92)- Ghimj(index,153)*K(index,istage,105)- Ghimj(index,154)*K(index,istage,116)- Ghimj(index,155) *K(index,istage,123)- Ghimj(index,156)*K(index,istage,124)- Ghimj(index,157)*K(index,istage,128)- Ghimj(index,158)*K(index,istage,131)- Ghimj(index,159)*K(index,istage,135)- Ghimj(index,160)*K(index,istage,136)- Ghimj(index,161)*K(index,istage,137) - Ghimj(index,162)*K(index,istage,138))/(Ghimj(index,148));
K(index,istage,25) = (K(index,istage,25)- Ghimj(index,141)*K(index,istage,97)- Ghimj(index,142)*K(index,istage,120)- Ghimj(index,143)*K(index,istage,122)- Ghimj(index,144)*K(index,istage,124)- Ghimj(index,145)*K(index,istage,126)- Ghimj(index,146)*K(index,istage,131)- Ghimj(index,147) *K(index,istage,137))/(Ghimj(index,140));
K(index,istage,24) = (K(index,istage,24)- Ghimj(index,124)*K(index,istage,39)- Ghimj(index,125)*K(index,istage,57)- Ghimj(index,126)*K(index,istage,75)- Ghimj(index,127)*K(index,istage,83)- Ghimj(index,128)*K(index,istage,105)- Ghimj(index,129)*K(index,istage,112)- Ghimj(index,130) *K(index,istage,116)- Ghimj(index,131)*K(index,istage,118)- Ghimj(index,132)*K(index,istage,120)- Ghimj(index,133)*K(index,istage,123)- Ghimj(index,134)*K(index,istage,125)- Ghimj(index,135)*K(index,istage,126)- Ghimj(index,136)*K(index,istage,131) - Ghimj(index,137)*K(index,istage,132)- Ghimj(index,138)*K(index,istage,134)- Ghimj(index,139)*K(index,istage,138))/(Ghimj(index,123));
K(index,istage,23) = (K(index,istage,23)- Ghimj(index,113)*K(index,istage,105)- Ghimj(index,114)*K(index,istage,112)- Ghimj(index,115)*K(index,istage,116)- Ghimj(index,116)*K(index,istage,118)- Ghimj(index,117)*K(index,istage,123)- Ghimj(index,118)*K(index,istage,125) - Ghimj(index,119)*K(index,istage,131)- Ghimj(index,120)*K(index,istage,132)- Ghimj(index,121)*K(index,istage,134)- Ghimj(index,122)*K(index,istage,138))/(Ghimj(index,112));
K(index,istage,22) = (K(index,istage,22)- Ghimj(index,76)*K(index,istage,39)- Ghimj(index,77)*K(index,istage,57)- Ghimj(index,78)*K(index,istage,60)- Ghimj(index,79)*K(index,istage,75)- Ghimj(index,80)*K(index,istage,83)- Ghimj(index,81)*K(index,istage,84)- Ghimj(index,82)*K(index,istage,87) - Ghimj(index,83)*K(index,istage,92)- Ghimj(index,84)*K(index,istage,97)- Ghimj(index,85)*K(index,istage,98)- Ghimj(index,86)*K(index,istage,103)- Ghimj(index,87)*K(index,istage,105)- Ghimj(index,88)*K(index,istage,106)- Ghimj(index,89)*K(index,istage,107)- Ghimj(index,90) *K(index,istage,110)- Ghimj(index,91)*K(index,istage,112)- Ghimj(index,92)*K(index,istage,116)- Ghimj(index,93)*K(index,istage,117)- Ghimj(index,94)*K(index,istage,118)- Ghimj(index,95)*K(index,istage,119)- Ghimj(index,96)*K(index,istage,120)- Ghimj(index,97) *K(index,istage,121)- Ghimj(index,98)*K(index,istage,122)- Ghimj(index,99)*K(index,istage,123)- Ghimj(index,100)*K(index,istage,124)- Ghimj(index,101)*K(index,istage,125)- Ghimj(index,102)*K(index,istage,126)- Ghimj(index,103)*K(index,istage,128)- Ghimj(index,104) *K(index,istage,130)- Ghimj(index,105)*K(index,istage,131)- Ghimj(index,106)*K(index,istage,132)- Ghimj(index,107)*K(index,istage,134)- Ghimj(index,108)*K(index,istage,135)- Ghimj(index,109)*K(index,istage,136)- Ghimj(index,110)*K(index,istage,137) - Ghimj(index,111)*K(index,istage,138))/(Ghimj(index,75));
K(index,istage,21) = (K(index,istage,21)- Ghimj(index,73)*K(index,istage,120)- Ghimj(index,74)*K(index,istage,128))/(Ghimj(index,72));
K(index,istage,20) = (K(index,istage,20)- Ghimj(index,70)*K(index,istage,124)- Ghimj(index,71)*K(index,istage,137))/(Ghimj(index,69));
K(index,istage,19) = K(index,istage,19)/ Ghimj(index,68);
K(index,istage,18) = (K(index,istage,18)- Ghimj(index,65)*K(index,istage,120)- Ghimj(index,66)*K(index,istage,126))/(Ghimj(index,64));
K(index,istage,17) = (K(index,istage,17)- Ghimj(index,63)*K(index,istage,120))/(Ghimj(index,62));
K(index,istage,16) = (K(index,istage,16)- Ghimj(index,61)*K(index,istage,120))/(Ghimj(index,60));
K(index,istage,15) = (K(index,istage,15)- Ghimj(index,59)*K(index,istage,120))/(Ghimj(index,58));
K(index,istage,14) = (K(index,istage,14)- Ghimj(index,53)*K(index,istage,15)- Ghimj(index,54)*K(index,istage,16)- Ghimj(index,55)*K(index,istage,17)- Ghimj(index,56)*K(index,istage,18)- Ghimj(index,57)*K(index,istage,120))/(Ghimj(index,52));
K(index,istage,13) = (K(index,istage,13)- Ghimj(index,49)*K(index,istage,83))/(Ghimj(index,48));
K(index,istage,12) = (K(index,istage,12)- Ghimj(index,47)*K(index,istage,83))/(Ghimj(index,46));
K(index,istage,11) = (K(index,istage,11)- Ghimj(index,44)*K(index,istage,56)- Ghimj(index,45)*K(index,istage,126))/(Ghimj(index,43));
K(index,istage,10) = (K(index,istage,10)- Ghimj(index,39)*K(index,istage,46)- Ghimj(index,40)*K(index,istage,65)- Ghimj(index,41)*K(index,istage,126)- Ghimj(index,42)*K(index,istage,137))/(Ghimj(index,38));
K(index,istage,9) = (K(index,istage,9)- Ghimj(index,30)*K(index,istage,42)- Ghimj(index,31)*K(index,istage,43)- Ghimj(index,32)*K(index,istage,52)- Ghimj(index,33)*K(index,istage,54)- Ghimj(index,34)*K(index,istage,55)- Ghimj(index,35)*K(index,istage,75)- Ghimj(index,36)*K(index,istage,120) - Ghimj(index,37)*K(index,istage,126))/(Ghimj(index,29));
K(index,istage,8) = (K(index,istage,8)- Ghimj(index,26)*K(index,istage,42)- Ghimj(index,27)*K(index,istage,43)- Ghimj(index,28)*K(index,istage,120))/(Ghimj(index,25));
K(index,istage,7) = (K(index,istage,7)- Ghimj(index,10)*K(index,istage,41)- Ghimj(index,11)*K(index,istage,42)- Ghimj(index,12)*K(index,istage,43)- Ghimj(index,13)*K(index,istage,44)- Ghimj(index,14)*K(index,istage,45)- Ghimj(index,15)*K(index,istage,52)- Ghimj(index,16)*K(index,istage,53)- Ghimj(index,17) *K(index,istage,54)- Ghimj(index,18)*K(index,istage,55)- Ghimj(index,19)*K(index,istage,57)- Ghimj(index,20)*K(index,istage,75)- Ghimj(index,21)*K(index,istage,120)- Ghimj(index,22)*K(index,istage,126))/(Ghimj(index,9));
K(index,istage,6) = K(index,istage,6)/ Ghimj(index,6);
K(index,istage,5) = K(index,istage,5)/ Ghimj(index,5);
K(index,istage,4) = K(index,istage,4)/ Ghimj(index,4);
K(index,istage,3) = K(index,istage,3)/ Ghimj(index,3);
K(index,istage,2) = K(index,istage,2)/ Ghimj(index,2);
K(index,istage,1) = K(index,istage,1)/ Ghimj(index,1);
K(index,istage,0) = K(index,istage,0)/ Ghimj(index,0);
}
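/* ros_Solve: applies the factored sparse matrix Ghimj to the stage vector K
   for one Rosenbrock stage. It prefetches the factor array in strides of 16
   entries, calls the generated back-substitution routine kppSolve, and
   increments the solve counter Nsol. */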
__device__ void ros_Solve(double * __restrict__ Ghimj, double * __restrict__ K, int &Nsol, const int istage, const int ros_S)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
#pragma unroll 4
for (int i=0;i<LU_NONZERO-16;i+=16){
prefetch_ll1(&Ghimj(index,i));
}
kppSolve(Ghimj, K, istage, ros_S);
Nsol++;
}
__device__ void kppDecomp(double *Ghimj, int VL_GLO)
{
double a=0.0;
double dummy, W_0, W_1, W_2, W_3, W_4, W_5, W_6, W_7, W_8, W_9, W_10, W_11, W_12, W_13, W_14, W_15, W_16, W_17, W_18, W_19, W_20, W_21, W_22, W_23, W_24, W_25, W_26, W_27, W_28, W_29, W_30, W_31, W_32, W_33, W_34, W_35, W_36, W_37, W_38, W_39, W_40, W_41, W_42, W_43, W_44, W_45, W_46, W_47, W_48, W_49, W_50, W_51, W_52, W_53, W_54, W_55, W_56, W_57, W_58, W_59, W_60, W_61, W_62, W_63, W_64, W_65, W_66, W_67, W_68, W_69, W_70, W_71, W_72, W_73, W_74, W_75, W_76, W_77, W_78, W_79, W_80, W_81, W_82, W_83, W_84, W_85, W_86, W_87, W_88, W_89, W_90, W_91, W_92, W_93, W_94, W_95, W_96, W_97, W_98, W_99, W_100, W_101, W_102, W_103, W_104, W_105, W_106, W_107, W_108, W_109, W_110, W_111, W_112, W_113, W_114, W_115, W_116, W_117, W_118, W_119, W_120, W_121, W_122, W_123, W_124, W_125, W_126, W_127, W_128, W_129, W_130, W_131, W_132, W_133, W_134, W_135, W_136, W_137, W_138, W_139, W_140, W_141;
int index = blockIdx.x*blockDim.x+threadIdx.x;
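/* Each elimination block below loads the nonzeros of one matrix row into the
   scalar work variables W_*, subtracts multiples of the already factored pivot
   rows (multiplier a = -W_k / diagonal, stored back as W_k = -a), and writes
   the multipliers and updated entries back into Ghimj. */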
W_1 = Ghimj(index,7);
W_2 = Ghimj(index,8);
W_7 = Ghimj(index,9);
W_41 = Ghimj(index,10);
W_42 = Ghimj(index,11);
W_43 = Ghimj(index,12);
W_44 = Ghimj(index,13);
W_45 = Ghimj(index,14);
W_52 = Ghimj(index,15);
W_53 = Ghimj(index,16);
W_54 = Ghimj(index,17);
W_55 = Ghimj(index,18);
W_57 = Ghimj(index,19);
W_75 = Ghimj(index,20);
W_120 = Ghimj(index,21);
W_126 = Ghimj(index,22);
a = - W_1/ Ghimj(index,1);
W_1 = -a;
a = - W_2/ Ghimj(index,2);
W_2 = -a;
Ghimj(index,7) = W_1;
Ghimj(index,8) = W_2;
Ghimj(index,9) = W_7;
Ghimj(index,10) = W_41;
Ghimj(index,11) = W_42;
Ghimj(index,12) = W_43;
Ghimj(index,13) = W_44;
Ghimj(index,14) = W_45;
Ghimj(index,15) = W_52;
Ghimj(index,16) = W_53;
Ghimj(index,17) = W_54;
Ghimj(index,18) = W_55;
Ghimj(index,19) = W_57;
Ghimj(index,20) = W_75;
Ghimj(index,21) = W_120;
Ghimj(index,22) = W_126;
W_1 = Ghimj(index,23);
W_2 = Ghimj(index,24);
W_8 = Ghimj(index,25);
W_42 = Ghimj(index,26);
W_43 = Ghimj(index,27);
W_120 = Ghimj(index,28);
a = - W_1/ Ghimj(index,1);
W_1 = -a;
a = - W_2/ Ghimj(index,2);
W_2 = -a;
Ghimj(index,23) = W_1;
Ghimj(index,24) = W_2;
Ghimj(index,25) = W_8;
Ghimj(index,26) = W_42;
Ghimj(index,27) = W_43;
Ghimj(index,28) = W_120;
W_5 = Ghimj(index,50);
W_6 = Ghimj(index,51);
W_14 = Ghimj(index,52);
W_15 = Ghimj(index,53);
W_16 = Ghimj(index,54);
W_17 = Ghimj(index,55);
W_18 = Ghimj(index,56);
W_120 = Ghimj(index,57);
a = - W_5/ Ghimj(index,5);
W_5 = -a;
a = - W_6/ Ghimj(index,6);
W_6 = -a;
Ghimj(index,50) = W_5;
Ghimj(index,51) = W_6;
Ghimj(index,52) = W_14;
Ghimj(index,53) = W_15;
Ghimj(index,54) = W_16;
Ghimj(index,55) = W_17;
Ghimj(index,56) = W_18;
Ghimj(index,57) = W_120;
W_4 = Ghimj(index,67);
W_19 = Ghimj(index,68);
a = - W_4/ Ghimj(index,4);
W_4 = -a;
Ghimj(index,67) = W_4;
Ghimj(index,68) = W_19;
W_1 = Ghimj(index,188);
W_2 = Ghimj(index,189);
W_31 = Ghimj(index,190);
W_53 = Ghimj(index,191);
W_126 = Ghimj(index,192);
a = - W_1/ Ghimj(index,1);
W_1 = -a;
a = - W_2/ Ghimj(index,2);
W_2 = -a;
Ghimj(index,188) = W_1;
Ghimj(index,189) = W_2;
Ghimj(index,190) = W_31;
Ghimj(index,191) = W_53;
Ghimj(index,192) = W_126;
W_1 = Ghimj(index,193);
W_32 = Ghimj(index,194);
W_41 = Ghimj(index,195);
W_42 = Ghimj(index,196);
W_43 = Ghimj(index,197);
W_57 = Ghimj(index,198);
W_75 = Ghimj(index,199);
W_120 = Ghimj(index,200);
W_126 = Ghimj(index,201);
a = - W_1/ Ghimj(index,1);
W_1 = -a;
Ghimj(index,193) = W_1;
Ghimj(index,194) = W_32;
Ghimj(index,195) = W_41;
Ghimj(index,196) = W_42;
Ghimj(index,197) = W_43;
Ghimj(index,198) = W_57;
Ghimj(index,199) = W_75;
Ghimj(index,200) = W_120;
Ghimj(index,201) = W_126;
W_0 = Ghimj(index,205);
W_34 = Ghimj(index,206);
W_50 = Ghimj(index,207);
W_51 = Ghimj(index,208);
W_59 = Ghimj(index,209);
W_60 = Ghimj(index,210);
W_65 = Ghimj(index,211);
W_73 = Ghimj(index,212);
W_76 = Ghimj(index,213);
W_93 = Ghimj(index,214);
W_94 = Ghimj(index,215);
W_99 = Ghimj(index,216);
W_100 = Ghimj(index,217);
W_101 = Ghimj(index,218);
W_102 = Ghimj(index,219);
W_109 = Ghimj(index,220);
W_113 = Ghimj(index,221);
W_114 = Ghimj(index,222);
W_115 = Ghimj(index,223);
W_117 = Ghimj(index,224);
W_121 = Ghimj(index,225);
W_122 = Ghimj(index,226);
W_125 = Ghimj(index,227);
W_126 = Ghimj(index,228);
W_127 = Ghimj(index,229);
W_129 = Ghimj(index,230);
W_133 = Ghimj(index,231);
W_137 = Ghimj(index,232);
a = - W_0/ Ghimj(index,0);
W_0 = -a;
Ghimj(index,205) = W_0;
Ghimj(index,206) = W_34;
Ghimj(index,207) = W_50;
Ghimj(index,208) = W_51;
Ghimj(index,209) = W_59;
Ghimj(index,210) = W_60;
Ghimj(index,211) = W_65;
Ghimj(index,212) = W_73;
Ghimj(index,213) = W_76;
Ghimj(index,214) = W_93;
Ghimj(index,215) = W_94;
Ghimj(index,216) = W_99;
Ghimj(index,217) = W_100;
Ghimj(index,218) = W_101;
Ghimj(index,219) = W_102;
Ghimj(index,220) = W_109;
Ghimj(index,221) = W_113;
Ghimj(index,222) = W_114;
Ghimj(index,223) = W_115;
Ghimj(index,224) = W_117;
Ghimj(index,225) = W_121;
Ghimj(index,226) = W_122;
Ghimj(index,227) = W_125;
Ghimj(index,228) = W_126;
Ghimj(index,229) = W_127;
Ghimj(index,230) = W_129;
Ghimj(index,231) = W_133;
Ghimj(index,232) = W_137;
W_59 = Ghimj(index,309);
W_60 = Ghimj(index,310);
W_92 = Ghimj(index,311);
W_120 = Ghimj(index,312);
W_133 = Ghimj(index,313);
W_135 = Ghimj(index,314);
a = - W_59/ Ghimj(index,306);
W_59 = -a;
W_133 = W_133+ a *Ghimj(index,307);
W_135 = W_135+ a *Ghimj(index,308);
Ghimj(index,309) = W_59;
Ghimj(index,310) = W_60;
Ghimj(index,311) = W_92;
Ghimj(index,312) = W_120;
Ghimj(index,313) = W_133;
Ghimj(index,314) = W_135;
W_61 = Ghimj(index,351);
W_70 = Ghimj(index,352);
W_84 = Ghimj(index,353);
W_87 = Ghimj(index,354);
W_126 = Ghimj(index,355);
a = - W_61/ Ghimj(index,315);
W_61 = -a;
W_70 = W_70+ a *Ghimj(index,316);
W_87 = W_87+ a *Ghimj(index,317);
W_126 = W_126+ a *Ghimj(index,318);
Ghimj(index,351) = W_61;
Ghimj(index,352) = W_70;
Ghimj(index,353) = W_84;
Ghimj(index,354) = W_87;
Ghimj(index,355) = W_126;
W_79 = Ghimj(index,426);
W_85 = Ghimj(index,427);
W_102 = Ghimj(index,428);
W_111 = Ghimj(index,429);
W_125 = Ghimj(index,430);
W_126 = Ghimj(index,431);
W_133 = Ghimj(index,432);
W_137 = Ghimj(index,433);
a = - W_79/ Ghimj(index,393);
W_79 = -a;
W_102 = W_102+ a *Ghimj(index,394);
W_126 = W_126+ a *Ghimj(index,395);
W_137 = W_137+ a *Ghimj(index,396);
Ghimj(index,426) = W_79;
Ghimj(index,427) = W_85;
Ghimj(index,428) = W_102;
Ghimj(index,429) = W_111;
Ghimj(index,430) = W_125;
Ghimj(index,431) = W_126;
Ghimj(index,432) = W_133;
Ghimj(index,433) = W_137;
W_62 = Ghimj(index,434);
W_69 = Ghimj(index,435);
W_86 = Ghimj(index,436);
W_93 = Ghimj(index,437);
W_125 = Ghimj(index,438);
W_126 = Ghimj(index,439);
W_133 = Ghimj(index,440);
W_137 = Ghimj(index,441);
a = - W_62/ Ghimj(index,319);
W_62 = -a;
W_93 = W_93+ a *Ghimj(index,320);
W_126 = W_126+ a *Ghimj(index,321);
W_133 = W_133+ a *Ghimj(index,322);
a = - W_69/ Ghimj(index,347);
W_69 = -a;
W_93 = W_93+ a *Ghimj(index,348);
W_126 = W_126+ a *Ghimj(index,349);
W_137 = W_137+ a *Ghimj(index,350);
Ghimj(index,434) = W_62;
Ghimj(index,435) = W_69;
Ghimj(index,436) = W_86;
Ghimj(index,437) = W_93;
Ghimj(index,438) = W_125;
Ghimj(index,439) = W_126;
Ghimj(index,440) = W_133;
Ghimj(index,441) = W_137;
W_70 = Ghimj(index,442);
W_84 = Ghimj(index,443);
W_87 = Ghimj(index,444);
W_92 = Ghimj(index,445);
W_124 = Ghimj(index,446);
W_126 = Ghimj(index,447);
W_135 = Ghimj(index,448);
W_137 = Ghimj(index,449);
a = - W_70/ Ghimj(index,352);
W_70 = -a;
W_84 = W_84+ a *Ghimj(index,353);
W_87 = W_87+ a *Ghimj(index,354);
W_126 = W_126+ a *Ghimj(index,355);
a = - W_84/ Ghimj(index,421);
W_84 = -a;
W_92 = W_92+ a *Ghimj(index,422);
W_124 = W_124+ a *Ghimj(index,423);
W_135 = W_135+ a *Ghimj(index,424);
W_137 = W_137+ a *Ghimj(index,425);
Ghimj(index,442) = W_70;
Ghimj(index,443) = W_84;
Ghimj(index,444) = W_87;
Ghimj(index,445) = W_92;
Ghimj(index,446) = W_124;
Ghimj(index,447) = W_126;
Ghimj(index,448) = W_135;
Ghimj(index,449) = W_137;
W_80 = Ghimj(index,468);
W_90 = Ghimj(index,469);
W_100 = Ghimj(index,470);
W_105 = Ghimj(index,471);
W_112 = Ghimj(index,472);
W_116 = Ghimj(index,473);
W_118 = Ghimj(index,474);
W_123 = Ghimj(index,475);
W_127 = Ghimj(index,476);
W_129 = Ghimj(index,477);
W_132 = Ghimj(index,478);
W_134 = Ghimj(index,479);
W_138 = Ghimj(index,480);
a = - W_80/ Ghimj(index,397);
W_80 = -a;
W_90 = W_90+ a *Ghimj(index,398);
W_112 = W_112+ a *Ghimj(index,399);
W_116 = W_116+ a *Ghimj(index,400);
W_127 = W_127+ a *Ghimj(index,401);
W_129 = W_129+ a *Ghimj(index,402);
W_134 = W_134+ a *Ghimj(index,403);
W_138 = W_138+ a *Ghimj(index,404);
Ghimj(index,468) = W_80;
Ghimj(index,469) = W_90;
Ghimj(index,470) = W_100;
Ghimj(index,471) = W_105;
Ghimj(index,472) = W_112;
Ghimj(index,473) = W_116;
Ghimj(index,474) = W_118;
Ghimj(index,475) = W_123;
Ghimj(index,476) = W_127;
Ghimj(index,477) = W_129;
Ghimj(index,478) = W_132;
Ghimj(index,479) = W_134;
Ghimj(index,480) = W_138;
W_47 = Ghimj(index,487);
W_84 = Ghimj(index,488);
W_92 = Ghimj(index,489);
W_124 = Ghimj(index,490);
W_126 = Ghimj(index,491);
W_133 = Ghimj(index,492);
W_135 = Ghimj(index,493);
W_137 = Ghimj(index,494);
a = - W_47/ Ghimj(index,276);
W_47 = -a;
W_126 = W_126+ a *Ghimj(index,277);
a = - W_84/ Ghimj(index,421);
W_84 = -a;
W_92 = W_92+ a *Ghimj(index,422);
W_124 = W_124+ a *Ghimj(index,423);
W_135 = W_135+ a *Ghimj(index,424);
W_137 = W_137+ a *Ghimj(index,425);
Ghimj(index,487) = W_47;
Ghimj(index,488) = W_84;
Ghimj(index,489) = W_92;
Ghimj(index,490) = W_124;
Ghimj(index,491) = W_126;
Ghimj(index,492) = W_133;
Ghimj(index,493) = W_135;
Ghimj(index,494) = W_137;
W_49 = Ghimj(index,495);
W_69 = Ghimj(index,496);
W_93 = Ghimj(index,497);
W_125 = Ghimj(index,498);
W_126 = Ghimj(index,499);
W_133 = Ghimj(index,500);
W_137 = Ghimj(index,501);
a = - W_49/ Ghimj(index,280);
W_49 = -a;
W_126 = W_126+ a *Ghimj(index,281);
a = - W_69/ Ghimj(index,347);
W_69 = -a;
W_93 = W_93+ a *Ghimj(index,348);
W_126 = W_126+ a *Ghimj(index,349);
W_137 = W_137+ a *Ghimj(index,350);
Ghimj(index,495) = W_49;
Ghimj(index,496) = W_69;
Ghimj(index,497) = W_93;
Ghimj(index,498) = W_125;
Ghimj(index,499) = W_126;
Ghimj(index,500) = W_133;
Ghimj(index,501) = W_137;
W_72 = Ghimj(index,502);
W_86 = Ghimj(index,503);
W_93 = Ghimj(index,504);
W_94 = Ghimj(index,505);
W_125 = Ghimj(index,506);
W_126 = Ghimj(index,507);
W_133 = Ghimj(index,508);
W_137 = Ghimj(index,509);
a = - W_72/ Ghimj(index,360);
W_72 = -a;
W_94 = W_94+ a *Ghimj(index,361);
W_126 = W_126+ a *Ghimj(index,362);
W_137 = W_137+ a *Ghimj(index,363);
a = - W_86/ Ghimj(index,436);
W_86 = -a;
W_93 = W_93+ a *Ghimj(index,437);
W_125 = W_125+ a *Ghimj(index,438);
W_126 = W_126+ a *Ghimj(index,439);
W_133 = W_133+ a *Ghimj(index,440);
W_137 = W_137+ a *Ghimj(index,441);
a = - W_93/ Ghimj(index,497);
W_93 = -a;
W_125 = W_125+ a *Ghimj(index,498);
W_126 = W_126+ a *Ghimj(index,499);
W_133 = W_133+ a *Ghimj(index,500);
W_137 = W_137+ a *Ghimj(index,501);
Ghimj(index,502) = W_72;
Ghimj(index,503) = W_86;
Ghimj(index,504) = W_93;
Ghimj(index,505) = W_94;
Ghimj(index,506) = W_125;
Ghimj(index,507) = W_126;
Ghimj(index,508) = W_133;
Ghimj(index,509) = W_137;
W_58 = Ghimj(index,510);
W_77 = Ghimj(index,511);
W_82 = Ghimj(index,512);
W_91 = Ghimj(index,513);
W_95 = Ghimj(index,514);
W_96 = Ghimj(index,515);
W_98 = Ghimj(index,516);
W_103 = Ghimj(index,517);
W_106 = Ghimj(index,518);
W_107 = Ghimj(index,519);
W_109 = Ghimj(index,520);
W_110 = Ghimj(index,521);
W_113 = Ghimj(index,522);
W_119 = Ghimj(index,523);
W_121 = Ghimj(index,524);
W_124 = Ghimj(index,525);
W_125 = Ghimj(index,526);
W_126 = Ghimj(index,527);
W_127 = Ghimj(index,528);
W_129 = Ghimj(index,529);
W_130 = Ghimj(index,530);
W_133 = Ghimj(index,531);
W_135 = Ghimj(index,532);
W_136 = Ghimj(index,533);
W_137 = Ghimj(index,534);
a = - W_58/ Ghimj(index,303);
W_58 = -a;
W_91 = W_91+ a *Ghimj(index,304);
W_126 = W_126+ a *Ghimj(index,305);
a = - W_77/ Ghimj(index,382);
W_77 = -a;
W_121 = W_121+ a *Ghimj(index,383);
W_126 = W_126+ a *Ghimj(index,384);
W_135 = W_135+ a *Ghimj(index,385);
a = - W_82/ Ghimj(index,412);
W_82 = -a;
W_113 = W_113+ a *Ghimj(index,413);
W_126 = W_126+ a *Ghimj(index,414);
W_137 = W_137+ a *Ghimj(index,415);
a = - W_91/ Ghimj(index,481);
W_91 = -a;
W_106 = W_106+ a *Ghimj(index,482);
W_109 = W_109+ a *Ghimj(index,483);
W_126 = W_126+ a *Ghimj(index,484);
W_133 = W_133+ a *Ghimj(index,485);
W_136 = W_136+ a *Ghimj(index,486);
Ghimj(index,510) = W_58;
Ghimj(index,511) = W_77;
Ghimj(index,512) = W_82;
Ghimj(index,513) = W_91;
Ghimj(index,514) = W_95;
Ghimj(index,515) = W_96;
Ghimj(index,516) = W_98;
Ghimj(index,517) = W_103;
Ghimj(index,518) = W_106;
Ghimj(index,519) = W_107;
Ghimj(index,520) = W_109;
Ghimj(index,521) = W_110;
Ghimj(index,522) = W_113;
Ghimj(index,523) = W_119;
Ghimj(index,524) = W_121;
Ghimj(index,525) = W_124;
Ghimj(index,526) = W_125;
Ghimj(index,527) = W_126;
Ghimj(index,528) = W_127;
Ghimj(index,529) = W_129;
Ghimj(index,530) = W_130;
Ghimj(index,531) = W_133;
Ghimj(index,532) = W_135;
Ghimj(index,533) = W_136;
Ghimj(index,534) = W_137;
W_72 = Ghimj(index,535);
W_82 = Ghimj(index,536);
W_94 = Ghimj(index,537);
W_96 = Ghimj(index,538);
W_107 = Ghimj(index,539);
W_108 = Ghimj(index,540);
W_109 = Ghimj(index,541);
W_110 = Ghimj(index,542);
W_113 = Ghimj(index,543);
W_124 = Ghimj(index,544);
W_125 = Ghimj(index,545);
W_126 = Ghimj(index,546);
W_133 = Ghimj(index,547);
W_137 = Ghimj(index,548);
a = - W_72/ Ghimj(index,360);
W_72 = -a;
W_94 = W_94+ a *Ghimj(index,361);
W_126 = W_126+ a *Ghimj(index,362);
W_137 = W_137+ a *Ghimj(index,363);
a = - W_82/ Ghimj(index,412);
W_82 = -a;
W_113 = W_113+ a *Ghimj(index,413);
W_126 = W_126+ a *Ghimj(index,414);
W_137 = W_137+ a *Ghimj(index,415);
a = - W_94/ Ghimj(index,505);
W_94 = -a;
W_125 = W_125+ a *Ghimj(index,506);
W_126 = W_126+ a *Ghimj(index,507);
W_133 = W_133+ a *Ghimj(index,508);
W_137 = W_137+ a *Ghimj(index,509);
Ghimj(index,535) = W_72;
Ghimj(index,536) = W_82;
Ghimj(index,537) = W_94;
Ghimj(index,538) = W_96;
Ghimj(index,539) = W_107;
Ghimj(index,540) = W_108;
Ghimj(index,541) = W_109;
Ghimj(index,542) = W_110;
Ghimj(index,543) = W_113;
Ghimj(index,544) = W_124;
Ghimj(index,545) = W_125;
Ghimj(index,546) = W_126;
Ghimj(index,547) = W_133;
Ghimj(index,548) = W_137;
W_68 = Ghimj(index,563);
W_85 = Ghimj(index,564);
W_99 = Ghimj(index,565);
W_102 = Ghimj(index,566);
W_111 = Ghimj(index,567);
W_125 = Ghimj(index,568);
W_126 = Ghimj(index,569);
W_133 = Ghimj(index,570);
W_137 = Ghimj(index,571);
a = - W_68/ Ghimj(index,343);
W_68 = -a;
W_99 = W_99+ a *Ghimj(index,344);
W_126 = W_126+ a *Ghimj(index,345);
W_137 = W_137+ a *Ghimj(index,346);
a = - W_85/ Ghimj(index,427);
W_85 = -a;
W_102 = W_102+ a *Ghimj(index,428);
W_111 = W_111+ a *Ghimj(index,429);
W_125 = W_125+ a *Ghimj(index,430);
W_126 = W_126+ a *Ghimj(index,431);
W_133 = W_133+ a *Ghimj(index,432);
W_137 = W_137+ a *Ghimj(index,433);
Ghimj(index,563) = W_68;
Ghimj(index,564) = W_85;
Ghimj(index,565) = W_99;
Ghimj(index,566) = W_102;
Ghimj(index,567) = W_111;
Ghimj(index,568) = W_125;
Ghimj(index,569) = W_126;
Ghimj(index,570) = W_133;
Ghimj(index,571) = W_137;
W_90 = Ghimj(index,572);
W_100 = Ghimj(index,573);
W_105 = Ghimj(index,574);
W_112 = Ghimj(index,575);
W_116 = Ghimj(index,576);
W_118 = Ghimj(index,577);
W_123 = Ghimj(index,578);
W_126 = Ghimj(index,579);
W_127 = Ghimj(index,580);
W_129 = Ghimj(index,581);
W_132 = Ghimj(index,582);
W_134 = Ghimj(index,583);
W_138 = Ghimj(index,584);
a = - W_90/ Ghimj(index,469);
W_90 = -a;
W_100 = W_100+ a *Ghimj(index,470);
W_105 = W_105+ a *Ghimj(index,471);
W_112 = W_112+ a *Ghimj(index,472);
W_116 = W_116+ a *Ghimj(index,473);
W_118 = W_118+ a *Ghimj(index,474);
W_123 = W_123+ a *Ghimj(index,475);
W_127 = W_127+ a *Ghimj(index,476);
W_129 = W_129+ a *Ghimj(index,477);
W_132 = W_132+ a *Ghimj(index,478);
W_134 = W_134+ a *Ghimj(index,479);
W_138 = W_138+ a *Ghimj(index,480);
Ghimj(index,572) = W_90;
Ghimj(index,573) = W_100;
Ghimj(index,574) = W_105;
Ghimj(index,575) = W_112;
Ghimj(index,576) = W_116;
Ghimj(index,577) = W_118;
Ghimj(index,578) = W_123;
Ghimj(index,579) = W_126;
Ghimj(index,580) = W_127;
Ghimj(index,581) = W_129;
Ghimj(index,582) = W_132;
Ghimj(index,583) = W_134;
Ghimj(index,584) = W_138;
W_83 = Ghimj(index,585);
W_101 = Ghimj(index,586);
W_105 = Ghimj(index,587);
W_114 = Ghimj(index,588);
W_116 = Ghimj(index,589);
W_119 = Ghimj(index,590);
W_123 = Ghimj(index,591);
W_126 = Ghimj(index,592);
W_128 = Ghimj(index,593);
W_130 = Ghimj(index,594);
W_135 = Ghimj(index,595);
W_136 = Ghimj(index,596);
W_138 = Ghimj(index,597);
a = - W_83/ Ghimj(index,416);
W_83 = -a;
W_128 = W_128+ a *Ghimj(index,417);
W_135 = W_135+ a *Ghimj(index,418);
W_136 = W_136+ a *Ghimj(index,419);
W_138 = W_138+ a *Ghimj(index,420);
Ghimj(index,585) = W_83;
Ghimj(index,586) = W_101;
Ghimj(index,587) = W_105;
Ghimj(index,588) = W_114;
Ghimj(index,589) = W_116;
Ghimj(index,590) = W_119;
Ghimj(index,591) = W_123;
Ghimj(index,592) = W_126;
Ghimj(index,593) = W_128;
Ghimj(index,594) = W_130;
Ghimj(index,595) = W_135;
Ghimj(index,596) = W_136;
Ghimj(index,597) = W_138;
W_40 = Ghimj(index,598);
W_79 = Ghimj(index,599);
W_102 = Ghimj(index,600);
W_125 = Ghimj(index,601);
W_126 = Ghimj(index,602);
W_133 = Ghimj(index,603);
W_137 = Ghimj(index,604);
a = - W_40/ Ghimj(index,260);
W_40 = -a;
W_126 = W_126+ a *Ghimj(index,261);
a = - W_79/ Ghimj(index,393);
W_79 = -a;
W_102 = W_102+ a *Ghimj(index,394);
W_126 = W_126+ a *Ghimj(index,395);
W_137 = W_137+ a *Ghimj(index,396);
Ghimj(index,598) = W_40;
Ghimj(index,599) = W_79;
Ghimj(index,600) = W_102;
Ghimj(index,601) = W_125;
Ghimj(index,602) = W_126;
Ghimj(index,603) = W_133;
Ghimj(index,604) = W_137;
W_64 = Ghimj(index,630);
W_67 = Ghimj(index,631);
W_82 = Ghimj(index,632);
W_91 = Ghimj(index,633);
W_94 = Ghimj(index,634);
W_106 = Ghimj(index,635);
W_108 = Ghimj(index,636);
W_109 = Ghimj(index,637);
W_113 = Ghimj(index,638);
W_115 = Ghimj(index,639);
W_124 = Ghimj(index,640);
W_125 = Ghimj(index,641);
W_126 = Ghimj(index,642);
W_133 = Ghimj(index,643);
W_135 = Ghimj(index,644);
W_136 = Ghimj(index,645);
W_137 = Ghimj(index,646);
a = - W_64/ Ghimj(index,327);
W_64 = -a;
W_113 = W_113+ a *Ghimj(index,328);
W_126 = W_126+ a *Ghimj(index,329);
W_135 = W_135+ a *Ghimj(index,330);
a = - W_67/ Ghimj(index,339);
W_67 = -a;
W_115 = W_115+ a *Ghimj(index,340);
W_126 = W_126+ a *Ghimj(index,341);
W_137 = W_137+ a *Ghimj(index,342);
a = - W_82/ Ghimj(index,412);
W_82 = -a;
W_113 = W_113+ a *Ghimj(index,413);
W_126 = W_126+ a *Ghimj(index,414);
W_137 = W_137+ a *Ghimj(index,415);
a = - W_91/ Ghimj(index,481);
W_91 = -a;
W_106 = W_106+ a *Ghimj(index,482);
W_109 = W_109+ a *Ghimj(index,483);
W_126 = W_126+ a *Ghimj(index,484);
W_133 = W_133+ a *Ghimj(index,485);
W_136 = W_136+ a *Ghimj(index,486);
a = - W_94/ Ghimj(index,505);
W_94 = -a;
W_125 = W_125+ a *Ghimj(index,506);
W_126 = W_126+ a *Ghimj(index,507);
W_133 = W_133+ a *Ghimj(index,508);
W_137 = W_137+ a *Ghimj(index,509);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
Ghimj(index,630) = W_64;
Ghimj(index,631) = W_67;
Ghimj(index,632) = W_82;
Ghimj(index,633) = W_91;
Ghimj(index,634) = W_94;
Ghimj(index,635) = W_106;
Ghimj(index,636) = W_108;
Ghimj(index,637) = W_109;
Ghimj(index,638) = W_113;
Ghimj(index,639) = W_115;
Ghimj(index,640) = W_124;
Ghimj(index,641) = W_125;
Ghimj(index,642) = W_126;
Ghimj(index,643) = W_133;
Ghimj(index,644) = W_135;
Ghimj(index,645) = W_136;
Ghimj(index,646) = W_137;
W_106 = Ghimj(index,647);
W_109 = Ghimj(index,648);
W_124 = Ghimj(index,649);
W_125 = Ghimj(index,650);
W_126 = Ghimj(index,651);
W_133 = Ghimj(index,652);
W_136 = Ghimj(index,653);
W_137 = Ghimj(index,654);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
Ghimj(index,647) = W_106;
Ghimj(index,648) = W_109;
Ghimj(index,649) = W_124;
Ghimj(index,650) = W_125;
Ghimj(index,651) = W_126;
Ghimj(index,652) = W_133;
Ghimj(index,653) = W_136;
Ghimj(index,654) = W_137;
W_66 = Ghimj(index,655);
W_91 = Ghimj(index,656);
W_106 = Ghimj(index,657);
W_109 = Ghimj(index,658);
W_110 = Ghimj(index,659);
W_124 = Ghimj(index,660);
W_125 = Ghimj(index,661);
W_126 = Ghimj(index,662);
W_133 = Ghimj(index,663);
W_136 = Ghimj(index,664);
W_137 = Ghimj(index,665);
a = - W_66/ Ghimj(index,335);
W_66 = -a;
W_109 = W_109+ a *Ghimj(index,336);
W_126 = W_126+ a *Ghimj(index,337);
W_137 = W_137+ a *Ghimj(index,338);
a = - W_91/ Ghimj(index,481);
W_91 = -a;
W_106 = W_106+ a *Ghimj(index,482);
W_109 = W_109+ a *Ghimj(index,483);
W_126 = W_126+ a *Ghimj(index,484);
W_133 = W_133+ a *Ghimj(index,485);
W_136 = W_136+ a *Ghimj(index,486);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
a = - W_109/ Ghimj(index,648);
W_109 = -a;
W_124 = W_124+ a *Ghimj(index,649);
W_125 = W_125+ a *Ghimj(index,650);
W_126 = W_126+ a *Ghimj(index,651);
W_133 = W_133+ a *Ghimj(index,652);
W_136 = W_136+ a *Ghimj(index,653);
W_137 = W_137+ a *Ghimj(index,654);
Ghimj(index,655) = W_66;
Ghimj(index,656) = W_91;
Ghimj(index,657) = W_106;
Ghimj(index,658) = W_109;
Ghimj(index,659) = W_110;
Ghimj(index,660) = W_124;
Ghimj(index,661) = W_125;
Ghimj(index,662) = W_126;
Ghimj(index,663) = W_133;
Ghimj(index,664) = W_136;
Ghimj(index,665) = W_137;
W_99 = Ghimj(index,666);
W_102 = Ghimj(index,667);
W_107 = Ghimj(index,668);
W_111 = Ghimj(index,669);
W_115 = Ghimj(index,670);
W_124 = Ghimj(index,671);
W_125 = Ghimj(index,672);
W_126 = Ghimj(index,673);
W_133 = Ghimj(index,674);
W_136 = Ghimj(index,675);
W_137 = Ghimj(index,676);
a = - W_99/ Ghimj(index,565);
W_99 = -a;
W_102 = W_102+ a *Ghimj(index,566);
W_111 = W_111+ a *Ghimj(index,567);
W_125 = W_125+ a *Ghimj(index,568);
W_126 = W_126+ a *Ghimj(index,569);
W_133 = W_133+ a *Ghimj(index,570);
W_137 = W_137+ a *Ghimj(index,571);
a = - W_102/ Ghimj(index,600);
W_102 = -a;
W_125 = W_125+ a *Ghimj(index,601);
W_126 = W_126+ a *Ghimj(index,602);
W_133 = W_133+ a *Ghimj(index,603);
W_137 = W_137+ a *Ghimj(index,604);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
Ghimj(index,666) = W_99;
Ghimj(index,667) = W_102;
Ghimj(index,668) = W_107;
Ghimj(index,669) = W_111;
Ghimj(index,670) = W_115;
Ghimj(index,671) = W_124;
Ghimj(index,672) = W_125;
Ghimj(index,673) = W_126;
Ghimj(index,674) = W_133;
Ghimj(index,675) = W_136;
Ghimj(index,676) = W_137;
W_64 = Ghimj(index,685);
W_82 = Ghimj(index,686);
W_106 = Ghimj(index,687);
W_110 = Ghimj(index,688);
W_113 = Ghimj(index,689);
W_124 = Ghimj(index,690);
W_125 = Ghimj(index,691);
W_126 = Ghimj(index,692);
W_133 = Ghimj(index,693);
W_135 = Ghimj(index,694);
W_136 = Ghimj(index,695);
W_137 = Ghimj(index,696);
a = - W_64/ Ghimj(index,327);
W_64 = -a;
W_113 = W_113+ a *Ghimj(index,328);
W_126 = W_126+ a *Ghimj(index,329);
W_135 = W_135+ a *Ghimj(index,330);
a = - W_82/ Ghimj(index,412);
W_82 = -a;
W_113 = W_113+ a *Ghimj(index,413);
W_126 = W_126+ a *Ghimj(index,414);
W_137 = W_137+ a *Ghimj(index,415);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
a = - W_110/ Ghimj(index,659);
W_110 = -a;
W_124 = W_124+ a *Ghimj(index,660);
W_125 = W_125+ a *Ghimj(index,661);
W_126 = W_126+ a *Ghimj(index,662);
W_133 = W_133+ a *Ghimj(index,663);
W_136 = W_136+ a *Ghimj(index,664);
W_137 = W_137+ a *Ghimj(index,665);
Ghimj(index,685) = W_64;
Ghimj(index,686) = W_82;
Ghimj(index,687) = W_106;
Ghimj(index,688) = W_110;
Ghimj(index,689) = W_113;
Ghimj(index,690) = W_124;
Ghimj(index,691) = W_125;
Ghimj(index,692) = W_126;
Ghimj(index,693) = W_133;
Ghimj(index,694) = W_135;
Ghimj(index,695) = W_136;
Ghimj(index,696) = W_137;
W_67 = Ghimj(index,703);
W_103 = Ghimj(index,704);
W_107 = Ghimj(index,705);
W_115 = Ghimj(index,706);
W_124 = Ghimj(index,707);
W_126 = Ghimj(index,708);
W_127 = Ghimj(index,709);
W_129 = Ghimj(index,710);
W_133 = Ghimj(index,711);
W_136 = Ghimj(index,712);
W_137 = Ghimj(index,713);
a = - W_67/ Ghimj(index,339);
W_67 = -a;
W_115 = W_115+ a *Ghimj(index,340);
W_126 = W_126+ a *Ghimj(index,341);
W_137 = W_137+ a *Ghimj(index,342);
a = - W_103/ Ghimj(index,605);
W_103 = -a;
W_124 = W_124+ a *Ghimj(index,606);
W_126 = W_126+ a *Ghimj(index,607);
W_127 = W_127+ a *Ghimj(index,608);
W_129 = W_129+ a *Ghimj(index,609);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
Ghimj(index,703) = W_67;
Ghimj(index,704) = W_103;
Ghimj(index,705) = W_107;
Ghimj(index,706) = W_115;
Ghimj(index,707) = W_124;
Ghimj(index,708) = W_126;
Ghimj(index,709) = W_127;
Ghimj(index,710) = W_129;
Ghimj(index,711) = W_133;
Ghimj(index,712) = W_136;
Ghimj(index,713) = W_137;
W_48 = Ghimj(index,722);
W_49 = Ghimj(index,723);
W_71 = Ghimj(index,724);
W_79 = Ghimj(index,725);
W_85 = Ghimj(index,726);
W_102 = Ghimj(index,727);
W_107 = Ghimj(index,728);
W_111 = Ghimj(index,729);
W_115 = Ghimj(index,730);
W_117 = Ghimj(index,731);
W_121 = Ghimj(index,732);
W_124 = Ghimj(index,733);
W_125 = Ghimj(index,734);
W_126 = Ghimj(index,735);
W_127 = Ghimj(index,736);
W_129 = Ghimj(index,737);
W_133 = Ghimj(index,738);
W_136 = Ghimj(index,739);
W_137 = Ghimj(index,740);
a = - W_48/ Ghimj(index,278);
W_48 = -a;
W_126 = W_126+ a *Ghimj(index,279);
a = - W_49/ Ghimj(index,280);
W_49 = -a;
W_126 = W_126+ a *Ghimj(index,281);
a = - W_71/ Ghimj(index,356);
W_71 = -a;
W_117 = W_117+ a *Ghimj(index,357);
W_126 = W_126+ a *Ghimj(index,358);
W_137 = W_137+ a *Ghimj(index,359);
a = - W_79/ Ghimj(index,393);
W_79 = -a;
W_102 = W_102+ a *Ghimj(index,394);
W_126 = W_126+ a *Ghimj(index,395);
W_137 = W_137+ a *Ghimj(index,396);
a = - W_85/ Ghimj(index,427);
W_85 = -a;
W_102 = W_102+ a *Ghimj(index,428);
W_111 = W_111+ a *Ghimj(index,429);
W_125 = W_125+ a *Ghimj(index,430);
W_126 = W_126+ a *Ghimj(index,431);
W_133 = W_133+ a *Ghimj(index,432);
W_137 = W_137+ a *Ghimj(index,433);
a = - W_102/ Ghimj(index,600);
W_102 = -a;
W_125 = W_125+ a *Ghimj(index,601);
W_126 = W_126+ a *Ghimj(index,602);
W_133 = W_133+ a *Ghimj(index,603);
W_137 = W_137+ a *Ghimj(index,604);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
a = - W_111/ Ghimj(index,669);
W_111 = -a;
W_115 = W_115+ a *Ghimj(index,670);
W_124 = W_124+ a *Ghimj(index,671);
W_125 = W_125+ a *Ghimj(index,672);
W_126 = W_126+ a *Ghimj(index,673);
W_133 = W_133+ a *Ghimj(index,674);
W_136 = W_136+ a *Ghimj(index,675);
W_137 = W_137+ a *Ghimj(index,676);
a = - W_115/ Ghimj(index,706);
W_115 = -a;
W_124 = W_124+ a *Ghimj(index,707);
W_126 = W_126+ a *Ghimj(index,708);
W_127 = W_127+ a *Ghimj(index,709);
W_129 = W_129+ a *Ghimj(index,710);
W_133 = W_133+ a *Ghimj(index,711);
W_136 = W_136+ a *Ghimj(index,712);
W_137 = W_137+ a *Ghimj(index,713);
Ghimj(index,722) = W_48;
Ghimj(index,723) = W_49;
Ghimj(index,724) = W_71;
Ghimj(index,725) = W_79;
Ghimj(index,726) = W_85;
Ghimj(index,727) = W_102;
Ghimj(index,728) = W_107;
Ghimj(index,729) = W_111;
Ghimj(index,730) = W_115;
Ghimj(index,731) = W_117;
Ghimj(index,732) = W_121;
Ghimj(index,733) = W_124;
Ghimj(index,734) = W_125;
Ghimj(index,735) = W_126;
Ghimj(index,736) = W_127;
Ghimj(index,737) = W_129;
Ghimj(index,738) = W_133;
Ghimj(index,739) = W_136;
Ghimj(index,740) = W_137;
W_100 = Ghimj(index,741);
W_105 = Ghimj(index,742);
W_112 = Ghimj(index,743);
W_116 = Ghimj(index,744);
W_118 = Ghimj(index,745);
W_123 = Ghimj(index,746);
W_125 = Ghimj(index,747);
W_126 = Ghimj(index,748);
W_127 = Ghimj(index,749);
W_128 = Ghimj(index,750);
W_129 = Ghimj(index,751);
W_131 = Ghimj(index,752);
W_132 = Ghimj(index,753);
W_134 = Ghimj(index,754);
W_135 = Ghimj(index,755);
W_137 = Ghimj(index,756);
W_138 = Ghimj(index,757);
a = - W_100/ Ghimj(index,573);
W_100 = -a;
W_105 = W_105+ a *Ghimj(index,574);
W_112 = W_112+ a *Ghimj(index,575);
W_116 = W_116+ a *Ghimj(index,576);
W_118 = W_118+ a *Ghimj(index,577);
W_123 = W_123+ a *Ghimj(index,578);
W_126 = W_126+ a *Ghimj(index,579);
W_127 = W_127+ a *Ghimj(index,580);
W_129 = W_129+ a *Ghimj(index,581);
W_132 = W_132+ a *Ghimj(index,582);
W_134 = W_134+ a *Ghimj(index,583);
W_138 = W_138+ a *Ghimj(index,584);
a = - W_105/ Ghimj(index,616);
W_105 = -a;
W_128 = W_128+ a *Ghimj(index,617);
W_129 = W_129+ a *Ghimj(index,618);
W_132 = W_132+ a *Ghimj(index,619);
W_135 = W_135+ a *Ghimj(index,620);
W_138 = W_138+ a *Ghimj(index,621);
a = - W_112/ Ghimj(index,677);
W_112 = -a;
W_116 = W_116+ a *Ghimj(index,678);
W_123 = W_123+ a *Ghimj(index,679);
W_126 = W_126+ a *Ghimj(index,680);
W_128 = W_128+ a *Ghimj(index,681);
W_134 = W_134+ a *Ghimj(index,682);
W_137 = W_137+ a *Ghimj(index,683);
W_138 = W_138+ a *Ghimj(index,684);
a = - W_116/ Ghimj(index,714);
W_116 = -a;
W_123 = W_123+ a *Ghimj(index,715);
W_127 = W_127+ a *Ghimj(index,716);
W_128 = W_128+ a *Ghimj(index,717);
W_131 = W_131+ a *Ghimj(index,718);
W_134 = W_134+ a *Ghimj(index,719);
W_135 = W_135+ a *Ghimj(index,720);
W_138 = W_138+ a *Ghimj(index,721);
Ghimj(index,741) = W_100;
Ghimj(index,742) = W_105;
Ghimj(index,743) = W_112;
Ghimj(index,744) = W_116;
Ghimj(index,745) = W_118;
Ghimj(index,746) = W_123;
Ghimj(index,747) = W_125;
Ghimj(index,748) = W_126;
Ghimj(index,749) = W_127;
Ghimj(index,750) = W_128;
Ghimj(index,751) = W_129;
Ghimj(index,752) = W_131;
Ghimj(index,753) = W_132;
Ghimj(index,754) = W_134;
Ghimj(index,755) = W_135;
Ghimj(index,756) = W_137;
Ghimj(index,757) = W_138;
W_68 = Ghimj(index,758);
W_71 = Ghimj(index,759);
W_79 = Ghimj(index,760);
W_99 = Ghimj(index,761);
W_102 = Ghimj(index,762);
W_107 = Ghimj(index,763);
W_111 = Ghimj(index,764);
W_115 = Ghimj(index,765);
W_117 = Ghimj(index,766);
W_119 = Ghimj(index,767);
W_121 = Ghimj(index,768);
W_124 = Ghimj(index,769);
W_125 = Ghimj(index,770);
W_126 = Ghimj(index,771);
W_127 = Ghimj(index,772);
W_129 = Ghimj(index,773);
W_133 = Ghimj(index,774);
W_136 = Ghimj(index,775);
W_137 = Ghimj(index,776);
a = - W_68/ Ghimj(index,343);
W_68 = -a;
W_99 = W_99+ a *Ghimj(index,344);
W_126 = W_126+ a *Ghimj(index,345);
W_137 = W_137+ a *Ghimj(index,346);
a = - W_71/ Ghimj(index,356);
W_71 = -a;
W_117 = W_117+ a *Ghimj(index,357);
W_126 = W_126+ a *Ghimj(index,358);
W_137 = W_137+ a *Ghimj(index,359);
a = - W_79/ Ghimj(index,393);
W_79 = -a;
W_102 = W_102+ a *Ghimj(index,394);
W_126 = W_126+ a *Ghimj(index,395);
W_137 = W_137+ a *Ghimj(index,396);
a = - W_99/ Ghimj(index,565);
W_99 = -a;
W_102 = W_102+ a *Ghimj(index,566);
W_111 = W_111+ a *Ghimj(index,567);
W_125 = W_125+ a *Ghimj(index,568);
W_126 = W_126+ a *Ghimj(index,569);
W_133 = W_133+ a *Ghimj(index,570);
W_137 = W_137+ a *Ghimj(index,571);
a = - W_102/ Ghimj(index,600);
W_102 = -a;
W_125 = W_125+ a *Ghimj(index,601);
W_126 = W_126+ a *Ghimj(index,602);
W_133 = W_133+ a *Ghimj(index,603);
W_137 = W_137+ a *Ghimj(index,604);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
a = - W_111/ Ghimj(index,669);
W_111 = -a;
W_115 = W_115+ a *Ghimj(index,670);
W_124 = W_124+ a *Ghimj(index,671);
W_125 = W_125+ a *Ghimj(index,672);
W_126 = W_126+ a *Ghimj(index,673);
W_133 = W_133+ a *Ghimj(index,674);
W_136 = W_136+ a *Ghimj(index,675);
W_137 = W_137+ a *Ghimj(index,676);
a = - W_115/ Ghimj(index,706);
W_115 = -a;
W_124 = W_124+ a *Ghimj(index,707);
W_126 = W_126+ a *Ghimj(index,708);
W_127 = W_127+ a *Ghimj(index,709);
W_129 = W_129+ a *Ghimj(index,710);
W_133 = W_133+ a *Ghimj(index,711);
W_136 = W_136+ a *Ghimj(index,712);
W_137 = W_137+ a *Ghimj(index,713);
a = - W_117/ Ghimj(index,731);
W_117 = -a;
W_121 = W_121+ a *Ghimj(index,732);
W_124 = W_124+ a *Ghimj(index,733);
W_125 = W_125+ a *Ghimj(index,734);
W_126 = W_126+ a *Ghimj(index,735);
W_127 = W_127+ a *Ghimj(index,736);
W_129 = W_129+ a *Ghimj(index,737);
W_133 = W_133+ a *Ghimj(index,738);
W_136 = W_136+ a *Ghimj(index,739);
W_137 = W_137+ a *Ghimj(index,740);
Ghimj(index,758) = W_68;
Ghimj(index,759) = W_71;
Ghimj(index,760) = W_79;
Ghimj(index,761) = W_99;
Ghimj(index,762) = W_102;
Ghimj(index,763) = W_107;
Ghimj(index,764) = W_111;
Ghimj(index,765) = W_115;
Ghimj(index,766) = W_117;
Ghimj(index,767) = W_119;
Ghimj(index,768) = W_121;
Ghimj(index,769) = W_124;
Ghimj(index,770) = W_125;
Ghimj(index,771) = W_126;
Ghimj(index,772) = W_127;
Ghimj(index,773) = W_129;
Ghimj(index,774) = W_133;
Ghimj(index,775) = W_136;
Ghimj(index,776) = W_137;
W_41 = Ghimj(index,777);
W_42 = Ghimj(index,778);
W_43 = Ghimj(index,779);
W_57 = Ghimj(index,780);
W_60 = Ghimj(index,781);
W_75 = Ghimj(index,782);
W_92 = Ghimj(index,783);
W_97 = Ghimj(index,784);
W_98 = Ghimj(index,785);
W_107 = Ghimj(index,786);
W_120 = Ghimj(index,787);
W_122 = Ghimj(index,788);
W_124 = Ghimj(index,789);
W_126 = Ghimj(index,790);
W_127 = Ghimj(index,791);
W_128 = Ghimj(index,792);
W_130 = Ghimj(index,793);
W_133 = Ghimj(index,794);
W_135 = Ghimj(index,795);
W_136 = Ghimj(index,796);
W_137 = Ghimj(index,797);
a = - W_41/ Ghimj(index,262);
W_41 = -a;
W_120 = W_120+ a *Ghimj(index,263);
a = - W_42/ Ghimj(index,264);
W_42 = -a;
W_120 = W_120+ a *Ghimj(index,265);
a = - W_43/ Ghimj(index,266);
W_43 = -a;
W_120 = W_120+ a *Ghimj(index,267);
a = - W_57/ Ghimj(index,300);
W_57 = -a;
W_120 = W_120+ a *Ghimj(index,301);
W_126 = W_126+ a *Ghimj(index,302);
a = - W_60/ Ghimj(index,310);
W_60 = -a;
W_92 = W_92+ a *Ghimj(index,311);
W_120 = W_120+ a *Ghimj(index,312);
W_133 = W_133+ a *Ghimj(index,313);
W_135 = W_135+ a *Ghimj(index,314);
a = - W_75/ Ghimj(index,374);
W_75 = -a;
W_120 = W_120+ a *Ghimj(index,375);
W_126 = W_126+ a *Ghimj(index,376);
a = - W_92/ Ghimj(index,489);
W_92 = -a;
W_124 = W_124+ a *Ghimj(index,490);
W_126 = W_126+ a *Ghimj(index,491);
W_133 = W_133+ a *Ghimj(index,492);
W_135 = W_135+ a *Ghimj(index,493);
W_137 = W_137+ a *Ghimj(index,494);
a = - W_97/ Ghimj(index,549);
W_97 = -a;
W_98 = W_98+ a *Ghimj(index,550);
W_120 = W_120+ a *Ghimj(index,551);
W_122 = W_122+ a *Ghimj(index,552);
W_126 = W_126+ a *Ghimj(index,553);
W_127 = W_127+ a *Ghimj(index,554);
W_130 = W_130+ a *Ghimj(index,555);
W_137 = W_137+ a *Ghimj(index,556);
a = - W_98/ Ghimj(index,557);
W_98 = -a;
W_107 = W_107+ a *Ghimj(index,558);
W_120 = W_120+ a *Ghimj(index,559);
W_124 = W_124+ a *Ghimj(index,560);
W_126 = W_126+ a *Ghimj(index,561);
W_127 = W_127+ a *Ghimj(index,562);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
Ghimj(index,777) = W_41;
Ghimj(index,778) = W_42;
Ghimj(index,779) = W_43;
Ghimj(index,780) = W_57;
Ghimj(index,781) = W_60;
Ghimj(index,782) = W_75;
Ghimj(index,783) = W_92;
Ghimj(index,784) = W_97;
Ghimj(index,785) = W_98;
Ghimj(index,786) = W_107;
Ghimj(index,787) = W_120;
Ghimj(index,788) = W_122;
Ghimj(index,789) = W_124;
Ghimj(index,790) = W_126;
Ghimj(index,791) = W_127;
Ghimj(index,792) = W_128;
Ghimj(index,793) = W_130;
Ghimj(index,794) = W_133;
Ghimj(index,795) = W_135;
Ghimj(index,796) = W_136;
Ghimj(index,797) = W_137;
W_38 = Ghimj(index,798);
W_63 = Ghimj(index,799);
W_68 = Ghimj(index,800);
W_72 = Ghimj(index,801);
W_77 = Ghimj(index,802);
W_82 = Ghimj(index,803);
W_85 = Ghimj(index,804);
W_86 = Ghimj(index,805);
W_93 = Ghimj(index,806);
W_94 = Ghimj(index,807);
W_96 = Ghimj(index,808);
W_99 = Ghimj(index,809);
W_102 = Ghimj(index,810);
W_106 = Ghimj(index,811);
W_107 = Ghimj(index,812);
W_108 = Ghimj(index,813);
W_109 = Ghimj(index,814);
W_110 = Ghimj(index,815);
W_111 = Ghimj(index,816);
W_113 = Ghimj(index,817);
W_115 = Ghimj(index,818);
W_117 = Ghimj(index,819);
W_119 = Ghimj(index,820);
W_121 = Ghimj(index,821);
W_124 = Ghimj(index,822);
W_125 = Ghimj(index,823);
W_126 = Ghimj(index,824);
W_127 = Ghimj(index,825);
W_129 = Ghimj(index,826);
W_133 = Ghimj(index,827);
W_135 = Ghimj(index,828);
W_136 = Ghimj(index,829);
W_137 = Ghimj(index,830);
a = - W_38/ Ghimj(index,255);
W_38 = -a;
W_68 = W_68+ a *Ghimj(index,256);
W_126 = W_126+ a *Ghimj(index,257);
a = - W_63/ Ghimj(index,323);
W_63 = -a;
W_121 = W_121+ a *Ghimj(index,324);
W_126 = W_126+ a *Ghimj(index,325);
W_137 = W_137+ a *Ghimj(index,326);
a = - W_68/ Ghimj(index,343);
W_68 = -a;
W_99 = W_99+ a *Ghimj(index,344);
W_126 = W_126+ a *Ghimj(index,345);
W_137 = W_137+ a *Ghimj(index,346);
a = - W_72/ Ghimj(index,360);
W_72 = -a;
W_94 = W_94+ a *Ghimj(index,361);
W_126 = W_126+ a *Ghimj(index,362);
W_137 = W_137+ a *Ghimj(index,363);
a = - W_77/ Ghimj(index,382);
W_77 = -a;
W_121 = W_121+ a *Ghimj(index,383);
W_126 = W_126+ a *Ghimj(index,384);
W_135 = W_135+ a *Ghimj(index,385);
a = - W_82/ Ghimj(index,412);
W_82 = -a;
W_113 = W_113+ a *Ghimj(index,413);
W_126 = W_126+ a *Ghimj(index,414);
W_137 = W_137+ a *Ghimj(index,415);
a = - W_85/ Ghimj(index,427);
W_85 = -a;
W_102 = W_102+ a *Ghimj(index,428);
W_111 = W_111+ a *Ghimj(index,429);
W_125 = W_125+ a *Ghimj(index,430);
W_126 = W_126+ a *Ghimj(index,431);
W_133 = W_133+ a *Ghimj(index,432);
W_137 = W_137+ a *Ghimj(index,433);
a = - W_86/ Ghimj(index,436);
W_86 = -a;
W_93 = W_93+ a *Ghimj(index,437);
W_125 = W_125+ a *Ghimj(index,438);
W_126 = W_126+ a *Ghimj(index,439);
W_133 = W_133+ a *Ghimj(index,440);
W_137 = W_137+ a *Ghimj(index,441);
a = - W_93/ Ghimj(index,497);
W_93 = -a;
W_125 = W_125+ a *Ghimj(index,498);
W_126 = W_126+ a *Ghimj(index,499);
W_133 = W_133+ a *Ghimj(index,500);
W_137 = W_137+ a *Ghimj(index,501);
a = - W_94/ Ghimj(index,505);
W_94 = -a;
W_125 = W_125+ a *Ghimj(index,506);
W_126 = W_126+ a *Ghimj(index,507);
W_133 = W_133+ a *Ghimj(index,508);
W_137 = W_137+ a *Ghimj(index,509);
a = - W_96/ Ghimj(index,538);
W_96 = -a;
W_107 = W_107+ a *Ghimj(index,539);
W_108 = W_108+ a *Ghimj(index,540);
W_109 = W_109+ a *Ghimj(index,541);
W_110 = W_110+ a *Ghimj(index,542);
W_113 = W_113+ a *Ghimj(index,543);
W_124 = W_124+ a *Ghimj(index,544);
W_125 = W_125+ a *Ghimj(index,545);
W_126 = W_126+ a *Ghimj(index,546);
W_133 = W_133+ a *Ghimj(index,547);
W_137 = W_137+ a *Ghimj(index,548);
a = - W_99/ Ghimj(index,565);
W_99 = -a;
W_102 = W_102+ a *Ghimj(index,566);
W_111 = W_111+ a *Ghimj(index,567);
W_125 = W_125+ a *Ghimj(index,568);
W_126 = W_126+ a *Ghimj(index,569);
W_133 = W_133+ a *Ghimj(index,570);
W_137 = W_137+ a *Ghimj(index,571);
a = - W_102/ Ghimj(index,600);
W_102 = -a;
W_125 = W_125+ a *Ghimj(index,601);
W_126 = W_126+ a *Ghimj(index,602);
W_133 = W_133+ a *Ghimj(index,603);
W_137 = W_137+ a *Ghimj(index,604);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
a = - W_108/ Ghimj(index,636);
W_108 = -a;
W_109 = W_109+ a *Ghimj(index,637);
W_113 = W_113+ a *Ghimj(index,638);
W_115 = W_115+ a *Ghimj(index,639);
W_124 = W_124+ a *Ghimj(index,640);
W_125 = W_125+ a *Ghimj(index,641);
W_126 = W_126+ a *Ghimj(index,642);
W_133 = W_133+ a *Ghimj(index,643);
W_135 = W_135+ a *Ghimj(index,644);
W_136 = W_136+ a *Ghimj(index,645);
W_137 = W_137+ a *Ghimj(index,646);
a = - W_109/ Ghimj(index,648);
W_109 = -a;
W_124 = W_124+ a *Ghimj(index,649);
W_125 = W_125+ a *Ghimj(index,650);
W_126 = W_126+ a *Ghimj(index,651);
W_133 = W_133+ a *Ghimj(index,652);
W_136 = W_136+ a *Ghimj(index,653);
W_137 = W_137+ a *Ghimj(index,654);
a = - W_110/ Ghimj(index,659);
W_110 = -a;
W_124 = W_124+ a *Ghimj(index,660);
W_125 = W_125+ a *Ghimj(index,661);
W_126 = W_126+ a *Ghimj(index,662);
W_133 = W_133+ a *Ghimj(index,663);
W_136 = W_136+ a *Ghimj(index,664);
W_137 = W_137+ a *Ghimj(index,665);
a = - W_111/ Ghimj(index,669);
W_111 = -a;
W_115 = W_115+ a *Ghimj(index,670);
W_124 = W_124+ a *Ghimj(index,671);
W_125 = W_125+ a *Ghimj(index,672);
W_126 = W_126+ a *Ghimj(index,673);
W_133 = W_133+ a *Ghimj(index,674);
W_136 = W_136+ a *Ghimj(index,675);
W_137 = W_137+ a *Ghimj(index,676);
a = - W_113/ Ghimj(index,689);
W_113 = -a;
W_124 = W_124+ a *Ghimj(index,690);
W_125 = W_125+ a *Ghimj(index,691);
W_126 = W_126+ a *Ghimj(index,692);
W_133 = W_133+ a *Ghimj(index,693);
W_135 = W_135+ a *Ghimj(index,694);
W_136 = W_136+ a *Ghimj(index,695);
W_137 = W_137+ a *Ghimj(index,696);
a = - W_115/ Ghimj(index,706);
W_115 = -a;
W_124 = W_124+ a *Ghimj(index,707);
W_126 = W_126+ a *Ghimj(index,708);
W_127 = W_127+ a *Ghimj(index,709);
W_129 = W_129+ a *Ghimj(index,710);
W_133 = W_133+ a *Ghimj(index,711);
W_136 = W_136+ a *Ghimj(index,712);
W_137 = W_137+ a *Ghimj(index,713);
a = - W_117/ Ghimj(index,731);
W_117 = -a;
W_121 = W_121+ a *Ghimj(index,732);
W_124 = W_124+ a *Ghimj(index,733);
W_125 = W_125+ a *Ghimj(index,734);
W_126 = W_126+ a *Ghimj(index,735);
W_127 = W_127+ a *Ghimj(index,736);
W_129 = W_129+ a *Ghimj(index,737);
W_133 = W_133+ a *Ghimj(index,738);
W_136 = W_136+ a *Ghimj(index,739);
W_137 = W_137+ a *Ghimj(index,740);
a = - W_119/ Ghimj(index,767);
W_119 = -a;
W_121 = W_121+ a *Ghimj(index,768);
W_124 = W_124+ a *Ghimj(index,769);
W_125 = W_125+ a *Ghimj(index,770);
W_126 = W_126+ a *Ghimj(index,771);
W_127 = W_127+ a *Ghimj(index,772);
W_129 = W_129+ a *Ghimj(index,773);
W_133 = W_133+ a *Ghimj(index,774);
W_136 = W_136+ a *Ghimj(index,775);
W_137 = W_137+ a *Ghimj(index,776);
Ghimj(index,798) = W_38;
Ghimj(index,799) = W_63;
Ghimj(index,800) = W_68;
Ghimj(index,801) = W_72;
Ghimj(index,802) = W_77;
Ghimj(index,803) = W_82;
Ghimj(index,804) = W_85;
Ghimj(index,805) = W_86;
Ghimj(index,806) = W_93;
Ghimj(index,807) = W_94;
Ghimj(index,808) = W_96;
Ghimj(index,809) = W_99;
Ghimj(index,810) = W_102;
Ghimj(index,811) = W_106;
Ghimj(index,812) = W_107;
Ghimj(index,813) = W_108;
Ghimj(index,814) = W_109;
Ghimj(index,815) = W_110;
Ghimj(index,816) = W_111;
Ghimj(index,817) = W_113;
Ghimj(index,818) = W_115;
Ghimj(index,819) = W_117;
Ghimj(index,820) = W_119;
Ghimj(index,821) = W_121;
Ghimj(index,822) = W_124;
Ghimj(index,823) = W_125;
Ghimj(index,824) = W_126;
Ghimj(index,825) = W_127;
Ghimj(index,826) = W_129;
Ghimj(index,827) = W_133;
Ghimj(index,828) = W_135;
Ghimj(index,829) = W_136;
Ghimj(index,830) = W_137;
W_75 = Ghimj(index,831);
W_95 = Ghimj(index,832);
W_96 = Ghimj(index,833);
W_97 = Ghimj(index,834);
W_98 = Ghimj(index,835);
W_103 = Ghimj(index,836);
W_106 = Ghimj(index,837);
W_107 = Ghimj(index,838);
W_108 = Ghimj(index,839);
W_109 = Ghimj(index,840);
W_110 = Ghimj(index,841);
W_113 = Ghimj(index,842);
W_115 = Ghimj(index,843);
W_119 = Ghimj(index,844);
W_120 = Ghimj(index,845);
W_121 = Ghimj(index,846);
W_122 = Ghimj(index,847);
W_124 = Ghimj(index,848);
W_125 = Ghimj(index,849);
W_126 = Ghimj(index,850);
W_127 = Ghimj(index,851);
W_128 = Ghimj(index,852);
W_129 = Ghimj(index,853);
W_130 = Ghimj(index,854);
W_131 = Ghimj(index,855);
W_133 = Ghimj(index,856);
W_135 = Ghimj(index,857);
W_136 = Ghimj(index,858);
W_137 = Ghimj(index,859);
W_138 = Ghimj(index,860);
a = - W_75/ Ghimj(index,374);
W_75 = -a;
W_120 = W_120+ a *Ghimj(index,375);
W_126 = W_126+ a *Ghimj(index,376);
a = - W_95/ Ghimj(index,514);
W_95 = -a;
W_96 = W_96+ a *Ghimj(index,515);
W_98 = W_98+ a *Ghimj(index,516);
W_103 = W_103+ a *Ghimj(index,517);
W_106 = W_106+ a *Ghimj(index,518);
W_107 = W_107+ a *Ghimj(index,519);
W_109 = W_109+ a *Ghimj(index,520);
W_110 = W_110+ a *Ghimj(index,521);
W_113 = W_113+ a *Ghimj(index,522);
W_119 = W_119+ a *Ghimj(index,523);
W_121 = W_121+ a *Ghimj(index,524);
W_124 = W_124+ a *Ghimj(index,525);
W_125 = W_125+ a *Ghimj(index,526);
W_126 = W_126+ a *Ghimj(index,527);
W_127 = W_127+ a *Ghimj(index,528);
W_129 = W_129+ a *Ghimj(index,529);
W_130 = W_130+ a *Ghimj(index,530);
W_133 = W_133+ a *Ghimj(index,531);
W_135 = W_135+ a *Ghimj(index,532);
W_136 = W_136+ a *Ghimj(index,533);
W_137 = W_137+ a *Ghimj(index,534);
a = - W_96/ Ghimj(index,538);
W_96 = -a;
W_107 = W_107+ a *Ghimj(index,539);
W_108 = W_108+ a *Ghimj(index,540);
W_109 = W_109+ a *Ghimj(index,541);
W_110 = W_110+ a *Ghimj(index,542);
W_113 = W_113+ a *Ghimj(index,543);
W_124 = W_124+ a *Ghimj(index,544);
W_125 = W_125+ a *Ghimj(index,545);
W_126 = W_126+ a *Ghimj(index,546);
W_133 = W_133+ a *Ghimj(index,547);
W_137 = W_137+ a *Ghimj(index,548);
a = - W_97/ Ghimj(index,549);
W_97 = -a;
W_98 = W_98+ a *Ghimj(index,550);
W_120 = W_120+ a *Ghimj(index,551);
W_122 = W_122+ a *Ghimj(index,552);
W_126 = W_126+ a *Ghimj(index,553);
W_127 = W_127+ a *Ghimj(index,554);
W_130 = W_130+ a *Ghimj(index,555);
W_137 = W_137+ a *Ghimj(index,556);
a = - W_98/ Ghimj(index,557);
W_98 = -a;
W_107 = W_107+ a *Ghimj(index,558);
W_120 = W_120+ a *Ghimj(index,559);
W_124 = W_124+ a *Ghimj(index,560);
W_126 = W_126+ a *Ghimj(index,561);
W_127 = W_127+ a *Ghimj(index,562);
a = - W_103/ Ghimj(index,605);
W_103 = -a;
W_124 = W_124+ a *Ghimj(index,606);
W_126 = W_126+ a *Ghimj(index,607);
W_127 = W_127+ a *Ghimj(index,608);
W_129 = W_129+ a *Ghimj(index,609);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
a = - W_108/ Ghimj(index,636);
W_108 = -a;
W_109 = W_109+ a *Ghimj(index,637);
W_113 = W_113+ a *Ghimj(index,638);
W_115 = W_115+ a *Ghimj(index,639);
W_124 = W_124+ a *Ghimj(index,640);
W_125 = W_125+ a *Ghimj(index,641);
W_126 = W_126+ a *Ghimj(index,642);
W_133 = W_133+ a *Ghimj(index,643);
W_135 = W_135+ a *Ghimj(index,644);
W_136 = W_136+ a *Ghimj(index,645);
W_137 = W_137+ a *Ghimj(index,646);
a = - W_109/ Ghimj(index,648);
W_109 = -a;
W_124 = W_124+ a *Ghimj(index,649);
W_125 = W_125+ a *Ghimj(index,650);
W_126 = W_126+ a *Ghimj(index,651);
W_133 = W_133+ a *Ghimj(index,652);
W_136 = W_136+ a *Ghimj(index,653);
W_137 = W_137+ a *Ghimj(index,654);
a = - W_110/ Ghimj(index,659);
W_110 = -a;
W_124 = W_124+ a *Ghimj(index,660);
W_125 = W_125+ a *Ghimj(index,661);
W_126 = W_126+ a *Ghimj(index,662);
W_133 = W_133+ a *Ghimj(index,663);
W_136 = W_136+ a *Ghimj(index,664);
W_137 = W_137+ a *Ghimj(index,665);
a = - W_113/ Ghimj(index,689);
W_113 = -a;
W_124 = W_124+ a *Ghimj(index,690);
W_125 = W_125+ a *Ghimj(index,691);
W_126 = W_126+ a *Ghimj(index,692);
W_133 = W_133+ a *Ghimj(index,693);
W_135 = W_135+ a *Ghimj(index,694);
W_136 = W_136+ a *Ghimj(index,695);
W_137 = W_137+ a *Ghimj(index,696);
a = - W_115/ Ghimj(index,706);
W_115 = -a;
W_124 = W_124+ a *Ghimj(index,707);
W_126 = W_126+ a *Ghimj(index,708);
W_127 = W_127+ a *Ghimj(index,709);
W_129 = W_129+ a *Ghimj(index,710);
W_133 = W_133+ a *Ghimj(index,711);
W_136 = W_136+ a *Ghimj(index,712);
W_137 = W_137+ a *Ghimj(index,713);
a = - W_119/ Ghimj(index,767);
W_119 = -a;
W_121 = W_121+ a *Ghimj(index,768);
W_124 = W_124+ a *Ghimj(index,769);
W_125 = W_125+ a *Ghimj(index,770);
W_126 = W_126+ a *Ghimj(index,771);
W_127 = W_127+ a *Ghimj(index,772);
W_129 = W_129+ a *Ghimj(index,773);
W_133 = W_133+ a *Ghimj(index,774);
W_136 = W_136+ a *Ghimj(index,775);
W_137 = W_137+ a *Ghimj(index,776);
a = - W_120/ Ghimj(index,787);
W_120 = -a;
W_122 = W_122+ a *Ghimj(index,788);
W_124 = W_124+ a *Ghimj(index,789);
W_126 = W_126+ a *Ghimj(index,790);
W_127 = W_127+ a *Ghimj(index,791);
W_128 = W_128+ a *Ghimj(index,792);
W_130 = W_130+ a *Ghimj(index,793);
W_133 = W_133+ a *Ghimj(index,794);
W_135 = W_135+ a *Ghimj(index,795);
W_136 = W_136+ a *Ghimj(index,796);
W_137 = W_137+ a *Ghimj(index,797);
a = - W_121/ Ghimj(index,821);
W_121 = -a;
W_124 = W_124+ a *Ghimj(index,822);
W_125 = W_125+ a *Ghimj(index,823);
W_126 = W_126+ a *Ghimj(index,824);
W_127 = W_127+ a *Ghimj(index,825);
W_129 = W_129+ a *Ghimj(index,826);
W_133 = W_133+ a *Ghimj(index,827);
W_135 = W_135+ a *Ghimj(index,828);
W_136 = W_136+ a *Ghimj(index,829);
W_137 = W_137+ a *Ghimj(index,830);
Ghimj(index,831) = W_75;
Ghimj(index,832) = W_95;
Ghimj(index,833) = W_96;
Ghimj(index,834) = W_97;
Ghimj(index,835) = W_98;
Ghimj(index,836) = W_103;
Ghimj(index,837) = W_106;
Ghimj(index,838) = W_107;
Ghimj(index,839) = W_108;
Ghimj(index,840) = W_109;
Ghimj(index,841) = W_110;
Ghimj(index,842) = W_113;
Ghimj(index,843) = W_115;
Ghimj(index,844) = W_119;
Ghimj(index,845) = W_120;
Ghimj(index,846) = W_121;
Ghimj(index,847) = W_122;
Ghimj(index,848) = W_124;
Ghimj(index,849) = W_125;
Ghimj(index,850) = W_126;
Ghimj(index,851) = W_127;
Ghimj(index,852) = W_128;
Ghimj(index,853) = W_129;
Ghimj(index,854) = W_130;
Ghimj(index,855) = W_131;
Ghimj(index,856) = W_133;
Ghimj(index,857) = W_135;
Ghimj(index,858) = W_136;
Ghimj(index,859) = W_137;
Ghimj(index,860) = W_138;
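 // Next row of the in-place elimination: load its nonzeros (slots 861-884) into registers, subtract multiples of the already-factored pivot rows (a = -W_k / pivot followed by axpy-style updates), then write the row back.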
W_103 = Ghimj(index,861);
W_104 = Ghimj(index,862);
W_112 = Ghimj(index,863);
W_114 = Ghimj(index,864);
W_116 = Ghimj(index,865);
W_118 = Ghimj(index,866);
W_119 = Ghimj(index,867);
W_121 = Ghimj(index,868);
W_123 = Ghimj(index,869);
W_124 = Ghimj(index,870);
W_125 = Ghimj(index,871);
W_126 = Ghimj(index,872);
W_127 = Ghimj(index,873);
W_128 = Ghimj(index,874);
W_129 = Ghimj(index,875);
W_130 = Ghimj(index,876);
W_131 = Ghimj(index,877);
W_132 = Ghimj(index,878);
W_133 = Ghimj(index,879);
W_134 = Ghimj(index,880);
W_135 = Ghimj(index,881);
W_136 = Ghimj(index,882);
W_137 = Ghimj(index,883);
W_138 = Ghimj(index,884);
a = - W_103/ Ghimj(index,605);
W_103 = -a;
W_124 = W_124+ a *Ghimj(index,606);
W_126 = W_126+ a *Ghimj(index,607);
W_127 = W_127+ a *Ghimj(index,608);
W_129 = W_129+ a *Ghimj(index,609);
a = - W_104/ Ghimj(index,610);
W_104 = -a;
W_125 = W_125+ a *Ghimj(index,611);
W_126 = W_126+ a *Ghimj(index,612);
W_127 = W_127+ a *Ghimj(index,613);
W_129 = W_129+ a *Ghimj(index,614);
W_137 = W_137+ a *Ghimj(index,615);
a = - W_112/ Ghimj(index,677);
W_112 = -a;
W_116 = W_116+ a *Ghimj(index,678);
W_123 = W_123+ a *Ghimj(index,679);
W_126 = W_126+ a *Ghimj(index,680);
W_128 = W_128+ a *Ghimj(index,681);
W_134 = W_134+ a *Ghimj(index,682);
W_137 = W_137+ a *Ghimj(index,683);
W_138 = W_138+ a *Ghimj(index,684);
a = - W_114/ Ghimj(index,697);
W_114 = -a;
W_126 = W_126+ a *Ghimj(index,698);
W_127 = W_127+ a *Ghimj(index,699);
W_129 = W_129+ a *Ghimj(index,700);
W_132 = W_132+ a *Ghimj(index,701);
W_136 = W_136+ a *Ghimj(index,702);
a = - W_116/ Ghimj(index,714);
W_116 = -a;
W_123 = W_123+ a *Ghimj(index,715);
W_127 = W_127+ a *Ghimj(index,716);
W_128 = W_128+ a *Ghimj(index,717);
W_131 = W_131+ a *Ghimj(index,718);
W_134 = W_134+ a *Ghimj(index,719);
W_135 = W_135+ a *Ghimj(index,720);
W_138 = W_138+ a *Ghimj(index,721);
a = - W_118/ Ghimj(index,745);
W_118 = -a;
W_123 = W_123+ a *Ghimj(index,746);
W_125 = W_125+ a *Ghimj(index,747);
W_126 = W_126+ a *Ghimj(index,748);
W_127 = W_127+ a *Ghimj(index,749);
W_128 = W_128+ a *Ghimj(index,750);
W_129 = W_129+ a *Ghimj(index,751);
W_131 = W_131+ a *Ghimj(index,752);
W_132 = W_132+ a *Ghimj(index,753);
W_134 = W_134+ a *Ghimj(index,754);
W_135 = W_135+ a *Ghimj(index,755);
W_137 = W_137+ a *Ghimj(index,756);
W_138 = W_138+ a *Ghimj(index,757);
a = - W_119/ Ghimj(index,767);
W_119 = -a;
W_121 = W_121+ a *Ghimj(index,768);
W_124 = W_124+ a *Ghimj(index,769);
W_125 = W_125+ a *Ghimj(index,770);
W_126 = W_126+ a *Ghimj(index,771);
W_127 = W_127+ a *Ghimj(index,772);
W_129 = W_129+ a *Ghimj(index,773);
W_133 = W_133+ a *Ghimj(index,774);
W_136 = W_136+ a *Ghimj(index,775);
W_137 = W_137+ a *Ghimj(index,776);
a = - W_121/ Ghimj(index,821);
W_121 = -a;
W_124 = W_124+ a *Ghimj(index,822);
W_125 = W_125+ a *Ghimj(index,823);
W_126 = W_126+ a *Ghimj(index,824);
W_127 = W_127+ a *Ghimj(index,825);
W_129 = W_129+ a *Ghimj(index,826);
W_133 = W_133+ a *Ghimj(index,827);
W_135 = W_135+ a *Ghimj(index,828);
W_136 = W_136+ a *Ghimj(index,829);
W_137 = W_137+ a *Ghimj(index,830);
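 // Write back slots 861-884; slot 869 becomes the W_123 pivot used further down.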
Ghimj(index,861) = W_103;
Ghimj(index,862) = W_104;
Ghimj(index,863) = W_112;
Ghimj(index,864) = W_114;
Ghimj(index,865) = W_116;
Ghimj(index,866) = W_118;
Ghimj(index,867) = W_119;
Ghimj(index,868) = W_121;
Ghimj(index,869) = W_123;
Ghimj(index,870) = W_124;
Ghimj(index,871) = W_125;
Ghimj(index,872) = W_126;
Ghimj(index,873) = W_127;
Ghimj(index,874) = W_128;
Ghimj(index,875) = W_129;
Ghimj(index,876) = W_130;
Ghimj(index,877) = W_131;
Ghimj(index,878) = W_132;
Ghimj(index,879) = W_133;
Ghimj(index,880) = W_134;
Ghimj(index,881) = W_135;
Ghimj(index,882) = W_136;
Ghimj(index,883) = W_137;
Ghimj(index,884) = W_138;
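 // Load the row stored in slots 885-909.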
W_81 = Ghimj(index,885);
W_84 = Ghimj(index,886);
W_92 = Ghimj(index,887);
W_103 = Ghimj(index,888);
W_106 = Ghimj(index,889);
W_107 = Ghimj(index,890);
W_110 = Ghimj(index,891);
W_114 = Ghimj(index,892);
W_120 = Ghimj(index,893);
W_121 = Ghimj(index,894);
W_122 = Ghimj(index,895);
W_124 = Ghimj(index,896);
W_125 = Ghimj(index,897);
W_126 = Ghimj(index,898);
W_127 = Ghimj(index,899);
W_128 = Ghimj(index,900);
W_129 = Ghimj(index,901);
W_130 = Ghimj(index,902);
W_131 = Ghimj(index,903);
W_132 = Ghimj(index,904);
W_133 = Ghimj(index,905);
W_135 = Ghimj(index,906);
W_136 = Ghimj(index,907);
W_137 = Ghimj(index,908);
W_138 = Ghimj(index,909);
a = - W_81/ Ghimj(index,405);
W_81 = -a;
W_114 = W_114+ a *Ghimj(index,406);
W_124 = W_124+ a *Ghimj(index,407);
W_126 = W_126+ a *Ghimj(index,408);
W_127 = W_127+ a *Ghimj(index,409);
W_129 = W_129+ a *Ghimj(index,410);
W_136 = W_136+ a *Ghimj(index,411);
a = - W_84/ Ghimj(index,421);
W_84 = -a;
W_92 = W_92+ a *Ghimj(index,422);
W_124 = W_124+ a *Ghimj(index,423);
W_135 = W_135+ a *Ghimj(index,424);
W_137 = W_137+ a *Ghimj(index,425);
a = - W_92/ Ghimj(index,489);
W_92 = -a;
W_124 = W_124+ a *Ghimj(index,490);
W_126 = W_126+ a *Ghimj(index,491);
W_133 = W_133+ a *Ghimj(index,492);
W_135 = W_135+ a *Ghimj(index,493);
W_137 = W_137+ a *Ghimj(index,494);
a = - W_103/ Ghimj(index,605);
W_103 = -a;
W_124 = W_124+ a *Ghimj(index,606);
W_126 = W_126+ a *Ghimj(index,607);
W_127 = W_127+ a *Ghimj(index,608);
W_129 = W_129+ a *Ghimj(index,609);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
a = - W_110/ Ghimj(index,659);
W_110 = -a;
W_124 = W_124+ a *Ghimj(index,660);
W_125 = W_125+ a *Ghimj(index,661);
W_126 = W_126+ a *Ghimj(index,662);
W_133 = W_133+ a *Ghimj(index,663);
W_136 = W_136+ a *Ghimj(index,664);
W_137 = W_137+ a *Ghimj(index,665);
a = - W_114/ Ghimj(index,697);
W_114 = -a;
W_126 = W_126+ a *Ghimj(index,698);
W_127 = W_127+ a *Ghimj(index,699);
W_129 = W_129+ a *Ghimj(index,700);
W_132 = W_132+ a *Ghimj(index,701);
W_136 = W_136+ a *Ghimj(index,702);
a = - W_120/ Ghimj(index,787);
W_120 = -a;
W_122 = W_122+ a *Ghimj(index,788);
W_124 = W_124+ a *Ghimj(index,789);
W_126 = W_126+ a *Ghimj(index,790);
W_127 = W_127+ a *Ghimj(index,791);
W_128 = W_128+ a *Ghimj(index,792);
W_130 = W_130+ a *Ghimj(index,793);
W_133 = W_133+ a *Ghimj(index,794);
W_135 = W_135+ a *Ghimj(index,795);
W_136 = W_136+ a *Ghimj(index,796);
W_137 = W_137+ a *Ghimj(index,797);
a = - W_121/ Ghimj(index,821);
W_121 = -a;
W_124 = W_124+ a *Ghimj(index,822);
W_125 = W_125+ a *Ghimj(index,823);
W_126 = W_126+ a *Ghimj(index,824);
W_127 = W_127+ a *Ghimj(index,825);
W_129 = W_129+ a *Ghimj(index,826);
W_133 = W_133+ a *Ghimj(index,827);
W_135 = W_135+ a *Ghimj(index,828);
W_136 = W_136+ a *Ghimj(index,829);
W_137 = W_137+ a *Ghimj(index,830);
a = - W_122/ Ghimj(index,847);
W_122 = -a;
W_124 = W_124+ a *Ghimj(index,848);
W_125 = W_125+ a *Ghimj(index,849);
W_126 = W_126+ a *Ghimj(index,850);
W_127 = W_127+ a *Ghimj(index,851);
W_128 = W_128+ a *Ghimj(index,852);
W_129 = W_129+ a *Ghimj(index,853);
W_130 = W_130+ a *Ghimj(index,854);
W_131 = W_131+ a *Ghimj(index,855);
W_133 = W_133+ a *Ghimj(index,856);
W_135 = W_135+ a *Ghimj(index,857);
W_136 = W_136+ a *Ghimj(index,858);
W_137 = W_137+ a *Ghimj(index,859);
W_138 = W_138+ a *Ghimj(index,860);
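 // Write back slots 885-909; slot 896 becomes the W_124 pivot.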
Ghimj(index,885) = W_81;
Ghimj(index,886) = W_84;
Ghimj(index,887) = W_92;
Ghimj(index,888) = W_103;
Ghimj(index,889) = W_106;
Ghimj(index,890) = W_107;
Ghimj(index,891) = W_110;
Ghimj(index,892) = W_114;
Ghimj(index,893) = W_120;
Ghimj(index,894) = W_121;
Ghimj(index,895) = W_122;
Ghimj(index,896) = W_124;
Ghimj(index,897) = W_125;
Ghimj(index,898) = W_126;
Ghimj(index,899) = W_127;
Ghimj(index,900) = W_128;
Ghimj(index,901) = W_129;
Ghimj(index,902) = W_130;
Ghimj(index,903) = W_131;
Ghimj(index,904) = W_132;
Ghimj(index,905) = W_133;
Ghimj(index,906) = W_135;
Ghimj(index,907) = W_136;
Ghimj(index,908) = W_137;
Ghimj(index,909) = W_138;
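 // Load the row stored in slots 910-947.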
W_3 = Ghimj(index,910);
W_53 = Ghimj(index,911);
W_63 = Ghimj(index,912);
W_65 = Ghimj(index,913);
W_74 = Ghimj(index,914);
W_75 = Ghimj(index,915);
W_81 = Ghimj(index,916);
W_86 = Ghimj(index,917);
W_93 = Ghimj(index,918);
W_94 = Ghimj(index,919);
W_98 = Ghimj(index,920);
W_102 = Ghimj(index,921);
W_104 = Ghimj(index,922);
W_106 = Ghimj(index,923);
W_107 = Ghimj(index,924);
W_109 = Ghimj(index,925);
W_113 = Ghimj(index,926);
W_114 = Ghimj(index,927);
W_117 = Ghimj(index,928);
W_119 = Ghimj(index,929);
W_120 = Ghimj(index,930);
W_121 = Ghimj(index,931);
W_122 = Ghimj(index,932);
W_124 = Ghimj(index,933);
W_125 = Ghimj(index,934);
W_126 = Ghimj(index,935);
W_127 = Ghimj(index,936);
W_128 = Ghimj(index,937);
W_129 = Ghimj(index,938);
W_130 = Ghimj(index,939);
W_131 = Ghimj(index,940);
W_132 = Ghimj(index,941);
W_133 = Ghimj(index,942);
W_134 = Ghimj(index,943);
W_135 = Ghimj(index,944);
W_136 = Ghimj(index,945);
W_137 = Ghimj(index,946);
W_138 = Ghimj(index,947);
a = - W_3/ Ghimj(index,3);
W_3 = -a;
a = - W_53/ Ghimj(index,290);
W_53 = -a;
W_126 = W_126+ a *Ghimj(index,291);
a = - W_63/ Ghimj(index,323);
W_63 = -a;
W_121 = W_121+ a *Ghimj(index,324);
W_126 = W_126+ a *Ghimj(index,325);
W_137 = W_137+ a *Ghimj(index,326);
a = - W_65/ Ghimj(index,331);
W_65 = -a;
W_114 = W_114+ a *Ghimj(index,332);
W_126 = W_126+ a *Ghimj(index,333);
W_132 = W_132+ a *Ghimj(index,334);
a = - W_74/ Ghimj(index,368);
W_74 = -a;
W_117 = W_117+ a *Ghimj(index,369);
W_121 = W_121+ a *Ghimj(index,370);
W_125 = W_125+ a *Ghimj(index,371);
W_126 = W_126+ a *Ghimj(index,372);
W_137 = W_137+ a *Ghimj(index,373);
a = - W_75/ Ghimj(index,374);
W_75 = -a;
W_120 = W_120+ a *Ghimj(index,375);
W_126 = W_126+ a *Ghimj(index,376);
a = - W_81/ Ghimj(index,405);
W_81 = -a;
W_114 = W_114+ a *Ghimj(index,406);
W_124 = W_124+ a *Ghimj(index,407);
W_126 = W_126+ a *Ghimj(index,408);
W_127 = W_127+ a *Ghimj(index,409);
W_129 = W_129+ a *Ghimj(index,410);
W_136 = W_136+ a *Ghimj(index,411);
a = - W_86/ Ghimj(index,436);
W_86 = -a;
W_93 = W_93+ a *Ghimj(index,437);
W_125 = W_125+ a *Ghimj(index,438);
W_126 = W_126+ a *Ghimj(index,439);
W_133 = W_133+ a *Ghimj(index,440);
W_137 = W_137+ a *Ghimj(index,441);
a = - W_93/ Ghimj(index,497);
W_93 = -a;
W_125 = W_125+ a *Ghimj(index,498);
W_126 = W_126+ a *Ghimj(index,499);
W_133 = W_133+ a *Ghimj(index,500);
W_137 = W_137+ a *Ghimj(index,501);
a = - W_94/ Ghimj(index,505);
W_94 = -a;
W_125 = W_125+ a *Ghimj(index,506);
W_126 = W_126+ a *Ghimj(index,507);
W_133 = W_133+ a *Ghimj(index,508);
W_137 = W_137+ a *Ghimj(index,509);
a = - W_98/ Ghimj(index,557);
W_98 = -a;
W_107 = W_107+ a *Ghimj(index,558);
W_120 = W_120+ a *Ghimj(index,559);
W_124 = W_124+ a *Ghimj(index,560);
W_126 = W_126+ a *Ghimj(index,561);
W_127 = W_127+ a *Ghimj(index,562);
a = - W_102/ Ghimj(index,600);
W_102 = -a;
W_125 = W_125+ a *Ghimj(index,601);
W_126 = W_126+ a *Ghimj(index,602);
W_133 = W_133+ a *Ghimj(index,603);
W_137 = W_137+ a *Ghimj(index,604);
a = - W_104/ Ghimj(index,610);
W_104 = -a;
W_125 = W_125+ a *Ghimj(index,611);
W_126 = W_126+ a *Ghimj(index,612);
W_127 = W_127+ a *Ghimj(index,613);
W_129 = W_129+ a *Ghimj(index,614);
W_137 = W_137+ a *Ghimj(index,615);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
a = - W_109/ Ghimj(index,648);
W_109 = -a;
W_124 = W_124+ a *Ghimj(index,649);
W_125 = W_125+ a *Ghimj(index,650);
W_126 = W_126+ a *Ghimj(index,651);
W_133 = W_133+ a *Ghimj(index,652);
W_136 = W_136+ a *Ghimj(index,653);
W_137 = W_137+ a *Ghimj(index,654);
a = - W_113/ Ghimj(index,689);
W_113 = -a;
W_124 = W_124+ a *Ghimj(index,690);
W_125 = W_125+ a *Ghimj(index,691);
W_126 = W_126+ a *Ghimj(index,692);
W_133 = W_133+ a *Ghimj(index,693);
W_135 = W_135+ a *Ghimj(index,694);
W_136 = W_136+ a *Ghimj(index,695);
W_137 = W_137+ a *Ghimj(index,696);
a = - W_114/ Ghimj(index,697);
W_114 = -a;
W_126 = W_126+ a *Ghimj(index,698);
W_127 = W_127+ a *Ghimj(index,699);
W_129 = W_129+ a *Ghimj(index,700);
W_132 = W_132+ a *Ghimj(index,701);
W_136 = W_136+ a *Ghimj(index,702);
a = - W_117/ Ghimj(index,731);
W_117 = -a;
W_121 = W_121+ a *Ghimj(index,732);
W_124 = W_124+ a *Ghimj(index,733);
W_125 = W_125+ a *Ghimj(index,734);
W_126 = W_126+ a *Ghimj(index,735);
W_127 = W_127+ a *Ghimj(index,736);
W_129 = W_129+ a *Ghimj(index,737);
W_133 = W_133+ a *Ghimj(index,738);
W_136 = W_136+ a *Ghimj(index,739);
W_137 = W_137+ a *Ghimj(index,740);
a = - W_119/ Ghimj(index,767);
W_119 = -a;
W_121 = W_121+ a *Ghimj(index,768);
W_124 = W_124+ a *Ghimj(index,769);
W_125 = W_125+ a *Ghimj(index,770);
W_126 = W_126+ a *Ghimj(index,771);
W_127 = W_127+ a *Ghimj(index,772);
W_129 = W_129+ a *Ghimj(index,773);
W_133 = W_133+ a *Ghimj(index,774);
W_136 = W_136+ a *Ghimj(index,775);
W_137 = W_137+ a *Ghimj(index,776);
a = - W_120/ Ghimj(index,787);
W_120 = -a;
W_122 = W_122+ a *Ghimj(index,788);
W_124 = W_124+ a *Ghimj(index,789);
W_126 = W_126+ a *Ghimj(index,790);
W_127 = W_127+ a *Ghimj(index,791);
W_128 = W_128+ a *Ghimj(index,792);
W_130 = W_130+ a *Ghimj(index,793);
W_133 = W_133+ a *Ghimj(index,794);
W_135 = W_135+ a *Ghimj(index,795);
W_136 = W_136+ a *Ghimj(index,796);
W_137 = W_137+ a *Ghimj(index,797);
a = - W_121/ Ghimj(index,821);
W_121 = -a;
W_124 = W_124+ a *Ghimj(index,822);
W_125 = W_125+ a *Ghimj(index,823);
W_126 = W_126+ a *Ghimj(index,824);
W_127 = W_127+ a *Ghimj(index,825);
W_129 = W_129+ a *Ghimj(index,826);
W_133 = W_133+ a *Ghimj(index,827);
W_135 = W_135+ a *Ghimj(index,828);
W_136 = W_136+ a *Ghimj(index,829);
W_137 = W_137+ a *Ghimj(index,830);
a = - W_122/ Ghimj(index,847);
W_122 = -a;
W_124 = W_124+ a *Ghimj(index,848);
W_125 = W_125+ a *Ghimj(index,849);
W_126 = W_126+ a *Ghimj(index,850);
W_127 = W_127+ a *Ghimj(index,851);
W_128 = W_128+ a *Ghimj(index,852);
W_129 = W_129+ a *Ghimj(index,853);
W_130 = W_130+ a *Ghimj(index,854);
W_131 = W_131+ a *Ghimj(index,855);
W_133 = W_133+ a *Ghimj(index,856);
W_135 = W_135+ a *Ghimj(index,857);
W_136 = W_136+ a *Ghimj(index,858);
W_137 = W_137+ a *Ghimj(index,859);
W_138 = W_138+ a *Ghimj(index,860);
a = - W_124/ Ghimj(index,896);
W_124 = -a;
W_125 = W_125+ a *Ghimj(index,897);
W_126 = W_126+ a *Ghimj(index,898);
W_127 = W_127+ a *Ghimj(index,899);
W_128 = W_128+ a *Ghimj(index,900);
W_129 = W_129+ a *Ghimj(index,901);
W_130 = W_130+ a *Ghimj(index,902);
W_131 = W_131+ a *Ghimj(index,903);
W_132 = W_132+ a *Ghimj(index,904);
W_133 = W_133+ a *Ghimj(index,905);
W_135 = W_135+ a *Ghimj(index,906);
W_136 = W_136+ a *Ghimj(index,907);
W_137 = W_137+ a *Ghimj(index,908);
W_138 = W_138+ a *Ghimj(index,909);
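 // Write back slots 910-947; slot 934 becomes the W_125 pivot.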
Ghimj(index,910) = W_3;
Ghimj(index,911) = W_53;
Ghimj(index,912) = W_63;
Ghimj(index,913) = W_65;
Ghimj(index,914) = W_74;
Ghimj(index,915) = W_75;
Ghimj(index,916) = W_81;
Ghimj(index,917) = W_86;
Ghimj(index,918) = W_93;
Ghimj(index,919) = W_94;
Ghimj(index,920) = W_98;
Ghimj(index,921) = W_102;
Ghimj(index,922) = W_104;
Ghimj(index,923) = W_106;
Ghimj(index,924) = W_107;
Ghimj(index,925) = W_109;
Ghimj(index,926) = W_113;
Ghimj(index,927) = W_114;
Ghimj(index,928) = W_117;
Ghimj(index,929) = W_119;
Ghimj(index,930) = W_120;
Ghimj(index,931) = W_121;
Ghimj(index,932) = W_122;
Ghimj(index,933) = W_124;
Ghimj(index,934) = W_125;
Ghimj(index,935) = W_126;
Ghimj(index,936) = W_127;
Ghimj(index,937) = W_128;
Ghimj(index,938) = W_129;
Ghimj(index,939) = W_130;
Ghimj(index,940) = W_131;
Ghimj(index,941) = W_132;
Ghimj(index,942) = W_133;
Ghimj(index,943) = W_134;
Ghimj(index,944) = W_135;
Ghimj(index,945) = W_136;
Ghimj(index,946) = W_137;
Ghimj(index,947) = W_138;
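 // Load the row stored in slots 948-1035, the densest row in this stretch of the factorization.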
W_40 = Ghimj(index,948);
W_44 = Ghimj(index,949);
W_45 = Ghimj(index,950);
W_47 = Ghimj(index,951);
W_48 = Ghimj(index,952);
W_49 = Ghimj(index,953);
W_52 = Ghimj(index,954);
W_53 = Ghimj(index,955);
W_54 = Ghimj(index,956);
W_55 = Ghimj(index,957);
W_56 = Ghimj(index,958);
W_57 = Ghimj(index,959);
W_58 = Ghimj(index,960);
W_61 = Ghimj(index,961);
W_62 = Ghimj(index,962);
W_63 = Ghimj(index,963);
W_64 = Ghimj(index,964);
W_65 = Ghimj(index,965);
W_66 = Ghimj(index,966);
W_67 = Ghimj(index,967);
W_68 = Ghimj(index,968);
W_69 = Ghimj(index,969);
W_70 = Ghimj(index,970);
W_71 = Ghimj(index,971);
W_72 = Ghimj(index,972);
W_73 = Ghimj(index,973);
W_74 = Ghimj(index,974);
W_75 = Ghimj(index,975);
W_76 = Ghimj(index,976);
W_77 = Ghimj(index,977);
W_78 = Ghimj(index,978);
W_79 = Ghimj(index,979);
W_81 = Ghimj(index,980);
W_82 = Ghimj(index,981);
W_84 = Ghimj(index,982);
W_85 = Ghimj(index,983);
W_86 = Ghimj(index,984);
W_87 = Ghimj(index,985);
W_88 = Ghimj(index,986);
W_89 = Ghimj(index,987);
W_91 = Ghimj(index,988);
W_92 = Ghimj(index,989);
W_93 = Ghimj(index,990);
W_94 = Ghimj(index,991);
W_95 = Ghimj(index,992);
W_96 = Ghimj(index,993);
W_97 = Ghimj(index,994);
W_98 = Ghimj(index,995);
W_99 = Ghimj(index,996);
W_100 = Ghimj(index,997);
W_101 = Ghimj(index,998);
W_102 = Ghimj(index,999);
W_103 = Ghimj(index,1000);
W_104 = Ghimj(index,1001);
W_105 = Ghimj(index,1002);
W_106 = Ghimj(index,1003);
W_107 = Ghimj(index,1004);
W_108 = Ghimj(index,1005);
W_109 = Ghimj(index,1006);
W_110 = Ghimj(index,1007);
W_111 = Ghimj(index,1008);
W_112 = Ghimj(index,1009);
W_113 = Ghimj(index,1010);
W_114 = Ghimj(index,1011);
W_115 = Ghimj(index,1012);
W_116 = Ghimj(index,1013);
W_117 = Ghimj(index,1014);
W_118 = Ghimj(index,1015);
W_119 = Ghimj(index,1016);
W_120 = Ghimj(index,1017);
W_121 = Ghimj(index,1018);
W_122 = Ghimj(index,1019);
W_123 = Ghimj(index,1020);
W_124 = Ghimj(index,1021);
W_125 = Ghimj(index,1022);
W_126 = Ghimj(index,1023);
W_127 = Ghimj(index,1024);
W_128 = Ghimj(index,1025);
W_129 = Ghimj(index,1026);
W_130 = Ghimj(index,1027);
W_131 = Ghimj(index,1028);
W_132 = Ghimj(index,1029);
W_133 = Ghimj(index,1030);
W_134 = Ghimj(index,1031);
W_135 = Ghimj(index,1032);
W_136 = Ghimj(index,1033);
W_137 = Ghimj(index,1034);
W_138 = Ghimj(index,1035);
a = - W_40/ Ghimj(index,260);
W_40 = -a;
W_126 = W_126+ a *Ghimj(index,261);
a = - W_44/ Ghimj(index,268);
W_44 = -a;
W_126 = W_126+ a *Ghimj(index,269);
a = - W_45/ Ghimj(index,270);
W_45 = -a;
W_126 = W_126+ a *Ghimj(index,271);
a = - W_47/ Ghimj(index,276);
W_47 = -a;
W_126 = W_126+ a *Ghimj(index,277);
a = - W_48/ Ghimj(index,278);
W_48 = -a;
W_126 = W_126+ a *Ghimj(index,279);
a = - W_49/ Ghimj(index,280);
W_49 = -a;
W_126 = W_126+ a *Ghimj(index,281);
a = - W_52/ Ghimj(index,288);
W_52 = -a;
W_126 = W_126+ a *Ghimj(index,289);
a = - W_53/ Ghimj(index,290);
W_53 = -a;
W_126 = W_126+ a *Ghimj(index,291);
a = - W_54/ Ghimj(index,292);
W_54 = -a;
W_126 = W_126+ a *Ghimj(index,293);
a = - W_55/ Ghimj(index,294);
W_55 = -a;
W_126 = W_126+ a *Ghimj(index,295);
a = - W_56/ Ghimj(index,296);
W_56 = -a;
W_65 = W_65+ a *Ghimj(index,297);
W_81 = W_81+ a *Ghimj(index,298);
W_126 = W_126+ a *Ghimj(index,299);
a = - W_57/ Ghimj(index,300);
W_57 = -a;
W_120 = W_120+ a *Ghimj(index,301);
W_126 = W_126+ a *Ghimj(index,302);
a = - W_58/ Ghimj(index,303);
W_58 = -a;
W_91 = W_91+ a *Ghimj(index,304);
W_126 = W_126+ a *Ghimj(index,305);
a = - W_61/ Ghimj(index,315);
W_61 = -a;
W_70 = W_70+ a *Ghimj(index,316);
W_87 = W_87+ a *Ghimj(index,317);
W_126 = W_126+ a *Ghimj(index,318);
a = - W_62/ Ghimj(index,319);
W_62 = -a;
W_93 = W_93+ a *Ghimj(index,320);
W_126 = W_126+ a *Ghimj(index,321);
W_133 = W_133+ a *Ghimj(index,322);
a = - W_63/ Ghimj(index,323);
W_63 = -a;
W_121 = W_121+ a *Ghimj(index,324);
W_126 = W_126+ a *Ghimj(index,325);
W_137 = W_137+ a *Ghimj(index,326);
a = - W_64/ Ghimj(index,327);
W_64 = -a;
W_113 = W_113+ a *Ghimj(index,328);
W_126 = W_126+ a *Ghimj(index,329);
W_135 = W_135+ a *Ghimj(index,330);
a = - W_65/ Ghimj(index,331);
W_65 = -a;
W_114 = W_114+ a *Ghimj(index,332);
W_126 = W_126+ a *Ghimj(index,333);
W_132 = W_132+ a *Ghimj(index,334);
a = - W_66/ Ghimj(index,335);
W_66 = -a;
W_109 = W_109+ a *Ghimj(index,336);
W_126 = W_126+ a *Ghimj(index,337);
W_137 = W_137+ a *Ghimj(index,338);
a = - W_67/ Ghimj(index,339);
W_67 = -a;
W_115 = W_115+ a *Ghimj(index,340);
W_126 = W_126+ a *Ghimj(index,341);
W_137 = W_137+ a *Ghimj(index,342);
a = - W_68/ Ghimj(index,343);
W_68 = -a;
W_99 = W_99+ a *Ghimj(index,344);
W_126 = W_126+ a *Ghimj(index,345);
W_137 = W_137+ a *Ghimj(index,346);
a = - W_69/ Ghimj(index,347);
W_69 = -a;
W_93 = W_93+ a *Ghimj(index,348);
W_126 = W_126+ a *Ghimj(index,349);
W_137 = W_137+ a *Ghimj(index,350);
a = - W_70/ Ghimj(index,352);
W_70 = -a;
W_84 = W_84+ a *Ghimj(index,353);
W_87 = W_87+ a *Ghimj(index,354);
W_126 = W_126+ a *Ghimj(index,355);
a = - W_71/ Ghimj(index,356);
W_71 = -a;
W_117 = W_117+ a *Ghimj(index,357);
W_126 = W_126+ a *Ghimj(index,358);
W_137 = W_137+ a *Ghimj(index,359);
a = - W_72/ Ghimj(index,360);
W_72 = -a;
W_94 = W_94+ a *Ghimj(index,361);
W_126 = W_126+ a *Ghimj(index,362);
W_137 = W_137+ a *Ghimj(index,363);
a = - W_73/ Ghimj(index,364);
W_73 = -a;
W_126 = W_126+ a *Ghimj(index,365);
W_135 = W_135+ a *Ghimj(index,366);
W_137 = W_137+ a *Ghimj(index,367);
a = - W_74/ Ghimj(index,368);
W_74 = -a;
W_117 = W_117+ a *Ghimj(index,369);
W_121 = W_121+ a *Ghimj(index,370);
W_125 = W_125+ a *Ghimj(index,371);
W_126 = W_126+ a *Ghimj(index,372);
W_137 = W_137+ a *Ghimj(index,373);
a = - W_75/ Ghimj(index,374);
W_75 = -a;
W_120 = W_120+ a *Ghimj(index,375);
W_126 = W_126+ a *Ghimj(index,376);
a = - W_76/ Ghimj(index,377);
W_76 = -a;
W_87 = W_87+ a *Ghimj(index,378);
W_126 = W_126+ a *Ghimj(index,379);
W_133 = W_133+ a *Ghimj(index,380);
W_135 = W_135+ a *Ghimj(index,381);
a = - W_77/ Ghimj(index,382);
W_77 = -a;
W_121 = W_121+ a *Ghimj(index,383);
W_126 = W_126+ a *Ghimj(index,384);
W_135 = W_135+ a *Ghimj(index,385);
a = - W_78/ Ghimj(index,386);
W_78 = -a;
W_103 = W_103+ a *Ghimj(index,387);
W_106 = W_106+ a *Ghimj(index,388);
W_107 = W_107+ a *Ghimj(index,389);
W_110 = W_110+ a *Ghimj(index,390);
W_124 = W_124+ a *Ghimj(index,391);
W_126 = W_126+ a *Ghimj(index,392);
a = - W_79/ Ghimj(index,393);
W_79 = -a;
W_102 = W_102+ a *Ghimj(index,394);
W_126 = W_126+ a *Ghimj(index,395);
W_137 = W_137+ a *Ghimj(index,396);
a = - W_81/ Ghimj(index,405);
W_81 = -a;
W_114 = W_114+ a *Ghimj(index,406);
W_124 = W_124+ a *Ghimj(index,407);
W_126 = W_126+ a *Ghimj(index,408);
W_127 = W_127+ a *Ghimj(index,409);
W_129 = W_129+ a *Ghimj(index,410);
W_136 = W_136+ a *Ghimj(index,411);
a = - W_82/ Ghimj(index,412);
W_82 = -a;
W_113 = W_113+ a *Ghimj(index,413);
W_126 = W_126+ a *Ghimj(index,414);
W_137 = W_137+ a *Ghimj(index,415);
a = - W_84/ Ghimj(index,421);
W_84 = -a;
W_92 = W_92+ a *Ghimj(index,422);
W_124 = W_124+ a *Ghimj(index,423);
W_135 = W_135+ a *Ghimj(index,424);
W_137 = W_137+ a *Ghimj(index,425);
a = - W_85/ Ghimj(index,427);
W_85 = -a;
W_102 = W_102+ a *Ghimj(index,428);
W_111 = W_111+ a *Ghimj(index,429);
W_125 = W_125+ a *Ghimj(index,430);
W_126 = W_126+ a *Ghimj(index,431);
W_133 = W_133+ a *Ghimj(index,432);
W_137 = W_137+ a *Ghimj(index,433);
a = - W_86/ Ghimj(index,436);
W_86 = -a;
W_93 = W_93+ a *Ghimj(index,437);
W_125 = W_125+ a *Ghimj(index,438);
W_126 = W_126+ a *Ghimj(index,439);
W_133 = W_133+ a *Ghimj(index,440);
W_137 = W_137+ a *Ghimj(index,441);
a = - W_87/ Ghimj(index,444);
W_87 = -a;
W_92 = W_92+ a *Ghimj(index,445);
W_124 = W_124+ a *Ghimj(index,446);
W_126 = W_126+ a *Ghimj(index,447);
W_135 = W_135+ a *Ghimj(index,448);
W_137 = W_137+ a *Ghimj(index,449);
a = - W_88/ Ghimj(index,450);
W_88 = -a;
W_103 = W_103+ a *Ghimj(index,451);
W_106 = W_106+ a *Ghimj(index,452);
W_124 = W_124+ a *Ghimj(index,453);
W_126 = W_126+ a *Ghimj(index,454);
W_127 = W_127+ a *Ghimj(index,455);
W_137 = W_137+ a *Ghimj(index,456);
a = - W_89/ Ghimj(index,457);
W_89 = -a;
W_93 = W_93+ a *Ghimj(index,458);
W_94 = W_94+ a *Ghimj(index,459);
W_102 = W_102+ a *Ghimj(index,460);
W_107 = W_107+ a *Ghimj(index,461);
W_109 = W_109+ a *Ghimj(index,462);
W_113 = W_113+ a *Ghimj(index,463);
W_117 = W_117+ a *Ghimj(index,464);
W_124 = W_124+ a *Ghimj(index,465);
W_125 = W_125+ a *Ghimj(index,466);
W_126 = W_126+ a *Ghimj(index,467);
a = - W_91/ Ghimj(index,481);
W_91 = -a;
W_106 = W_106+ a *Ghimj(index,482);
W_109 = W_109+ a *Ghimj(index,483);
W_126 = W_126+ a *Ghimj(index,484);
W_133 = W_133+ a *Ghimj(index,485);
W_136 = W_136+ a *Ghimj(index,486);
a = - W_92/ Ghimj(index,489);
W_92 = -a;
W_124 = W_124+ a *Ghimj(index,490);
W_126 = W_126+ a *Ghimj(index,491);
W_133 = W_133+ a *Ghimj(index,492);
W_135 = W_135+ a *Ghimj(index,493);
W_137 = W_137+ a *Ghimj(index,494);
a = - W_93/ Ghimj(index,497);
W_93 = -a;
W_125 = W_125+ a *Ghimj(index,498);
W_126 = W_126+ a *Ghimj(index,499);
W_133 = W_133+ a *Ghimj(index,500);
W_137 = W_137+ a *Ghimj(index,501);
a = - W_94/ Ghimj(index,505);
W_94 = -a;
W_125 = W_125+ a *Ghimj(index,506);
W_126 = W_126+ a *Ghimj(index,507);
W_133 = W_133+ a *Ghimj(index,508);
W_137 = W_137+ a *Ghimj(index,509);
a = - W_95/ Ghimj(index,514);
W_95 = -a;
W_96 = W_96+ a *Ghimj(index,515);
W_98 = W_98+ a *Ghimj(index,516);
W_103 = W_103+ a *Ghimj(index,517);
W_106 = W_106+ a *Ghimj(index,518);
W_107 = W_107+ a *Ghimj(index,519);
W_109 = W_109+ a *Ghimj(index,520);
W_110 = W_110+ a *Ghimj(index,521);
W_113 = W_113+ a *Ghimj(index,522);
W_119 = W_119+ a *Ghimj(index,523);
W_121 = W_121+ a *Ghimj(index,524);
W_124 = W_124+ a *Ghimj(index,525);
W_125 = W_125+ a *Ghimj(index,526);
W_126 = W_126+ a *Ghimj(index,527);
W_127 = W_127+ a *Ghimj(index,528);
W_129 = W_129+ a *Ghimj(index,529);
W_130 = W_130+ a *Ghimj(index,530);
W_133 = W_133+ a *Ghimj(index,531);
W_135 = W_135+ a *Ghimj(index,532);
W_136 = W_136+ a *Ghimj(index,533);
W_137 = W_137+ a *Ghimj(index,534);
a = - W_96/ Ghimj(index,538);
W_96 = -a;
W_107 = W_107+ a *Ghimj(index,539);
W_108 = W_108+ a *Ghimj(index,540);
W_109 = W_109+ a *Ghimj(index,541);
W_110 = W_110+ a *Ghimj(index,542);
W_113 = W_113+ a *Ghimj(index,543);
W_124 = W_124+ a *Ghimj(index,544);
W_125 = W_125+ a *Ghimj(index,545);
W_126 = W_126+ a *Ghimj(index,546);
W_133 = W_133+ a *Ghimj(index,547);
W_137 = W_137+ a *Ghimj(index,548);
a = - W_97/ Ghimj(index,549);
W_97 = -a;
W_98 = W_98+ a *Ghimj(index,550);
W_120 = W_120+ a *Ghimj(index,551);
W_122 = W_122+ a *Ghimj(index,552);
W_126 = W_126+ a *Ghimj(index,553);
W_127 = W_127+ a *Ghimj(index,554);
W_130 = W_130+ a *Ghimj(index,555);
W_137 = W_137+ a *Ghimj(index,556);
a = - W_98/ Ghimj(index,557);
W_98 = -a;
W_107 = W_107+ a *Ghimj(index,558);
W_120 = W_120+ a *Ghimj(index,559);
W_124 = W_124+ a *Ghimj(index,560);
W_126 = W_126+ a *Ghimj(index,561);
W_127 = W_127+ a *Ghimj(index,562);
a = - W_99/ Ghimj(index,565);
W_99 = -a;
W_102 = W_102+ a *Ghimj(index,566);
W_111 = W_111+ a *Ghimj(index,567);
W_125 = W_125+ a *Ghimj(index,568);
W_126 = W_126+ a *Ghimj(index,569);
W_133 = W_133+ a *Ghimj(index,570);
W_137 = W_137+ a *Ghimj(index,571);
a = - W_100/ Ghimj(index,573);
W_100 = -a;
W_105 = W_105+ a *Ghimj(index,574);
W_112 = W_112+ a *Ghimj(index,575);
W_116 = W_116+ a *Ghimj(index,576);
W_118 = W_118+ a *Ghimj(index,577);
W_123 = W_123+ a *Ghimj(index,578);
W_126 = W_126+ a *Ghimj(index,579);
W_127 = W_127+ a *Ghimj(index,580);
W_129 = W_129+ a *Ghimj(index,581);
W_132 = W_132+ a *Ghimj(index,582);
W_134 = W_134+ a *Ghimj(index,583);
W_138 = W_138+ a *Ghimj(index,584);
a = - W_101/ Ghimj(index,586);
W_101 = -a;
W_105 = W_105+ a *Ghimj(index,587);
W_114 = W_114+ a *Ghimj(index,588);
W_116 = W_116+ a *Ghimj(index,589);
W_119 = W_119+ a *Ghimj(index,590);
W_123 = W_123+ a *Ghimj(index,591);
W_126 = W_126+ a *Ghimj(index,592);
W_128 = W_128+ a *Ghimj(index,593);
W_130 = W_130+ a *Ghimj(index,594);
W_135 = W_135+ a *Ghimj(index,595);
W_136 = W_136+ a *Ghimj(index,596);
W_138 = W_138+ a *Ghimj(index,597);
a = - W_102/ Ghimj(index,600);
W_102 = -a;
W_125 = W_125+ a *Ghimj(index,601);
W_126 = W_126+ a *Ghimj(index,602);
W_133 = W_133+ a *Ghimj(index,603);
W_137 = W_137+ a *Ghimj(index,604);
a = - W_103/ Ghimj(index,605);
W_103 = -a;
W_124 = W_124+ a *Ghimj(index,606);
W_126 = W_126+ a *Ghimj(index,607);
W_127 = W_127+ a *Ghimj(index,608);
W_129 = W_129+ a *Ghimj(index,609);
a = - W_104/ Ghimj(index,610);
W_104 = -a;
W_125 = W_125+ a *Ghimj(index,611);
W_126 = W_126+ a *Ghimj(index,612);
W_127 = W_127+ a *Ghimj(index,613);
W_129 = W_129+ a *Ghimj(index,614);
W_137 = W_137+ a *Ghimj(index,615);
a = - W_105/ Ghimj(index,616);
W_105 = -a;
W_128 = W_128+ a *Ghimj(index,617);
W_129 = W_129+ a *Ghimj(index,618);
W_132 = W_132+ a *Ghimj(index,619);
W_135 = W_135+ a *Ghimj(index,620);
W_138 = W_138+ a *Ghimj(index,621);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
a = - W_108/ Ghimj(index,636);
W_108 = -a;
W_109 = W_109+ a *Ghimj(index,637);
W_113 = W_113+ a *Ghimj(index,638);
W_115 = W_115+ a *Ghimj(index,639);
W_124 = W_124+ a *Ghimj(index,640);
W_125 = W_125+ a *Ghimj(index,641);
W_126 = W_126+ a *Ghimj(index,642);
W_133 = W_133+ a *Ghimj(index,643);
W_135 = W_135+ a *Ghimj(index,644);
W_136 = W_136+ a *Ghimj(index,645);
W_137 = W_137+ a *Ghimj(index,646);
a = - W_109/ Ghimj(index,648);
W_109 = -a;
W_124 = W_124+ a *Ghimj(index,649);
W_125 = W_125+ a *Ghimj(index,650);
W_126 = W_126+ a *Ghimj(index,651);
W_133 = W_133+ a *Ghimj(index,652);
W_136 = W_136+ a *Ghimj(index,653);
W_137 = W_137+ a *Ghimj(index,654);
a = - W_110/ Ghimj(index,659);
W_110 = -a;
W_124 = W_124+ a *Ghimj(index,660);
W_125 = W_125+ a *Ghimj(index,661);
W_126 = W_126+ a *Ghimj(index,662);
W_133 = W_133+ a *Ghimj(index,663);
W_136 = W_136+ a *Ghimj(index,664);
W_137 = W_137+ a *Ghimj(index,665);
a = - W_111/ Ghimj(index,669);
W_111 = -a;
W_115 = W_115+ a *Ghimj(index,670);
W_124 = W_124+ a *Ghimj(index,671);
W_125 = W_125+ a *Ghimj(index,672);
W_126 = W_126+ a *Ghimj(index,673);
W_133 = W_133+ a *Ghimj(index,674);
W_136 = W_136+ a *Ghimj(index,675);
W_137 = W_137+ a *Ghimj(index,676);
a = - W_112/ Ghimj(index,677);
W_112 = -a;
W_116 = W_116+ a *Ghimj(index,678);
W_123 = W_123+ a *Ghimj(index,679);
W_126 = W_126+ a *Ghimj(index,680);
W_128 = W_128+ a *Ghimj(index,681);
W_134 = W_134+ a *Ghimj(index,682);
W_137 = W_137+ a *Ghimj(index,683);
W_138 = W_138+ a *Ghimj(index,684);
a = - W_113/ Ghimj(index,689);
W_113 = -a;
W_124 = W_124+ a *Ghimj(index,690);
W_125 = W_125+ a *Ghimj(index,691);
W_126 = W_126+ a *Ghimj(index,692);
W_133 = W_133+ a *Ghimj(index,693);
W_135 = W_135+ a *Ghimj(index,694);
W_136 = W_136+ a *Ghimj(index,695);
W_137 = W_137+ a *Ghimj(index,696);
a = - W_114/ Ghimj(index,697);
W_114 = -a;
W_126 = W_126+ a *Ghimj(index,698);
W_127 = W_127+ a *Ghimj(index,699);
W_129 = W_129+ a *Ghimj(index,700);
W_132 = W_132+ a *Ghimj(index,701);
W_136 = W_136+ a *Ghimj(index,702);
a = - W_115/ Ghimj(index,706);
W_115 = -a;
W_124 = W_124+ a *Ghimj(index,707);
W_126 = W_126+ a *Ghimj(index,708);
W_127 = W_127+ a *Ghimj(index,709);
W_129 = W_129+ a *Ghimj(index,710);
W_133 = W_133+ a *Ghimj(index,711);
W_136 = W_136+ a *Ghimj(index,712);
W_137 = W_137+ a *Ghimj(index,713);
a = - W_116/ Ghimj(index,714);
W_116 = -a;
W_123 = W_123+ a *Ghimj(index,715);
W_127 = W_127+ a *Ghimj(index,716);
W_128 = W_128+ a *Ghimj(index,717);
W_131 = W_131+ a *Ghimj(index,718);
W_134 = W_134+ a *Ghimj(index,719);
W_135 = W_135+ a *Ghimj(index,720);
W_138 = W_138+ a *Ghimj(index,721);
a = - W_117/ Ghimj(index,731);
W_117 = -a;
W_121 = W_121+ a *Ghimj(index,732);
W_124 = W_124+ a *Ghimj(index,733);
W_125 = W_125+ a *Ghimj(index,734);
W_126 = W_126+ a *Ghimj(index,735);
W_127 = W_127+ a *Ghimj(index,736);
W_129 = W_129+ a *Ghimj(index,737);
W_133 = W_133+ a *Ghimj(index,738);
W_136 = W_136+ a *Ghimj(index,739);
W_137 = W_137+ a *Ghimj(index,740);
a = - W_118/ Ghimj(index,745);
W_118 = -a;
W_123 = W_123+ a *Ghimj(index,746);
W_125 = W_125+ a *Ghimj(index,747);
W_126 = W_126+ a *Ghimj(index,748);
W_127 = W_127+ a *Ghimj(index,749);
W_128 = W_128+ a *Ghimj(index,750);
W_129 = W_129+ a *Ghimj(index,751);
W_131 = W_131+ a *Ghimj(index,752);
W_132 = W_132+ a *Ghimj(index,753);
W_134 = W_134+ a *Ghimj(index,754);
W_135 = W_135+ a *Ghimj(index,755);
W_137 = W_137+ a *Ghimj(index,756);
W_138 = W_138+ a *Ghimj(index,757);
a = - W_119/ Ghimj(index,767);
W_119 = -a;
W_121 = W_121+ a *Ghimj(index,768);
W_124 = W_124+ a *Ghimj(index,769);
W_125 = W_125+ a *Ghimj(index,770);
W_126 = W_126+ a *Ghimj(index,771);
W_127 = W_127+ a *Ghimj(index,772);
W_129 = W_129+ a *Ghimj(index,773);
W_133 = W_133+ a *Ghimj(index,774);
W_136 = W_136+ a *Ghimj(index,775);
W_137 = W_137+ a *Ghimj(index,776);
a = - W_120/ Ghimj(index,787);
W_120 = -a;
W_122 = W_122+ a *Ghimj(index,788);
W_124 = W_124+ a *Ghimj(index,789);
W_126 = W_126+ a *Ghimj(index,790);
W_127 = W_127+ a *Ghimj(index,791);
W_128 = W_128+ a *Ghimj(index,792);
W_130 = W_130+ a *Ghimj(index,793);
W_133 = W_133+ a *Ghimj(index,794);
W_135 = W_135+ a *Ghimj(index,795);
W_136 = W_136+ a *Ghimj(index,796);
W_137 = W_137+ a *Ghimj(index,797);
a = - W_121/ Ghimj(index,821);
W_121 = -a;
W_124 = W_124+ a *Ghimj(index,822);
W_125 = W_125+ a *Ghimj(index,823);
W_126 = W_126+ a *Ghimj(index,824);
W_127 = W_127+ a *Ghimj(index,825);
W_129 = W_129+ a *Ghimj(index,826);
W_133 = W_133+ a *Ghimj(index,827);
W_135 = W_135+ a *Ghimj(index,828);
W_136 = W_136+ a *Ghimj(index,829);
W_137 = W_137+ a *Ghimj(index,830);
a = - W_122/ Ghimj(index,847);
W_122 = -a;
W_124 = W_124+ a *Ghimj(index,848);
W_125 = W_125+ a *Ghimj(index,849);
W_126 = W_126+ a *Ghimj(index,850);
W_127 = W_127+ a *Ghimj(index,851);
W_128 = W_128+ a *Ghimj(index,852);
W_129 = W_129+ a *Ghimj(index,853);
W_130 = W_130+ a *Ghimj(index,854);
W_131 = W_131+ a *Ghimj(index,855);
W_133 = W_133+ a *Ghimj(index,856);
W_135 = W_135+ a *Ghimj(index,857);
W_136 = W_136+ a *Ghimj(index,858);
W_137 = W_137+ a *Ghimj(index,859);
W_138 = W_138+ a *Ghimj(index,860);
a = - W_123/ Ghimj(index,869);
W_123 = -a;
W_124 = W_124+ a *Ghimj(index,870);
W_125 = W_125+ a *Ghimj(index,871);
W_126 = W_126+ a *Ghimj(index,872);
W_127 = W_127+ a *Ghimj(index,873);
W_128 = W_128+ a *Ghimj(index,874);
W_129 = W_129+ a *Ghimj(index,875);
W_130 = W_130+ a *Ghimj(index,876);
W_131 = W_131+ a *Ghimj(index,877);
W_132 = W_132+ a *Ghimj(index,878);
W_133 = W_133+ a *Ghimj(index,879);
W_134 = W_134+ a *Ghimj(index,880);
W_135 = W_135+ a *Ghimj(index,881);
W_136 = W_136+ a *Ghimj(index,882);
W_137 = W_137+ a *Ghimj(index,883);
W_138 = W_138+ a *Ghimj(index,884);
a = - W_124/ Ghimj(index,896);
W_124 = -a;
W_125 = W_125+ a *Ghimj(index,897);
W_126 = W_126+ a *Ghimj(index,898);
W_127 = W_127+ a *Ghimj(index,899);
W_128 = W_128+ a *Ghimj(index,900);
W_129 = W_129+ a *Ghimj(index,901);
W_130 = W_130+ a *Ghimj(index,902);
W_131 = W_131+ a *Ghimj(index,903);
W_132 = W_132+ a *Ghimj(index,904);
W_133 = W_133+ a *Ghimj(index,905);
W_135 = W_135+ a *Ghimj(index,906);
W_136 = W_136+ a *Ghimj(index,907);
W_137 = W_137+ a *Ghimj(index,908);
W_138 = W_138+ a *Ghimj(index,909);
a = - W_125/ Ghimj(index,934);
W_125 = -a;
W_126 = W_126+ a *Ghimj(index,935);
W_127 = W_127+ a *Ghimj(index,936);
W_128 = W_128+ a *Ghimj(index,937);
W_129 = W_129+ a *Ghimj(index,938);
W_130 = W_130+ a *Ghimj(index,939);
W_131 = W_131+ a *Ghimj(index,940);
W_132 = W_132+ a *Ghimj(index,941);
W_133 = W_133+ a *Ghimj(index,942);
W_134 = W_134+ a *Ghimj(index,943);
W_135 = W_135+ a *Ghimj(index,944);
W_136 = W_136+ a *Ghimj(index,945);
W_137 = W_137+ a *Ghimj(index,946);
W_138 = W_138+ a *Ghimj(index,947);
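 // Write back slots 948-1035; slot 1023 becomes the W_126 pivot.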
Ghimj(index,948) = W_40;
Ghimj(index,949) = W_44;
Ghimj(index,950) = W_45;
Ghimj(index,951) = W_47;
Ghimj(index,952) = W_48;
Ghimj(index,953) = W_49;
Ghimj(index,954) = W_52;
Ghimj(index,955) = W_53;
Ghimj(index,956) = W_54;
Ghimj(index,957) = W_55;
Ghimj(index,958) = W_56;
Ghimj(index,959) = W_57;
Ghimj(index,960) = W_58;
Ghimj(index,961) = W_61;
Ghimj(index,962) = W_62;
Ghimj(index,963) = W_63;
Ghimj(index,964) = W_64;
Ghimj(index,965) = W_65;
Ghimj(index,966) = W_66;
Ghimj(index,967) = W_67;
Ghimj(index,968) = W_68;
Ghimj(index,969) = W_69;
Ghimj(index,970) = W_70;
Ghimj(index,971) = W_71;
Ghimj(index,972) = W_72;
Ghimj(index,973) = W_73;
Ghimj(index,974) = W_74;
Ghimj(index,975) = W_75;
Ghimj(index,976) = W_76;
Ghimj(index,977) = W_77;
Ghimj(index,978) = W_78;
Ghimj(index,979) = W_79;
Ghimj(index,980) = W_81;
Ghimj(index,981) = W_82;
Ghimj(index,982) = W_84;
Ghimj(index,983) = W_85;
Ghimj(index,984) = W_86;
Ghimj(index,985) = W_87;
Ghimj(index,986) = W_88;
Ghimj(index,987) = W_89;
Ghimj(index,988) = W_91;
Ghimj(index,989) = W_92;
Ghimj(index,990) = W_93;
Ghimj(index,991) = W_94;
Ghimj(index,992) = W_95;
Ghimj(index,993) = W_96;
Ghimj(index,994) = W_97;
Ghimj(index,995) = W_98;
Ghimj(index,996) = W_99;
Ghimj(index,997) = W_100;
Ghimj(index,998) = W_101;
Ghimj(index,999) = W_102;
Ghimj(index,1000) = W_103;
Ghimj(index,1001) = W_104;
Ghimj(index,1002) = W_105;
Ghimj(index,1003) = W_106;
Ghimj(index,1004) = W_107;
Ghimj(index,1005) = W_108;
Ghimj(index,1006) = W_109;
Ghimj(index,1007) = W_110;
Ghimj(index,1008) = W_111;
Ghimj(index,1009) = W_112;
Ghimj(index,1010) = W_113;
Ghimj(index,1011) = W_114;
Ghimj(index,1012) = W_115;
Ghimj(index,1013) = W_116;
Ghimj(index,1014) = W_117;
Ghimj(index,1015) = W_118;
Ghimj(index,1016) = W_119;
Ghimj(index,1017) = W_120;
Ghimj(index,1018) = W_121;
Ghimj(index,1019) = W_122;
Ghimj(index,1020) = W_123;
Ghimj(index,1021) = W_124;
Ghimj(index,1022) = W_125;
Ghimj(index,1023) = W_126;
Ghimj(index,1024) = W_127;
Ghimj(index,1025) = W_128;
Ghimj(index,1026) = W_129;
Ghimj(index,1027) = W_130;
Ghimj(index,1028) = W_131;
Ghimj(index,1029) = W_132;
Ghimj(index,1030) = W_133;
Ghimj(index,1031) = W_134;
Ghimj(index,1032) = W_135;
Ghimj(index,1033) = W_136;
Ghimj(index,1034) = W_137;
Ghimj(index,1035) = W_138;
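 // Load the row stored in slots 1036-1082.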
W_1 = Ghimj(index,1036);
W_39 = Ghimj(index,1037);
W_41 = Ghimj(index,1038);
W_42 = Ghimj(index,1039);
W_43 = Ghimj(index,1040);
W_50 = Ghimj(index,1041);
W_52 = Ghimj(index,1042);
W_54 = Ghimj(index,1043);
W_55 = Ghimj(index,1044);
W_57 = Ghimj(index,1045);
W_75 = Ghimj(index,1046);
W_80 = Ghimj(index,1047);
W_83 = Ghimj(index,1048);
W_88 = Ghimj(index,1049);
W_90 = Ghimj(index,1050);
W_97 = Ghimj(index,1051);
W_98 = Ghimj(index,1052);
W_100 = Ghimj(index,1053);
W_103 = Ghimj(index,1054);
W_104 = Ghimj(index,1055);
W_105 = Ghimj(index,1056);
W_106 = Ghimj(index,1057);
W_107 = Ghimj(index,1058);
W_112 = Ghimj(index,1059);
W_114 = Ghimj(index,1060);
W_116 = Ghimj(index,1061);
W_118 = Ghimj(index,1062);
W_119 = Ghimj(index,1063);
W_120 = Ghimj(index,1064);
W_121 = Ghimj(index,1065);
W_122 = Ghimj(index,1066);
W_123 = Ghimj(index,1067);
W_124 = Ghimj(index,1068);
W_125 = Ghimj(index,1069);
W_126 = Ghimj(index,1070);
W_127 = Ghimj(index,1071);
W_128 = Ghimj(index,1072);
W_129 = Ghimj(index,1073);
W_130 = Ghimj(index,1074);
W_131 = Ghimj(index,1075);
W_132 = Ghimj(index,1076);
W_133 = Ghimj(index,1077);
W_134 = Ghimj(index,1078);
W_135 = Ghimj(index,1079);
W_136 = Ghimj(index,1080);
W_137 = Ghimj(index,1081);
W_138 = Ghimj(index,1082);
a = - W_1/ Ghimj(index,1);
W_1 = -a;
a = - W_39/ Ghimj(index,258);
W_39 = -a;
W_134 = W_134+ a *Ghimj(index,259);
a = - W_41/ Ghimj(index,262);
W_41 = -a;
W_120 = W_120+ a *Ghimj(index,263);
a = - W_42/ Ghimj(index,264);
W_42 = -a;
W_120 = W_120+ a *Ghimj(index,265);
a = - W_43/ Ghimj(index,266);
W_43 = -a;
W_120 = W_120+ a *Ghimj(index,267);
a = - W_50/ Ghimj(index,282);
W_50 = -a;
W_83 = W_83+ a *Ghimj(index,283);
W_138 = W_138+ a *Ghimj(index,284);
a = - W_52/ Ghimj(index,288);
W_52 = -a;
W_126 = W_126+ a *Ghimj(index,289);
a = - W_54/ Ghimj(index,292);
W_54 = -a;
W_126 = W_126+ a *Ghimj(index,293);
a = - W_55/ Ghimj(index,294);
W_55 = -a;
W_126 = W_126+ a *Ghimj(index,295);
a = - W_57/ Ghimj(index,300);
W_57 = -a;
W_120 = W_120+ a *Ghimj(index,301);
W_126 = W_126+ a *Ghimj(index,302);
a = - W_75/ Ghimj(index,374);
W_75 = -a;
W_120 = W_120+ a *Ghimj(index,375);
W_126 = W_126+ a *Ghimj(index,376);
a = - W_80/ Ghimj(index,397);
W_80 = -a;
W_90 = W_90+ a *Ghimj(index,398);
W_112 = W_112+ a *Ghimj(index,399);
W_116 = W_116+ a *Ghimj(index,400);
W_127 = W_127+ a *Ghimj(index,401);
W_129 = W_129+ a *Ghimj(index,402);
W_134 = W_134+ a *Ghimj(index,403);
W_138 = W_138+ a *Ghimj(index,404);
a = - W_83/ Ghimj(index,416);
W_83 = -a;
W_128 = W_128+ a *Ghimj(index,417);
W_135 = W_135+ a *Ghimj(index,418);
W_136 = W_136+ a *Ghimj(index,419);
W_138 = W_138+ a *Ghimj(index,420);
a = - W_88/ Ghimj(index,450);
W_88 = -a;
W_103 = W_103+ a *Ghimj(index,451);
W_106 = W_106+ a *Ghimj(index,452);
W_124 = W_124+ a *Ghimj(index,453);
W_126 = W_126+ a *Ghimj(index,454);
W_127 = W_127+ a *Ghimj(index,455);
W_137 = W_137+ a *Ghimj(index,456);
a = - W_90/ Ghimj(index,469);
W_90 = -a;
W_100 = W_100+ a *Ghimj(index,470);
W_105 = W_105+ a *Ghimj(index,471);
W_112 = W_112+ a *Ghimj(index,472);
W_116 = W_116+ a *Ghimj(index,473);
W_118 = W_118+ a *Ghimj(index,474);
W_123 = W_123+ a *Ghimj(index,475);
W_127 = W_127+ a *Ghimj(index,476);
W_129 = W_129+ a *Ghimj(index,477);
W_132 = W_132+ a *Ghimj(index,478);
W_134 = W_134+ a *Ghimj(index,479);
W_138 = W_138+ a *Ghimj(index,480);
a = - W_97/ Ghimj(index,549);
W_97 = -a;
W_98 = W_98+ a *Ghimj(index,550);
W_120 = W_120+ a *Ghimj(index,551);
W_122 = W_122+ a *Ghimj(index,552);
W_126 = W_126+ a *Ghimj(index,553);
W_127 = W_127+ a *Ghimj(index,554);
W_130 = W_130+ a *Ghimj(index,555);
W_137 = W_137+ a *Ghimj(index,556);
a = - W_98/ Ghimj(index,557);
W_98 = -a;
W_107 = W_107+ a *Ghimj(index,558);
W_120 = W_120+ a *Ghimj(index,559);
W_124 = W_124+ a *Ghimj(index,560);
W_126 = W_126+ a *Ghimj(index,561);
W_127 = W_127+ a *Ghimj(index,562);
a = - W_100/ Ghimj(index,573);
W_100 = -a;
W_105 = W_105+ a *Ghimj(index,574);
W_112 = W_112+ a *Ghimj(index,575);
W_116 = W_116+ a *Ghimj(index,576);
W_118 = W_118+ a *Ghimj(index,577);
W_123 = W_123+ a *Ghimj(index,578);
W_126 = W_126+ a *Ghimj(index,579);
W_127 = W_127+ a *Ghimj(index,580);
W_129 = W_129+ a *Ghimj(index,581);
W_132 = W_132+ a *Ghimj(index,582);
W_134 = W_134+ a *Ghimj(index,583);
W_138 = W_138+ a *Ghimj(index,584);
a = - W_103/ Ghimj(index,605);
W_103 = -a;
W_124 = W_124+ a *Ghimj(index,606);
W_126 = W_126+ a *Ghimj(index,607);
W_127 = W_127+ a *Ghimj(index,608);
W_129 = W_129+ a *Ghimj(index,609);
a = - W_104/ Ghimj(index,610);
W_104 = -a;
W_125 = W_125+ a *Ghimj(index,611);
W_126 = W_126+ a *Ghimj(index,612);
W_127 = W_127+ a *Ghimj(index,613);
W_129 = W_129+ a *Ghimj(index,614);
W_137 = W_137+ a *Ghimj(index,615);
a = - W_105/ Ghimj(index,616);
W_105 = -a;
W_128 = W_128+ a *Ghimj(index,617);
W_129 = W_129+ a *Ghimj(index,618);
W_132 = W_132+ a *Ghimj(index,619);
W_135 = W_135+ a *Ghimj(index,620);
W_138 = W_138+ a *Ghimj(index,621);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
a = - W_112/ Ghimj(index,677);
W_112 = -a;
W_116 = W_116+ a *Ghimj(index,678);
W_123 = W_123+ a *Ghimj(index,679);
W_126 = W_126+ a *Ghimj(index,680);
W_128 = W_128+ a *Ghimj(index,681);
W_134 = W_134+ a *Ghimj(index,682);
W_137 = W_137+ a *Ghimj(index,683);
W_138 = W_138+ a *Ghimj(index,684);
a = - W_114/ Ghimj(index,697);
W_114 = -a;
W_126 = W_126+ a *Ghimj(index,698);
W_127 = W_127+ a *Ghimj(index,699);
W_129 = W_129+ a *Ghimj(index,700);
W_132 = W_132+ a *Ghimj(index,701);
W_136 = W_136+ a *Ghimj(index,702);
a = - W_116/ Ghimj(index,714);
W_116 = -a;
W_123 = W_123+ a *Ghimj(index,715);
W_127 = W_127+ a *Ghimj(index,716);
W_128 = W_128+ a *Ghimj(index,717);
W_131 = W_131+ a *Ghimj(index,718);
W_134 = W_134+ a *Ghimj(index,719);
W_135 = W_135+ a *Ghimj(index,720);
W_138 = W_138+ a *Ghimj(index,721);
a = - W_118/ Ghimj(index,745);
W_118 = -a;
W_123 = W_123+ a *Ghimj(index,746);
W_125 = W_125+ a *Ghimj(index,747);
W_126 = W_126+ a *Ghimj(index,748);
W_127 = W_127+ a *Ghimj(index,749);
W_128 = W_128+ a *Ghimj(index,750);
W_129 = W_129+ a *Ghimj(index,751);
W_131 = W_131+ a *Ghimj(index,752);
W_132 = W_132+ a *Ghimj(index,753);
W_134 = W_134+ a *Ghimj(index,754);
W_135 = W_135+ a *Ghimj(index,755);
W_137 = W_137+ a *Ghimj(index,756);
W_138 = W_138+ a *Ghimj(index,757);
a = - W_119/ Ghimj(index,767);
W_119 = -a;
W_121 = W_121+ a *Ghimj(index,768);
W_124 = W_124+ a *Ghimj(index,769);
W_125 = W_125+ a *Ghimj(index,770);
W_126 = W_126+ a *Ghimj(index,771);
W_127 = W_127+ a *Ghimj(index,772);
W_129 = W_129+ a *Ghimj(index,773);
W_133 = W_133+ a *Ghimj(index,774);
W_136 = W_136+ a *Ghimj(index,775);
W_137 = W_137+ a *Ghimj(index,776);
a = - W_120/ Ghimj(index,787);
W_120 = -a;
W_122 = W_122+ a *Ghimj(index,788);
W_124 = W_124+ a *Ghimj(index,789);
W_126 = W_126+ a *Ghimj(index,790);
W_127 = W_127+ a *Ghimj(index,791);
W_128 = W_128+ a *Ghimj(index,792);
W_130 = W_130+ a *Ghimj(index,793);
W_133 = W_133+ a *Ghimj(index,794);
W_135 = W_135+ a *Ghimj(index,795);
W_136 = W_136+ a *Ghimj(index,796);
W_137 = W_137+ a *Ghimj(index,797);
a = - W_121/ Ghimj(index,821);
W_121 = -a;
W_124 = W_124+ a *Ghimj(index,822);
W_125 = W_125+ a *Ghimj(index,823);
W_126 = W_126+ a *Ghimj(index,824);
W_127 = W_127+ a *Ghimj(index,825);
W_129 = W_129+ a *Ghimj(index,826);
W_133 = W_133+ a *Ghimj(index,827);
W_135 = W_135+ a *Ghimj(index,828);
W_136 = W_136+ a *Ghimj(index,829);
W_137 = W_137+ a *Ghimj(index,830);
a = - W_122/ Ghimj(index,847);
W_122 = -a;
W_124 = W_124+ a *Ghimj(index,848);
W_125 = W_125+ a *Ghimj(index,849);
W_126 = W_126+ a *Ghimj(index,850);
W_127 = W_127+ a *Ghimj(index,851);
W_128 = W_128+ a *Ghimj(index,852);
W_129 = W_129+ a *Ghimj(index,853);
W_130 = W_130+ a *Ghimj(index,854);
W_131 = W_131+ a *Ghimj(index,855);
W_133 = W_133+ a *Ghimj(index,856);
W_135 = W_135+ a *Ghimj(index,857);
W_136 = W_136+ a *Ghimj(index,858);
W_137 = W_137+ a *Ghimj(index,859);
W_138 = W_138+ a *Ghimj(index,860);
a = - W_123/ Ghimj(index,869);
W_123 = -a;
W_124 = W_124+ a *Ghimj(index,870);
W_125 = W_125+ a *Ghimj(index,871);
W_126 = W_126+ a *Ghimj(index,872);
W_127 = W_127+ a *Ghimj(index,873);
W_128 = W_128+ a *Ghimj(index,874);
W_129 = W_129+ a *Ghimj(index,875);
W_130 = W_130+ a *Ghimj(index,876);
W_131 = W_131+ a *Ghimj(index,877);
W_132 = W_132+ a *Ghimj(index,878);
W_133 = W_133+ a *Ghimj(index,879);
W_134 = W_134+ a *Ghimj(index,880);
W_135 = W_135+ a *Ghimj(index,881);
W_136 = W_136+ a *Ghimj(index,882);
W_137 = W_137+ a *Ghimj(index,883);
W_138 = W_138+ a *Ghimj(index,884);
a = - W_124/ Ghimj(index,896);
W_124 = -a;
W_125 = W_125+ a *Ghimj(index,897);
W_126 = W_126+ a *Ghimj(index,898);
W_127 = W_127+ a *Ghimj(index,899);
W_128 = W_128+ a *Ghimj(index,900);
W_129 = W_129+ a *Ghimj(index,901);
W_130 = W_130+ a *Ghimj(index,902);
W_131 = W_131+ a *Ghimj(index,903);
W_132 = W_132+ a *Ghimj(index,904);
W_133 = W_133+ a *Ghimj(index,905);
W_135 = W_135+ a *Ghimj(index,906);
W_136 = W_136+ a *Ghimj(index,907);
W_137 = W_137+ a *Ghimj(index,908);
W_138 = W_138+ a *Ghimj(index,909);
a = - W_125/ Ghimj(index,934);
W_125 = -a;
W_126 = W_126+ a *Ghimj(index,935);
W_127 = W_127+ a *Ghimj(index,936);
W_128 = W_128+ a *Ghimj(index,937);
W_129 = W_129+ a *Ghimj(index,938);
W_130 = W_130+ a *Ghimj(index,939);
W_131 = W_131+ a *Ghimj(index,940);
W_132 = W_132+ a *Ghimj(index,941);
W_133 = W_133+ a *Ghimj(index,942);
W_134 = W_134+ a *Ghimj(index,943);
W_135 = W_135+ a *Ghimj(index,944);
W_136 = W_136+ a *Ghimj(index,945);
W_137 = W_137+ a *Ghimj(index,946);
W_138 = W_138+ a *Ghimj(index,947);
a = - W_126/ Ghimj(index,1023);
W_126 = -a;
W_127 = W_127+ a *Ghimj(index,1024);
W_128 = W_128+ a *Ghimj(index,1025);
W_129 = W_129+ a *Ghimj(index,1026);
W_130 = W_130+ a *Ghimj(index,1027);
W_131 = W_131+ a *Ghimj(index,1028);
W_132 = W_132+ a *Ghimj(index,1029);
W_133 = W_133+ a *Ghimj(index,1030);
W_134 = W_134+ a *Ghimj(index,1031);
W_135 = W_135+ a *Ghimj(index,1032);
W_136 = W_136+ a *Ghimj(index,1033);
W_137 = W_137+ a *Ghimj(index,1034);
W_138 = W_138+ a *Ghimj(index,1035);
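 // Write back slots 1036-1082 (slot 1071 would serve as the W_127 pivot for subsequent rows).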
Ghimj(index,1036) = W_1;
Ghimj(index,1037) = W_39;
Ghimj(index,1038) = W_41;
Ghimj(index,1039) = W_42;
Ghimj(index,1040) = W_43;
Ghimj(index,1041) = W_50;
Ghimj(index,1042) = W_52;
Ghimj(index,1043) = W_54;
Ghimj(index,1044) = W_55;
Ghimj(index,1045) = W_57;
Ghimj(index,1046) = W_75;
Ghimj(index,1047) = W_80;
Ghimj(index,1048) = W_83;
Ghimj(index,1049) = W_88;
Ghimj(index,1050) = W_90;
Ghimj(index,1051) = W_97;
Ghimj(index,1052) = W_98;
Ghimj(index,1053) = W_100;
Ghimj(index,1054) = W_103;
Ghimj(index,1055) = W_104;
Ghimj(index,1056) = W_105;
Ghimj(index,1057) = W_106;
Ghimj(index,1058) = W_107;
Ghimj(index,1059) = W_112;
Ghimj(index,1060) = W_114;
Ghimj(index,1061) = W_116;
Ghimj(index,1062) = W_118;
Ghimj(index,1063) = W_119;
Ghimj(index,1064) = W_120;
Ghimj(index,1065) = W_121;
Ghimj(index,1066) = W_122;
Ghimj(index,1067) = W_123;
Ghimj(index,1068) = W_124;
Ghimj(index,1069) = W_125;
Ghimj(index,1070) = W_126;
Ghimj(index,1071) = W_127;
Ghimj(index,1072) = W_128;
Ghimj(index,1073) = W_129;
Ghimj(index,1074) = W_130;
Ghimj(index,1075) = W_131;
Ghimj(index,1076) = W_132;
Ghimj(index,1077) = W_133;
Ghimj(index,1078) = W_134;
Ghimj(index,1079) = W_135;
Ghimj(index,1080) = W_136;
Ghimj(index,1081) = W_137;
Ghimj(index,1082) = W_138;
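 // Load the row stored in slots 1083-1148 and repeat the eliminate-and-store pattern.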
W_40 = Ghimj(index,1083);
W_44 = Ghimj(index,1084);
W_45 = Ghimj(index,1085);
W_47 = Ghimj(index,1086);
W_48 = Ghimj(index,1087);
W_49 = Ghimj(index,1088);
W_52 = Ghimj(index,1089);
W_53 = Ghimj(index,1090);
W_54 = Ghimj(index,1091);
W_55 = Ghimj(index,1092);
W_57 = Ghimj(index,1093);
W_61 = Ghimj(index,1094);
W_63 = Ghimj(index,1095);
W_67 = Ghimj(index,1096);
W_70 = Ghimj(index,1097);
W_73 = Ghimj(index,1098);
W_74 = Ghimj(index,1099);
W_75 = Ghimj(index,1100);
W_76 = Ghimj(index,1101);
W_77 = Ghimj(index,1102);
W_78 = Ghimj(index,1103);
W_79 = Ghimj(index,1104);
W_83 = Ghimj(index,1105);
W_84 = Ghimj(index,1106);
W_86 = Ghimj(index,1107);
W_87 = Ghimj(index,1108);
W_88 = Ghimj(index,1109);
W_92 = Ghimj(index,1110);
W_93 = Ghimj(index,1111);
W_97 = Ghimj(index,1112);
W_98 = Ghimj(index,1113);
W_101 = Ghimj(index,1114);
W_102 = Ghimj(index,1115);
W_103 = Ghimj(index,1116);
W_104 = Ghimj(index,1117);
W_105 = Ghimj(index,1118);
W_106 = Ghimj(index,1119);
W_107 = Ghimj(index,1120);
W_110 = Ghimj(index,1121);
W_111 = Ghimj(index,1122);
W_112 = Ghimj(index,1123);
W_114 = Ghimj(index,1124);
W_115 = Ghimj(index,1125);
W_116 = Ghimj(index,1126);
W_117 = Ghimj(index,1127);
W_118 = Ghimj(index,1128);
W_119 = Ghimj(index,1129);
W_120 = Ghimj(index,1130);
W_121 = Ghimj(index,1131);
W_122 = Ghimj(index,1132);
W_123 = Ghimj(index,1133);
W_124 = Ghimj(index,1134);
W_125 = Ghimj(index,1135);
W_126 = Ghimj(index,1136);
W_127 = Ghimj(index,1137);
W_128 = Ghimj(index,1138);
W_129 = Ghimj(index,1139);
W_130 = Ghimj(index,1140);
W_131 = Ghimj(index,1141);
W_132 = Ghimj(index,1142);
W_133 = Ghimj(index,1143);
W_134 = Ghimj(index,1144);
W_135 = Ghimj(index,1145);
W_136 = Ghimj(index,1146);
W_137 = Ghimj(index,1147);
W_138 = Ghimj(index,1148);
a = - W_40/ Ghimj(index,260);
W_40 = -a;
W_126 = W_126+ a *Ghimj(index,261);
a = - W_44/ Ghimj(index,268);
W_44 = -a;
W_126 = W_126+ a *Ghimj(index,269);
a = - W_45/ Ghimj(index,270);
W_45 = -a;
W_126 = W_126+ a *Ghimj(index,271);
a = - W_47/ Ghimj(index,276);
W_47 = -a;
W_126 = W_126+ a *Ghimj(index,277);
a = - W_48/ Ghimj(index,278);
W_48 = -a;
W_126 = W_126+ a *Ghimj(index,279);
a = - W_49/ Ghimj(index,280);
W_49 = -a;
W_126 = W_126+ a *Ghimj(index,281);
a = - W_52/ Ghimj(index,288);
W_52 = -a;
W_126 = W_126+ a *Ghimj(index,289);
a = - W_53/ Ghimj(index,290);
W_53 = -a;
W_126 = W_126+ a *Ghimj(index,291);
a = - W_54/ Ghimj(index,292);
W_54 = -a;
W_126 = W_126+ a *Ghimj(index,293);
a = - W_55/ Ghimj(index,294);
W_55 = -a;
W_126 = W_126+ a *Ghimj(index,295);
a = - W_57/ Ghimj(index,300);
W_57 = -a;
W_120 = W_120+ a *Ghimj(index,301);
W_126 = W_126+ a *Ghimj(index,302);
a = - W_61/ Ghimj(index,315);
W_61 = -a;
W_70 = W_70+ a *Ghimj(index,316);
W_87 = W_87+ a *Ghimj(index,317);
W_126 = W_126+ a *Ghimj(index,318);
a = - W_63/ Ghimj(index,323);
W_63 = -a;
W_121 = W_121+ a *Ghimj(index,324);
W_126 = W_126+ a *Ghimj(index,325);
W_137 = W_137+ a *Ghimj(index,326);
a = - W_67/ Ghimj(index,339);
W_67 = -a;
W_115 = W_115+ a *Ghimj(index,340);
W_126 = W_126+ a *Ghimj(index,341);
W_137 = W_137+ a *Ghimj(index,342);
a = - W_70/ Ghimj(index,352);
W_70 = -a;
W_84 = W_84+ a *Ghimj(index,353);
W_87 = W_87+ a *Ghimj(index,354);
W_126 = W_126+ a *Ghimj(index,355);
a = - W_73/ Ghimj(index,364);
W_73 = -a;
W_126 = W_126+ a *Ghimj(index,365);
W_135 = W_135+ a *Ghimj(index,366);
W_137 = W_137+ a *Ghimj(index,367);
a = - W_74/ Ghimj(index,368);
W_74 = -a;
W_117 = W_117+ a *Ghimj(index,369);
W_121 = W_121+ a *Ghimj(index,370);
W_125 = W_125+ a *Ghimj(index,371);
W_126 = W_126+ a *Ghimj(index,372);
W_137 = W_137+ a *Ghimj(index,373);
a = - W_75/ Ghimj(index,374);
W_75 = -a;
W_120 = W_120+ a *Ghimj(index,375);
W_126 = W_126+ a *Ghimj(index,376);
a = - W_76/ Ghimj(index,377);
W_76 = -a;
W_87 = W_87+ a *Ghimj(index,378);
W_126 = W_126+ a *Ghimj(index,379);
W_133 = W_133+ a *Ghimj(index,380);
W_135 = W_135+ a *Ghimj(index,381);
a = - W_77/ Ghimj(index,382);
W_77 = -a;
W_121 = W_121+ a *Ghimj(index,383);
W_126 = W_126+ a *Ghimj(index,384);
W_135 = W_135+ a *Ghimj(index,385);
a = - W_78/ Ghimj(index,386);
W_78 = -a;
W_103 = W_103+ a *Ghimj(index,387);
W_106 = W_106+ a *Ghimj(index,388);
W_107 = W_107+ a *Ghimj(index,389);
W_110 = W_110+ a *Ghimj(index,390);
W_124 = W_124+ a *Ghimj(index,391);
W_126 = W_126+ a *Ghimj(index,392);
a = - W_79/ Ghimj(index,393);
W_79 = -a;
W_102 = W_102+ a *Ghimj(index,394);
W_126 = W_126+ a *Ghimj(index,395);
W_137 = W_137+ a *Ghimj(index,396);
a = - W_83/ Ghimj(index,416);
W_83 = -a;
W_128 = W_128+ a *Ghimj(index,417);
W_135 = W_135+ a *Ghimj(index,418);
W_136 = W_136+ a *Ghimj(index,419);
W_138 = W_138+ a *Ghimj(index,420);
a = - W_84/ Ghimj(index,421);
W_84 = -a;
W_92 = W_92+ a *Ghimj(index,422);
W_124 = W_124+ a *Ghimj(index,423);
W_135 = W_135+ a *Ghimj(index,424);
W_137 = W_137+ a *Ghimj(index,425);
a = - W_86/ Ghimj(index,436);
W_86 = -a;
W_93 = W_93+ a *Ghimj(index,437);
W_125 = W_125+ a *Ghimj(index,438);
W_126 = W_126+ a *Ghimj(index,439);
W_133 = W_133+ a *Ghimj(index,440);
W_137 = W_137+ a *Ghimj(index,441);
a = - W_87/ Ghimj(index,444);
W_87 = -a;
W_92 = W_92+ a *Ghimj(index,445);
W_124 = W_124+ a *Ghimj(index,446);
W_126 = W_126+ a *Ghimj(index,447);
W_135 = W_135+ a *Ghimj(index,448);
W_137 = W_137+ a *Ghimj(index,449);
a = - W_88/ Ghimj(index,450);
W_88 = -a;
W_103 = W_103+ a *Ghimj(index,451);
W_106 = W_106+ a *Ghimj(index,452);
W_124 = W_124+ a *Ghimj(index,453);
W_126 = W_126+ a *Ghimj(index,454);
W_127 = W_127+ a *Ghimj(index,455);
W_137 = W_137+ a *Ghimj(index,456);
a = - W_92/ Ghimj(index,489);
W_92 = -a;
W_124 = W_124+ a *Ghimj(index,490);
W_126 = W_126+ a *Ghimj(index,491);
W_133 = W_133+ a *Ghimj(index,492);
W_135 = W_135+ a *Ghimj(index,493);
W_137 = W_137+ a *Ghimj(index,494);
a = - W_93/ Ghimj(index,497);
W_93 = -a;
W_125 = W_125+ a *Ghimj(index,498);
W_126 = W_126+ a *Ghimj(index,499);
W_133 = W_133+ a *Ghimj(index,500);
W_137 = W_137+ a *Ghimj(index,501);
a = - W_97/ Ghimj(index,549);
W_97 = -a;
W_98 = W_98+ a *Ghimj(index,550);
W_120 = W_120+ a *Ghimj(index,551);
W_122 = W_122+ a *Ghimj(index,552);
W_126 = W_126+ a *Ghimj(index,553);
W_127 = W_127+ a *Ghimj(index,554);
W_130 = W_130+ a *Ghimj(index,555);
W_137 = W_137+ a *Ghimj(index,556);
a = - W_98/ Ghimj(index,557);
W_98 = -a;
W_107 = W_107+ a *Ghimj(index,558);
W_120 = W_120+ a *Ghimj(index,559);
W_124 = W_124+ a *Ghimj(index,560);
W_126 = W_126+ a *Ghimj(index,561);
W_127 = W_127+ a *Ghimj(index,562);
a = - W_101/ Ghimj(index,586);
W_101 = -a;
W_105 = W_105+ a *Ghimj(index,587);
W_114 = W_114+ a *Ghimj(index,588);
W_116 = W_116+ a *Ghimj(index,589);
W_119 = W_119+ a *Ghimj(index,590);
W_123 = W_123+ a *Ghimj(index,591);
W_126 = W_126+ a *Ghimj(index,592);
W_128 = W_128+ a *Ghimj(index,593);
W_130 = W_130+ a *Ghimj(index,594);
W_135 = W_135+ a *Ghimj(index,595);
W_136 = W_136+ a *Ghimj(index,596);
W_138 = W_138+ a *Ghimj(index,597);
a = - W_102/ Ghimj(index,600);
W_102 = -a;
W_125 = W_125+ a *Ghimj(index,601);
W_126 = W_126+ a *Ghimj(index,602);
W_133 = W_133+ a *Ghimj(index,603);
W_137 = W_137+ a *Ghimj(index,604);
a = - W_103/ Ghimj(index,605);
W_103 = -a;
W_124 = W_124+ a *Ghimj(index,606);
W_126 = W_126+ a *Ghimj(index,607);
W_127 = W_127+ a *Ghimj(index,608);
W_129 = W_129+ a *Ghimj(index,609);
a = - W_104/ Ghimj(index,610);
W_104 = -a;
W_125 = W_125+ a *Ghimj(index,611);
W_126 = W_126+ a *Ghimj(index,612);
W_127 = W_127+ a *Ghimj(index,613);
W_129 = W_129+ a *Ghimj(index,614);
W_137 = W_137+ a *Ghimj(index,615);
a = - W_105/ Ghimj(index,616);
W_105 = -a;
W_128 = W_128+ a *Ghimj(index,617);
W_129 = W_129+ a *Ghimj(index,618);
W_132 = W_132+ a *Ghimj(index,619);
W_135 = W_135+ a *Ghimj(index,620);
W_138 = W_138+ a *Ghimj(index,621);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
a = - W_110/ Ghimj(index,659);
W_110 = -a;
W_124 = W_124+ a *Ghimj(index,660);
W_125 = W_125+ a *Ghimj(index,661);
W_126 = W_126+ a *Ghimj(index,662);
W_133 = W_133+ a *Ghimj(index,663);
W_136 = W_136+ a *Ghimj(index,664);
W_137 = W_137+ a *Ghimj(index,665);
a = - W_111/ Ghimj(index,669);
W_111 = -a;
W_115 = W_115+ a *Ghimj(index,670);
W_124 = W_124+ a *Ghimj(index,671);
W_125 = W_125+ a *Ghimj(index,672);
W_126 = W_126+ a *Ghimj(index,673);
W_133 = W_133+ a *Ghimj(index,674);
W_136 = W_136+ a *Ghimj(index,675);
W_137 = W_137+ a *Ghimj(index,676);
a = - W_112/ Ghimj(index,677);
W_112 = -a;
W_116 = W_116+ a *Ghimj(index,678);
W_123 = W_123+ a *Ghimj(index,679);
W_126 = W_126+ a *Ghimj(index,680);
W_128 = W_128+ a *Ghimj(index,681);
W_134 = W_134+ a *Ghimj(index,682);
W_137 = W_137+ a *Ghimj(index,683);
W_138 = W_138+ a *Ghimj(index,684);
a = - W_114/ Ghimj(index,697);
W_114 = -a;
W_126 = W_126+ a *Ghimj(index,698);
W_127 = W_127+ a *Ghimj(index,699);
W_129 = W_129+ a *Ghimj(index,700);
W_132 = W_132+ a *Ghimj(index,701);
W_136 = W_136+ a *Ghimj(index,702);
a = - W_115/ Ghimj(index,706);
W_115 = -a;
W_124 = W_124+ a *Ghimj(index,707);
W_126 = W_126+ a *Ghimj(index,708);
W_127 = W_127+ a *Ghimj(index,709);
W_129 = W_129+ a *Ghimj(index,710);
W_133 = W_133+ a *Ghimj(index,711);
W_136 = W_136+ a *Ghimj(index,712);
W_137 = W_137+ a *Ghimj(index,713);
a = - W_116/ Ghimj(index,714);
W_116 = -a;
W_123 = W_123+ a *Ghimj(index,715);
W_127 = W_127+ a *Ghimj(index,716);
W_128 = W_128+ a *Ghimj(index,717);
W_131 = W_131+ a *Ghimj(index,718);
W_134 = W_134+ a *Ghimj(index,719);
W_135 = W_135+ a *Ghimj(index,720);
W_138 = W_138+ a *Ghimj(index,721);
a = - W_117/ Ghimj(index,731);
W_117 = -a;
W_121 = W_121+ a *Ghimj(index,732);
W_124 = W_124+ a *Ghimj(index,733);
W_125 = W_125+ a *Ghimj(index,734);
W_126 = W_126+ a *Ghimj(index,735);
W_127 = W_127+ a *Ghimj(index,736);
W_129 = W_129+ a *Ghimj(index,737);
W_133 = W_133+ a *Ghimj(index,738);
W_136 = W_136+ a *Ghimj(index,739);
W_137 = W_137+ a *Ghimj(index,740);
a = - W_118/ Ghimj(index,745);
W_118 = -a;
W_123 = W_123+ a *Ghimj(index,746);
W_125 = W_125+ a *Ghimj(index,747);
W_126 = W_126+ a *Ghimj(index,748);
W_127 = W_127+ a *Ghimj(index,749);
W_128 = W_128+ a *Ghimj(index,750);
W_129 = W_129+ a *Ghimj(index,751);
W_131 = W_131+ a *Ghimj(index,752);
W_132 = W_132+ a *Ghimj(index,753);
W_134 = W_134+ a *Ghimj(index,754);
W_135 = W_135+ a *Ghimj(index,755);
W_137 = W_137+ a *Ghimj(index,756);
W_138 = W_138+ a *Ghimj(index,757);
a = - W_119/ Ghimj(index,767);
W_119 = -a;
W_121 = W_121+ a *Ghimj(index,768);
W_124 = W_124+ a *Ghimj(index,769);
W_125 = W_125+ a *Ghimj(index,770);
W_126 = W_126+ a *Ghimj(index,771);
W_127 = W_127+ a *Ghimj(index,772);
W_129 = W_129+ a *Ghimj(index,773);
W_133 = W_133+ a *Ghimj(index,774);
W_136 = W_136+ a *Ghimj(index,775);
W_137 = W_137+ a *Ghimj(index,776);
a = - W_120/ Ghimj(index,787);
W_120 = -a;
W_122 = W_122+ a *Ghimj(index,788);
W_124 = W_124+ a *Ghimj(index,789);
W_126 = W_126+ a *Ghimj(index,790);
W_127 = W_127+ a *Ghimj(index,791);
W_128 = W_128+ a *Ghimj(index,792);
W_130 = W_130+ a *Ghimj(index,793);
W_133 = W_133+ a *Ghimj(index,794);
W_135 = W_135+ a *Ghimj(index,795);
W_136 = W_136+ a *Ghimj(index,796);
W_137 = W_137+ a *Ghimj(index,797);
a = - W_121/ Ghimj(index,821);
W_121 = -a;
W_124 = W_124+ a *Ghimj(index,822);
W_125 = W_125+ a *Ghimj(index,823);
W_126 = W_126+ a *Ghimj(index,824);
W_127 = W_127+ a *Ghimj(index,825);
W_129 = W_129+ a *Ghimj(index,826);
W_133 = W_133+ a *Ghimj(index,827);
W_135 = W_135+ a *Ghimj(index,828);
W_136 = W_136+ a *Ghimj(index,829);
W_137 = W_137+ a *Ghimj(index,830);
a = - W_122/ Ghimj(index,847);
W_122 = -a;
W_124 = W_124+ a *Ghimj(index,848);
W_125 = W_125+ a *Ghimj(index,849);
W_126 = W_126+ a *Ghimj(index,850);
W_127 = W_127+ a *Ghimj(index,851);
W_128 = W_128+ a *Ghimj(index,852);
W_129 = W_129+ a *Ghimj(index,853);
W_130 = W_130+ a *Ghimj(index,854);
W_131 = W_131+ a *Ghimj(index,855);
W_133 = W_133+ a *Ghimj(index,856);
W_135 = W_135+ a *Ghimj(index,857);
W_136 = W_136+ a *Ghimj(index,858);
W_137 = W_137+ a *Ghimj(index,859);
W_138 = W_138+ a *Ghimj(index,860);
a = - W_123/ Ghimj(index,869);
W_123 = -a;
W_124 = W_124+ a *Ghimj(index,870);
W_125 = W_125+ a *Ghimj(index,871);
W_126 = W_126+ a *Ghimj(index,872);
W_127 = W_127+ a *Ghimj(index,873);
W_128 = W_128+ a *Ghimj(index,874);
W_129 = W_129+ a *Ghimj(index,875);
W_130 = W_130+ a *Ghimj(index,876);
W_131 = W_131+ a *Ghimj(index,877);
W_132 = W_132+ a *Ghimj(index,878);
W_133 = W_133+ a *Ghimj(index,879);
W_134 = W_134+ a *Ghimj(index,880);
W_135 = W_135+ a *Ghimj(index,881);
W_136 = W_136+ a *Ghimj(index,882);
W_137 = W_137+ a *Ghimj(index,883);
W_138 = W_138+ a *Ghimj(index,884);
a = - W_124/ Ghimj(index,896);
W_124 = -a;
W_125 = W_125+ a *Ghimj(index,897);
W_126 = W_126+ a *Ghimj(index,898);
W_127 = W_127+ a *Ghimj(index,899);
W_128 = W_128+ a *Ghimj(index,900);
W_129 = W_129+ a *Ghimj(index,901);
W_130 = W_130+ a *Ghimj(index,902);
W_131 = W_131+ a *Ghimj(index,903);
W_132 = W_132+ a *Ghimj(index,904);
W_133 = W_133+ a *Ghimj(index,905);
W_135 = W_135+ a *Ghimj(index,906);
W_136 = W_136+ a *Ghimj(index,907);
W_137 = W_137+ a *Ghimj(index,908);
W_138 = W_138+ a *Ghimj(index,909);
a = - W_125/ Ghimj(index,934);
W_125 = -a;
W_126 = W_126+ a *Ghimj(index,935);
W_127 = W_127+ a *Ghimj(index,936);
W_128 = W_128+ a *Ghimj(index,937);
W_129 = W_129+ a *Ghimj(index,938);
W_130 = W_130+ a *Ghimj(index,939);
W_131 = W_131+ a *Ghimj(index,940);
W_132 = W_132+ a *Ghimj(index,941);
W_133 = W_133+ a *Ghimj(index,942);
W_134 = W_134+ a *Ghimj(index,943);
W_135 = W_135+ a *Ghimj(index,944);
W_136 = W_136+ a *Ghimj(index,945);
W_137 = W_137+ a *Ghimj(index,946);
W_138 = W_138+ a *Ghimj(index,947);
a = - W_126/ Ghimj(index,1023);
W_126 = -a;
W_127 = W_127+ a *Ghimj(index,1024);
W_128 = W_128+ a *Ghimj(index,1025);
W_129 = W_129+ a *Ghimj(index,1026);
W_130 = W_130+ a *Ghimj(index,1027);
W_131 = W_131+ a *Ghimj(index,1028);
W_132 = W_132+ a *Ghimj(index,1029);
W_133 = W_133+ a *Ghimj(index,1030);
W_134 = W_134+ a *Ghimj(index,1031);
W_135 = W_135+ a *Ghimj(index,1032);
W_136 = W_136+ a *Ghimj(index,1033);
W_137 = W_137+ a *Ghimj(index,1034);
W_138 = W_138+ a *Ghimj(index,1035);
a = - W_127/ Ghimj(index,1071);
W_127 = -a;
W_128 = W_128+ a *Ghimj(index,1072);
W_129 = W_129+ a *Ghimj(index,1073);
W_130 = W_130+ a *Ghimj(index,1074);
W_131 = W_131+ a *Ghimj(index,1075);
W_132 = W_132+ a *Ghimj(index,1076);
W_133 = W_133+ a *Ghimj(index,1077);
W_134 = W_134+ a *Ghimj(index,1078);
W_135 = W_135+ a *Ghimj(index,1079);
W_136 = W_136+ a *Ghimj(index,1080);
W_137 = W_137+ a *Ghimj(index,1081);
W_138 = W_138+ a *Ghimj(index,1082);
Ghimj(index,1083) = W_40;
Ghimj(index,1084) = W_44;
Ghimj(index,1085) = W_45;
Ghimj(index,1086) = W_47;
Ghimj(index,1087) = W_48;
Ghimj(index,1088) = W_49;
Ghimj(index,1089) = W_52;
Ghimj(index,1090) = W_53;
Ghimj(index,1091) = W_54;
Ghimj(index,1092) = W_55;
Ghimj(index,1093) = W_57;
Ghimj(index,1094) = W_61;
Ghimj(index,1095) = W_63;
Ghimj(index,1096) = W_67;
Ghimj(index,1097) = W_70;
Ghimj(index,1098) = W_73;
Ghimj(index,1099) = W_74;
Ghimj(index,1100) = W_75;
Ghimj(index,1101) = W_76;
Ghimj(index,1102) = W_77;
Ghimj(index,1103) = W_78;
Ghimj(index,1104) = W_79;
Ghimj(index,1105) = W_83;
Ghimj(index,1106) = W_84;
Ghimj(index,1107) = W_86;
Ghimj(index,1108) = W_87;
Ghimj(index,1109) = W_88;
Ghimj(index,1110) = W_92;
Ghimj(index,1111) = W_93;
Ghimj(index,1112) = W_97;
Ghimj(index,1113) = W_98;
Ghimj(index,1114) = W_101;
Ghimj(index,1115) = W_102;
Ghimj(index,1116) = W_103;
Ghimj(index,1117) = W_104;
Ghimj(index,1118) = W_105;
Ghimj(index,1119) = W_106;
Ghimj(index,1120) = W_107;
Ghimj(index,1121) = W_110;
Ghimj(index,1122) = W_111;
Ghimj(index,1123) = W_112;
Ghimj(index,1124) = W_114;
Ghimj(index,1125) = W_115;
Ghimj(index,1126) = W_116;
Ghimj(index,1127) = W_117;
Ghimj(index,1128) = W_118;
Ghimj(index,1129) = W_119;
Ghimj(index,1130) = W_120;
Ghimj(index,1131) = W_121;
Ghimj(index,1132) = W_122;
Ghimj(index,1133) = W_123;
Ghimj(index,1134) = W_124;
Ghimj(index,1135) = W_125;
Ghimj(index,1136) = W_126;
Ghimj(index,1137) = W_127;
Ghimj(index,1138) = W_128;
Ghimj(index,1139) = W_129;
Ghimj(index,1140) = W_130;
Ghimj(index,1141) = W_131;
Ghimj(index,1142) = W_132;
Ghimj(index,1143) = W_133;
Ghimj(index,1144) = W_134;
Ghimj(index,1145) = W_135;
Ghimj(index,1146) = W_136;
Ghimj(index,1147) = W_137;
Ghimj(index,1148) = W_138;
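/* The block above completes the factorization of one row of the sparse matrix
stored flat in Ghimj (one copy per grid cell, selected by `index`). Each row
is handled in three steps: load its nonzero entries into scalar registers W_k,
eliminate against previously factored rows (multiplier a = -W_k / pivot, then
W_j += a * Ghimj(<pivot-row entries>), with -a kept as the L-factor entry),
and write the registers back. The next block appears to apply the same pattern
to the row whose diagonal sits at Ghimj(index,1176) (column 129); the code
looks auto-generated by a KPP-style sparse-LU preprocessor, so the pattern
repeats verbatim for every remaining row. */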
W_0 = Ghimj(index,1149);
W_1 = Ghimj(index,1150);
W_2 = Ghimj(index,1151);
W_44 = Ghimj(index,1152);
W_45 = Ghimj(index,1153);
W_52 = Ghimj(index,1154);
W_53 = Ghimj(index,1155);
W_54 = Ghimj(index,1156);
W_55 = Ghimj(index,1157);
W_80 = Ghimj(index,1158);
W_90 = Ghimj(index,1159);
W_100 = Ghimj(index,1160);
W_103 = Ghimj(index,1161);
W_104 = Ghimj(index,1162);
W_105 = Ghimj(index,1163);
W_112 = Ghimj(index,1164);
W_114 = Ghimj(index,1165);
W_116 = Ghimj(index,1166);
W_118 = Ghimj(index,1167);
W_119 = Ghimj(index,1168);
W_121 = Ghimj(index,1169);
W_123 = Ghimj(index,1170);
W_124 = Ghimj(index,1171);
W_125 = Ghimj(index,1172);
W_126 = Ghimj(index,1173);
W_127 = Ghimj(index,1174);
W_128 = Ghimj(index,1175);
W_129 = Ghimj(index,1176);
W_130 = Ghimj(index,1177);
W_131 = Ghimj(index,1178);
W_132 = Ghimj(index,1179);
W_133 = Ghimj(index,1180);
W_134 = Ghimj(index,1181);
W_135 = Ghimj(index,1182);
W_136 = Ghimj(index,1183);
W_137 = Ghimj(index,1184);
W_138 = Ghimj(index,1185);
a = - W_0/ Ghimj(index,0);
W_0 = -a;
a = - W_1/ Ghimj(index,1);
W_1 = -a;
a = - W_2/ Ghimj(index,2);
W_2 = -a;
a = - W_44/ Ghimj(index,268);
W_44 = -a;
W_126 = W_126+ a *Ghimj(index,269);
a = - W_45/ Ghimj(index,270);
W_45 = -a;
W_126 = W_126+ a *Ghimj(index,271);
a = - W_52/ Ghimj(index,288);
W_52 = -a;
W_126 = W_126+ a *Ghimj(index,289);
a = - W_53/ Ghimj(index,290);
W_53 = -a;
W_126 = W_126+ a *Ghimj(index,291);
a = - W_54/ Ghimj(index,292);
W_54 = -a;
W_126 = W_126+ a *Ghimj(index,293);
a = - W_55/ Ghimj(index,294);
W_55 = -a;
W_126 = W_126+ a *Ghimj(index,295);
a = - W_80/ Ghimj(index,397);
W_80 = -a;
W_90 = W_90+ a *Ghimj(index,398);
W_112 = W_112+ a *Ghimj(index,399);
W_116 = W_116+ a *Ghimj(index,400);
W_127 = W_127+ a *Ghimj(index,401);
W_129 = W_129+ a *Ghimj(index,402);
W_134 = W_134+ a *Ghimj(index,403);
W_138 = W_138+ a *Ghimj(index,404);
a = - W_90/ Ghimj(index,469);
W_90 = -a;
W_100 = W_100+ a *Ghimj(index,470);
W_105 = W_105+ a *Ghimj(index,471);
W_112 = W_112+ a *Ghimj(index,472);
W_116 = W_116+ a *Ghimj(index,473);
W_118 = W_118+ a *Ghimj(index,474);
W_123 = W_123+ a *Ghimj(index,475);
W_127 = W_127+ a *Ghimj(index,476);
W_129 = W_129+ a *Ghimj(index,477);
W_132 = W_132+ a *Ghimj(index,478);
W_134 = W_134+ a *Ghimj(index,479);
W_138 = W_138+ a *Ghimj(index,480);
a = - W_100/ Ghimj(index,573);
W_100 = -a;
W_105 = W_105+ a *Ghimj(index,574);
W_112 = W_112+ a *Ghimj(index,575);
W_116 = W_116+ a *Ghimj(index,576);
W_118 = W_118+ a *Ghimj(index,577);
W_123 = W_123+ a *Ghimj(index,578);
W_126 = W_126+ a *Ghimj(index,579);
W_127 = W_127+ a *Ghimj(index,580);
W_129 = W_129+ a *Ghimj(index,581);
W_132 = W_132+ a *Ghimj(index,582);
W_134 = W_134+ a *Ghimj(index,583);
W_138 = W_138+ a *Ghimj(index,584);
a = - W_103/ Ghimj(index,605);
W_103 = -a;
W_124 = W_124+ a *Ghimj(index,606);
W_126 = W_126+ a *Ghimj(index,607);
W_127 = W_127+ a *Ghimj(index,608);
W_129 = W_129+ a *Ghimj(index,609);
a = - W_104/ Ghimj(index,610);
W_104 = -a;
W_125 = W_125+ a *Ghimj(index,611);
W_126 = W_126+ a *Ghimj(index,612);
W_127 = W_127+ a *Ghimj(index,613);
W_129 = W_129+ a *Ghimj(index,614);
W_137 = W_137+ a *Ghimj(index,615);
a = - W_105/ Ghimj(index,616);
W_105 = -a;
W_128 = W_128+ a *Ghimj(index,617);
W_129 = W_129+ a *Ghimj(index,618);
W_132 = W_132+ a *Ghimj(index,619);
W_135 = W_135+ a *Ghimj(index,620);
W_138 = W_138+ a *Ghimj(index,621);
a = - W_112/ Ghimj(index,677);
W_112 = -a;
W_116 = W_116+ a *Ghimj(index,678);
W_123 = W_123+ a *Ghimj(index,679);
W_126 = W_126+ a *Ghimj(index,680);
W_128 = W_128+ a *Ghimj(index,681);
W_134 = W_134+ a *Ghimj(index,682);
W_137 = W_137+ a *Ghimj(index,683);
W_138 = W_138+ a *Ghimj(index,684);
a = - W_114/ Ghimj(index,697);
W_114 = -a;
W_126 = W_126+ a *Ghimj(index,698);
W_127 = W_127+ a *Ghimj(index,699);
W_129 = W_129+ a *Ghimj(index,700);
W_132 = W_132+ a *Ghimj(index,701);
W_136 = W_136+ a *Ghimj(index,702);
a = - W_116/ Ghimj(index,714);
W_116 = -a;
W_123 = W_123+ a *Ghimj(index,715);
W_127 = W_127+ a *Ghimj(index,716);
W_128 = W_128+ a *Ghimj(index,717);
W_131 = W_131+ a *Ghimj(index,718);
W_134 = W_134+ a *Ghimj(index,719);
W_135 = W_135+ a *Ghimj(index,720);
W_138 = W_138+ a *Ghimj(index,721);
a = - W_118/ Ghimj(index,745);
W_118 = -a;
W_123 = W_123+ a *Ghimj(index,746);
W_125 = W_125+ a *Ghimj(index,747);
W_126 = W_126+ a *Ghimj(index,748);
W_127 = W_127+ a *Ghimj(index,749);
W_128 = W_128+ a *Ghimj(index,750);
W_129 = W_129+ a *Ghimj(index,751);
W_131 = W_131+ a *Ghimj(index,752);
W_132 = W_132+ a *Ghimj(index,753);
W_134 = W_134+ a *Ghimj(index,754);
W_135 = W_135+ a *Ghimj(index,755);
W_137 = W_137+ a *Ghimj(index,756);
W_138 = W_138+ a *Ghimj(index,757);
a = - W_119/ Ghimj(index,767);
W_119 = -a;
W_121 = W_121+ a *Ghimj(index,768);
W_124 = W_124+ a *Ghimj(index,769);
W_125 = W_125+ a *Ghimj(index,770);
W_126 = W_126+ a *Ghimj(index,771);
W_127 = W_127+ a *Ghimj(index,772);
W_129 = W_129+ a *Ghimj(index,773);
W_133 = W_133+ a *Ghimj(index,774);
W_136 = W_136+ a *Ghimj(index,775);
W_137 = W_137+ a *Ghimj(index,776);
a = - W_121/ Ghimj(index,821);
W_121 = -a;
W_124 = W_124+ a *Ghimj(index,822);
W_125 = W_125+ a *Ghimj(index,823);
W_126 = W_126+ a *Ghimj(index,824);
W_127 = W_127+ a *Ghimj(index,825);
W_129 = W_129+ a *Ghimj(index,826);
W_133 = W_133+ a *Ghimj(index,827);
W_135 = W_135+ a *Ghimj(index,828);
W_136 = W_136+ a *Ghimj(index,829);
W_137 = W_137+ a *Ghimj(index,830);
a = - W_123/ Ghimj(index,869);
W_123 = -a;
W_124 = W_124+ a *Ghimj(index,870);
W_125 = W_125+ a *Ghimj(index,871);
W_126 = W_126+ a *Ghimj(index,872);
W_127 = W_127+ a *Ghimj(index,873);
W_128 = W_128+ a *Ghimj(index,874);
W_129 = W_129+ a *Ghimj(index,875);
W_130 = W_130+ a *Ghimj(index,876);
W_131 = W_131+ a *Ghimj(index,877);
W_132 = W_132+ a *Ghimj(index,878);
W_133 = W_133+ a *Ghimj(index,879);
W_134 = W_134+ a *Ghimj(index,880);
W_135 = W_135+ a *Ghimj(index,881);
W_136 = W_136+ a *Ghimj(index,882);
W_137 = W_137+ a *Ghimj(index,883);
W_138 = W_138+ a *Ghimj(index,884);
a = - W_124/ Ghimj(index,896);
W_124 = -a;
W_125 = W_125+ a *Ghimj(index,897);
W_126 = W_126+ a *Ghimj(index,898);
W_127 = W_127+ a *Ghimj(index,899);
W_128 = W_128+ a *Ghimj(index,900);
W_129 = W_129+ a *Ghimj(index,901);
W_130 = W_130+ a *Ghimj(index,902);
W_131 = W_131+ a *Ghimj(index,903);
W_132 = W_132+ a *Ghimj(index,904);
W_133 = W_133+ a *Ghimj(index,905);
W_135 = W_135+ a *Ghimj(index,906);
W_136 = W_136+ a *Ghimj(index,907);
W_137 = W_137+ a *Ghimj(index,908);
W_138 = W_138+ a *Ghimj(index,909);
a = - W_125/ Ghimj(index,934);
W_125 = -a;
W_126 = W_126+ a *Ghimj(index,935);
W_127 = W_127+ a *Ghimj(index,936);
W_128 = W_128+ a *Ghimj(index,937);
W_129 = W_129+ a *Ghimj(index,938);
W_130 = W_130+ a *Ghimj(index,939);
W_131 = W_131+ a *Ghimj(index,940);
W_132 = W_132+ a *Ghimj(index,941);
W_133 = W_133+ a *Ghimj(index,942);
W_134 = W_134+ a *Ghimj(index,943);
W_135 = W_135+ a *Ghimj(index,944);
W_136 = W_136+ a *Ghimj(index,945);
W_137 = W_137+ a *Ghimj(index,946);
W_138 = W_138+ a *Ghimj(index,947);
a = - W_126/ Ghimj(index,1023);
W_126 = -a;
W_127 = W_127+ a *Ghimj(index,1024);
W_128 = W_128+ a *Ghimj(index,1025);
W_129 = W_129+ a *Ghimj(index,1026);
W_130 = W_130+ a *Ghimj(index,1027);
W_131 = W_131+ a *Ghimj(index,1028);
W_132 = W_132+ a *Ghimj(index,1029);
W_133 = W_133+ a *Ghimj(index,1030);
W_134 = W_134+ a *Ghimj(index,1031);
W_135 = W_135+ a *Ghimj(index,1032);
W_136 = W_136+ a *Ghimj(index,1033);
W_137 = W_137+ a *Ghimj(index,1034);
W_138 = W_138+ a *Ghimj(index,1035);
a = - W_127/ Ghimj(index,1071);
W_127 = -a;
W_128 = W_128+ a *Ghimj(index,1072);
W_129 = W_129+ a *Ghimj(index,1073);
W_130 = W_130+ a *Ghimj(index,1074);
W_131 = W_131+ a *Ghimj(index,1075);
W_132 = W_132+ a *Ghimj(index,1076);
W_133 = W_133+ a *Ghimj(index,1077);
W_134 = W_134+ a *Ghimj(index,1078);
W_135 = W_135+ a *Ghimj(index,1079);
W_136 = W_136+ a *Ghimj(index,1080);
W_137 = W_137+ a *Ghimj(index,1081);
W_138 = W_138+ a *Ghimj(index,1082);
a = - W_128/ Ghimj(index,1138);
W_128 = -a;
W_129 = W_129+ a *Ghimj(index,1139);
W_130 = W_130+ a *Ghimj(index,1140);
W_131 = W_131+ a *Ghimj(index,1141);
W_132 = W_132+ a *Ghimj(index,1142);
W_133 = W_133+ a *Ghimj(index,1143);
W_134 = W_134+ a *Ghimj(index,1144);
W_135 = W_135+ a *Ghimj(index,1145);
W_136 = W_136+ a *Ghimj(index,1146);
W_137 = W_137+ a *Ghimj(index,1147);
W_138 = W_138+ a *Ghimj(index,1148);
Ghimj(index,1149) = W_0;
Ghimj(index,1150) = W_1;
Ghimj(index,1151) = W_2;
Ghimj(index,1152) = W_44;
Ghimj(index,1153) = W_45;
Ghimj(index,1154) = W_52;
Ghimj(index,1155) = W_53;
Ghimj(index,1156) = W_54;
Ghimj(index,1157) = W_55;
Ghimj(index,1158) = W_80;
Ghimj(index,1159) = W_90;
Ghimj(index,1160) = W_100;
Ghimj(index,1161) = W_103;
Ghimj(index,1162) = W_104;
Ghimj(index,1163) = W_105;
Ghimj(index,1164) = W_112;
Ghimj(index,1165) = W_114;
Ghimj(index,1166) = W_116;
Ghimj(index,1167) = W_118;
Ghimj(index,1168) = W_119;
Ghimj(index,1169) = W_121;
Ghimj(index,1170) = W_123;
Ghimj(index,1171) = W_124;
Ghimj(index,1172) = W_125;
Ghimj(index,1173) = W_126;
Ghimj(index,1174) = W_127;
Ghimj(index,1175) = W_128;
Ghimj(index,1176) = W_129;
Ghimj(index,1177) = W_130;
Ghimj(index,1178) = W_131;
Ghimj(index,1179) = W_132;
Ghimj(index,1180) = W_133;
Ghimj(index,1181) = W_134;
Ghimj(index,1182) = W_135;
Ghimj(index,1183) = W_136;
Ghimj(index,1184) = W_137;
Ghimj(index,1185) = W_138;
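// Next row of the factorization: appears to be column 130 (diagonal at
// Ghimj(index,1218)), using the same load / eliminate / store pattern as above.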
W_58 = Ghimj(index,1186);
W_65 = Ghimj(index,1187);
W_66 = Ghimj(index,1188);
W_72 = Ghimj(index,1189);
W_77 = Ghimj(index,1190);
W_82 = Ghimj(index,1191);
W_89 = Ghimj(index,1192);
W_91 = Ghimj(index,1193);
W_93 = Ghimj(index,1194);
W_94 = Ghimj(index,1195);
W_98 = Ghimj(index,1196);
W_102 = Ghimj(index,1197);
W_103 = Ghimj(index,1198);
W_104 = Ghimj(index,1199);
W_106 = Ghimj(index,1200);
W_107 = Ghimj(index,1201);
W_108 = Ghimj(index,1202);
W_109 = Ghimj(index,1203);
W_110 = Ghimj(index,1204);
W_113 = Ghimj(index,1205);
W_114 = Ghimj(index,1206);
W_115 = Ghimj(index,1207);
W_117 = Ghimj(index,1208);
W_120 = Ghimj(index,1209);
W_121 = Ghimj(index,1210);
W_122 = Ghimj(index,1211);
W_124 = Ghimj(index,1212);
W_125 = Ghimj(index,1213);
W_126 = Ghimj(index,1214);
W_127 = Ghimj(index,1215);
W_128 = Ghimj(index,1216);
W_129 = Ghimj(index,1217);
W_130 = Ghimj(index,1218);
W_131 = Ghimj(index,1219);
W_132 = Ghimj(index,1220);
W_133 = Ghimj(index,1221);
W_134 = Ghimj(index,1222);
W_135 = Ghimj(index,1223);
W_136 = Ghimj(index,1224);
W_137 = Ghimj(index,1225);
W_138 = Ghimj(index,1226);
a = - W_58/ Ghimj(index,303);
W_58 = -a;
W_91 = W_91+ a *Ghimj(index,304);
W_126 = W_126+ a *Ghimj(index,305);
a = - W_65/ Ghimj(index,331);
W_65 = -a;
W_114 = W_114+ a *Ghimj(index,332);
W_126 = W_126+ a *Ghimj(index,333);
W_132 = W_132+ a *Ghimj(index,334);
a = - W_66/ Ghimj(index,335);
W_66 = -a;
W_109 = W_109+ a *Ghimj(index,336);
W_126 = W_126+ a *Ghimj(index,337);
W_137 = W_137+ a *Ghimj(index,338);
a = - W_72/ Ghimj(index,360);
W_72 = -a;
W_94 = W_94+ a *Ghimj(index,361);
W_126 = W_126+ a *Ghimj(index,362);
W_137 = W_137+ a *Ghimj(index,363);
a = - W_77/ Ghimj(index,382);
W_77 = -a;
W_121 = W_121+ a *Ghimj(index,383);
W_126 = W_126+ a *Ghimj(index,384);
W_135 = W_135+ a *Ghimj(index,385);
a = - W_82/ Ghimj(index,412);
W_82 = -a;
W_113 = W_113+ a *Ghimj(index,413);
W_126 = W_126+ a *Ghimj(index,414);
W_137 = W_137+ a *Ghimj(index,415);
a = - W_89/ Ghimj(index,457);
W_89 = -a;
W_93 = W_93+ a *Ghimj(index,458);
W_94 = W_94+ a *Ghimj(index,459);
W_102 = W_102+ a *Ghimj(index,460);
W_107 = W_107+ a *Ghimj(index,461);
W_109 = W_109+ a *Ghimj(index,462);
W_113 = W_113+ a *Ghimj(index,463);
W_117 = W_117+ a *Ghimj(index,464);
W_124 = W_124+ a *Ghimj(index,465);
W_125 = W_125+ a *Ghimj(index,466);
W_126 = W_126+ a *Ghimj(index,467);
a = - W_91/ Ghimj(index,481);
W_91 = -a;
W_106 = W_106+ a *Ghimj(index,482);
W_109 = W_109+ a *Ghimj(index,483);
W_126 = W_126+ a *Ghimj(index,484);
W_133 = W_133+ a *Ghimj(index,485);
W_136 = W_136+ a *Ghimj(index,486);
a = - W_93/ Ghimj(index,497);
W_93 = -a;
W_125 = W_125+ a *Ghimj(index,498);
W_126 = W_126+ a *Ghimj(index,499);
W_133 = W_133+ a *Ghimj(index,500);
W_137 = W_137+ a *Ghimj(index,501);
a = - W_94/ Ghimj(index,505);
W_94 = -a;
W_125 = W_125+ a *Ghimj(index,506);
W_126 = W_126+ a *Ghimj(index,507);
W_133 = W_133+ a *Ghimj(index,508);
W_137 = W_137+ a *Ghimj(index,509);
a = - W_98/ Ghimj(index,557);
W_98 = -a;
W_107 = W_107+ a *Ghimj(index,558);
W_120 = W_120+ a *Ghimj(index,559);
W_124 = W_124+ a *Ghimj(index,560);
W_126 = W_126+ a *Ghimj(index,561);
W_127 = W_127+ a *Ghimj(index,562);
a = - W_102/ Ghimj(index,600);
W_102 = -a;
W_125 = W_125+ a *Ghimj(index,601);
W_126 = W_126+ a *Ghimj(index,602);
W_133 = W_133+ a *Ghimj(index,603);
W_137 = W_137+ a *Ghimj(index,604);
a = - W_103/ Ghimj(index,605);
W_103 = -a;
W_124 = W_124+ a *Ghimj(index,606);
W_126 = W_126+ a *Ghimj(index,607);
W_127 = W_127+ a *Ghimj(index,608);
W_129 = W_129+ a *Ghimj(index,609);
a = - W_104/ Ghimj(index,610);
W_104 = -a;
W_125 = W_125+ a *Ghimj(index,611);
W_126 = W_126+ a *Ghimj(index,612);
W_127 = W_127+ a *Ghimj(index,613);
W_129 = W_129+ a *Ghimj(index,614);
W_137 = W_137+ a *Ghimj(index,615);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
a = - W_108/ Ghimj(index,636);
W_108 = -a;
W_109 = W_109+ a *Ghimj(index,637);
W_113 = W_113+ a *Ghimj(index,638);
W_115 = W_115+ a *Ghimj(index,639);
W_124 = W_124+ a *Ghimj(index,640);
W_125 = W_125+ a *Ghimj(index,641);
W_126 = W_126+ a *Ghimj(index,642);
W_133 = W_133+ a *Ghimj(index,643);
W_135 = W_135+ a *Ghimj(index,644);
W_136 = W_136+ a *Ghimj(index,645);
W_137 = W_137+ a *Ghimj(index,646);
a = - W_109/ Ghimj(index,648);
W_109 = -a;
W_124 = W_124+ a *Ghimj(index,649);
W_125 = W_125+ a *Ghimj(index,650);
W_126 = W_126+ a *Ghimj(index,651);
W_133 = W_133+ a *Ghimj(index,652);
W_136 = W_136+ a *Ghimj(index,653);
W_137 = W_137+ a *Ghimj(index,654);
a = - W_110/ Ghimj(index,659);
W_110 = -a;
W_124 = W_124+ a *Ghimj(index,660);
W_125 = W_125+ a *Ghimj(index,661);
W_126 = W_126+ a *Ghimj(index,662);
W_133 = W_133+ a *Ghimj(index,663);
W_136 = W_136+ a *Ghimj(index,664);
W_137 = W_137+ a *Ghimj(index,665);
a = - W_113/ Ghimj(index,689);
W_113 = -a;
W_124 = W_124+ a *Ghimj(index,690);
W_125 = W_125+ a *Ghimj(index,691);
W_126 = W_126+ a *Ghimj(index,692);
W_133 = W_133+ a *Ghimj(index,693);
W_135 = W_135+ a *Ghimj(index,694);
W_136 = W_136+ a *Ghimj(index,695);
W_137 = W_137+ a *Ghimj(index,696);
a = - W_114/ Ghimj(index,697);
W_114 = -a;
W_126 = W_126+ a *Ghimj(index,698);
W_127 = W_127+ a *Ghimj(index,699);
W_129 = W_129+ a *Ghimj(index,700);
W_132 = W_132+ a *Ghimj(index,701);
W_136 = W_136+ a *Ghimj(index,702);
a = - W_115/ Ghimj(index,706);
W_115 = -a;
W_124 = W_124+ a *Ghimj(index,707);
W_126 = W_126+ a *Ghimj(index,708);
W_127 = W_127+ a *Ghimj(index,709);
W_129 = W_129+ a *Ghimj(index,710);
W_133 = W_133+ a *Ghimj(index,711);
W_136 = W_136+ a *Ghimj(index,712);
W_137 = W_137+ a *Ghimj(index,713);
a = - W_117/ Ghimj(index,731);
W_117 = -a;
W_121 = W_121+ a *Ghimj(index,732);
W_124 = W_124+ a *Ghimj(index,733);
W_125 = W_125+ a *Ghimj(index,734);
W_126 = W_126+ a *Ghimj(index,735);
W_127 = W_127+ a *Ghimj(index,736);
W_129 = W_129+ a *Ghimj(index,737);
W_133 = W_133+ a *Ghimj(index,738);
W_136 = W_136+ a *Ghimj(index,739);
W_137 = W_137+ a *Ghimj(index,740);
a = - W_120/ Ghimj(index,787);
W_120 = -a;
W_122 = W_122+ a *Ghimj(index,788);
W_124 = W_124+ a *Ghimj(index,789);
W_126 = W_126+ a *Ghimj(index,790);
W_127 = W_127+ a *Ghimj(index,791);
W_128 = W_128+ a *Ghimj(index,792);
W_130 = W_130+ a *Ghimj(index,793);
W_133 = W_133+ a *Ghimj(index,794);
W_135 = W_135+ a *Ghimj(index,795);
W_136 = W_136+ a *Ghimj(index,796);
W_137 = W_137+ a *Ghimj(index,797);
a = - W_121/ Ghimj(index,821);
W_121 = -a;
W_124 = W_124+ a *Ghimj(index,822);
W_125 = W_125+ a *Ghimj(index,823);
W_126 = W_126+ a *Ghimj(index,824);
W_127 = W_127+ a *Ghimj(index,825);
W_129 = W_129+ a *Ghimj(index,826);
W_133 = W_133+ a *Ghimj(index,827);
W_135 = W_135+ a *Ghimj(index,828);
W_136 = W_136+ a *Ghimj(index,829);
W_137 = W_137+ a *Ghimj(index,830);
a = - W_122/ Ghimj(index,847);
W_122 = -a;
W_124 = W_124+ a *Ghimj(index,848);
W_125 = W_125+ a *Ghimj(index,849);
W_126 = W_126+ a *Ghimj(index,850);
W_127 = W_127+ a *Ghimj(index,851);
W_128 = W_128+ a *Ghimj(index,852);
W_129 = W_129+ a *Ghimj(index,853);
W_130 = W_130+ a *Ghimj(index,854);
W_131 = W_131+ a *Ghimj(index,855);
W_133 = W_133+ a *Ghimj(index,856);
W_135 = W_135+ a *Ghimj(index,857);
W_136 = W_136+ a *Ghimj(index,858);
W_137 = W_137+ a *Ghimj(index,859);
W_138 = W_138+ a *Ghimj(index,860);
a = - W_124/ Ghimj(index,896);
W_124 = -a;
W_125 = W_125+ a *Ghimj(index,897);
W_126 = W_126+ a *Ghimj(index,898);
W_127 = W_127+ a *Ghimj(index,899);
W_128 = W_128+ a *Ghimj(index,900);
W_129 = W_129+ a *Ghimj(index,901);
W_130 = W_130+ a *Ghimj(index,902);
W_131 = W_131+ a *Ghimj(index,903);
W_132 = W_132+ a *Ghimj(index,904);
W_133 = W_133+ a *Ghimj(index,905);
W_135 = W_135+ a *Ghimj(index,906);
W_136 = W_136+ a *Ghimj(index,907);
W_137 = W_137+ a *Ghimj(index,908);
W_138 = W_138+ a *Ghimj(index,909);
a = - W_125/ Ghimj(index,934);
W_125 = -a;
W_126 = W_126+ a *Ghimj(index,935);
W_127 = W_127+ a *Ghimj(index,936);
W_128 = W_128+ a *Ghimj(index,937);
W_129 = W_129+ a *Ghimj(index,938);
W_130 = W_130+ a *Ghimj(index,939);
W_131 = W_131+ a *Ghimj(index,940);
W_132 = W_132+ a *Ghimj(index,941);
W_133 = W_133+ a *Ghimj(index,942);
W_134 = W_134+ a *Ghimj(index,943);
W_135 = W_135+ a *Ghimj(index,944);
W_136 = W_136+ a *Ghimj(index,945);
W_137 = W_137+ a *Ghimj(index,946);
W_138 = W_138+ a *Ghimj(index,947);
a = - W_126/ Ghimj(index,1023);
W_126 = -a;
W_127 = W_127+ a *Ghimj(index,1024);
W_128 = W_128+ a *Ghimj(index,1025);
W_129 = W_129+ a *Ghimj(index,1026);
W_130 = W_130+ a *Ghimj(index,1027);
W_131 = W_131+ a *Ghimj(index,1028);
W_132 = W_132+ a *Ghimj(index,1029);
W_133 = W_133+ a *Ghimj(index,1030);
W_134 = W_134+ a *Ghimj(index,1031);
W_135 = W_135+ a *Ghimj(index,1032);
W_136 = W_136+ a *Ghimj(index,1033);
W_137 = W_137+ a *Ghimj(index,1034);
W_138 = W_138+ a *Ghimj(index,1035);
a = - W_127/ Ghimj(index,1071);
W_127 = -a;
W_128 = W_128+ a *Ghimj(index,1072);
W_129 = W_129+ a *Ghimj(index,1073);
W_130 = W_130+ a *Ghimj(index,1074);
W_131 = W_131+ a *Ghimj(index,1075);
W_132 = W_132+ a *Ghimj(index,1076);
W_133 = W_133+ a *Ghimj(index,1077);
W_134 = W_134+ a *Ghimj(index,1078);
W_135 = W_135+ a *Ghimj(index,1079);
W_136 = W_136+ a *Ghimj(index,1080);
W_137 = W_137+ a *Ghimj(index,1081);
W_138 = W_138+ a *Ghimj(index,1082);
a = - W_128/ Ghimj(index,1138);
W_128 = -a;
W_129 = W_129+ a *Ghimj(index,1139);
W_130 = W_130+ a *Ghimj(index,1140);
W_131 = W_131+ a *Ghimj(index,1141);
W_132 = W_132+ a *Ghimj(index,1142);
W_133 = W_133+ a *Ghimj(index,1143);
W_134 = W_134+ a *Ghimj(index,1144);
W_135 = W_135+ a *Ghimj(index,1145);
W_136 = W_136+ a *Ghimj(index,1146);
W_137 = W_137+ a *Ghimj(index,1147);
W_138 = W_138+ a *Ghimj(index,1148);
a = - W_129/ Ghimj(index,1176);
W_129 = -a;
W_130 = W_130+ a *Ghimj(index,1177);
W_131 = W_131+ a *Ghimj(index,1178);
W_132 = W_132+ a *Ghimj(index,1179);
W_133 = W_133+ a *Ghimj(index,1180);
W_134 = W_134+ a *Ghimj(index,1181);
W_135 = W_135+ a *Ghimj(index,1182);
W_136 = W_136+ a *Ghimj(index,1183);
W_137 = W_137+ a *Ghimj(index,1184);
W_138 = W_138+ a *Ghimj(index,1185);
Ghimj(index,1186) = W_58;
Ghimj(index,1187) = W_65;
Ghimj(index,1188) = W_66;
Ghimj(index,1189) = W_72;
Ghimj(index,1190) = W_77;
Ghimj(index,1191) = W_82;
Ghimj(index,1192) = W_89;
Ghimj(index,1193) = W_91;
Ghimj(index,1194) = W_93;
Ghimj(index,1195) = W_94;
Ghimj(index,1196) = W_98;
Ghimj(index,1197) = W_102;
Ghimj(index,1198) = W_103;
Ghimj(index,1199) = W_104;
Ghimj(index,1200) = W_106;
Ghimj(index,1201) = W_107;
Ghimj(index,1202) = W_108;
Ghimj(index,1203) = W_109;
Ghimj(index,1204) = W_110;
Ghimj(index,1205) = W_113;
Ghimj(index,1206) = W_114;
Ghimj(index,1207) = W_115;
Ghimj(index,1208) = W_117;
Ghimj(index,1209) = W_120;
Ghimj(index,1210) = W_121;
Ghimj(index,1211) = W_122;
Ghimj(index,1212) = W_124;
Ghimj(index,1213) = W_125;
Ghimj(index,1214) = W_126;
Ghimj(index,1215) = W_127;
Ghimj(index,1216) = W_128;
Ghimj(index,1217) = W_129;
Ghimj(index,1218) = W_130;
Ghimj(index,1219) = W_131;
Ghimj(index,1220) = W_132;
Ghimj(index,1221) = W_133;
Ghimj(index,1222) = W_134;
Ghimj(index,1223) = W_135;
Ghimj(index,1224) = W_136;
Ghimj(index,1225) = W_137;
Ghimj(index,1226) = W_138;
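// Next row: appears to be column 131 (diagonal at Ghimj(index,1242)),
// same load / eliminate / store pattern.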
W_51 = Ghimj(index,1227);
W_59 = Ghimj(index,1228);
W_75 = Ghimj(index,1229);
W_116 = Ghimj(index,1230);
W_118 = Ghimj(index,1231);
W_120 = Ghimj(index,1232);
W_122 = Ghimj(index,1233);
W_123 = Ghimj(index,1234);
W_124 = Ghimj(index,1235);
W_125 = Ghimj(index,1236);
W_126 = Ghimj(index,1237);
W_127 = Ghimj(index,1238);
W_128 = Ghimj(index,1239);
W_129 = Ghimj(index,1240);
W_130 = Ghimj(index,1241);
W_131 = Ghimj(index,1242);
W_132 = Ghimj(index,1243);
W_133 = Ghimj(index,1244);
W_134 = Ghimj(index,1245);
W_135 = Ghimj(index,1246);
W_136 = Ghimj(index,1247);
W_137 = Ghimj(index,1248);
W_138 = Ghimj(index,1249);
a = - W_51/ Ghimj(index,285);
W_51 = -a;
W_132 = W_132+ a *Ghimj(index,286);
W_134 = W_134+ a *Ghimj(index,287);
a = - W_59/ Ghimj(index,306);
W_59 = -a;
W_133 = W_133+ a *Ghimj(index,307);
W_135 = W_135+ a *Ghimj(index,308);
a = - W_75/ Ghimj(index,374);
W_75 = -a;
W_120 = W_120+ a *Ghimj(index,375);
W_126 = W_126+ a *Ghimj(index,376);
a = - W_116/ Ghimj(index,714);
W_116 = -a;
W_123 = W_123+ a *Ghimj(index,715);
W_127 = W_127+ a *Ghimj(index,716);
W_128 = W_128+ a *Ghimj(index,717);
W_131 = W_131+ a *Ghimj(index,718);
W_134 = W_134+ a *Ghimj(index,719);
W_135 = W_135+ a *Ghimj(index,720);
W_138 = W_138+ a *Ghimj(index,721);
a = - W_118/ Ghimj(index,745);
W_118 = -a;
W_123 = W_123+ a *Ghimj(index,746);
W_125 = W_125+ a *Ghimj(index,747);
W_126 = W_126+ a *Ghimj(index,748);
W_127 = W_127+ a *Ghimj(index,749);
W_128 = W_128+ a *Ghimj(index,750);
W_129 = W_129+ a *Ghimj(index,751);
W_131 = W_131+ a *Ghimj(index,752);
W_132 = W_132+ a *Ghimj(index,753);
W_134 = W_134+ a *Ghimj(index,754);
W_135 = W_135+ a *Ghimj(index,755);
W_137 = W_137+ a *Ghimj(index,756);
W_138 = W_138+ a *Ghimj(index,757);
a = - W_120/ Ghimj(index,787);
W_120 = -a;
W_122 = W_122+ a *Ghimj(index,788);
W_124 = W_124+ a *Ghimj(index,789);
W_126 = W_126+ a *Ghimj(index,790);
W_127 = W_127+ a *Ghimj(index,791);
W_128 = W_128+ a *Ghimj(index,792);
W_130 = W_130+ a *Ghimj(index,793);
W_133 = W_133+ a *Ghimj(index,794);
W_135 = W_135+ a *Ghimj(index,795);
W_136 = W_136+ a *Ghimj(index,796);
W_137 = W_137+ a *Ghimj(index,797);
a = - W_122/ Ghimj(index,847);
W_122 = -a;
W_124 = W_124+ a *Ghimj(index,848);
W_125 = W_125+ a *Ghimj(index,849);
W_126 = W_126+ a *Ghimj(index,850);
W_127 = W_127+ a *Ghimj(index,851);
W_128 = W_128+ a *Ghimj(index,852);
W_129 = W_129+ a *Ghimj(index,853);
W_130 = W_130+ a *Ghimj(index,854);
W_131 = W_131+ a *Ghimj(index,855);
W_133 = W_133+ a *Ghimj(index,856);
W_135 = W_135+ a *Ghimj(index,857);
W_136 = W_136+ a *Ghimj(index,858);
W_137 = W_137+ a *Ghimj(index,859);
W_138 = W_138+ a *Ghimj(index,860);
a = - W_123/ Ghimj(index,869);
W_123 = -a;
W_124 = W_124+ a *Ghimj(index,870);
W_125 = W_125+ a *Ghimj(index,871);
W_126 = W_126+ a *Ghimj(index,872);
W_127 = W_127+ a *Ghimj(index,873);
W_128 = W_128+ a *Ghimj(index,874);
W_129 = W_129+ a *Ghimj(index,875);
W_130 = W_130+ a *Ghimj(index,876);
W_131 = W_131+ a *Ghimj(index,877);
W_132 = W_132+ a *Ghimj(index,878);
W_133 = W_133+ a *Ghimj(index,879);
W_134 = W_134+ a *Ghimj(index,880);
W_135 = W_135+ a *Ghimj(index,881);
W_136 = W_136+ a *Ghimj(index,882);
W_137 = W_137+ a *Ghimj(index,883);
W_138 = W_138+ a *Ghimj(index,884);
a = - W_124/ Ghimj(index,896);
W_124 = -a;
W_125 = W_125+ a *Ghimj(index,897);
W_126 = W_126+ a *Ghimj(index,898);
W_127 = W_127+ a *Ghimj(index,899);
W_128 = W_128+ a *Ghimj(index,900);
W_129 = W_129+ a *Ghimj(index,901);
W_130 = W_130+ a *Ghimj(index,902);
W_131 = W_131+ a *Ghimj(index,903);
W_132 = W_132+ a *Ghimj(index,904);
W_133 = W_133+ a *Ghimj(index,905);
W_135 = W_135+ a *Ghimj(index,906);
W_136 = W_136+ a *Ghimj(index,907);
W_137 = W_137+ a *Ghimj(index,908);
W_138 = W_138+ a *Ghimj(index,909);
a = - W_125/ Ghimj(index,934);
W_125 = -a;
W_126 = W_126+ a *Ghimj(index,935);
W_127 = W_127+ a *Ghimj(index,936);
W_128 = W_128+ a *Ghimj(index,937);
W_129 = W_129+ a *Ghimj(index,938);
W_130 = W_130+ a *Ghimj(index,939);
W_131 = W_131+ a *Ghimj(index,940);
W_132 = W_132+ a *Ghimj(index,941);
W_133 = W_133+ a *Ghimj(index,942);
W_134 = W_134+ a *Ghimj(index,943);
W_135 = W_135+ a *Ghimj(index,944);
W_136 = W_136+ a *Ghimj(index,945);
W_137 = W_137+ a *Ghimj(index,946);
W_138 = W_138+ a *Ghimj(index,947);
a = - W_126/ Ghimj(index,1023);
W_126 = -a;
W_127 = W_127+ a *Ghimj(index,1024);
W_128 = W_128+ a *Ghimj(index,1025);
W_129 = W_129+ a *Ghimj(index,1026);
W_130 = W_130+ a *Ghimj(index,1027);
W_131 = W_131+ a *Ghimj(index,1028);
W_132 = W_132+ a *Ghimj(index,1029);
W_133 = W_133+ a *Ghimj(index,1030);
W_134 = W_134+ a *Ghimj(index,1031);
W_135 = W_135+ a *Ghimj(index,1032);
W_136 = W_136+ a *Ghimj(index,1033);
W_137 = W_137+ a *Ghimj(index,1034);
W_138 = W_138+ a *Ghimj(index,1035);
a = - W_127/ Ghimj(index,1071);
W_127 = -a;
W_128 = W_128+ a *Ghimj(index,1072);
W_129 = W_129+ a *Ghimj(index,1073);
W_130 = W_130+ a *Ghimj(index,1074);
W_131 = W_131+ a *Ghimj(index,1075);
W_132 = W_132+ a *Ghimj(index,1076);
W_133 = W_133+ a *Ghimj(index,1077);
W_134 = W_134+ a *Ghimj(index,1078);
W_135 = W_135+ a *Ghimj(index,1079);
W_136 = W_136+ a *Ghimj(index,1080);
W_137 = W_137+ a *Ghimj(index,1081);
W_138 = W_138+ a *Ghimj(index,1082);
a = - W_128/ Ghimj(index,1138);
W_128 = -a;
W_129 = W_129+ a *Ghimj(index,1139);
W_130 = W_130+ a *Ghimj(index,1140);
W_131 = W_131+ a *Ghimj(index,1141);
W_132 = W_132+ a *Ghimj(index,1142);
W_133 = W_133+ a *Ghimj(index,1143);
W_134 = W_134+ a *Ghimj(index,1144);
W_135 = W_135+ a *Ghimj(index,1145);
W_136 = W_136+ a *Ghimj(index,1146);
W_137 = W_137+ a *Ghimj(index,1147);
W_138 = W_138+ a *Ghimj(index,1148);
a = - W_129/ Ghimj(index,1176);
W_129 = -a;
W_130 = W_130+ a *Ghimj(index,1177);
W_131 = W_131+ a *Ghimj(index,1178);
W_132 = W_132+ a *Ghimj(index,1179);
W_133 = W_133+ a *Ghimj(index,1180);
W_134 = W_134+ a *Ghimj(index,1181);
W_135 = W_135+ a *Ghimj(index,1182);
W_136 = W_136+ a *Ghimj(index,1183);
W_137 = W_137+ a *Ghimj(index,1184);
W_138 = W_138+ a *Ghimj(index,1185);
a = - W_130/ Ghimj(index,1218);
W_130 = -a;
W_131 = W_131+ a *Ghimj(index,1219);
W_132 = W_132+ a *Ghimj(index,1220);
W_133 = W_133+ a *Ghimj(index,1221);
W_134 = W_134+ a *Ghimj(index,1222);
W_135 = W_135+ a *Ghimj(index,1223);
W_136 = W_136+ a *Ghimj(index,1224);
W_137 = W_137+ a *Ghimj(index,1225);
W_138 = W_138+ a *Ghimj(index,1226);
Ghimj(index,1227) = W_51;
Ghimj(index,1228) = W_59;
Ghimj(index,1229) = W_75;
Ghimj(index,1230) = W_116;
Ghimj(index,1231) = W_118;
Ghimj(index,1232) = W_120;
Ghimj(index,1233) = W_122;
Ghimj(index,1234) = W_123;
Ghimj(index,1235) = W_124;
Ghimj(index,1236) = W_125;
Ghimj(index,1237) = W_126;
Ghimj(index,1238) = W_127;
Ghimj(index,1239) = W_128;
Ghimj(index,1240) = W_129;
Ghimj(index,1241) = W_130;
Ghimj(index,1242) = W_131;
Ghimj(index,1243) = W_132;
Ghimj(index,1244) = W_133;
Ghimj(index,1245) = W_134;
Ghimj(index,1246) = W_135;
Ghimj(index,1247) = W_136;
Ghimj(index,1248) = W_137;
Ghimj(index,1249) = W_138;
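// Next row: appears to be column 132 (diagonal at Ghimj(index,1262)),
// same load / eliminate / store pattern.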
W_105 = Ghimj(index,1250);
W_114 = Ghimj(index,1251);
W_118 = Ghimj(index,1252);
W_123 = Ghimj(index,1253);
W_124 = Ghimj(index,1254);
W_125 = Ghimj(index,1255);
W_126 = Ghimj(index,1256);
W_127 = Ghimj(index,1257);
W_128 = Ghimj(index,1258);
W_129 = Ghimj(index,1259);
W_130 = Ghimj(index,1260);
W_131 = Ghimj(index,1261);
W_132 = Ghimj(index,1262);
W_133 = Ghimj(index,1263);
W_134 = Ghimj(index,1264);
W_135 = Ghimj(index,1265);
W_136 = Ghimj(index,1266);
W_137 = Ghimj(index,1267);
W_138 = Ghimj(index,1268);
a = - W_105/ Ghimj(index,616);
W_105 = -a;
W_128 = W_128+ a *Ghimj(index,617);
W_129 = W_129+ a *Ghimj(index,618);
W_132 = W_132+ a *Ghimj(index,619);
W_135 = W_135+ a *Ghimj(index,620);
W_138 = W_138+ a *Ghimj(index,621);
a = - W_114/ Ghimj(index,697);
W_114 = -a;
W_126 = W_126+ a *Ghimj(index,698);
W_127 = W_127+ a *Ghimj(index,699);
W_129 = W_129+ a *Ghimj(index,700);
W_132 = W_132+ a *Ghimj(index,701);
W_136 = W_136+ a *Ghimj(index,702);
a = - W_118/ Ghimj(index,745);
W_118 = -a;
W_123 = W_123+ a *Ghimj(index,746);
W_125 = W_125+ a *Ghimj(index,747);
W_126 = W_126+ a *Ghimj(index,748);
W_127 = W_127+ a *Ghimj(index,749);
W_128 = W_128+ a *Ghimj(index,750);
W_129 = W_129+ a *Ghimj(index,751);
W_131 = W_131+ a *Ghimj(index,752);
W_132 = W_132+ a *Ghimj(index,753);
W_134 = W_134+ a *Ghimj(index,754);
W_135 = W_135+ a *Ghimj(index,755);
W_137 = W_137+ a *Ghimj(index,756);
W_138 = W_138+ a *Ghimj(index,757);
a = - W_123/ Ghimj(index,869);
W_123 = -a;
W_124 = W_124+ a *Ghimj(index,870);
W_125 = W_125+ a *Ghimj(index,871);
W_126 = W_126+ a *Ghimj(index,872);
W_127 = W_127+ a *Ghimj(index,873);
W_128 = W_128+ a *Ghimj(index,874);
W_129 = W_129+ a *Ghimj(index,875);
W_130 = W_130+ a *Ghimj(index,876);
W_131 = W_131+ a *Ghimj(index,877);
W_132 = W_132+ a *Ghimj(index,878);
W_133 = W_133+ a *Ghimj(index,879);
W_134 = W_134+ a *Ghimj(index,880);
W_135 = W_135+ a *Ghimj(index,881);
W_136 = W_136+ a *Ghimj(index,882);
W_137 = W_137+ a *Ghimj(index,883);
W_138 = W_138+ a *Ghimj(index,884);
a = - W_124/ Ghimj(index,896);
W_124 = -a;
W_125 = W_125+ a *Ghimj(index,897);
W_126 = W_126+ a *Ghimj(index,898);
W_127 = W_127+ a *Ghimj(index,899);
W_128 = W_128+ a *Ghimj(index,900);
W_129 = W_129+ a *Ghimj(index,901);
W_130 = W_130+ a *Ghimj(index,902);
W_131 = W_131+ a *Ghimj(index,903);
W_132 = W_132+ a *Ghimj(index,904);
W_133 = W_133+ a *Ghimj(index,905);
W_135 = W_135+ a *Ghimj(index,906);
W_136 = W_136+ a *Ghimj(index,907);
W_137 = W_137+ a *Ghimj(index,908);
W_138 = W_138+ a *Ghimj(index,909);
a = - W_125/ Ghimj(index,934);
W_125 = -a;
W_126 = W_126+ a *Ghimj(index,935);
W_127 = W_127+ a *Ghimj(index,936);
W_128 = W_128+ a *Ghimj(index,937);
W_129 = W_129+ a *Ghimj(index,938);
W_130 = W_130+ a *Ghimj(index,939);
W_131 = W_131+ a *Ghimj(index,940);
W_132 = W_132+ a *Ghimj(index,941);
W_133 = W_133+ a *Ghimj(index,942);
W_134 = W_134+ a *Ghimj(index,943);
W_135 = W_135+ a *Ghimj(index,944);
W_136 = W_136+ a *Ghimj(index,945);
W_137 = W_137+ a *Ghimj(index,946);
W_138 = W_138+ a *Ghimj(index,947);
a = - W_126/ Ghimj(index,1023);
W_126 = -a;
W_127 = W_127+ a *Ghimj(index,1024);
W_128 = W_128+ a *Ghimj(index,1025);
W_129 = W_129+ a *Ghimj(index,1026);
W_130 = W_130+ a *Ghimj(index,1027);
W_131 = W_131+ a *Ghimj(index,1028);
W_132 = W_132+ a *Ghimj(index,1029);
W_133 = W_133+ a *Ghimj(index,1030);
W_134 = W_134+ a *Ghimj(index,1031);
W_135 = W_135+ a *Ghimj(index,1032);
W_136 = W_136+ a *Ghimj(index,1033);
W_137 = W_137+ a *Ghimj(index,1034);
W_138 = W_138+ a *Ghimj(index,1035);
a = - W_127/ Ghimj(index,1071);
W_127 = -a;
W_128 = W_128+ a *Ghimj(index,1072);
W_129 = W_129+ a *Ghimj(index,1073);
W_130 = W_130+ a *Ghimj(index,1074);
W_131 = W_131+ a *Ghimj(index,1075);
W_132 = W_132+ a *Ghimj(index,1076);
W_133 = W_133+ a *Ghimj(index,1077);
W_134 = W_134+ a *Ghimj(index,1078);
W_135 = W_135+ a *Ghimj(index,1079);
W_136 = W_136+ a *Ghimj(index,1080);
W_137 = W_137+ a *Ghimj(index,1081);
W_138 = W_138+ a *Ghimj(index,1082);
a = - W_128/ Ghimj(index,1138);
W_128 = -a;
W_129 = W_129+ a *Ghimj(index,1139);
W_130 = W_130+ a *Ghimj(index,1140);
W_131 = W_131+ a *Ghimj(index,1141);
W_132 = W_132+ a *Ghimj(index,1142);
W_133 = W_133+ a *Ghimj(index,1143);
W_134 = W_134+ a *Ghimj(index,1144);
W_135 = W_135+ a *Ghimj(index,1145);
W_136 = W_136+ a *Ghimj(index,1146);
W_137 = W_137+ a *Ghimj(index,1147);
W_138 = W_138+ a *Ghimj(index,1148);
a = - W_129/ Ghimj(index,1176);
W_129 = -a;
W_130 = W_130+ a *Ghimj(index,1177);
W_131 = W_131+ a *Ghimj(index,1178);
W_132 = W_132+ a *Ghimj(index,1179);
W_133 = W_133+ a *Ghimj(index,1180);
W_134 = W_134+ a *Ghimj(index,1181);
W_135 = W_135+ a *Ghimj(index,1182);
W_136 = W_136+ a *Ghimj(index,1183);
W_137 = W_137+ a *Ghimj(index,1184);
W_138 = W_138+ a *Ghimj(index,1185);
a = - W_130/ Ghimj(index,1218);
W_130 = -a;
W_131 = W_131+ a *Ghimj(index,1219);
W_132 = W_132+ a *Ghimj(index,1220);
W_133 = W_133+ a *Ghimj(index,1221);
W_134 = W_134+ a *Ghimj(index,1222);
W_135 = W_135+ a *Ghimj(index,1223);
W_136 = W_136+ a *Ghimj(index,1224);
W_137 = W_137+ a *Ghimj(index,1225);
W_138 = W_138+ a *Ghimj(index,1226);
a = - W_131/ Ghimj(index,1242);
W_131 = -a;
W_132 = W_132+ a *Ghimj(index,1243);
W_133 = W_133+ a *Ghimj(index,1244);
W_134 = W_134+ a *Ghimj(index,1245);
W_135 = W_135+ a *Ghimj(index,1246);
W_136 = W_136+ a *Ghimj(index,1247);
W_137 = W_137+ a *Ghimj(index,1248);
W_138 = W_138+ a *Ghimj(index,1249);
Ghimj(index,1250) = W_105;
Ghimj(index,1251) = W_114;
Ghimj(index,1252) = W_118;
Ghimj(index,1253) = W_123;
Ghimj(index,1254) = W_124;
Ghimj(index,1255) = W_125;
Ghimj(index,1256) = W_126;
Ghimj(index,1257) = W_127;
Ghimj(index,1258) = W_128;
Ghimj(index,1259) = W_129;
Ghimj(index,1260) = W_130;
Ghimj(index,1261) = W_131;
Ghimj(index,1262) = W_132;
Ghimj(index,1263) = W_133;
Ghimj(index,1264) = W_134;
Ghimj(index,1265) = W_135;
Ghimj(index,1266) = W_136;
Ghimj(index,1267) = W_137;
Ghimj(index,1268) = W_138;
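// Next row: appears to be column 133 (diagonal at Ghimj(index,1297)),
// same load / eliminate / store pattern.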
W_59 = Ghimj(index,1269);
W_60 = Ghimj(index,1270);
W_70 = Ghimj(index,1271);
W_76 = Ghimj(index,1272);
W_84 = Ghimj(index,1273);
W_87 = Ghimj(index,1274);
W_92 = Ghimj(index,1275);
W_93 = Ghimj(index,1276);
W_94 = Ghimj(index,1277);
W_99 = Ghimj(index,1278);
W_102 = Ghimj(index,1279);
W_109 = Ghimj(index,1280);
W_111 = Ghimj(index,1281);
W_113 = Ghimj(index,1282);
W_115 = Ghimj(index,1283);
W_117 = Ghimj(index,1284);
W_120 = Ghimj(index,1285);
W_121 = Ghimj(index,1286);
W_122 = Ghimj(index,1287);
W_124 = Ghimj(index,1288);
W_125 = Ghimj(index,1289);
W_126 = Ghimj(index,1290);
W_127 = Ghimj(index,1291);
W_128 = Ghimj(index,1292);
W_129 = Ghimj(index,1293);
W_130 = Ghimj(index,1294);
W_131 = Ghimj(index,1295);
W_132 = Ghimj(index,1296);
W_133 = Ghimj(index,1297);
W_134 = Ghimj(index,1298);
W_135 = Ghimj(index,1299);
W_136 = Ghimj(index,1300);
W_137 = Ghimj(index,1301);
W_138 = Ghimj(index,1302);
a = - W_59/ Ghimj(index,306);
W_59 = -a;
W_133 = W_133+ a *Ghimj(index,307);
W_135 = W_135+ a *Ghimj(index,308);
a = - W_60/ Ghimj(index,310);
W_60 = -a;
W_92 = W_92+ a *Ghimj(index,311);
W_120 = W_120+ a *Ghimj(index,312);
W_133 = W_133+ a *Ghimj(index,313);
W_135 = W_135+ a *Ghimj(index,314);
a = - W_70/ Ghimj(index,352);
W_70 = -a;
W_84 = W_84+ a *Ghimj(index,353);
W_87 = W_87+ a *Ghimj(index,354);
W_126 = W_126+ a *Ghimj(index,355);
a = - W_76/ Ghimj(index,377);
W_76 = -a;
W_87 = W_87+ a *Ghimj(index,378);
W_126 = W_126+ a *Ghimj(index,379);
W_133 = W_133+ a *Ghimj(index,380);
W_135 = W_135+ a *Ghimj(index,381);
a = - W_84/ Ghimj(index,421);
W_84 = -a;
W_92 = W_92+ a *Ghimj(index,422);
W_124 = W_124+ a *Ghimj(index,423);
W_135 = W_135+ a *Ghimj(index,424);
W_137 = W_137+ a *Ghimj(index,425);
a = - W_87/ Ghimj(index,444);
W_87 = -a;
W_92 = W_92+ a *Ghimj(index,445);
W_124 = W_124+ a *Ghimj(index,446);
W_126 = W_126+ a *Ghimj(index,447);
W_135 = W_135+ a *Ghimj(index,448);
W_137 = W_137+ a *Ghimj(index,449);
a = - W_92/ Ghimj(index,489);
W_92 = -a;
W_124 = W_124+ a *Ghimj(index,490);
W_126 = W_126+ a *Ghimj(index,491);
W_133 = W_133+ a *Ghimj(index,492);
W_135 = W_135+ a *Ghimj(index,493);
W_137 = W_137+ a *Ghimj(index,494);
a = - W_93/ Ghimj(index,497);
W_93 = -a;
W_125 = W_125+ a *Ghimj(index,498);
W_126 = W_126+ a *Ghimj(index,499);
W_133 = W_133+ a *Ghimj(index,500);
W_137 = W_137+ a *Ghimj(index,501);
a = - W_94/ Ghimj(index,505);
W_94 = -a;
W_125 = W_125+ a *Ghimj(index,506);
W_126 = W_126+ a *Ghimj(index,507);
W_133 = W_133+ a *Ghimj(index,508);
W_137 = W_137+ a *Ghimj(index,509);
a = - W_99/ Ghimj(index,565);
W_99 = -a;
W_102 = W_102+ a *Ghimj(index,566);
W_111 = W_111+ a *Ghimj(index,567);
W_125 = W_125+ a *Ghimj(index,568);
W_126 = W_126+ a *Ghimj(index,569);
W_133 = W_133+ a *Ghimj(index,570);
W_137 = W_137+ a *Ghimj(index,571);
a = - W_102/ Ghimj(index,600);
W_102 = -a;
W_125 = W_125+ a *Ghimj(index,601);
W_126 = W_126+ a *Ghimj(index,602);
W_133 = W_133+ a *Ghimj(index,603);
W_137 = W_137+ a *Ghimj(index,604);
a = - W_109/ Ghimj(index,648);
W_109 = -a;
W_124 = W_124+ a *Ghimj(index,649);
W_125 = W_125+ a *Ghimj(index,650);
W_126 = W_126+ a *Ghimj(index,651);
W_133 = W_133+ a *Ghimj(index,652);
W_136 = W_136+ a *Ghimj(index,653);
W_137 = W_137+ a *Ghimj(index,654);
a = - W_111/ Ghimj(index,669);
W_111 = -a;
W_115 = W_115+ a *Ghimj(index,670);
W_124 = W_124+ a *Ghimj(index,671);
W_125 = W_125+ a *Ghimj(index,672);
W_126 = W_126+ a *Ghimj(index,673);
W_133 = W_133+ a *Ghimj(index,674);
W_136 = W_136+ a *Ghimj(index,675);
W_137 = W_137+ a *Ghimj(index,676);
a = - W_113/ Ghimj(index,689);
W_113 = -a;
W_124 = W_124+ a *Ghimj(index,690);
W_125 = W_125+ a *Ghimj(index,691);
W_126 = W_126+ a *Ghimj(index,692);
W_133 = W_133+ a *Ghimj(index,693);
W_135 = W_135+ a *Ghimj(index,694);
W_136 = W_136+ a *Ghimj(index,695);
W_137 = W_137+ a *Ghimj(index,696);
a = - W_115/ Ghimj(index,706);
W_115 = -a;
W_124 = W_124+ a *Ghimj(index,707);
W_126 = W_126+ a *Ghimj(index,708);
W_127 = W_127+ a *Ghimj(index,709);
W_129 = W_129+ a *Ghimj(index,710);
W_133 = W_133+ a *Ghimj(index,711);
W_136 = W_136+ a *Ghimj(index,712);
W_137 = W_137+ a *Ghimj(index,713);
a = - W_117/ Ghimj(index,731);
W_117 = -a;
W_121 = W_121+ a *Ghimj(index,732);
W_124 = W_124+ a *Ghimj(index,733);
W_125 = W_125+ a *Ghimj(index,734);
W_126 = W_126+ a *Ghimj(index,735);
W_127 = W_127+ a *Ghimj(index,736);
W_129 = W_129+ a *Ghimj(index,737);
W_133 = W_133+ a *Ghimj(index,738);
W_136 = W_136+ a *Ghimj(index,739);
W_137 = W_137+ a *Ghimj(index,740);
a = - W_120/ Ghimj(index,787);
W_120 = -a;
W_122 = W_122+ a *Ghimj(index,788);
W_124 = W_124+ a *Ghimj(index,789);
W_126 = W_126+ a *Ghimj(index,790);
W_127 = W_127+ a *Ghimj(index,791);
W_128 = W_128+ a *Ghimj(index,792);
W_130 = W_130+ a *Ghimj(index,793);
W_133 = W_133+ a *Ghimj(index,794);
W_135 = W_135+ a *Ghimj(index,795);
W_136 = W_136+ a *Ghimj(index,796);
W_137 = W_137+ a *Ghimj(index,797);
a = - W_121/ Ghimj(index,821);
W_121 = -a;
W_124 = W_124+ a *Ghimj(index,822);
W_125 = W_125+ a *Ghimj(index,823);
W_126 = W_126+ a *Ghimj(index,824);
W_127 = W_127+ a *Ghimj(index,825);
W_129 = W_129+ a *Ghimj(index,826);
W_133 = W_133+ a *Ghimj(index,827);
W_135 = W_135+ a *Ghimj(index,828);
W_136 = W_136+ a *Ghimj(index,829);
W_137 = W_137+ a *Ghimj(index,830);
a = - W_122/ Ghimj(index,847);
W_122 = -a;
W_124 = W_124+ a *Ghimj(index,848);
W_125 = W_125+ a *Ghimj(index,849);
W_126 = W_126+ a *Ghimj(index,850);
W_127 = W_127+ a *Ghimj(index,851);
W_128 = W_128+ a *Ghimj(index,852);
W_129 = W_129+ a *Ghimj(index,853);
W_130 = W_130+ a *Ghimj(index,854);
W_131 = W_131+ a *Ghimj(index,855);
W_133 = W_133+ a *Ghimj(index,856);
W_135 = W_135+ a *Ghimj(index,857);
W_136 = W_136+ a *Ghimj(index,858);
W_137 = W_137+ a *Ghimj(index,859);
W_138 = W_138+ a *Ghimj(index,860);
a = - W_124/ Ghimj(index,896);
W_124 = -a;
W_125 = W_125+ a *Ghimj(index,897);
W_126 = W_126+ a *Ghimj(index,898);
W_127 = W_127+ a *Ghimj(index,899);
W_128 = W_128+ a *Ghimj(index,900);
W_129 = W_129+ a *Ghimj(index,901);
W_130 = W_130+ a *Ghimj(index,902);
W_131 = W_131+ a *Ghimj(index,903);
W_132 = W_132+ a *Ghimj(index,904);
W_133 = W_133+ a *Ghimj(index,905);
W_135 = W_135+ a *Ghimj(index,906);
W_136 = W_136+ a *Ghimj(index,907);
W_137 = W_137+ a *Ghimj(index,908);
W_138 = W_138+ a *Ghimj(index,909);
a = - W_125/ Ghimj(index,934);
W_125 = -a;
W_126 = W_126+ a *Ghimj(index,935);
W_127 = W_127+ a *Ghimj(index,936);
W_128 = W_128+ a *Ghimj(index,937);
W_129 = W_129+ a *Ghimj(index,938);
W_130 = W_130+ a *Ghimj(index,939);
W_131 = W_131+ a *Ghimj(index,940);
W_132 = W_132+ a *Ghimj(index,941);
W_133 = W_133+ a *Ghimj(index,942);
W_134 = W_134+ a *Ghimj(index,943);
W_135 = W_135+ a *Ghimj(index,944);
W_136 = W_136+ a *Ghimj(index,945);
W_137 = W_137+ a *Ghimj(index,946);
W_138 = W_138+ a *Ghimj(index,947);
a = - W_126/ Ghimj(index,1023);
W_126 = -a;
W_127 = W_127+ a *Ghimj(index,1024);
W_128 = W_128+ a *Ghimj(index,1025);
W_129 = W_129+ a *Ghimj(index,1026);
W_130 = W_130+ a *Ghimj(index,1027);
W_131 = W_131+ a *Ghimj(index,1028);
W_132 = W_132+ a *Ghimj(index,1029);
W_133 = W_133+ a *Ghimj(index,1030);
W_134 = W_134+ a *Ghimj(index,1031);
W_135 = W_135+ a *Ghimj(index,1032);
W_136 = W_136+ a *Ghimj(index,1033);
W_137 = W_137+ a *Ghimj(index,1034);
W_138 = W_138+ a *Ghimj(index,1035);
a = - W_127/ Ghimj(index,1071);
W_127 = -a;
W_128 = W_128+ a *Ghimj(index,1072);
W_129 = W_129+ a *Ghimj(index,1073);
W_130 = W_130+ a *Ghimj(index,1074);
W_131 = W_131+ a *Ghimj(index,1075);
W_132 = W_132+ a *Ghimj(index,1076);
W_133 = W_133+ a *Ghimj(index,1077);
W_134 = W_134+ a *Ghimj(index,1078);
W_135 = W_135+ a *Ghimj(index,1079);
W_136 = W_136+ a *Ghimj(index,1080);
W_137 = W_137+ a *Ghimj(index,1081);
W_138 = W_138+ a *Ghimj(index,1082);
a = - W_128/ Ghimj(index,1138);
W_128 = -a;
W_129 = W_129+ a *Ghimj(index,1139);
W_130 = W_130+ a *Ghimj(index,1140);
W_131 = W_131+ a *Ghimj(index,1141);
W_132 = W_132+ a *Ghimj(index,1142);
W_133 = W_133+ a *Ghimj(index,1143);
W_134 = W_134+ a *Ghimj(index,1144);
W_135 = W_135+ a *Ghimj(index,1145);
W_136 = W_136+ a *Ghimj(index,1146);
W_137 = W_137+ a *Ghimj(index,1147);
W_138 = W_138+ a *Ghimj(index,1148);
a = - W_129/ Ghimj(index,1176);
W_129 = -a;
W_130 = W_130+ a *Ghimj(index,1177);
W_131 = W_131+ a *Ghimj(index,1178);
W_132 = W_132+ a *Ghimj(index,1179);
W_133 = W_133+ a *Ghimj(index,1180);
W_134 = W_134+ a *Ghimj(index,1181);
W_135 = W_135+ a *Ghimj(index,1182);
W_136 = W_136+ a *Ghimj(index,1183);
W_137 = W_137+ a *Ghimj(index,1184);
W_138 = W_138+ a *Ghimj(index,1185);
a = - W_130/ Ghimj(index,1218);
W_130 = -a;
W_131 = W_131+ a *Ghimj(index,1219);
W_132 = W_132+ a *Ghimj(index,1220);
W_133 = W_133+ a *Ghimj(index,1221);
W_134 = W_134+ a *Ghimj(index,1222);
W_135 = W_135+ a *Ghimj(index,1223);
W_136 = W_136+ a *Ghimj(index,1224);
W_137 = W_137+ a *Ghimj(index,1225);
W_138 = W_138+ a *Ghimj(index,1226);
a = - W_131/ Ghimj(index,1242);
W_131 = -a;
W_132 = W_132+ a *Ghimj(index,1243);
W_133 = W_133+ a *Ghimj(index,1244);
W_134 = W_134+ a *Ghimj(index,1245);
W_135 = W_135+ a *Ghimj(index,1246);
W_136 = W_136+ a *Ghimj(index,1247);
W_137 = W_137+ a *Ghimj(index,1248);
W_138 = W_138+ a *Ghimj(index,1249);
a = - W_132/ Ghimj(index,1262);
W_132 = -a;
W_133 = W_133+ a *Ghimj(index,1263);
W_134 = W_134+ a *Ghimj(index,1264);
W_135 = W_135+ a *Ghimj(index,1265);
W_136 = W_136+ a *Ghimj(index,1266);
W_137 = W_137+ a *Ghimj(index,1267);
W_138 = W_138+ a *Ghimj(index,1268);
Ghimj(index,1269) = W_59;
Ghimj(index,1270) = W_60;
Ghimj(index,1271) = W_70;
Ghimj(index,1272) = W_76;
Ghimj(index,1273) = W_84;
Ghimj(index,1274) = W_87;
Ghimj(index,1275) = W_92;
Ghimj(index,1276) = W_93;
Ghimj(index,1277) = W_94;
Ghimj(index,1278) = W_99;
Ghimj(index,1279) = W_102;
Ghimj(index,1280) = W_109;
Ghimj(index,1281) = W_111;
Ghimj(index,1282) = W_113;
Ghimj(index,1283) = W_115;
Ghimj(index,1284) = W_117;
Ghimj(index,1285) = W_120;
Ghimj(index,1286) = W_121;
Ghimj(index,1287) = W_122;
Ghimj(index,1288) = W_124;
Ghimj(index,1289) = W_125;
Ghimj(index,1290) = W_126;
Ghimj(index,1291) = W_127;
Ghimj(index,1292) = W_128;
Ghimj(index,1293) = W_129;
Ghimj(index,1294) = W_130;
Ghimj(index,1295) = W_131;
Ghimj(index,1296) = W_132;
Ghimj(index,1297) = W_133;
Ghimj(index,1298) = W_134;
Ghimj(index,1299) = W_135;
Ghimj(index,1300) = W_136;
Ghimj(index,1301) = W_137;
Ghimj(index,1302) = W_138;
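// Next row: entries Ghimj(index,1303) onward, presumably column 134;
// eliminations below run through the row-133 pivot at Ghimj(index,1297).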
W_39 = Ghimj(index,1303);
W_41 = Ghimj(index,1304);
W_42 = Ghimj(index,1305);
W_43 = Ghimj(index,1306);
W_51 = Ghimj(index,1307);
W_75 = Ghimj(index,1308);
W_112 = Ghimj(index,1309);
W_116 = Ghimj(index,1310);
W_120 = Ghimj(index,1311);
W_122 = Ghimj(index,1312);
W_123 = Ghimj(index,1313);
W_124 = Ghimj(index,1314);
W_125 = Ghimj(index,1315);
W_126 = Ghimj(index,1316);
W_127 = Ghimj(index,1317);
W_128 = Ghimj(index,1318);
W_129 = Ghimj(index,1319);
W_130 = Ghimj(index,1320);
W_131 = Ghimj(index,1321);
W_132 = Ghimj(index,1322);
W_133 = Ghimj(index,1323);
W_134 = Ghimj(index,1324);
W_135 = Ghimj(index,1325);
W_136 = Ghimj(index,1326);
W_137 = Ghimj(index,1327);
W_138 = Ghimj(index,1328);
a = - W_39/ Ghimj(index,258);
W_39 = -a;
W_134 = W_134+ a *Ghimj(index,259);
a = - W_41/ Ghimj(index,262);
W_41 = -a;
W_120 = W_120+ a *Ghimj(index,263);
a = - W_42/ Ghimj(index,264);
W_42 = -a;
W_120 = W_120+ a *Ghimj(index,265);
a = - W_43/ Ghimj(index,266);
W_43 = -a;
W_120 = W_120+ a *Ghimj(index,267);
a = - W_51/ Ghimj(index,285);
W_51 = -a;
W_132 = W_132+ a *Ghimj(index,286);
W_134 = W_134+ a *Ghimj(index,287);
a = - W_75/ Ghimj(index,374);
W_75 = -a;
W_120 = W_120+ a *Ghimj(index,375);
W_126 = W_126+ a *Ghimj(index,376);
a = - W_112/ Ghimj(index,677);
W_112 = -a;
W_116 = W_116+ a *Ghimj(index,678);
W_123 = W_123+ a *Ghimj(index,679);
W_126 = W_126+ a *Ghimj(index,680);
W_128 = W_128+ a *Ghimj(index,681);
W_134 = W_134+ a *Ghimj(index,682);
W_137 = W_137+ a *Ghimj(index,683);
W_138 = W_138+ a *Ghimj(index,684);
a = - W_116/ Ghimj(index,714);
W_116 = -a;
W_123 = W_123+ a *Ghimj(index,715);
W_127 = W_127+ a *Ghimj(index,716);
W_128 = W_128+ a *Ghimj(index,717);
W_131 = W_131+ a *Ghimj(index,718);
W_134 = W_134+ a *Ghimj(index,719);
W_135 = W_135+ a *Ghimj(index,720);
W_138 = W_138+ a *Ghimj(index,721);
a = - W_120/ Ghimj(index,787);
W_120 = -a;
W_122 = W_122+ a *Ghimj(index,788);
W_124 = W_124+ a *Ghimj(index,789);
W_126 = W_126+ a *Ghimj(index,790);
W_127 = W_127+ a *Ghimj(index,791);
W_128 = W_128+ a *Ghimj(index,792);
W_130 = W_130+ a *Ghimj(index,793);
W_133 = W_133+ a *Ghimj(index,794);
W_135 = W_135+ a *Ghimj(index,795);
W_136 = W_136+ a *Ghimj(index,796);
W_137 = W_137+ a *Ghimj(index,797);
a = - W_122/ Ghimj(index,847);
W_122 = -a;
W_124 = W_124+ a *Ghimj(index,848);
W_125 = W_125+ a *Ghimj(index,849);
W_126 = W_126+ a *Ghimj(index,850);
W_127 = W_127+ a *Ghimj(index,851);
W_128 = W_128+ a *Ghimj(index,852);
W_129 = W_129+ a *Ghimj(index,853);
W_130 = W_130+ a *Ghimj(index,854);
W_131 = W_131+ a *Ghimj(index,855);
W_133 = W_133+ a *Ghimj(index,856);
W_135 = W_135+ a *Ghimj(index,857);
W_136 = W_136+ a *Ghimj(index,858);
W_137 = W_137+ a *Ghimj(index,859);
W_138 = W_138+ a *Ghimj(index,860);
a = - W_123/ Ghimj(index,869);
W_123 = -a;
W_124 = W_124+ a *Ghimj(index,870);
W_125 = W_125+ a *Ghimj(index,871);
W_126 = W_126+ a *Ghimj(index,872);
W_127 = W_127+ a *Ghimj(index,873);
W_128 = W_128+ a *Ghimj(index,874);
W_129 = W_129+ a *Ghimj(index,875);
W_130 = W_130+ a *Ghimj(index,876);
W_131 = W_131+ a *Ghimj(index,877);
W_132 = W_132+ a *Ghimj(index,878);
W_133 = W_133+ a *Ghimj(index,879);
W_134 = W_134+ a *Ghimj(index,880);
W_135 = W_135+ a *Ghimj(index,881);
W_136 = W_136+ a *Ghimj(index,882);
W_137 = W_137+ a *Ghimj(index,883);
W_138 = W_138+ a *Ghimj(index,884);
a = - W_124/ Ghimj(index,896);
W_124 = -a;
W_125 = W_125+ a *Ghimj(index,897);
W_126 = W_126+ a *Ghimj(index,898);
W_127 = W_127+ a *Ghimj(index,899);
W_128 = W_128+ a *Ghimj(index,900);
W_129 = W_129+ a *Ghimj(index,901);
W_130 = W_130+ a *Ghimj(index,902);
W_131 = W_131+ a *Ghimj(index,903);
W_132 = W_132+ a *Ghimj(index,904);
W_133 = W_133+ a *Ghimj(index,905);
W_135 = W_135+ a *Ghimj(index,906);
W_136 = W_136+ a *Ghimj(index,907);
W_137 = W_137+ a *Ghimj(index,908);
W_138 = W_138+ a *Ghimj(index,909);
a = - W_125/ Ghimj(index,934);
W_125 = -a;
W_126 = W_126+ a *Ghimj(index,935);
W_127 = W_127+ a *Ghimj(index,936);
W_128 = W_128+ a *Ghimj(index,937);
W_129 = W_129+ a *Ghimj(index,938);
W_130 = W_130+ a *Ghimj(index,939);
W_131 = W_131+ a *Ghimj(index,940);
W_132 = W_132+ a *Ghimj(index,941);
W_133 = W_133+ a *Ghimj(index,942);
W_134 = W_134+ a *Ghimj(index,943);
W_135 = W_135+ a *Ghimj(index,944);
W_136 = W_136+ a *Ghimj(index,945);
W_137 = W_137+ a *Ghimj(index,946);
W_138 = W_138+ a *Ghimj(index,947);
a = - W_126/ Ghimj(index,1023);
W_126 = -a;
W_127 = W_127+ a *Ghimj(index,1024);
W_128 = W_128+ a *Ghimj(index,1025);
W_129 = W_129+ a *Ghimj(index,1026);
W_130 = W_130+ a *Ghimj(index,1027);
W_131 = W_131+ a *Ghimj(index,1028);
W_132 = W_132+ a *Ghimj(index,1029);
W_133 = W_133+ a *Ghimj(index,1030);
W_134 = W_134+ a *Ghimj(index,1031);
W_135 = W_135+ a *Ghimj(index,1032);
W_136 = W_136+ a *Ghimj(index,1033);
W_137 = W_137+ a *Ghimj(index,1034);
W_138 = W_138+ a *Ghimj(index,1035);
a = - W_127/ Ghimj(index,1071);
W_127 = -a;
W_128 = W_128+ a *Ghimj(index,1072);
W_129 = W_129+ a *Ghimj(index,1073);
W_130 = W_130+ a *Ghimj(index,1074);
W_131 = W_131+ a *Ghimj(index,1075);
W_132 = W_132+ a *Ghimj(index,1076);
W_133 = W_133+ a *Ghimj(index,1077);
W_134 = W_134+ a *Ghimj(index,1078);
W_135 = W_135+ a *Ghimj(index,1079);
W_136 = W_136+ a *Ghimj(index,1080);
W_137 = W_137+ a *Ghimj(index,1081);
W_138 = W_138+ a *Ghimj(index,1082);
a = - W_128/ Ghimj(index,1138);
W_128 = -a;
W_129 = W_129+ a *Ghimj(index,1139);
W_130 = W_130+ a *Ghimj(index,1140);
W_131 = W_131+ a *Ghimj(index,1141);
W_132 = W_132+ a *Ghimj(index,1142);
W_133 = W_133+ a *Ghimj(index,1143);
W_134 = W_134+ a *Ghimj(index,1144);
W_135 = W_135+ a *Ghimj(index,1145);
W_136 = W_136+ a *Ghimj(index,1146);
W_137 = W_137+ a *Ghimj(index,1147);
W_138 = W_138+ a *Ghimj(index,1148);
a = - W_129/ Ghimj(index,1176);
W_129 = -a;
W_130 = W_130+ a *Ghimj(index,1177);
W_131 = W_131+ a *Ghimj(index,1178);
W_132 = W_132+ a *Ghimj(index,1179);
W_133 = W_133+ a *Ghimj(index,1180);
W_134 = W_134+ a *Ghimj(index,1181);
W_135 = W_135+ a *Ghimj(index,1182);
W_136 = W_136+ a *Ghimj(index,1183);
W_137 = W_137+ a *Ghimj(index,1184);
W_138 = W_138+ a *Ghimj(index,1185);
a = - W_130/ Ghimj(index,1218);
W_130 = -a;
W_131 = W_131+ a *Ghimj(index,1219);
W_132 = W_132+ a *Ghimj(index,1220);
W_133 = W_133+ a *Ghimj(index,1221);
W_134 = W_134+ a *Ghimj(index,1222);
W_135 = W_135+ a *Ghimj(index,1223);
W_136 = W_136+ a *Ghimj(index,1224);
W_137 = W_137+ a *Ghimj(index,1225);
W_138 = W_138+ a *Ghimj(index,1226);
a = - W_131/ Ghimj(index,1242);
W_131 = -a;
W_132 = W_132+ a *Ghimj(index,1243);
W_133 = W_133+ a *Ghimj(index,1244);
W_134 = W_134+ a *Ghimj(index,1245);
W_135 = W_135+ a *Ghimj(index,1246);
W_136 = W_136+ a *Ghimj(index,1247);
W_137 = W_137+ a *Ghimj(index,1248);
W_138 = W_138+ a *Ghimj(index,1249);
a = - W_132/ Ghimj(index,1262);
W_132 = -a;
W_133 = W_133+ a *Ghimj(index,1263);
W_134 = W_134+ a *Ghimj(index,1264);
W_135 = W_135+ a *Ghimj(index,1265);
W_136 = W_136+ a *Ghimj(index,1266);
W_137 = W_137+ a *Ghimj(index,1267);
W_138 = W_138+ a *Ghimj(index,1268);
a = - W_133/ Ghimj(index,1297);
W_133 = -a;
W_134 = W_134+ a *Ghimj(index,1298);
W_135 = W_135+ a *Ghimj(index,1299);
W_136 = W_136+ a *Ghimj(index,1300);
W_137 = W_137+ a *Ghimj(index,1301);
W_138 = W_138+ a *Ghimj(index,1302);
Ghimj(index,1303) = W_39;
Ghimj(index,1304) = W_41;
Ghimj(index,1305) = W_42;
Ghimj(index,1306) = W_43;
Ghimj(index,1307) = W_51;
Ghimj(index,1308) = W_75;
Ghimj(index,1309) = W_112;
Ghimj(index,1310) = W_116;
Ghimj(index,1311) = W_120;
Ghimj(index,1312) = W_122;
Ghimj(index,1313) = W_123;
Ghimj(index,1314) = W_124;
Ghimj(index,1315) = W_125;
Ghimj(index,1316) = W_126;
Ghimj(index,1317) = W_127;
Ghimj(index,1318) = W_128;
Ghimj(index,1319) = W_129;
Ghimj(index,1320) = W_130;
Ghimj(index,1321) = W_131;
Ghimj(index,1322) = W_132;
Ghimj(index,1323) = W_133;
Ghimj(index,1324) = W_134;
Ghimj(index,1325) = W_135;
Ghimj(index,1326) = W_136;
Ghimj(index,1327) = W_137;
Ghimj(index,1328) = W_138;
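 /* Note on the generated structure (added comment): each block below factorizes one further
    row of the in-place sparse LU decomposition. The row's nonzero entries are loaded from
    Ghimj into the scratch variables W_*, eliminated against the previously factored pivot
    rows (a = -W_k / pivot_k, then the pivot row scaled by a is added), and the updated
    values are written back into the same Ghimj slots. */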
W_0 = Ghimj(index,1329);
W_50 = Ghimj(index,1330);
W_58 = Ghimj(index,1331);
W_59 = Ghimj(index,1332);
W_62 = Ghimj(index,1333);
W_64 = Ghimj(index,1334);
W_73 = Ghimj(index,1335);
W_76 = Ghimj(index,1336);
W_77 = Ghimj(index,1337);
W_83 = Ghimj(index,1338);
W_87 = Ghimj(index,1339);
W_91 = Ghimj(index,1340);
W_92 = Ghimj(index,1341);
W_93 = Ghimj(index,1342);
W_94 = Ghimj(index,1343);
W_99 = Ghimj(index,1344);
W_101 = Ghimj(index,1345);
W_102 = Ghimj(index,1346);
W_105 = Ghimj(index,1347);
W_106 = Ghimj(index,1348);
W_109 = Ghimj(index,1349);
W_111 = Ghimj(index,1350);
W_113 = Ghimj(index,1351);
W_114 = Ghimj(index,1352);
W_115 = Ghimj(index,1353);
W_116 = Ghimj(index,1354);
W_117 = Ghimj(index,1355);
W_119 = Ghimj(index,1356);
W_121 = Ghimj(index,1357);
W_123 = Ghimj(index,1358);
W_124 = Ghimj(index,1359);
W_125 = Ghimj(index,1360);
W_126 = Ghimj(index,1361);
W_127 = Ghimj(index,1362);
W_128 = Ghimj(index,1363);
W_129 = Ghimj(index,1364);
W_130 = Ghimj(index,1365);
W_131 = Ghimj(index,1366);
W_132 = Ghimj(index,1367);
W_133 = Ghimj(index,1368);
W_134 = Ghimj(index,1369);
W_135 = Ghimj(index,1370);
W_136 = Ghimj(index,1371);
W_137 = Ghimj(index,1372);
W_138 = Ghimj(index,1373);
a = - W_0/ Ghimj(index,0);
W_0 = -a;
a = - W_50/ Ghimj(index,282);
W_50 = -a;
W_83 = W_83+ a *Ghimj(index,283);
W_138 = W_138+ a *Ghimj(index,284);
a = - W_58/ Ghimj(index,303);
W_58 = -a;
W_91 = W_91+ a *Ghimj(index,304);
W_126 = W_126+ a *Ghimj(index,305);
a = - W_59/ Ghimj(index,306);
W_59 = -a;
W_133 = W_133+ a *Ghimj(index,307);
W_135 = W_135+ a *Ghimj(index,308);
a = - W_62/ Ghimj(index,319);
W_62 = -a;
W_93 = W_93+ a *Ghimj(index,320);
W_126 = W_126+ a *Ghimj(index,321);
W_133 = W_133+ a *Ghimj(index,322);
a = - W_64/ Ghimj(index,327);
W_64 = -a;
W_113 = W_113+ a *Ghimj(index,328);
W_126 = W_126+ a *Ghimj(index,329);
W_135 = W_135+ a *Ghimj(index,330);
a = - W_73/ Ghimj(index,364);
W_73 = -a;
W_126 = W_126+ a *Ghimj(index,365);
W_135 = W_135+ a *Ghimj(index,366);
W_137 = W_137+ a *Ghimj(index,367);
a = - W_76/ Ghimj(index,377);
W_76 = -a;
W_87 = W_87+ a *Ghimj(index,378);
W_126 = W_126+ a *Ghimj(index,379);
W_133 = W_133+ a *Ghimj(index,380);
W_135 = W_135+ a *Ghimj(index,381);
a = - W_77/ Ghimj(index,382);
W_77 = -a;
W_121 = W_121+ a *Ghimj(index,383);
W_126 = W_126+ a *Ghimj(index,384);
W_135 = W_135+ a *Ghimj(index,385);
a = - W_83/ Ghimj(index,416);
W_83 = -a;
W_128 = W_128+ a *Ghimj(index,417);
W_135 = W_135+ a *Ghimj(index,418);
W_136 = W_136+ a *Ghimj(index,419);
W_138 = W_138+ a *Ghimj(index,420);
a = - W_87/ Ghimj(index,444);
W_87 = -a;
W_92 = W_92+ a *Ghimj(index,445);
W_124 = W_124+ a *Ghimj(index,446);
W_126 = W_126+ a *Ghimj(index,447);
W_135 = W_135+ a *Ghimj(index,448);
W_137 = W_137+ a *Ghimj(index,449);
a = - W_91/ Ghimj(index,481);
W_91 = -a;
W_106 = W_106+ a *Ghimj(index,482);
W_109 = W_109+ a *Ghimj(index,483);
W_126 = W_126+ a *Ghimj(index,484);
W_133 = W_133+ a *Ghimj(index,485);
W_136 = W_136+ a *Ghimj(index,486);
a = - W_92/ Ghimj(index,489);
W_92 = -a;
W_124 = W_124+ a *Ghimj(index,490);
W_126 = W_126+ a *Ghimj(index,491);
W_133 = W_133+ a *Ghimj(index,492);
W_135 = W_135+ a *Ghimj(index,493);
W_137 = W_137+ a *Ghimj(index,494);
a = - W_93/ Ghimj(index,497);
W_93 = -a;
W_125 = W_125+ a *Ghimj(index,498);
W_126 = W_126+ a *Ghimj(index,499);
W_133 = W_133+ a *Ghimj(index,500);
W_137 = W_137+ a *Ghimj(index,501);
a = - W_94/ Ghimj(index,505);
W_94 = -a;
W_125 = W_125+ a *Ghimj(index,506);
W_126 = W_126+ a *Ghimj(index,507);
W_133 = W_133+ a *Ghimj(index,508);
W_137 = W_137+ a *Ghimj(index,509);
a = - W_99/ Ghimj(index,565);
W_99 = -a;
W_102 = W_102+ a *Ghimj(index,566);
W_111 = W_111+ a *Ghimj(index,567);
W_125 = W_125+ a *Ghimj(index,568);
W_126 = W_126+ a *Ghimj(index,569);
W_133 = W_133+ a *Ghimj(index,570);
W_137 = W_137+ a *Ghimj(index,571);
a = - W_101/ Ghimj(index,586);
W_101 = -a;
W_105 = W_105+ a *Ghimj(index,587);
W_114 = W_114+ a *Ghimj(index,588);
W_116 = W_116+ a *Ghimj(index,589);
W_119 = W_119+ a *Ghimj(index,590);
W_123 = W_123+ a *Ghimj(index,591);
W_126 = W_126+ a *Ghimj(index,592);
W_128 = W_128+ a *Ghimj(index,593);
W_130 = W_130+ a *Ghimj(index,594);
W_135 = W_135+ a *Ghimj(index,595);
W_136 = W_136+ a *Ghimj(index,596);
W_138 = W_138+ a *Ghimj(index,597);
a = - W_102/ Ghimj(index,600);
W_102 = -a;
W_125 = W_125+ a *Ghimj(index,601);
W_126 = W_126+ a *Ghimj(index,602);
W_133 = W_133+ a *Ghimj(index,603);
W_137 = W_137+ a *Ghimj(index,604);
a = - W_105/ Ghimj(index,616);
W_105 = -a;
W_128 = W_128+ a *Ghimj(index,617);
W_129 = W_129+ a *Ghimj(index,618);
W_132 = W_132+ a *Ghimj(index,619);
W_135 = W_135+ a *Ghimj(index,620);
W_138 = W_138+ a *Ghimj(index,621);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
a = - W_109/ Ghimj(index,648);
W_109 = -a;
W_124 = W_124+ a *Ghimj(index,649);
W_125 = W_125+ a *Ghimj(index,650);
W_126 = W_126+ a *Ghimj(index,651);
W_133 = W_133+ a *Ghimj(index,652);
W_136 = W_136+ a *Ghimj(index,653);
W_137 = W_137+ a *Ghimj(index,654);
a = - W_111/ Ghimj(index,669);
W_111 = -a;
W_115 = W_115+ a *Ghimj(index,670);
W_124 = W_124+ a *Ghimj(index,671);
W_125 = W_125+ a *Ghimj(index,672);
W_126 = W_126+ a *Ghimj(index,673);
W_133 = W_133+ a *Ghimj(index,674);
W_136 = W_136+ a *Ghimj(index,675);
W_137 = W_137+ a *Ghimj(index,676);
a = - W_113/ Ghimj(index,689);
W_113 = -a;
W_124 = W_124+ a *Ghimj(index,690);
W_125 = W_125+ a *Ghimj(index,691);
W_126 = W_126+ a *Ghimj(index,692);
W_133 = W_133+ a *Ghimj(index,693);
W_135 = W_135+ a *Ghimj(index,694);
W_136 = W_136+ a *Ghimj(index,695);
W_137 = W_137+ a *Ghimj(index,696);
a = - W_114/ Ghimj(index,697);
W_114 = -a;
W_126 = W_126+ a *Ghimj(index,698);
W_127 = W_127+ a *Ghimj(index,699);
W_129 = W_129+ a *Ghimj(index,700);
W_132 = W_132+ a *Ghimj(index,701);
W_136 = W_136+ a *Ghimj(index,702);
a = - W_115/ Ghimj(index,706);
W_115 = -a;
W_124 = W_124+ a *Ghimj(index,707);
W_126 = W_126+ a *Ghimj(index,708);
W_127 = W_127+ a *Ghimj(index,709);
W_129 = W_129+ a *Ghimj(index,710);
W_133 = W_133+ a *Ghimj(index,711);
W_136 = W_136+ a *Ghimj(index,712);
W_137 = W_137+ a *Ghimj(index,713);
a = - W_116/ Ghimj(index,714);
W_116 = -a;
W_123 = W_123+ a *Ghimj(index,715);
W_127 = W_127+ a *Ghimj(index,716);
W_128 = W_128+ a *Ghimj(index,717);
W_131 = W_131+ a *Ghimj(index,718);
W_134 = W_134+ a *Ghimj(index,719);
W_135 = W_135+ a *Ghimj(index,720);
W_138 = W_138+ a *Ghimj(index,721);
a = - W_117/ Ghimj(index,731);
W_117 = -a;
W_121 = W_121+ a *Ghimj(index,732);
W_124 = W_124+ a *Ghimj(index,733);
W_125 = W_125+ a *Ghimj(index,734);
W_126 = W_126+ a *Ghimj(index,735);
W_127 = W_127+ a *Ghimj(index,736);
W_129 = W_129+ a *Ghimj(index,737);
W_133 = W_133+ a *Ghimj(index,738);
W_136 = W_136+ a *Ghimj(index,739);
W_137 = W_137+ a *Ghimj(index,740);
a = - W_119/ Ghimj(index,767);
W_119 = -a;
W_121 = W_121+ a *Ghimj(index,768);
W_124 = W_124+ a *Ghimj(index,769);
W_125 = W_125+ a *Ghimj(index,770);
W_126 = W_126+ a *Ghimj(index,771);
W_127 = W_127+ a *Ghimj(index,772);
W_129 = W_129+ a *Ghimj(index,773);
W_133 = W_133+ a *Ghimj(index,774);
W_136 = W_136+ a *Ghimj(index,775);
W_137 = W_137+ a *Ghimj(index,776);
a = - W_121/ Ghimj(index,821);
W_121 = -a;
W_124 = W_124+ a *Ghimj(index,822);
W_125 = W_125+ a *Ghimj(index,823);
W_126 = W_126+ a *Ghimj(index,824);
W_127 = W_127+ a *Ghimj(index,825);
W_129 = W_129+ a *Ghimj(index,826);
W_133 = W_133+ a *Ghimj(index,827);
W_135 = W_135+ a *Ghimj(index,828);
W_136 = W_136+ a *Ghimj(index,829);
W_137 = W_137+ a *Ghimj(index,830);
a = - W_123/ Ghimj(index,869);
W_123 = -a;
W_124 = W_124+ a *Ghimj(index,870);
W_125 = W_125+ a *Ghimj(index,871);
W_126 = W_126+ a *Ghimj(index,872);
W_127 = W_127+ a *Ghimj(index,873);
W_128 = W_128+ a *Ghimj(index,874);
W_129 = W_129+ a *Ghimj(index,875);
W_130 = W_130+ a *Ghimj(index,876);
W_131 = W_131+ a *Ghimj(index,877);
W_132 = W_132+ a *Ghimj(index,878);
W_133 = W_133+ a *Ghimj(index,879);
W_134 = W_134+ a *Ghimj(index,880);
W_135 = W_135+ a *Ghimj(index,881);
W_136 = W_136+ a *Ghimj(index,882);
W_137 = W_137+ a *Ghimj(index,883);
W_138 = W_138+ a *Ghimj(index,884);
a = - W_124/ Ghimj(index,896);
W_124 = -a;
W_125 = W_125+ a *Ghimj(index,897);
W_126 = W_126+ a *Ghimj(index,898);
W_127 = W_127+ a *Ghimj(index,899);
W_128 = W_128+ a *Ghimj(index,900);
W_129 = W_129+ a *Ghimj(index,901);
W_130 = W_130+ a *Ghimj(index,902);
W_131 = W_131+ a *Ghimj(index,903);
W_132 = W_132+ a *Ghimj(index,904);
W_133 = W_133+ a *Ghimj(index,905);
W_135 = W_135+ a *Ghimj(index,906);
W_136 = W_136+ a *Ghimj(index,907);
W_137 = W_137+ a *Ghimj(index,908);
W_138 = W_138+ a *Ghimj(index,909);
a = - W_125/ Ghimj(index,934);
W_125 = -a;
W_126 = W_126+ a *Ghimj(index,935);
W_127 = W_127+ a *Ghimj(index,936);
W_128 = W_128+ a *Ghimj(index,937);
W_129 = W_129+ a *Ghimj(index,938);
W_130 = W_130+ a *Ghimj(index,939);
W_131 = W_131+ a *Ghimj(index,940);
W_132 = W_132+ a *Ghimj(index,941);
W_133 = W_133+ a *Ghimj(index,942);
W_134 = W_134+ a *Ghimj(index,943);
W_135 = W_135+ a *Ghimj(index,944);
W_136 = W_136+ a *Ghimj(index,945);
W_137 = W_137+ a *Ghimj(index,946);
W_138 = W_138+ a *Ghimj(index,947);
a = - W_126/ Ghimj(index,1023);
W_126 = -a;
W_127 = W_127+ a *Ghimj(index,1024);
W_128 = W_128+ a *Ghimj(index,1025);
W_129 = W_129+ a *Ghimj(index,1026);
W_130 = W_130+ a *Ghimj(index,1027);
W_131 = W_131+ a *Ghimj(index,1028);
W_132 = W_132+ a *Ghimj(index,1029);
W_133 = W_133+ a *Ghimj(index,1030);
W_134 = W_134+ a *Ghimj(index,1031);
W_135 = W_135+ a *Ghimj(index,1032);
W_136 = W_136+ a *Ghimj(index,1033);
W_137 = W_137+ a *Ghimj(index,1034);
W_138 = W_138+ a *Ghimj(index,1035);
a = - W_127/ Ghimj(index,1071);
W_127 = -a;
W_128 = W_128+ a *Ghimj(index,1072);
W_129 = W_129+ a *Ghimj(index,1073);
W_130 = W_130+ a *Ghimj(index,1074);
W_131 = W_131+ a *Ghimj(index,1075);
W_132 = W_132+ a *Ghimj(index,1076);
W_133 = W_133+ a *Ghimj(index,1077);
W_134 = W_134+ a *Ghimj(index,1078);
W_135 = W_135+ a *Ghimj(index,1079);
W_136 = W_136+ a *Ghimj(index,1080);
W_137 = W_137+ a *Ghimj(index,1081);
W_138 = W_138+ a *Ghimj(index,1082);
a = - W_128/ Ghimj(index,1138);
W_128 = -a;
W_129 = W_129+ a *Ghimj(index,1139);
W_130 = W_130+ a *Ghimj(index,1140);
W_131 = W_131+ a *Ghimj(index,1141);
W_132 = W_132+ a *Ghimj(index,1142);
W_133 = W_133+ a *Ghimj(index,1143);
W_134 = W_134+ a *Ghimj(index,1144);
W_135 = W_135+ a *Ghimj(index,1145);
W_136 = W_136+ a *Ghimj(index,1146);
W_137 = W_137+ a *Ghimj(index,1147);
W_138 = W_138+ a *Ghimj(index,1148);
a = - W_129/ Ghimj(index,1176);
W_129 = -a;
W_130 = W_130+ a *Ghimj(index,1177);
W_131 = W_131+ a *Ghimj(index,1178);
W_132 = W_132+ a *Ghimj(index,1179);
W_133 = W_133+ a *Ghimj(index,1180);
W_134 = W_134+ a *Ghimj(index,1181);
W_135 = W_135+ a *Ghimj(index,1182);
W_136 = W_136+ a *Ghimj(index,1183);
W_137 = W_137+ a *Ghimj(index,1184);
W_138 = W_138+ a *Ghimj(index,1185);
a = - W_130/ Ghimj(index,1218);
W_130 = -a;
W_131 = W_131+ a *Ghimj(index,1219);
W_132 = W_132+ a *Ghimj(index,1220);
W_133 = W_133+ a *Ghimj(index,1221);
W_134 = W_134+ a *Ghimj(index,1222);
W_135 = W_135+ a *Ghimj(index,1223);
W_136 = W_136+ a *Ghimj(index,1224);
W_137 = W_137+ a *Ghimj(index,1225);
W_138 = W_138+ a *Ghimj(index,1226);
a = - W_131/ Ghimj(index,1242);
W_131 = -a;
W_132 = W_132+ a *Ghimj(index,1243);
W_133 = W_133+ a *Ghimj(index,1244);
W_134 = W_134+ a *Ghimj(index,1245);
W_135 = W_135+ a *Ghimj(index,1246);
W_136 = W_136+ a *Ghimj(index,1247);
W_137 = W_137+ a *Ghimj(index,1248);
W_138 = W_138+ a *Ghimj(index,1249);
a = - W_132/ Ghimj(index,1262);
W_132 = -a;
W_133 = W_133+ a *Ghimj(index,1263);
W_134 = W_134+ a *Ghimj(index,1264);
W_135 = W_135+ a *Ghimj(index,1265);
W_136 = W_136+ a *Ghimj(index,1266);
W_137 = W_137+ a *Ghimj(index,1267);
W_138 = W_138+ a *Ghimj(index,1268);
a = - W_133/ Ghimj(index,1297);
W_133 = -a;
W_134 = W_134+ a *Ghimj(index,1298);
W_135 = W_135+ a *Ghimj(index,1299);
W_136 = W_136+ a *Ghimj(index,1300);
W_137 = W_137+ a *Ghimj(index,1301);
W_138 = W_138+ a *Ghimj(index,1302);
a = - W_134/ Ghimj(index,1324);
W_134 = -a;
W_135 = W_135+ a *Ghimj(index,1325);
W_136 = W_136+ a *Ghimj(index,1326);
W_137 = W_137+ a *Ghimj(index,1327);
W_138 = W_138+ a *Ghimj(index,1328);
Ghimj(index,1329) = W_0;
Ghimj(index,1330) = W_50;
Ghimj(index,1331) = W_58;
Ghimj(index,1332) = W_59;
Ghimj(index,1333) = W_62;
Ghimj(index,1334) = W_64;
Ghimj(index,1335) = W_73;
Ghimj(index,1336) = W_76;
Ghimj(index,1337) = W_77;
Ghimj(index,1338) = W_83;
Ghimj(index,1339) = W_87;
Ghimj(index,1340) = W_91;
Ghimj(index,1341) = W_92;
Ghimj(index,1342) = W_93;
Ghimj(index,1343) = W_94;
Ghimj(index,1344) = W_99;
Ghimj(index,1345) = W_101;
Ghimj(index,1346) = W_102;
Ghimj(index,1347) = W_105;
Ghimj(index,1348) = W_106;
Ghimj(index,1349) = W_109;
Ghimj(index,1350) = W_111;
Ghimj(index,1351) = W_113;
Ghimj(index,1352) = W_114;
Ghimj(index,1353) = W_115;
Ghimj(index,1354) = W_116;
Ghimj(index,1355) = W_117;
Ghimj(index,1356) = W_119;
Ghimj(index,1357) = W_121;
Ghimj(index,1358) = W_123;
Ghimj(index,1359) = W_124;
Ghimj(index,1360) = W_125;
Ghimj(index,1361) = W_126;
Ghimj(index,1362) = W_127;
Ghimj(index,1363) = W_128;
Ghimj(index,1364) = W_129;
Ghimj(index,1365) = W_130;
Ghimj(index,1366) = W_131;
Ghimj(index,1367) = W_132;
Ghimj(index,1368) = W_133;
Ghimj(index,1369) = W_134;
Ghimj(index,1370) = W_135;
Ghimj(index,1371) = W_136;
Ghimj(index,1372) = W_137;
Ghimj(index,1373) = W_138;
W_73 = Ghimj(index,1374);
W_83 = Ghimj(index,1375);
W_101 = Ghimj(index,1376);
W_105 = Ghimj(index,1377);
W_106 = Ghimj(index,1378);
W_107 = Ghimj(index,1379);
W_114 = Ghimj(index,1380);
W_116 = Ghimj(index,1381);
W_117 = Ghimj(index,1382);
W_119 = Ghimj(index,1383);
W_121 = Ghimj(index,1384);
W_123 = Ghimj(index,1385);
W_124 = Ghimj(index,1386);
W_125 = Ghimj(index,1387);
W_126 = Ghimj(index,1388);
W_127 = Ghimj(index,1389);
W_128 = Ghimj(index,1390);
W_129 = Ghimj(index,1391);
W_130 = Ghimj(index,1392);
W_131 = Ghimj(index,1393);
W_132 = Ghimj(index,1394);
W_133 = Ghimj(index,1395);
W_134 = Ghimj(index,1396);
W_135 = Ghimj(index,1397);
W_136 = Ghimj(index,1398);
W_137 = Ghimj(index,1399);
W_138 = Ghimj(index,1400);
a = - W_73/ Ghimj(index,364);
W_73 = -a;
W_126 = W_126+ a *Ghimj(index,365);
W_135 = W_135+ a *Ghimj(index,366);
W_137 = W_137+ a *Ghimj(index,367);
a = - W_83/ Ghimj(index,416);
W_83 = -a;
W_128 = W_128+ a *Ghimj(index,417);
W_135 = W_135+ a *Ghimj(index,418);
W_136 = W_136+ a *Ghimj(index,419);
W_138 = W_138+ a *Ghimj(index,420);
a = - W_101/ Ghimj(index,586);
W_101 = -a;
W_105 = W_105+ a *Ghimj(index,587);
W_114 = W_114+ a *Ghimj(index,588);
W_116 = W_116+ a *Ghimj(index,589);
W_119 = W_119+ a *Ghimj(index,590);
W_123 = W_123+ a *Ghimj(index,591);
W_126 = W_126+ a *Ghimj(index,592);
W_128 = W_128+ a *Ghimj(index,593);
W_130 = W_130+ a *Ghimj(index,594);
W_135 = W_135+ a *Ghimj(index,595);
W_136 = W_136+ a *Ghimj(index,596);
W_138 = W_138+ a *Ghimj(index,597);
a = - W_105/ Ghimj(index,616);
W_105 = -a;
W_128 = W_128+ a *Ghimj(index,617);
W_129 = W_129+ a *Ghimj(index,618);
W_132 = W_132+ a *Ghimj(index,619);
W_135 = W_135+ a *Ghimj(index,620);
W_138 = W_138+ a *Ghimj(index,621);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
a = - W_114/ Ghimj(index,697);
W_114 = -a;
W_126 = W_126+ a *Ghimj(index,698);
W_127 = W_127+ a *Ghimj(index,699);
W_129 = W_129+ a *Ghimj(index,700);
W_132 = W_132+ a *Ghimj(index,701);
W_136 = W_136+ a *Ghimj(index,702);
a = - W_116/ Ghimj(index,714);
W_116 = -a;
W_123 = W_123+ a *Ghimj(index,715);
W_127 = W_127+ a *Ghimj(index,716);
W_128 = W_128+ a *Ghimj(index,717);
W_131 = W_131+ a *Ghimj(index,718);
W_134 = W_134+ a *Ghimj(index,719);
W_135 = W_135+ a *Ghimj(index,720);
W_138 = W_138+ a *Ghimj(index,721);
a = - W_117/ Ghimj(index,731);
W_117 = -a;
W_121 = W_121+ a *Ghimj(index,732);
W_124 = W_124+ a *Ghimj(index,733);
W_125 = W_125+ a *Ghimj(index,734);
W_126 = W_126+ a *Ghimj(index,735);
W_127 = W_127+ a *Ghimj(index,736);
W_129 = W_129+ a *Ghimj(index,737);
W_133 = W_133+ a *Ghimj(index,738);
W_136 = W_136+ a *Ghimj(index,739);
W_137 = W_137+ a *Ghimj(index,740);
a = - W_119/ Ghimj(index,767);
W_119 = -a;
W_121 = W_121+ a *Ghimj(index,768);
W_124 = W_124+ a *Ghimj(index,769);
W_125 = W_125+ a *Ghimj(index,770);
W_126 = W_126+ a *Ghimj(index,771);
W_127 = W_127+ a *Ghimj(index,772);
W_129 = W_129+ a *Ghimj(index,773);
W_133 = W_133+ a *Ghimj(index,774);
W_136 = W_136+ a *Ghimj(index,775);
W_137 = W_137+ a *Ghimj(index,776);
a = - W_121/ Ghimj(index,821);
W_121 = -a;
W_124 = W_124+ a *Ghimj(index,822);
W_125 = W_125+ a *Ghimj(index,823);
W_126 = W_126+ a *Ghimj(index,824);
W_127 = W_127+ a *Ghimj(index,825);
W_129 = W_129+ a *Ghimj(index,826);
W_133 = W_133+ a *Ghimj(index,827);
W_135 = W_135+ a *Ghimj(index,828);
W_136 = W_136+ a *Ghimj(index,829);
W_137 = W_137+ a *Ghimj(index,830);
a = - W_123/ Ghimj(index,869);
W_123 = -a;
W_124 = W_124+ a *Ghimj(index,870);
W_125 = W_125+ a *Ghimj(index,871);
W_126 = W_126+ a *Ghimj(index,872);
W_127 = W_127+ a *Ghimj(index,873);
W_128 = W_128+ a *Ghimj(index,874);
W_129 = W_129+ a *Ghimj(index,875);
W_130 = W_130+ a *Ghimj(index,876);
W_131 = W_131+ a *Ghimj(index,877);
W_132 = W_132+ a *Ghimj(index,878);
W_133 = W_133+ a *Ghimj(index,879);
W_134 = W_134+ a *Ghimj(index,880);
W_135 = W_135+ a *Ghimj(index,881);
W_136 = W_136+ a *Ghimj(index,882);
W_137 = W_137+ a *Ghimj(index,883);
W_138 = W_138+ a *Ghimj(index,884);
a = - W_124/ Ghimj(index,896);
W_124 = -a;
W_125 = W_125+ a *Ghimj(index,897);
W_126 = W_126+ a *Ghimj(index,898);
W_127 = W_127+ a *Ghimj(index,899);
W_128 = W_128+ a *Ghimj(index,900);
W_129 = W_129+ a *Ghimj(index,901);
W_130 = W_130+ a *Ghimj(index,902);
W_131 = W_131+ a *Ghimj(index,903);
W_132 = W_132+ a *Ghimj(index,904);
W_133 = W_133+ a *Ghimj(index,905);
W_135 = W_135+ a *Ghimj(index,906);
W_136 = W_136+ a *Ghimj(index,907);
W_137 = W_137+ a *Ghimj(index,908);
W_138 = W_138+ a *Ghimj(index,909);
a = - W_125/ Ghimj(index,934);
W_125 = -a;
W_126 = W_126+ a *Ghimj(index,935);
W_127 = W_127+ a *Ghimj(index,936);
W_128 = W_128+ a *Ghimj(index,937);
W_129 = W_129+ a *Ghimj(index,938);
W_130 = W_130+ a *Ghimj(index,939);
W_131 = W_131+ a *Ghimj(index,940);
W_132 = W_132+ a *Ghimj(index,941);
W_133 = W_133+ a *Ghimj(index,942);
W_134 = W_134+ a *Ghimj(index,943);
W_135 = W_135+ a *Ghimj(index,944);
W_136 = W_136+ a *Ghimj(index,945);
W_137 = W_137+ a *Ghimj(index,946);
W_138 = W_138+ a *Ghimj(index,947);
a = - W_126/ Ghimj(index,1023);
W_126 = -a;
W_127 = W_127+ a *Ghimj(index,1024);
W_128 = W_128+ a *Ghimj(index,1025);
W_129 = W_129+ a *Ghimj(index,1026);
W_130 = W_130+ a *Ghimj(index,1027);
W_131 = W_131+ a *Ghimj(index,1028);
W_132 = W_132+ a *Ghimj(index,1029);
W_133 = W_133+ a *Ghimj(index,1030);
W_134 = W_134+ a *Ghimj(index,1031);
W_135 = W_135+ a *Ghimj(index,1032);
W_136 = W_136+ a *Ghimj(index,1033);
W_137 = W_137+ a *Ghimj(index,1034);
W_138 = W_138+ a *Ghimj(index,1035);
a = - W_127/ Ghimj(index,1071);
W_127 = -a;
W_128 = W_128+ a *Ghimj(index,1072);
W_129 = W_129+ a *Ghimj(index,1073);
W_130 = W_130+ a *Ghimj(index,1074);
W_131 = W_131+ a *Ghimj(index,1075);
W_132 = W_132+ a *Ghimj(index,1076);
W_133 = W_133+ a *Ghimj(index,1077);
W_134 = W_134+ a *Ghimj(index,1078);
W_135 = W_135+ a *Ghimj(index,1079);
W_136 = W_136+ a *Ghimj(index,1080);
W_137 = W_137+ a *Ghimj(index,1081);
W_138 = W_138+ a *Ghimj(index,1082);
a = - W_128/ Ghimj(index,1138);
W_128 = -a;
W_129 = W_129+ a *Ghimj(index,1139);
W_130 = W_130+ a *Ghimj(index,1140);
W_131 = W_131+ a *Ghimj(index,1141);
W_132 = W_132+ a *Ghimj(index,1142);
W_133 = W_133+ a *Ghimj(index,1143);
W_134 = W_134+ a *Ghimj(index,1144);
W_135 = W_135+ a *Ghimj(index,1145);
W_136 = W_136+ a *Ghimj(index,1146);
W_137 = W_137+ a *Ghimj(index,1147);
W_138 = W_138+ a *Ghimj(index,1148);
a = - W_129/ Ghimj(index,1176);
W_129 = -a;
W_130 = W_130+ a *Ghimj(index,1177);
W_131 = W_131+ a *Ghimj(index,1178);
W_132 = W_132+ a *Ghimj(index,1179);
W_133 = W_133+ a *Ghimj(index,1180);
W_134 = W_134+ a *Ghimj(index,1181);
W_135 = W_135+ a *Ghimj(index,1182);
W_136 = W_136+ a *Ghimj(index,1183);
W_137 = W_137+ a *Ghimj(index,1184);
W_138 = W_138+ a *Ghimj(index,1185);
a = - W_130/ Ghimj(index,1218);
W_130 = -a;
W_131 = W_131+ a *Ghimj(index,1219);
W_132 = W_132+ a *Ghimj(index,1220);
W_133 = W_133+ a *Ghimj(index,1221);
W_134 = W_134+ a *Ghimj(index,1222);
W_135 = W_135+ a *Ghimj(index,1223);
W_136 = W_136+ a *Ghimj(index,1224);
W_137 = W_137+ a *Ghimj(index,1225);
W_138 = W_138+ a *Ghimj(index,1226);
a = - W_131/ Ghimj(index,1242);
W_131 = -a;
W_132 = W_132+ a *Ghimj(index,1243);
W_133 = W_133+ a *Ghimj(index,1244);
W_134 = W_134+ a *Ghimj(index,1245);
W_135 = W_135+ a *Ghimj(index,1246);
W_136 = W_136+ a *Ghimj(index,1247);
W_137 = W_137+ a *Ghimj(index,1248);
W_138 = W_138+ a *Ghimj(index,1249);
a = - W_132/ Ghimj(index,1262);
W_132 = -a;
W_133 = W_133+ a *Ghimj(index,1263);
W_134 = W_134+ a *Ghimj(index,1264);
W_135 = W_135+ a *Ghimj(index,1265);
W_136 = W_136+ a *Ghimj(index,1266);
W_137 = W_137+ a *Ghimj(index,1267);
W_138 = W_138+ a *Ghimj(index,1268);
a = - W_133/ Ghimj(index,1297);
W_133 = -a;
W_134 = W_134+ a *Ghimj(index,1298);
W_135 = W_135+ a *Ghimj(index,1299);
W_136 = W_136+ a *Ghimj(index,1300);
W_137 = W_137+ a *Ghimj(index,1301);
W_138 = W_138+ a *Ghimj(index,1302);
a = - W_134/ Ghimj(index,1324);
W_134 = -a;
W_135 = W_135+ a *Ghimj(index,1325);
W_136 = W_136+ a *Ghimj(index,1326);
W_137 = W_137+ a *Ghimj(index,1327);
W_138 = W_138+ a *Ghimj(index,1328);
a = - W_135/ Ghimj(index,1370);
W_135 = -a;
W_136 = W_136+ a *Ghimj(index,1371);
W_137 = W_137+ a *Ghimj(index,1372);
W_138 = W_138+ a *Ghimj(index,1373);
Ghimj(index,1374) = W_73;
Ghimj(index,1375) = W_83;
Ghimj(index,1376) = W_101;
Ghimj(index,1377) = W_105;
Ghimj(index,1378) = W_106;
Ghimj(index,1379) = W_107;
Ghimj(index,1380) = W_114;
Ghimj(index,1381) = W_116;
Ghimj(index,1382) = W_117;
Ghimj(index,1383) = W_119;
Ghimj(index,1384) = W_121;
Ghimj(index,1385) = W_123;
Ghimj(index,1386) = W_124;
Ghimj(index,1387) = W_125;
Ghimj(index,1388) = W_126;
Ghimj(index,1389) = W_127;
Ghimj(index,1390) = W_128;
Ghimj(index,1391) = W_129;
Ghimj(index,1392) = W_130;
Ghimj(index,1393) = W_131;
Ghimj(index,1394) = W_132;
Ghimj(index,1395) = W_133;
Ghimj(index,1396) = W_134;
Ghimj(index,1397) = W_135;
Ghimj(index,1398) = W_136;
Ghimj(index,1399) = W_137;
Ghimj(index,1400) = W_138;
W_46 = Ghimj(index,1401);
W_56 = Ghimj(index,1402);
W_62 = Ghimj(index,1403);
W_65 = Ghimj(index,1404);
W_66 = Ghimj(index,1405);
W_69 = Ghimj(index,1406);
W_71 = Ghimj(index,1407);
W_73 = Ghimj(index,1408);
W_78 = Ghimj(index,1409);
W_79 = Ghimj(index,1410);
W_81 = Ghimj(index,1411);
W_82 = Ghimj(index,1412);
W_87 = Ghimj(index,1413);
W_88 = Ghimj(index,1414);
W_89 = Ghimj(index,1415);
W_91 = Ghimj(index,1416);
W_92 = Ghimj(index,1417);
W_93 = Ghimj(index,1418);
W_94 = Ghimj(index,1419);
W_96 = Ghimj(index,1420);
W_99 = Ghimj(index,1421);
W_102 = Ghimj(index,1422);
W_103 = Ghimj(index,1423);
W_104 = Ghimj(index,1424);
W_106 = Ghimj(index,1425);
W_107 = Ghimj(index,1426);
W_108 = Ghimj(index,1427);
W_109 = Ghimj(index,1428);
W_110 = Ghimj(index,1429);
W_111 = Ghimj(index,1430);
W_113 = Ghimj(index,1431);
W_114 = Ghimj(index,1432);
W_115 = Ghimj(index,1433);
W_117 = Ghimj(index,1434);
W_119 = Ghimj(index,1435);
W_121 = Ghimj(index,1436);
W_122 = Ghimj(index,1437);
W_124 = Ghimj(index,1438);
W_125 = Ghimj(index,1439);
W_126 = Ghimj(index,1440);
W_127 = Ghimj(index,1441);
W_128 = Ghimj(index,1442);
W_129 = Ghimj(index,1443);
W_130 = Ghimj(index,1444);
W_131 = Ghimj(index,1445);
W_132 = Ghimj(index,1446);
W_133 = Ghimj(index,1447);
W_134 = Ghimj(index,1448);
W_135 = Ghimj(index,1449);
W_136 = Ghimj(index,1450);
W_137 = Ghimj(index,1451);
W_138 = Ghimj(index,1452);
a = - W_46/ Ghimj(index,272);
W_46 = -a;
W_81 = W_81+ a *Ghimj(index,273);
W_124 = W_124+ a *Ghimj(index,274);
W_137 = W_137+ a *Ghimj(index,275);
a = - W_56/ Ghimj(index,296);
W_56 = -a;
W_65 = W_65+ a *Ghimj(index,297);
W_81 = W_81+ a *Ghimj(index,298);
W_126 = W_126+ a *Ghimj(index,299);
a = - W_62/ Ghimj(index,319);
W_62 = -a;
W_93 = W_93+ a *Ghimj(index,320);
W_126 = W_126+ a *Ghimj(index,321);
W_133 = W_133+ a *Ghimj(index,322);
a = - W_65/ Ghimj(index,331);
W_65 = -a;
W_114 = W_114+ a *Ghimj(index,332);
W_126 = W_126+ a *Ghimj(index,333);
W_132 = W_132+ a *Ghimj(index,334);
a = - W_66/ Ghimj(index,335);
W_66 = -a;
W_109 = W_109+ a *Ghimj(index,336);
W_126 = W_126+ a *Ghimj(index,337);
W_137 = W_137+ a *Ghimj(index,338);
a = - W_69/ Ghimj(index,347);
W_69 = -a;
W_93 = W_93+ a *Ghimj(index,348);
W_126 = W_126+ a *Ghimj(index,349);
W_137 = W_137+ a *Ghimj(index,350);
a = - W_71/ Ghimj(index,356);
W_71 = -a;
W_117 = W_117+ a *Ghimj(index,357);
W_126 = W_126+ a *Ghimj(index,358);
W_137 = W_137+ a *Ghimj(index,359);
a = - W_73/ Ghimj(index,364);
W_73 = -a;
W_126 = W_126+ a *Ghimj(index,365);
W_135 = W_135+ a *Ghimj(index,366);
W_137 = W_137+ a *Ghimj(index,367);
a = - W_78/ Ghimj(index,386);
W_78 = -a;
W_103 = W_103+ a *Ghimj(index,387);
W_106 = W_106+ a *Ghimj(index,388);
W_107 = W_107+ a *Ghimj(index,389);
W_110 = W_110+ a *Ghimj(index,390);
W_124 = W_124+ a *Ghimj(index,391);
W_126 = W_126+ a *Ghimj(index,392);
a = - W_79/ Ghimj(index,393);
W_79 = -a;
W_102 = W_102+ a *Ghimj(index,394);
W_126 = W_126+ a *Ghimj(index,395);
W_137 = W_137+ a *Ghimj(index,396);
a = - W_81/ Ghimj(index,405);
W_81 = -a;
W_114 = W_114+ a *Ghimj(index,406);
W_124 = W_124+ a *Ghimj(index,407);
W_126 = W_126+ a *Ghimj(index,408);
W_127 = W_127+ a *Ghimj(index,409);
W_129 = W_129+ a *Ghimj(index,410);
W_136 = W_136+ a *Ghimj(index,411);
a = - W_82/ Ghimj(index,412);
W_82 = -a;
W_113 = W_113+ a *Ghimj(index,413);
W_126 = W_126+ a *Ghimj(index,414);
W_137 = W_137+ a *Ghimj(index,415);
a = - W_87/ Ghimj(index,444);
W_87 = -a;
W_92 = W_92+ a *Ghimj(index,445);
W_124 = W_124+ a *Ghimj(index,446);
W_126 = W_126+ a *Ghimj(index,447);
W_135 = W_135+ a *Ghimj(index,448);
W_137 = W_137+ a *Ghimj(index,449);
a = - W_88/ Ghimj(index,450);
W_88 = -a;
W_103 = W_103+ a *Ghimj(index,451);
W_106 = W_106+ a *Ghimj(index,452);
W_124 = W_124+ a *Ghimj(index,453);
W_126 = W_126+ a *Ghimj(index,454);
W_127 = W_127+ a *Ghimj(index,455);
W_137 = W_137+ a *Ghimj(index,456);
a = - W_89/ Ghimj(index,457);
W_89 = -a;
W_93 = W_93+ a *Ghimj(index,458);
W_94 = W_94+ a *Ghimj(index,459);
W_102 = W_102+ a *Ghimj(index,460);
W_107 = W_107+ a *Ghimj(index,461);
W_109 = W_109+ a *Ghimj(index,462);
W_113 = W_113+ a *Ghimj(index,463);
W_117 = W_117+ a *Ghimj(index,464);
W_124 = W_124+ a *Ghimj(index,465);
W_125 = W_125+ a *Ghimj(index,466);
W_126 = W_126+ a *Ghimj(index,467);
a = - W_91/ Ghimj(index,481);
W_91 = -a;
W_106 = W_106+ a *Ghimj(index,482);
W_109 = W_109+ a *Ghimj(index,483);
W_126 = W_126+ a *Ghimj(index,484);
W_133 = W_133+ a *Ghimj(index,485);
W_136 = W_136+ a *Ghimj(index,486);
a = - W_92/ Ghimj(index,489);
W_92 = -a;
W_124 = W_124+ a *Ghimj(index,490);
W_126 = W_126+ a *Ghimj(index,491);
W_133 = W_133+ a *Ghimj(index,492);
W_135 = W_135+ a *Ghimj(index,493);
W_137 = W_137+ a *Ghimj(index,494);
a = - W_93/ Ghimj(index,497);
W_93 = -a;
W_125 = W_125+ a *Ghimj(index,498);
W_126 = W_126+ a *Ghimj(index,499);
W_133 = W_133+ a *Ghimj(index,500);
W_137 = W_137+ a *Ghimj(index,501);
a = - W_94/ Ghimj(index,505);
W_94 = -a;
W_125 = W_125+ a *Ghimj(index,506);
W_126 = W_126+ a *Ghimj(index,507);
W_133 = W_133+ a *Ghimj(index,508);
W_137 = W_137+ a *Ghimj(index,509);
a = - W_96/ Ghimj(index,538);
W_96 = -a;
W_107 = W_107+ a *Ghimj(index,539);
W_108 = W_108+ a *Ghimj(index,540);
W_109 = W_109+ a *Ghimj(index,541);
W_110 = W_110+ a *Ghimj(index,542);
W_113 = W_113+ a *Ghimj(index,543);
W_124 = W_124+ a *Ghimj(index,544);
W_125 = W_125+ a *Ghimj(index,545);
W_126 = W_126+ a *Ghimj(index,546);
W_133 = W_133+ a *Ghimj(index,547);
W_137 = W_137+ a *Ghimj(index,548);
a = - W_99/ Ghimj(index,565);
W_99 = -a;
W_102 = W_102+ a *Ghimj(index,566);
W_111 = W_111+ a *Ghimj(index,567);
W_125 = W_125+ a *Ghimj(index,568);
W_126 = W_126+ a *Ghimj(index,569);
W_133 = W_133+ a *Ghimj(index,570);
W_137 = W_137+ a *Ghimj(index,571);
a = - W_102/ Ghimj(index,600);
W_102 = -a;
W_125 = W_125+ a *Ghimj(index,601);
W_126 = W_126+ a *Ghimj(index,602);
W_133 = W_133+ a *Ghimj(index,603);
W_137 = W_137+ a *Ghimj(index,604);
a = - W_103/ Ghimj(index,605);
W_103 = -a;
W_124 = W_124+ a *Ghimj(index,606);
W_126 = W_126+ a *Ghimj(index,607);
W_127 = W_127+ a *Ghimj(index,608);
W_129 = W_129+ a *Ghimj(index,609);
a = - W_104/ Ghimj(index,610);
W_104 = -a;
W_125 = W_125+ a *Ghimj(index,611);
W_126 = W_126+ a *Ghimj(index,612);
W_127 = W_127+ a *Ghimj(index,613);
W_129 = W_129+ a *Ghimj(index,614);
W_137 = W_137+ a *Ghimj(index,615);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
a = - W_108/ Ghimj(index,636);
W_108 = -a;
W_109 = W_109+ a *Ghimj(index,637);
W_113 = W_113+ a *Ghimj(index,638);
W_115 = W_115+ a *Ghimj(index,639);
W_124 = W_124+ a *Ghimj(index,640);
W_125 = W_125+ a *Ghimj(index,641);
W_126 = W_126+ a *Ghimj(index,642);
W_133 = W_133+ a *Ghimj(index,643);
W_135 = W_135+ a *Ghimj(index,644);
W_136 = W_136+ a *Ghimj(index,645);
W_137 = W_137+ a *Ghimj(index,646);
a = - W_109/ Ghimj(index,648);
W_109 = -a;
W_124 = W_124+ a *Ghimj(index,649);
W_125 = W_125+ a *Ghimj(index,650);
W_126 = W_126+ a *Ghimj(index,651);
W_133 = W_133+ a *Ghimj(index,652);
W_136 = W_136+ a *Ghimj(index,653);
W_137 = W_137+ a *Ghimj(index,654);
a = - W_110/ Ghimj(index,659);
W_110 = -a;
W_124 = W_124+ a *Ghimj(index,660);
W_125 = W_125+ a *Ghimj(index,661);
W_126 = W_126+ a *Ghimj(index,662);
W_133 = W_133+ a *Ghimj(index,663);
W_136 = W_136+ a *Ghimj(index,664);
W_137 = W_137+ a *Ghimj(index,665);
a = - W_111/ Ghimj(index,669);
W_111 = -a;
W_115 = W_115+ a *Ghimj(index,670);
W_124 = W_124+ a *Ghimj(index,671);
W_125 = W_125+ a *Ghimj(index,672);
W_126 = W_126+ a *Ghimj(index,673);
W_133 = W_133+ a *Ghimj(index,674);
W_136 = W_136+ a *Ghimj(index,675);
W_137 = W_137+ a *Ghimj(index,676);
a = - W_113/ Ghimj(index,689);
W_113 = -a;
W_124 = W_124+ a *Ghimj(index,690);
W_125 = W_125+ a *Ghimj(index,691);
W_126 = W_126+ a *Ghimj(index,692);
W_133 = W_133+ a *Ghimj(index,693);
W_135 = W_135+ a *Ghimj(index,694);
W_136 = W_136+ a *Ghimj(index,695);
W_137 = W_137+ a *Ghimj(index,696);
a = - W_114/ Ghimj(index,697);
W_114 = -a;
W_126 = W_126+ a *Ghimj(index,698);
W_127 = W_127+ a *Ghimj(index,699);
W_129 = W_129+ a *Ghimj(index,700);
W_132 = W_132+ a *Ghimj(index,701);
W_136 = W_136+ a *Ghimj(index,702);
a = - W_115/ Ghimj(index,706);
W_115 = -a;
W_124 = W_124+ a *Ghimj(index,707);
W_126 = W_126+ a *Ghimj(index,708);
W_127 = W_127+ a *Ghimj(index,709);
W_129 = W_129+ a *Ghimj(index,710);
W_133 = W_133+ a *Ghimj(index,711);
W_136 = W_136+ a *Ghimj(index,712);
W_137 = W_137+ a *Ghimj(index,713);
a = - W_117/ Ghimj(index,731);
W_117 = -a;
W_121 = W_121+ a *Ghimj(index,732);
W_124 = W_124+ a *Ghimj(index,733);
W_125 = W_125+ a *Ghimj(index,734);
W_126 = W_126+ a *Ghimj(index,735);
W_127 = W_127+ a *Ghimj(index,736);
W_129 = W_129+ a *Ghimj(index,737);
W_133 = W_133+ a *Ghimj(index,738);
W_136 = W_136+ a *Ghimj(index,739);
W_137 = W_137+ a *Ghimj(index,740);
a = - W_119/ Ghimj(index,767);
W_119 = -a;
W_121 = W_121+ a *Ghimj(index,768);
W_124 = W_124+ a *Ghimj(index,769);
W_125 = W_125+ a *Ghimj(index,770);
W_126 = W_126+ a *Ghimj(index,771);
W_127 = W_127+ a *Ghimj(index,772);
W_129 = W_129+ a *Ghimj(index,773);
W_133 = W_133+ a *Ghimj(index,774);
W_136 = W_136+ a *Ghimj(index,775);
W_137 = W_137+ a *Ghimj(index,776);
a = - W_121/ Ghimj(index,821);
W_121 = -a;
W_124 = W_124+ a *Ghimj(index,822);
W_125 = W_125+ a *Ghimj(index,823);
W_126 = W_126+ a *Ghimj(index,824);
W_127 = W_127+ a *Ghimj(index,825);
W_129 = W_129+ a *Ghimj(index,826);
W_133 = W_133+ a *Ghimj(index,827);
W_135 = W_135+ a *Ghimj(index,828);
W_136 = W_136+ a *Ghimj(index,829);
W_137 = W_137+ a *Ghimj(index,830);
a = - W_122/ Ghimj(index,847);
W_122 = -a;
W_124 = W_124+ a *Ghimj(index,848);
W_125 = W_125+ a *Ghimj(index,849);
W_126 = W_126+ a *Ghimj(index,850);
W_127 = W_127+ a *Ghimj(index,851);
W_128 = W_128+ a *Ghimj(index,852);
W_129 = W_129+ a *Ghimj(index,853);
W_130 = W_130+ a *Ghimj(index,854);
W_131 = W_131+ a *Ghimj(index,855);
W_133 = W_133+ a *Ghimj(index,856);
W_135 = W_135+ a *Ghimj(index,857);
W_136 = W_136+ a *Ghimj(index,858);
W_137 = W_137+ a *Ghimj(index,859);
W_138 = W_138+ a *Ghimj(index,860);
a = - W_124/ Ghimj(index,896);
W_124 = -a;
W_125 = W_125+ a *Ghimj(index,897);
W_126 = W_126+ a *Ghimj(index,898);
W_127 = W_127+ a *Ghimj(index,899);
W_128 = W_128+ a *Ghimj(index,900);
W_129 = W_129+ a *Ghimj(index,901);
W_130 = W_130+ a *Ghimj(index,902);
W_131 = W_131+ a *Ghimj(index,903);
W_132 = W_132+ a *Ghimj(index,904);
W_133 = W_133+ a *Ghimj(index,905);
W_135 = W_135+ a *Ghimj(index,906);
W_136 = W_136+ a *Ghimj(index,907);
W_137 = W_137+ a *Ghimj(index,908);
W_138 = W_138+ a *Ghimj(index,909);
a = - W_125/ Ghimj(index,934);
W_125 = -a;
W_126 = W_126+ a *Ghimj(index,935);
W_127 = W_127+ a *Ghimj(index,936);
W_128 = W_128+ a *Ghimj(index,937);
W_129 = W_129+ a *Ghimj(index,938);
W_130 = W_130+ a *Ghimj(index,939);
W_131 = W_131+ a *Ghimj(index,940);
W_132 = W_132+ a *Ghimj(index,941);
W_133 = W_133+ a *Ghimj(index,942);
W_134 = W_134+ a *Ghimj(index,943);
W_135 = W_135+ a *Ghimj(index,944);
W_136 = W_136+ a *Ghimj(index,945);
W_137 = W_137+ a *Ghimj(index,946);
W_138 = W_138+ a *Ghimj(index,947);
a = - W_126/ Ghimj(index,1023);
W_126 = -a;
W_127 = W_127+ a *Ghimj(index,1024);
W_128 = W_128+ a *Ghimj(index,1025);
W_129 = W_129+ a *Ghimj(index,1026);
W_130 = W_130+ a *Ghimj(index,1027);
W_131 = W_131+ a *Ghimj(index,1028);
W_132 = W_132+ a *Ghimj(index,1029);
W_133 = W_133+ a *Ghimj(index,1030);
W_134 = W_134+ a *Ghimj(index,1031);
W_135 = W_135+ a *Ghimj(index,1032);
W_136 = W_136+ a *Ghimj(index,1033);
W_137 = W_137+ a *Ghimj(index,1034);
W_138 = W_138+ a *Ghimj(index,1035);
a = - W_127/ Ghimj(index,1071);
W_127 = -a;
W_128 = W_128+ a *Ghimj(index,1072);
W_129 = W_129+ a *Ghimj(index,1073);
W_130 = W_130+ a *Ghimj(index,1074);
W_131 = W_131+ a *Ghimj(index,1075);
W_132 = W_132+ a *Ghimj(index,1076);
W_133 = W_133+ a *Ghimj(index,1077);
W_134 = W_134+ a *Ghimj(index,1078);
W_135 = W_135+ a *Ghimj(index,1079);
W_136 = W_136+ a *Ghimj(index,1080);
W_137 = W_137+ a *Ghimj(index,1081);
W_138 = W_138+ a *Ghimj(index,1082);
a = - W_128/ Ghimj(index,1138);
W_128 = -a;
W_129 = W_129+ a *Ghimj(index,1139);
W_130 = W_130+ a *Ghimj(index,1140);
W_131 = W_131+ a *Ghimj(index,1141);
W_132 = W_132+ a *Ghimj(index,1142);
W_133 = W_133+ a *Ghimj(index,1143);
W_134 = W_134+ a *Ghimj(index,1144);
W_135 = W_135+ a *Ghimj(index,1145);
W_136 = W_136+ a *Ghimj(index,1146);
W_137 = W_137+ a *Ghimj(index,1147);
W_138 = W_138+ a *Ghimj(index,1148);
a = - W_129/ Ghimj(index,1176);
W_129 = -a;
W_130 = W_130+ a *Ghimj(index,1177);
W_131 = W_131+ a *Ghimj(index,1178);
W_132 = W_132+ a *Ghimj(index,1179);
W_133 = W_133+ a *Ghimj(index,1180);
W_134 = W_134+ a *Ghimj(index,1181);
W_135 = W_135+ a *Ghimj(index,1182);
W_136 = W_136+ a *Ghimj(index,1183);
W_137 = W_137+ a *Ghimj(index,1184);
W_138 = W_138+ a *Ghimj(index,1185);
a = - W_130/ Ghimj(index,1218);
W_130 = -a;
W_131 = W_131+ a *Ghimj(index,1219);
W_132 = W_132+ a *Ghimj(index,1220);
W_133 = W_133+ a *Ghimj(index,1221);
W_134 = W_134+ a *Ghimj(index,1222);
W_135 = W_135+ a *Ghimj(index,1223);
W_136 = W_136+ a *Ghimj(index,1224);
W_137 = W_137+ a *Ghimj(index,1225);
W_138 = W_138+ a *Ghimj(index,1226);
a = - W_131/ Ghimj(index,1242);
W_131 = -a;
W_132 = W_132+ a *Ghimj(index,1243);
W_133 = W_133+ a *Ghimj(index,1244);
W_134 = W_134+ a *Ghimj(index,1245);
W_135 = W_135+ a *Ghimj(index,1246);
W_136 = W_136+ a *Ghimj(index,1247);
W_137 = W_137+ a *Ghimj(index,1248);
W_138 = W_138+ a *Ghimj(index,1249);
a = - W_132/ Ghimj(index,1262);
W_132 = -a;
W_133 = W_133+ a *Ghimj(index,1263);
W_134 = W_134+ a *Ghimj(index,1264);
W_135 = W_135+ a *Ghimj(index,1265);
W_136 = W_136+ a *Ghimj(index,1266);
W_137 = W_137+ a *Ghimj(index,1267);
W_138 = W_138+ a *Ghimj(index,1268);
a = - W_133/ Ghimj(index,1297);
W_133 = -a;
W_134 = W_134+ a *Ghimj(index,1298);
W_135 = W_135+ a *Ghimj(index,1299);
W_136 = W_136+ a *Ghimj(index,1300);
W_137 = W_137+ a *Ghimj(index,1301);
W_138 = W_138+ a *Ghimj(index,1302);
a = - W_134/ Ghimj(index,1324);
W_134 = -a;
W_135 = W_135+ a *Ghimj(index,1325);
W_136 = W_136+ a *Ghimj(index,1326);
W_137 = W_137+ a *Ghimj(index,1327);
W_138 = W_138+ a *Ghimj(index,1328);
a = - W_135/ Ghimj(index,1370);
W_135 = -a;
W_136 = W_136+ a *Ghimj(index,1371);
W_137 = W_137+ a *Ghimj(index,1372);
W_138 = W_138+ a *Ghimj(index,1373);
a = - W_136/ Ghimj(index,1398);
W_136 = -a;
W_137 = W_137+ a *Ghimj(index,1399);
W_138 = W_138+ a *Ghimj(index,1400);
Ghimj(index,1401) = W_46;
Ghimj(index,1402) = W_56;
Ghimj(index,1403) = W_62;
Ghimj(index,1404) = W_65;
Ghimj(index,1405) = W_66;
Ghimj(index,1406) = W_69;
Ghimj(index,1407) = W_71;
Ghimj(index,1408) = W_73;
Ghimj(index,1409) = W_78;
Ghimj(index,1410) = W_79;
Ghimj(index,1411) = W_81;
Ghimj(index,1412) = W_82;
Ghimj(index,1413) = W_87;
Ghimj(index,1414) = W_88;
Ghimj(index,1415) = W_89;
Ghimj(index,1416) = W_91;
Ghimj(index,1417) = W_92;
Ghimj(index,1418) = W_93;
Ghimj(index,1419) = W_94;
Ghimj(index,1420) = W_96;
Ghimj(index,1421) = W_99;
Ghimj(index,1422) = W_102;
Ghimj(index,1423) = W_103;
Ghimj(index,1424) = W_104;
Ghimj(index,1425) = W_106;
Ghimj(index,1426) = W_107;
Ghimj(index,1427) = W_108;
Ghimj(index,1428) = W_109;
Ghimj(index,1429) = W_110;
Ghimj(index,1430) = W_111;
Ghimj(index,1431) = W_113;
Ghimj(index,1432) = W_114;
Ghimj(index,1433) = W_115;
Ghimj(index,1434) = W_117;
Ghimj(index,1435) = W_119;
Ghimj(index,1436) = W_121;
Ghimj(index,1437) = W_122;
Ghimj(index,1438) = W_124;
Ghimj(index,1439) = W_125;
Ghimj(index,1440) = W_126;
Ghimj(index,1441) = W_127;
Ghimj(index,1442) = W_128;
Ghimj(index,1443) = W_129;
Ghimj(index,1444) = W_130;
Ghimj(index,1445) = W_131;
Ghimj(index,1446) = W_132;
Ghimj(index,1447) = W_133;
Ghimj(index,1448) = W_134;
Ghimj(index,1449) = W_135;
Ghimj(index,1450) = W_136;
Ghimj(index,1451) = W_137;
Ghimj(index,1452) = W_138;
W_83 = Ghimj(index,1453);
W_88 = Ghimj(index,1454);
W_97 = Ghimj(index,1455);
W_98 = Ghimj(index,1456);
W_103 = Ghimj(index,1457);
W_104 = Ghimj(index,1458);
W_105 = Ghimj(index,1459);
W_106 = Ghimj(index,1460);
W_107 = Ghimj(index,1461);
W_112 = Ghimj(index,1462);
W_114 = Ghimj(index,1463);
W_116 = Ghimj(index,1464);
W_118 = Ghimj(index,1465);
W_119 = Ghimj(index,1466);
W_120 = Ghimj(index,1467);
W_121 = Ghimj(index,1468);
W_122 = Ghimj(index,1469);
W_123 = Ghimj(index,1470);
W_124 = Ghimj(index,1471);
W_125 = Ghimj(index,1472);
W_126 = Ghimj(index,1473);
W_127 = Ghimj(index,1474);
W_128 = Ghimj(index,1475);
W_129 = Ghimj(index,1476);
W_130 = Ghimj(index,1477);
W_131 = Ghimj(index,1478);
W_132 = Ghimj(index,1479);
W_133 = Ghimj(index,1480);
W_134 = Ghimj(index,1481);
W_135 = Ghimj(index,1482);
W_136 = Ghimj(index,1483);
W_137 = Ghimj(index,1484);
W_138 = Ghimj(index,1485);
a = - W_83/ Ghimj(index,416);
W_83 = -a;
W_128 = W_128+ a *Ghimj(index,417);
W_135 = W_135+ a *Ghimj(index,418);
W_136 = W_136+ a *Ghimj(index,419);
W_138 = W_138+ a *Ghimj(index,420);
a = - W_88/ Ghimj(index,450);
W_88 = -a;
W_103 = W_103+ a *Ghimj(index,451);
W_106 = W_106+ a *Ghimj(index,452);
W_124 = W_124+ a *Ghimj(index,453);
W_126 = W_126+ a *Ghimj(index,454);
W_127 = W_127+ a *Ghimj(index,455);
W_137 = W_137+ a *Ghimj(index,456);
a = - W_97/ Ghimj(index,549);
W_97 = -a;
W_98 = W_98+ a *Ghimj(index,550);
W_120 = W_120+ a *Ghimj(index,551);
W_122 = W_122+ a *Ghimj(index,552);
W_126 = W_126+ a *Ghimj(index,553);
W_127 = W_127+ a *Ghimj(index,554);
W_130 = W_130+ a *Ghimj(index,555);
W_137 = W_137+ a *Ghimj(index,556);
a = - W_98/ Ghimj(index,557);
W_98 = -a;
W_107 = W_107+ a *Ghimj(index,558);
W_120 = W_120+ a *Ghimj(index,559);
W_124 = W_124+ a *Ghimj(index,560);
W_126 = W_126+ a *Ghimj(index,561);
W_127 = W_127+ a *Ghimj(index,562);
a = - W_103/ Ghimj(index,605);
W_103 = -a;
W_124 = W_124+ a *Ghimj(index,606);
W_126 = W_126+ a *Ghimj(index,607);
W_127 = W_127+ a *Ghimj(index,608);
W_129 = W_129+ a *Ghimj(index,609);
a = - W_104/ Ghimj(index,610);
W_104 = -a;
W_125 = W_125+ a *Ghimj(index,611);
W_126 = W_126+ a *Ghimj(index,612);
W_127 = W_127+ a *Ghimj(index,613);
W_129 = W_129+ a *Ghimj(index,614);
W_137 = W_137+ a *Ghimj(index,615);
a = - W_105/ Ghimj(index,616);
W_105 = -a;
W_128 = W_128+ a *Ghimj(index,617);
W_129 = W_129+ a *Ghimj(index,618);
W_132 = W_132+ a *Ghimj(index,619);
W_135 = W_135+ a *Ghimj(index,620);
W_138 = W_138+ a *Ghimj(index,621);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
a = - W_112/ Ghimj(index,677);
W_112 = -a;
W_116 = W_116+ a *Ghimj(index,678);
W_123 = W_123+ a *Ghimj(index,679);
W_126 = W_126+ a *Ghimj(index,680);
W_128 = W_128+ a *Ghimj(index,681);
W_134 = W_134+ a *Ghimj(index,682);
W_137 = W_137+ a *Ghimj(index,683);
W_138 = W_138+ a *Ghimj(index,684);
a = - W_114/ Ghimj(index,697);
W_114 = -a;
W_126 = W_126+ a *Ghimj(index,698);
W_127 = W_127+ a *Ghimj(index,699);
W_129 = W_129+ a *Ghimj(index,700);
W_132 = W_132+ a *Ghimj(index,701);
W_136 = W_136+ a *Ghimj(index,702);
a = - W_116/ Ghimj(index,714);
W_116 = -a;
W_123 = W_123+ a *Ghimj(index,715);
W_127 = W_127+ a *Ghimj(index,716);
W_128 = W_128+ a *Ghimj(index,717);
W_131 = W_131+ a *Ghimj(index,718);
W_134 = W_134+ a *Ghimj(index,719);
W_135 = W_135+ a *Ghimj(index,720);
W_138 = W_138+ a *Ghimj(index,721);
a = - W_118/ Ghimj(index,745);
W_118 = -a;
W_123 = W_123+ a *Ghimj(index,746);
W_125 = W_125+ a *Ghimj(index,747);
W_126 = W_126+ a *Ghimj(index,748);
W_127 = W_127+ a *Ghimj(index,749);
W_128 = W_128+ a *Ghimj(index,750);
W_129 = W_129+ a *Ghimj(index,751);
W_131 = W_131+ a *Ghimj(index,752);
W_132 = W_132+ a *Ghimj(index,753);
W_134 = W_134+ a *Ghimj(index,754);
W_135 = W_135+ a *Ghimj(index,755);
W_137 = W_137+ a *Ghimj(index,756);
W_138 = W_138+ a *Ghimj(index,757);
a = - W_119/ Ghimj(index,767);
W_119 = -a;
W_121 = W_121+ a *Ghimj(index,768);
W_124 = W_124+ a *Ghimj(index,769);
W_125 = W_125+ a *Ghimj(index,770);
W_126 = W_126+ a *Ghimj(index,771);
W_127 = W_127+ a *Ghimj(index,772);
W_129 = W_129+ a *Ghimj(index,773);
W_133 = W_133+ a *Ghimj(index,774);
W_136 = W_136+ a *Ghimj(index,775);
W_137 = W_137+ a *Ghimj(index,776);
a = - W_120/ Ghimj(index,787);
W_120 = -a;
W_122 = W_122+ a *Ghimj(index,788);
W_124 = W_124+ a *Ghimj(index,789);
W_126 = W_126+ a *Ghimj(index,790);
W_127 = W_127+ a *Ghimj(index,791);
W_128 = W_128+ a *Ghimj(index,792);
W_130 = W_130+ a *Ghimj(index,793);
W_133 = W_133+ a *Ghimj(index,794);
W_135 = W_135+ a *Ghimj(index,795);
W_136 = W_136+ a *Ghimj(index,796);
W_137 = W_137+ a *Ghimj(index,797);
a = - W_121/ Ghimj(index,821);
W_121 = -a;
W_124 = W_124+ a *Ghimj(index,822);
W_125 = W_125+ a *Ghimj(index,823);
W_126 = W_126+ a *Ghimj(index,824);
W_127 = W_127+ a *Ghimj(index,825);
W_129 = W_129+ a *Ghimj(index,826);
W_133 = W_133+ a *Ghimj(index,827);
W_135 = W_135+ a *Ghimj(index,828);
W_136 = W_136+ a *Ghimj(index,829);
W_137 = W_137+ a *Ghimj(index,830);
a = - W_122/ Ghimj(index,847);
W_122 = -a;
W_124 = W_124+ a *Ghimj(index,848);
W_125 = W_125+ a *Ghimj(index,849);
W_126 = W_126+ a *Ghimj(index,850);
W_127 = W_127+ a *Ghimj(index,851);
W_128 = W_128+ a *Ghimj(index,852);
W_129 = W_129+ a *Ghimj(index,853);
W_130 = W_130+ a *Ghimj(index,854);
W_131 = W_131+ a *Ghimj(index,855);
W_133 = W_133+ a *Ghimj(index,856);
W_135 = W_135+ a *Ghimj(index,857);
W_136 = W_136+ a *Ghimj(index,858);
W_137 = W_137+ a *Ghimj(index,859);
W_138 = W_138+ a *Ghimj(index,860);
a = - W_123/ Ghimj(index,869);
W_123 = -a;
W_124 = W_124+ a *Ghimj(index,870);
W_125 = W_125+ a *Ghimj(index,871);
W_126 = W_126+ a *Ghimj(index,872);
W_127 = W_127+ a *Ghimj(index,873);
W_128 = W_128+ a *Ghimj(index,874);
W_129 = W_129+ a *Ghimj(index,875);
W_130 = W_130+ a *Ghimj(index,876);
W_131 = W_131+ a *Ghimj(index,877);
W_132 = W_132+ a *Ghimj(index,878);
W_133 = W_133+ a *Ghimj(index,879);
W_134 = W_134+ a *Ghimj(index,880);
W_135 = W_135+ a *Ghimj(index,881);
W_136 = W_136+ a *Ghimj(index,882);
W_137 = W_137+ a *Ghimj(index,883);
W_138 = W_138+ a *Ghimj(index,884);
a = - W_124/ Ghimj(index,896);
W_124 = -a;
W_125 = W_125+ a *Ghimj(index,897);
W_126 = W_126+ a *Ghimj(index,898);
W_127 = W_127+ a *Ghimj(index,899);
W_128 = W_128+ a *Ghimj(index,900);
W_129 = W_129+ a *Ghimj(index,901);
W_130 = W_130+ a *Ghimj(index,902);
W_131 = W_131+ a *Ghimj(index,903);
W_132 = W_132+ a *Ghimj(index,904);
W_133 = W_133+ a *Ghimj(index,905);
W_135 = W_135+ a *Ghimj(index,906);
W_136 = W_136+ a *Ghimj(index,907);
W_137 = W_137+ a *Ghimj(index,908);
W_138 = W_138+ a *Ghimj(index,909);
a = - W_125/ Ghimj(index,934);
W_125 = -a;
W_126 = W_126+ a *Ghimj(index,935);
W_127 = W_127+ a *Ghimj(index,936);
W_128 = W_128+ a *Ghimj(index,937);
W_129 = W_129+ a *Ghimj(index,938);
W_130 = W_130+ a *Ghimj(index,939);
W_131 = W_131+ a *Ghimj(index,940);
W_132 = W_132+ a *Ghimj(index,941);
W_133 = W_133+ a *Ghimj(index,942);
W_134 = W_134+ a *Ghimj(index,943);
W_135 = W_135+ a *Ghimj(index,944);
W_136 = W_136+ a *Ghimj(index,945);
W_137 = W_137+ a *Ghimj(index,946);
W_138 = W_138+ a *Ghimj(index,947);
a = - W_126/ Ghimj(index,1023);
W_126 = -a;
W_127 = W_127+ a *Ghimj(index,1024);
W_128 = W_128+ a *Ghimj(index,1025);
W_129 = W_129+ a *Ghimj(index,1026);
W_130 = W_130+ a *Ghimj(index,1027);
W_131 = W_131+ a *Ghimj(index,1028);
W_132 = W_132+ a *Ghimj(index,1029);
W_133 = W_133+ a *Ghimj(index,1030);
W_134 = W_134+ a *Ghimj(index,1031);
W_135 = W_135+ a *Ghimj(index,1032);
W_136 = W_136+ a *Ghimj(index,1033);
W_137 = W_137+ a *Ghimj(index,1034);
W_138 = W_138+ a *Ghimj(index,1035);
a = - W_127/ Ghimj(index,1071);
W_127 = -a;
W_128 = W_128+ a *Ghimj(index,1072);
W_129 = W_129+ a *Ghimj(index,1073);
W_130 = W_130+ a *Ghimj(index,1074);
W_131 = W_131+ a *Ghimj(index,1075);
W_132 = W_132+ a *Ghimj(index,1076);
W_133 = W_133+ a *Ghimj(index,1077);
W_134 = W_134+ a *Ghimj(index,1078);
W_135 = W_135+ a *Ghimj(index,1079);
W_136 = W_136+ a *Ghimj(index,1080);
W_137 = W_137+ a *Ghimj(index,1081);
W_138 = W_138+ a *Ghimj(index,1082);
a = - W_128/ Ghimj(index,1138);
W_128 = -a;
W_129 = W_129+ a *Ghimj(index,1139);
W_130 = W_130+ a *Ghimj(index,1140);
W_131 = W_131+ a *Ghimj(index,1141);
W_132 = W_132+ a *Ghimj(index,1142);
W_133 = W_133+ a *Ghimj(index,1143);
W_134 = W_134+ a *Ghimj(index,1144);
W_135 = W_135+ a *Ghimj(index,1145);
W_136 = W_136+ a *Ghimj(index,1146);
W_137 = W_137+ a *Ghimj(index,1147);
W_138 = W_138+ a *Ghimj(index,1148);
a = - W_129/ Ghimj(index,1176);
W_129 = -a;
W_130 = W_130+ a *Ghimj(index,1177);
W_131 = W_131+ a *Ghimj(index,1178);
W_132 = W_132+ a *Ghimj(index,1179);
W_133 = W_133+ a *Ghimj(index,1180);
W_134 = W_134+ a *Ghimj(index,1181);
W_135 = W_135+ a *Ghimj(index,1182);
W_136 = W_136+ a *Ghimj(index,1183);
W_137 = W_137+ a *Ghimj(index,1184);
W_138 = W_138+ a *Ghimj(index,1185);
a = - W_130/ Ghimj(index,1218);
W_130 = -a;
W_131 = W_131+ a *Ghimj(index,1219);
W_132 = W_132+ a *Ghimj(index,1220);
W_133 = W_133+ a *Ghimj(index,1221);
W_134 = W_134+ a *Ghimj(index,1222);
W_135 = W_135+ a *Ghimj(index,1223);
W_136 = W_136+ a *Ghimj(index,1224);
W_137 = W_137+ a *Ghimj(index,1225);
W_138 = W_138+ a *Ghimj(index,1226);
a = - W_131/ Ghimj(index,1242);
W_131 = -a;
W_132 = W_132+ a *Ghimj(index,1243);
W_133 = W_133+ a *Ghimj(index,1244);
W_134 = W_134+ a *Ghimj(index,1245);
W_135 = W_135+ a *Ghimj(index,1246);
W_136 = W_136+ a *Ghimj(index,1247);
W_137 = W_137+ a *Ghimj(index,1248);
W_138 = W_138+ a *Ghimj(index,1249);
a = - W_132/ Ghimj(index,1262);
W_132 = -a;
W_133 = W_133+ a *Ghimj(index,1263);
W_134 = W_134+ a *Ghimj(index,1264);
W_135 = W_135+ a *Ghimj(index,1265);
W_136 = W_136+ a *Ghimj(index,1266);
W_137 = W_137+ a *Ghimj(index,1267);
W_138 = W_138+ a *Ghimj(index,1268);
a = - W_133/ Ghimj(index,1297);
W_133 = -a;
W_134 = W_134+ a *Ghimj(index,1298);
W_135 = W_135+ a *Ghimj(index,1299);
W_136 = W_136+ a *Ghimj(index,1300);
W_137 = W_137+ a *Ghimj(index,1301);
W_138 = W_138+ a *Ghimj(index,1302);
a = - W_134/ Ghimj(index,1324);
W_134 = -a;
W_135 = W_135+ a *Ghimj(index,1325);
W_136 = W_136+ a *Ghimj(index,1326);
W_137 = W_137+ a *Ghimj(index,1327);
W_138 = W_138+ a *Ghimj(index,1328);
a = - W_135/ Ghimj(index,1370);
W_135 = -a;
W_136 = W_136+ a *Ghimj(index,1371);
W_137 = W_137+ a *Ghimj(index,1372);
W_138 = W_138+ a *Ghimj(index,1373);
a = - W_136/ Ghimj(index,1398);
W_136 = -a;
W_137 = W_137+ a *Ghimj(index,1399);
W_138 = W_138+ a *Ghimj(index,1400);
a = - W_137/ Ghimj(index,1451);
W_137 = -a;
W_138 = W_138+ a *Ghimj(index,1452);
Ghimj(index,1453) = W_83;
Ghimj(index,1454) = W_88;
Ghimj(index,1455) = W_97;
Ghimj(index,1456) = W_98;
Ghimj(index,1457) = W_103;
Ghimj(index,1458) = W_104;
Ghimj(index,1459) = W_105;
Ghimj(index,1460) = W_106;
Ghimj(index,1461) = W_107;
Ghimj(index,1462) = W_112;
Ghimj(index,1463) = W_114;
Ghimj(index,1464) = W_116;
Ghimj(index,1465) = W_118;
Ghimj(index,1466) = W_119;
Ghimj(index,1467) = W_120;
Ghimj(index,1468) = W_121;
Ghimj(index,1469) = W_122;
Ghimj(index,1470) = W_123;
Ghimj(index,1471) = W_124;
Ghimj(index,1472) = W_125;
Ghimj(index,1473) = W_126;
Ghimj(index,1474) = W_127;
Ghimj(index,1475) = W_128;
Ghimj(index,1476) = W_129;
Ghimj(index,1477) = W_130;
Ghimj(index,1478) = W_131;
Ghimj(index,1479) = W_132;
Ghimj(index,1480) = W_133;
Ghimj(index,1481) = W_134;
Ghimj(index,1482) = W_135;
Ghimj(index,1483) = W_136;
Ghimj(index,1484) = W_137;
Ghimj(index,1485) = W_138;
}
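/* ros_Decomp (added comment): thin wrapper that performs the in-place sparse LU
   factorization of Ghimj via kppDecomp and increments the decomposition counter Ndec. */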
__device__ void ros_Decomp(double * __restrict__ Ghimj, int &Ndec, int VL_GLO)
{
kppDecomp(Ghimj, VL_GLO);
Ndec++;
}
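/* ros_PrepareMatrix (added comment): builds the Rosenbrock stage matrix in sparse storage,
   Ghimj = 1/(direction*H*gam) * I - Jac0, by negating jac0 over the LU_NONZERO pattern and
   adding ghinv at what appear to be the diagonal positions of the sparse pattern, then
   factorizes it via ros_Decomp. The locals ising and nConsecutive are declared for the
   usual singularity-handling loop but are unused in this generated variant. */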
__device__ void ros_PrepareMatrix(double &H, int direction, double gam, double *jac0, double *Ghimj, int &Nsng, int &Ndec, int VL_GLO)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
int ising, nConsecutive;
double ghinv;
ghinv = ONE/(direction*H*gam);
for (int i=0; i<LU_NONZERO; i++)
Ghimj(index,i) = -jac0(index,i);
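 /* Add ghinv to the entries of the sparse pattern that correspond to the matrix diagonal
    (added comment; the index list below is emitted by the code generator). */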
Ghimj(index,0) += ghinv;
Ghimj(index,1) += ghinv;
Ghimj(index,2) += ghinv;
Ghimj(index,3) += ghinv;
Ghimj(index,4) += ghinv;
Ghimj(index,5) += ghinv;
Ghimj(index,6) += ghinv;
Ghimj(index,9) += ghinv;
Ghimj(index,25) += ghinv;
Ghimj(index,29) += ghinv;
Ghimj(index,38) += ghinv;
Ghimj(index,43) += ghinv;
Ghimj(index,46) += ghinv;
Ghimj(index,48) += ghinv;
Ghimj(index,52) += ghinv;
Ghimj(index,58) += ghinv;
Ghimj(index,60) += ghinv;
Ghimj(index,62) += ghinv;
Ghimj(index,64) += ghinv;
Ghimj(index,68) += ghinv;
Ghimj(index,69) += ghinv;
Ghimj(index,72) += ghinv;
Ghimj(index,75) += ghinv;
Ghimj(index,112) += ghinv;
Ghimj(index,123) += ghinv;
Ghimj(index,140) += ghinv;
Ghimj(index,148) += ghinv;
Ghimj(index,163) += ghinv;
Ghimj(index,170) += ghinv;
Ghimj(index,182) += ghinv;
Ghimj(index,185) += ghinv;
Ghimj(index,190) += ghinv;
Ghimj(index,194) += ghinv;
Ghimj(index,202) += ghinv;
Ghimj(index,206) += ghinv;
Ghimj(index,233) += ghinv;
Ghimj(index,244) += ghinv;
Ghimj(index,251) += ghinv;
Ghimj(index,255) += ghinv;
Ghimj(index,258) += ghinv;
Ghimj(index,260) += ghinv;
Ghimj(index,262) += ghinv;
Ghimj(index,264) += ghinv;
Ghimj(index,266) += ghinv;
Ghimj(index,268) += ghinv;
Ghimj(index,270) += ghinv;
Ghimj(index,272) += ghinv;
Ghimj(index,276) += ghinv;
Ghimj(index,278) += ghinv;
Ghimj(index,280) += ghinv;
Ghimj(index,282) += ghinv;
Ghimj(index,285) += ghinv;
Ghimj(index,288) += ghinv;
Ghimj(index,290) += ghinv;
Ghimj(index,292) += ghinv;
Ghimj(index,294) += ghinv;
Ghimj(index,296) += ghinv;
Ghimj(index,300) += ghinv;
Ghimj(index,303) += ghinv;
Ghimj(index,306) += ghinv;
Ghimj(index,310) += ghinv;
Ghimj(index,315) += ghinv;
Ghimj(index,319) += ghinv;
Ghimj(index,323) += ghinv;
Ghimj(index,327) += ghinv;
Ghimj(index,331) += ghinv;
Ghimj(index,335) += ghinv;
Ghimj(index,339) += ghinv;
Ghimj(index,343) += ghinv;
Ghimj(index,347) += ghinv;
Ghimj(index,352) += ghinv;
Ghimj(index,356) += ghinv;
Ghimj(index,360) += ghinv;
Ghimj(index,364) += ghinv;
Ghimj(index,368) += ghinv;
Ghimj(index,374) += ghinv;
Ghimj(index,377) += ghinv;
Ghimj(index,382) += ghinv;
Ghimj(index,386) += ghinv;
Ghimj(index,393) += ghinv;
Ghimj(index,397) += ghinv;
Ghimj(index,405) += ghinv;
Ghimj(index,412) += ghinv;
Ghimj(index,416) += ghinv;
Ghimj(index,421) += ghinv;
Ghimj(index,427) += ghinv;
Ghimj(index,436) += ghinv;
Ghimj(index,444) += ghinv;
Ghimj(index,450) += ghinv;
Ghimj(index,457) += ghinv;
Ghimj(index,469) += ghinv;
Ghimj(index,481) += ghinv;
Ghimj(index,489) += ghinv;
Ghimj(index,497) += ghinv;
Ghimj(index,505) += ghinv;
Ghimj(index,514) += ghinv;
Ghimj(index,538) += ghinv;
Ghimj(index,549) += ghinv;
Ghimj(index,557) += ghinv;
Ghimj(index,565) += ghinv;
Ghimj(index,573) += ghinv;
Ghimj(index,586) += ghinv;
Ghimj(index,600) += ghinv;
Ghimj(index,605) += ghinv;
Ghimj(index,610) += ghinv;
Ghimj(index,616) += ghinv;
Ghimj(index,622) += ghinv;
Ghimj(index,626) += ghinv;
Ghimj(index,636) += ghinv;
Ghimj(index,648) += ghinv;
Ghimj(index,659) += ghinv;
Ghimj(index,669) += ghinv;
Ghimj(index,677) += ghinv;
Ghimj(index,689) += ghinv;
Ghimj(index,697) += ghinv;
Ghimj(index,706) += ghinv;
Ghimj(index,714) += ghinv;
Ghimj(index,731) += ghinv;
Ghimj(index,745) += ghinv;
Ghimj(index,767) += ghinv;
Ghimj(index,787) += ghinv;
Ghimj(index,821) += ghinv;
Ghimj(index,847) += ghinv;
Ghimj(index,869) += ghinv;
Ghimj(index,896) += ghinv;
Ghimj(index,934) += ghinv;
Ghimj(index,1023) += ghinv;
Ghimj(index,1071) += ghinv;
Ghimj(index,1138) += ghinv;
Ghimj(index,1176) += ghinv;
Ghimj(index,1218) += ghinv;
Ghimj(index,1242) += ghinv;
Ghimj(index,1262) += ghinv;
Ghimj(index,1297) += ghinv;
Ghimj(index,1324) += ghinv;
Ghimj(index,1370) += ghinv;
Ghimj(index,1398) += ghinv;
Ghimj(index,1451) += ghinv;
Ghimj(index,1485) += ghinv;
Ghimj(index,1486) += ghinv;
ros_Decomp(Ghimj, Ndec, VL_GLO);
}
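/*
 * Jac_sp: evaluate the sparse Jacobian of the chemical ODE system.
 * The B_k temporaries computed first are the partial derivatives of the
 * individual reaction rates; the jcb(index,k) assignments that follow sum
 * them into the nonzero Jacobian entries in the solver's fixed sparsity
 * ordering. Njac counts Jacobian evaluations for solver statistics.
 */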
__device__ void Jac_sp(const double * __restrict__ var, const double * __restrict__ fix,
const double * __restrict__ rconst, double * __restrict__ jcb, int &Njac, const int VL_GLO)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
double dummy, B_0, B_1, B_2, B_3, B_4, B_5, B_6, B_7, B_8, B_9, B_10, B_11, B_12, B_13, B_14, B_15, B_16, B_17, B_18, B_19, B_20, B_21, B_22, B_23, B_24, B_25, B_26, B_27, B_28, B_29, B_30, B_31, B_32, B_33, B_34, B_35, B_36, B_37, B_38, B_39, B_40, B_41, B_42, B_43, B_44, B_45, B_46, B_47, B_48, B_49, B_50, B_51, B_52, B_53, B_54, B_55, B_56, B_57, B_58, B_59, B_60, B_61, B_62, B_63, B_64, B_65, B_66, B_67, B_68, B_69, B_70, B_71, B_72, B_73, B_74, B_75, B_76, B_77, B_78, B_79, B_80, B_81, B_82, B_83, B_84, B_85, B_86, B_87, B_88, B_89, B_90, B_91, B_92, B_93, B_94, B_95, B_96, B_97, B_98, B_99, B_100, B_101, B_102, B_103, B_104, B_105, B_106, B_107, B_108, B_109, B_110, B_111, B_112, B_113, B_114, B_115, B_116, B_117, B_118, B_119, B_120, B_121, B_122, B_123, B_124, B_125, B_126, B_127, B_128, B_129, B_130, B_131, B_132, B_133, B_134, B_135, B_136, B_137, B_138, B_139, B_140, B_141, B_142, B_143, B_144, B_145, B_146, B_147, B_148, B_149, B_150, B_151, B_152, B_153, B_154, B_155, B_156, B_157, B_158, B_159, B_160, B_161, B_162, B_163, B_164, B_165, B_166, B_167, B_168, B_169, B_170, B_171, B_172, B_173, B_174, B_175, B_176, B_177, B_178, B_179, B_180, B_181, B_182, B_183, B_184, B_185, B_186, B_187, B_188, B_189, B_190, B_191, B_192, B_193, B_194, B_195, B_196, B_197, B_198, B_199, B_200, B_201, B_202, B_203, B_204, B_205, B_206, B_207, B_208, B_209, B_210, B_211, B_212, B_213, B_214, B_215, B_216, B_217, B_218, B_219, B_220, B_221, B_222, B_223, B_224, B_225, B_226, B_227, B_228, B_229, B_230, B_231, B_232, B_233, B_234, B_235, B_236, B_237, B_238, B_239, B_240, B_241, B_242, B_243, B_244, B_245, B_246, B_247, B_248, B_249, B_250, B_251, B_252, B_253, B_254, B_255, B_256, B_257, B_258, B_259, B_260, B_261, B_262, B_263, B_264, B_265, B_266, B_267, B_268, B_269, B_270, B_271, B_272, B_273, B_274, B_275, B_276, B_277, B_278, B_279, B_280, B_281, B_282, B_283, B_284, B_285, B_286, B_287, B_288, B_289, B_290, B_291, B_292, B_293, B_294, B_295, B_296, B_297, B_298, B_299, B_300, B_301, B_302, B_303, B_304, B_305, B_306, B_307, B_308, B_309, B_310, B_311, B_312, B_313, B_314, B_315, B_316, B_317, B_318, B_319, B_320, B_321, B_322, B_323, B_324, B_325, B_326, B_327, B_328, B_329, B_330, B_331, B_332, B_333, B_334, B_335, B_336, B_337, B_338, B_339, B_340, B_341, B_342, B_343, B_344, B_345, B_346, B_347, B_348, B_349, B_350, B_351, B_352, B_353, B_354, B_355, B_356, B_357, B_358, B_359, B_360, B_361, B_362, B_363, B_364, B_365, B_366, B_367, B_368, B_369, B_370, B_371, B_372, B_373, B_374, B_375, B_376, B_377, B_378, B_379, B_380, B_381, B_382, B_383, B_384, B_385, B_386, B_387, B_388, B_389, B_390, B_391, B_392, B_393, B_394, B_395, B_396, B_397, B_398, B_399, B_400, B_401, B_402, B_403, B_404, B_405, B_406, B_407, B_408, B_409, B_410, B_411, B_412, B_413, B_414, B_415, B_416, B_417, B_418, B_419, B_420, B_421, B_422, B_423, B_424, B_425, B_426, B_427, B_428, B_429, B_430, B_431, B_432, B_433, B_434, B_435, B_436, B_437, B_438, B_439, B_440, B_441, B_442, B_443, B_444, B_445, B_446, B_447, B_448, B_449, B_450, B_451, B_452, B_453, B_454, B_455, B_456, B_457, B_458, B_459, B_460, B_461, B_462, B_463, B_464, B_465, B_466, B_467, B_468, B_469, B_470, B_471, B_472, B_473, B_474, B_475, B_476, B_477, B_478, B_479, B_480, B_481, B_482, B_483, B_484, B_485, B_486, B_487, B_488, B_489, B_490, B_491, B_492, B_493, B_494, B_495, B_496, B_497, B_498, B_499, B_500, B_501, B_502, B_503, B_504, B_505, B_506, B_507, B_508, B_509, B_510, B_511, B_512, B_513, B_514, B_515, B_516, B_517, B_518, B_519, B_520, 
B_521, B_522;
Njac++;
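 /* Pattern of the generated derivatives below: for a bimolecular reaction the
    derivative with respect to one reactant is rconst(index,j) (or a hard-coded
    rate) times the concentration of the other reactant (var/fix); for
    unimolecular and photolytic reactions it is the rate constant alone. */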
B_0 = rconst(index,0)*fix(index,0);
B_2 = rconst(index,1)*fix(index,0);
B_4 = 1.2e-10*var(index,124);
B_5 = 1.2e-10*var(index,120);
B_6 = rconst(index,3)*var(index,131);
B_7 = rconst(index,3)*var(index,124);
B_8 = rconst(index,4)*fix(index,0);
B_10 = rconst(index,5)*var(index,124);
B_11 = rconst(index,5)*var(index,122);
B_12 = 1.2e-10*var(index,120);
B_13 = 1.2e-10*var(index,97);
B_14 = rconst(index,7)*var(index,131);
B_15 = rconst(index,7)*var(index,126);
B_16 = rconst(index,8)*var(index,126);
B_17 = rconst(index,8)*var(index,124);
B_18 = rconst(index,9)*var(index,126);
B_19 = rconst(index,9)*var(index,97);
B_20 = rconst(index,10)*var(index,137);
B_21 = rconst(index,10)*var(index,131);
B_22 = rconst(index,11)*var(index,137);
B_23 = rconst(index,11)*var(index,124);
B_24 = 7.2e-11*var(index,137);
B_25 = 7.2e-11*var(index,122);
B_26 = 6.9e-12*var(index,137);
B_27 = 6.9e-12*var(index,122);
B_28 = 1.6e-12*var(index,137);
B_29 = 1.6e-12*var(index,122);
B_30 = rconst(index,15)*var(index,137);
B_31 = rconst(index,15)*var(index,126);
B_32 = rconst(index,16)*2*var(index,137);
B_33 = rconst(index,17)*var(index,128);
B_34 = rconst(index,17)*var(index,120);
B_35 = 1.8e-12*var(index,126);
B_36 = 1.8e-12*var(index,88);
B_37 = rconst(index,19)*fix(index,0);
B_39 = rconst(index,20)*fix(index,1);
B_41 = rconst(index,21)*var(index,120);
B_42 = rconst(index,21)*var(index,60);
B_43 = rconst(index,22)*var(index,120);
B_44 = rconst(index,22)*var(index,60);
B_45 = rconst(index,23)*var(index,133);
B_46 = rconst(index,23)*var(index,124);
B_47 = rconst(index,24)*var(index,133);
B_48 = rconst(index,24)*var(index,59);
B_49 = rconst(index,25)*var(index,135);
B_50 = rconst(index,25)*var(index,131);
B_51 = rconst(index,26)*var(index,135);
B_52 = rconst(index,26)*var(index,124);
B_53 = rconst(index,27)*var(index,135);
B_54 = rconst(index,27)*var(index,59);
B_55 = rconst(index,28)*var(index,136);
B_56 = rconst(index,28)*var(index,133);
B_57 = rconst(index,29)*var(index,136);
B_58 = rconst(index,29)*var(index,135);
B_59 = rconst(index,30);
B_60 = rconst(index,31)*var(index,133);
B_61 = rconst(index,31)*var(index,126);
B_62 = rconst(index,32)*var(index,137);
B_63 = rconst(index,32)*var(index,133);
B_64 = rconst(index,33)*var(index,135);
B_65 = rconst(index,33)*var(index,126);
B_66 = rconst(index,34)*var(index,137);
B_67 = rconst(index,34)*var(index,135);
B_68 = 3.5e-12*var(index,137);
B_69 = 3.5e-12*var(index,136);
B_70 = rconst(index,36)*var(index,126);
B_71 = rconst(index,36)*var(index,76);
B_72 = rconst(index,37)*var(index,126);
B_73 = rconst(index,37)*var(index,101);
B_74 = rconst(index,38);
B_75 = rconst(index,39)*var(index,126);
B_76 = rconst(index,39)*var(index,73);
B_77 = rconst(index,40)*var(index,126);
B_78 = rconst(index,40)*var(index,47);
B_79 = rconst(index,41)*var(index,124);
B_80 = rconst(index,41)*var(index,92);
B_81 = rconst(index,42)*var(index,137);
B_82 = rconst(index,42)*var(index,92);
B_83 = rconst(index,43)*var(index,137);
B_84 = rconst(index,43)*var(index,92);
B_85 = rconst(index,44)*var(index,133);
B_86 = rconst(index,44)*var(index,92);
B_87 = rconst(index,45)*var(index,133);
B_88 = rconst(index,45)*var(index,92);
B_89 = rconst(index,46)*var(index,135);
B_90 = rconst(index,46)*var(index,92);
B_91 = rconst(index,47)*var(index,135);
B_92 = rconst(index,47)*var(index,92);
B_93 = 1.2e-14*var(index,124);
B_94 = 1.2e-14*var(index,84);
B_95 = 1300;
B_96 = rconst(index,50)*var(index,126);
B_97 = rconst(index,50)*var(index,87);
B_98 = rconst(index,51)*var(index,87);
B_99 = rconst(index,51)*var(index,70);
B_100 = rconst(index,52)*var(index,135);
B_101 = rconst(index,52)*var(index,87);
B_102 = 1.66e-12*var(index,126);
B_103 = 1.66e-12*var(index,70);
B_104 = rconst(index,54)*var(index,126);
B_105 = rconst(index,54)*var(index,61);
B_106 = rconst(index,55)*fix(index,0);
B_108 = 1.75e-10*var(index,120);
B_109 = 1.75e-10*var(index,98);
B_110 = rconst(index,57)*var(index,126);
B_111 = rconst(index,57)*var(index,98);
B_112 = rconst(index,58)*var(index,126);
B_113 = rconst(index,58)*var(index,89);
B_114 = rconst(index,59)*var(index,137);
B_115 = rconst(index,59)*var(index,125);
B_116 = rconst(index,60)*var(index,133);
B_117 = rconst(index,60)*var(index,125);
B_118 = 1.3e-12*var(index,136);
B_119 = 1.3e-12*var(index,125);
B_120 = rconst(index,62)*2*var(index,125);
B_121 = rconst(index,63)*2*var(index,125);
B_122 = rconst(index,64)*var(index,126);
B_123 = rconst(index,64)*var(index,104);
B_124 = rconst(index,65)*var(index,130);
B_125 = rconst(index,65)*var(index,126);
B_126 = rconst(index,66)*var(index,136);
B_127 = rconst(index,66)*var(index,130);
B_128 = rconst(index,67)*var(index,126);
B_129 = rconst(index,67)*var(index,95);
B_130 = 4e-13*var(index,126);
B_131 = 4e-13*var(index,78);
B_132 = rconst(index,69)*var(index,126);
B_133 = rconst(index,69)*var(index,48);
B_134 = rconst(index,70)*var(index,124);
B_135 = rconst(index,70)*var(index,103);
B_136 = rconst(index,71)*var(index,126);
B_137 = rconst(index,71)*var(index,103);
B_138 = rconst(index,72)*var(index,137);
B_139 = rconst(index,72)*var(index,117);
B_140 = rconst(index,73)*var(index,133);
B_141 = rconst(index,73)*var(index,117);
B_142 = 2.3e-12*var(index,136);
B_143 = 2.3e-12*var(index,117);
B_144 = rconst(index,75)*var(index,125);
B_145 = rconst(index,75)*var(index,117);
B_146 = rconst(index,76)*var(index,126);
B_147 = rconst(index,76)*var(index,71);
B_148 = rconst(index,77)*var(index,126);
B_149 = rconst(index,77)*var(index,119);
B_150 = rconst(index,78)*var(index,136);
B_151 = rconst(index,78)*var(index,119);
B_152 = rconst(index,79)*var(index,126);
B_153 = rconst(index,79)*var(index,74);
B_154 = rconst(index,80)*var(index,137);
B_155 = rconst(index,80)*var(index,121);
B_156 = rconst(index,81)*var(index,137);
B_157 = rconst(index,81)*var(index,121);
B_158 = rconst(index,82)*var(index,133);
B_159 = rconst(index,82)*var(index,121);
B_160 = rconst(index,83)*var(index,135);
B_161 = rconst(index,83)*var(index,121);
B_162 = 4e-12*var(index,136);
B_163 = 4e-12*var(index,121);
B_164 = rconst(index,85)*var(index,125);
B_165 = rconst(index,85)*var(index,121);
B_166 = rconst(index,86)*var(index,125);
B_167 = rconst(index,86)*var(index,121);
B_168 = rconst(index,87)*var(index,121);
B_169 = rconst(index,87)*var(index,117);
B_170 = rconst(index,88)*2*var(index,121);
B_171 = rconst(index,89)*var(index,126);
B_172 = rconst(index,89)*var(index,63);
B_173 = rconst(index,90)*var(index,126);
B_174 = rconst(index,90)*var(index,58);
B_175 = rconst(index,91)*var(index,126);
B_176 = rconst(index,91)*var(index,77);
B_177 = rconst(index,92);
B_178 = rconst(index,93)*var(index,126);
B_179 = rconst(index,93)*var(index,49);
B_180 = rconst(index,94)*var(index,124);
B_181 = rconst(index,94)*var(index,107);
B_182 = rconst(index,95)*var(index,126);
B_183 = rconst(index,95)*var(index,107);
B_184 = rconst(index,96)*var(index,136);
B_185 = rconst(index,96)*var(index,107);
B_186 = rconst(index,97)*var(index,137);
B_187 = rconst(index,97)*var(index,93);
B_188 = rconst(index,98)*var(index,133);
B_189 = rconst(index,98)*var(index,93);
B_190 = rconst(index,99)*var(index,125);
B_191 = rconst(index,99)*var(index,93);
B_192 = rconst(index,100)*var(index,126);
B_193 = rconst(index,100)*var(index,69);
B_194 = rconst(index,101)*var(index,137);
B_195 = rconst(index,101)*var(index,115);
B_196 = rconst(index,102)*var(index,133);
B_197 = rconst(index,102)*var(index,115);
B_198 = rconst(index,103)*var(index,126);
B_199 = rconst(index,103)*var(index,67);
B_200 = rconst(index,104)*var(index,126);
B_201 = rconst(index,104)*var(index,86);
B_202 = rconst(index,105)*var(index,137);
B_203 = rconst(index,105)*var(index,94);
B_204 = rconst(index,106)*var(index,133);
B_205 = rconst(index,106)*var(index,94);
B_206 = rconst(index,107)*var(index,125);
B_207 = rconst(index,107)*var(index,94);
B_208 = rconst(index,108)*var(index,126);
B_209 = rconst(index,108)*var(index,72);
B_210 = rconst(index,109)*var(index,126);
B_211 = rconst(index,109)*var(index,108);
B_212 = rconst(index,110)*var(index,126);
B_213 = rconst(index,110)*var(index,96);
B_214 = rconst(index,111)*var(index,126);
B_215 = rconst(index,111)*var(index,62);
B_216 = rconst(index,112)*var(index,126);
B_217 = rconst(index,112)*var(index,40);
B_218 = rconst(index,113)*var(index,125);
B_219 = rconst(index,113)*var(index,102);
B_220 = rconst(index,114)*var(index,137);
B_221 = rconst(index,114)*var(index,102);
B_222 = rconst(index,115)*var(index,133);
B_223 = rconst(index,115)*var(index,102);
B_224 = rconst(index,116)*var(index,126);
B_225 = rconst(index,116)*var(index,79);
B_226 = rconst(index,117)*var(index,124);
B_227 = rconst(index,117)*var(index,110);
B_228 = rconst(index,118)*var(index,126);
B_229 = rconst(index,118)*var(index,110);
B_230 = rconst(index,119)*var(index,137);
B_231 = rconst(index,119)*var(index,113);
B_232 = rconst(index,120)*var(index,133);
B_233 = rconst(index,120)*var(index,113);
B_234 = rconst(index,121)*var(index,135);
B_235 = rconst(index,121)*var(index,113);
B_236 = 2e-12*var(index,125);
B_237 = 2e-12*var(index,113);
B_238 = 2e-12*2*var(index,113);
B_239 = 3e-11*var(index,126);
B_240 = 3e-11*var(index,82);
B_241 = rconst(index,125)*var(index,126);
B_242 = rconst(index,125)*var(index,85);
B_243 = rconst(index,126)*var(index,137);
B_244 = rconst(index,126)*var(index,99);
B_245 = rconst(index,127)*var(index,133);
B_246 = rconst(index,127)*var(index,99);
B_247 = rconst(index,128)*var(index,126);
B_248 = rconst(index,128)*var(index,68);
B_249 = 1.7e-12*var(index,126);
B_250 = 1.7e-12*var(index,111);
B_251 = 3.2e-11*var(index,126);
B_252 = 3.2e-11*var(index,64);
B_253 = rconst(index,131);
B_254 = rconst(index,132)*var(index,124);
B_255 = rconst(index,132)*var(index,106);
B_256 = rconst(index,133)*var(index,126);
B_257 = rconst(index,133)*var(index,106);
B_258 = rconst(index,134)*var(index,136);
B_259 = rconst(index,134)*var(index,106);
B_260 = rconst(index,135)*var(index,137);
B_261 = rconst(index,135)*var(index,109);
B_262 = rconst(index,136)*var(index,133);
B_263 = rconst(index,136)*var(index,109);
B_264 = 2e-12*var(index,125);
B_265 = 2e-12*var(index,109);
B_266 = 2e-12*2*var(index,109);
B_267 = 1e-10*var(index,126);
B_268 = 1e-10*var(index,66);
B_269 = 1.3e-11*var(index,126);
B_270 = 1.3e-11*var(index,91);
B_271 = rconst(index,141)*var(index,127);
B_272 = rconst(index,141)*var(index,124);
B_273 = rconst(index,142)*var(index,134);
B_274 = rconst(index,142)*var(index,131);
B_275 = rconst(index,143)*2*var(index,134);
B_276 = rconst(index,144)*2*var(index,134);
B_277 = rconst(index,145)*2*var(index,134);
B_278 = rconst(index,146)*2*var(index,134);
B_279 = rconst(index,147);
B_280 = rconst(index,148)*var(index,127);
B_281 = rconst(index,148)*var(index,97);
B_282 = rconst(index,149)*var(index,137);
B_283 = rconst(index,149)*var(index,127);
B_284 = rconst(index,150)*var(index,137);
B_285 = rconst(index,150)*var(index,127);
B_286 = rconst(index,151)*var(index,127);
B_287 = rconst(index,151)*var(index,88);
B_288 = rconst(index,152)*var(index,134);
B_289 = rconst(index,152)*var(index,126);
B_290 = rconst(index,153)*var(index,137);
B_291 = rconst(index,153)*var(index,134);
B_292 = rconst(index,154)*var(index,138);
B_293 = rconst(index,154)*var(index,126);
B_294 = rconst(index,155)*var(index,126);
B_295 = rconst(index,155)*var(index,112);
B_296 = rconst(index,156)*var(index,134);
B_297 = rconst(index,156)*var(index,133);
B_298 = rconst(index,157)*var(index,135);
B_299 = rconst(index,157)*var(index,134);
B_300 = rconst(index,158);
B_301 = rconst(index,159)*var(index,131);
B_302 = rconst(index,159)*var(index,116);
B_303 = rconst(index,160)*var(index,127);
B_304 = rconst(index,160)*var(index,116);
B_305 = rconst(index,161)*var(index,127);
B_306 = rconst(index,161)*var(index,98);
B_307 = rconst(index,162)*var(index,130);
B_308 = rconst(index,162)*var(index,127);
B_309 = 5.9e-11*var(index,127);
B_310 = 5.9e-11*var(index,104);
B_311 = rconst(index,164)*var(index,134);
B_312 = rconst(index,164)*var(index,125);
B_313 = 3.3e-10*var(index,120);
B_314 = 3.3e-10*var(index,41);
B_315 = 1.65e-10*var(index,120);
B_316 = 1.65e-10*var(index,75);
B_317 = rconst(index,167)*var(index,126);
B_318 = rconst(index,167)*var(index,75);
B_319 = 3.25e-10*var(index,120);
B_320 = 3.25e-10*var(index,57);
B_321 = rconst(index,169)*var(index,126);
B_322 = rconst(index,169)*var(index,57);
B_323 = rconst(index,170)*var(index,127);
B_324 = rconst(index,170)*var(index,103);
B_325 = 8e-11*var(index,127);
B_326 = 8e-11*var(index,119);
B_327 = 1.4e-10*var(index,120);
B_328 = 1.4e-10*var(index,42);
B_329 = 2.3e-10*var(index,120);
B_330 = 2.3e-10*var(index,43);
B_331 = rconst(index,174)*var(index,129);
B_332 = rconst(index,174)*var(index,124);
B_333 = rconst(index,175)*var(index,132);
B_334 = rconst(index,175)*var(index,131);
B_335 = 2.7e-12*2*var(index,132);
B_336 = rconst(index,177)*2*var(index,132);
B_337 = rconst(index,178)*var(index,137);
B_338 = rconst(index,178)*var(index,129);
B_339 = rconst(index,179)*var(index,137);
B_340 = rconst(index,179)*var(index,132);
B_341 = rconst(index,180)*var(index,126);
B_342 = rconst(index,180)*var(index,123);
B_343 = rconst(index,181)*var(index,131);
B_344 = rconst(index,181)*var(index,118);
B_345 = rconst(index,182)*var(index,126);
B_346 = rconst(index,182)*var(index,100);
B_347 = 4.9e-11*var(index,129);
B_348 = 4.9e-11*var(index,105);
B_349 = rconst(index,184)*var(index,133);
B_350 = rconst(index,184)*var(index,132);
B_351 = rconst(index,185)*var(index,135);
B_352 = rconst(index,185)*var(index,132);
B_353 = rconst(index,186);
B_354 = rconst(index,187)*var(index,130);
B_355 = rconst(index,187)*var(index,129);
B_356 = rconst(index,188)*var(index,129);
B_357 = rconst(index,188)*var(index,104);
B_358 = rconst(index,189)*var(index,132);
B_359 = rconst(index,189)*var(index,125);
B_360 = rconst(index,190)*var(index,132);
B_361 = rconst(index,190)*var(index,125);
B_362 = rconst(index,191)*var(index,126);
B_363 = rconst(index,191)*var(index,53);
B_364 = rconst(index,192)*var(index,129);
B_365 = rconst(index,192)*var(index,103);
B_366 = rconst(index,193)*var(index,129);
B_367 = rconst(index,193)*var(index,119);
B_368 = rconst(index,194)*var(index,126);
B_369 = rconst(index,194)*var(index,45);
B_370 = rconst(index,195)*var(index,126);
B_371 = rconst(index,195)*var(index,44);
B_372 = 3.32e-15*var(index,129);
B_373 = 3.32e-15*var(index,90);
B_374 = 1.1e-15*var(index,129);
B_375 = 1.1e-15*var(index,80);
B_376 = rconst(index,198)*var(index,127);
B_377 = rconst(index,198)*var(index,100);
B_378 = rconst(index,199)*var(index,134);
B_379 = rconst(index,199)*var(index,132);
B_380 = rconst(index,200)*var(index,134);
B_381 = rconst(index,200)*var(index,132);
B_382 = rconst(index,201)*var(index,134);
B_383 = rconst(index,201)*var(index,132);
B_384 = 1.45e-11*var(index,127);
B_385 = 1.45e-11*var(index,90);
B_386 = rconst(index,203)*var(index,126);
B_387 = rconst(index,203)*var(index,54);
B_388 = rconst(index,204)*var(index,126);
B_389 = rconst(index,204)*var(index,55);
B_390 = rconst(index,205)*var(index,126);
B_391 = rconst(index,205)*var(index,52);
B_392 = rconst(index,206)*var(index,126);
B_393 = rconst(index,206)*var(index,56);
B_394 = rconst(index,207)*var(index,126);
B_395 = rconst(index,207)*var(index,114);
B_396 = rconst(index,208)*var(index,126);
B_397 = rconst(index,208)*var(index,114);
B_398 = rconst(index,209)*var(index,136);
B_399 = rconst(index,209)*var(index,114);
B_400 = 1e-10*var(index,126);
B_401 = 1e-10*var(index,65);
B_402 = rconst(index,211);
B_403 = 3e-13*var(index,124);
B_404 = 3e-13*var(index,81);
B_405 = 5e-11*var(index,137);
B_406 = 5e-11*var(index,46);
B_407 = 3.3e-10*var(index,127);
B_408 = 3.3e-10*var(index,114);
B_409 = rconst(index,215)*var(index,129);
B_410 = rconst(index,215)*var(index,114);
B_411 = 4.4e-13*var(index,132);
B_412 = 4.4e-13*var(index,114);
B_414 = rconst(index,218);
B_415 = rconst(index,219);
B_416 = rconst(index,220);
B_417 = rconst(index,221);
B_418 = rconst(index,222);
B_419 = rconst(index,223);
B_420 = rconst(index,224);
B_421 = rconst(index,225);
B_422 = rconst(index,226);
B_423 = rconst(index,227);
B_424 = rconst(index,228);
B_425 = rconst(index,229);
B_426 = rconst(index,230);
B_427 = rconst(index,231);
B_428 = rconst(index,232);
B_429 = rconst(index,233);
B_431 = rconst(index,235);
B_432 = rconst(index,236);
B_433 = rconst(index,237);
B_434 = rconst(index,238);
B_435 = rconst(index,239);
B_436 = rconst(index,240);
B_437 = rconst(index,241);
B_438 = rconst(index,242);
B_439 = rconst(index,243);
B_440 = rconst(index,244);
B_441 = rconst(index,245);
B_442 = rconst(index,246);
B_443 = rconst(index,247);
B_444 = rconst(index,248);
B_445 = rconst(index,249);
B_446 = rconst(index,250);
B_447 = rconst(index,251);
B_448 = rconst(index,252);
B_449 = rconst(index,253);
B_450 = rconst(index,254);
B_451 = rconst(index,255);
B_452 = rconst(index,256);
B_453 = rconst(index,257);
B_454 = rconst(index,258);
B_455 = rconst(index,259);
B_456 = rconst(index,260);
B_457 = rconst(index,261);
B_458 = rconst(index,262);
B_459 = rconst(index,263);
B_460 = rconst(index,264);
B_461 = rconst(index,265);
B_462 = rconst(index,266);
B_463 = rconst(index,267);
B_464 = rconst(index,268);
B_465 = rconst(index,269);
B_466 = rconst(index,270);
B_467 = rconst(index,271);
B_468 = rconst(index,272);
B_469 = rconst(index,273);
B_470 = rconst(index,274);
B_471 = rconst(index,275);
B_472 = rconst(index,276);
B_473 = rconst(index,277);
B_474 = rconst(index,278);
B_475 = rconst(index,279);
B_476 = rconst(index,280);
B_477 = rconst(index,281);
B_478 = rconst(index,282);
B_479 = rconst(index,283);
B_480 = rconst(index,284);
B_481 = rconst(index,285)*var(index,128);
B_482 = rconst(index,285)*var(index,83);
B_483 = rconst(index,286);
B_484 = rconst(index,287)*var(index,138);
B_485 = rconst(index,287)*var(index,112);
B_486 = rconst(index,288)*var(index,138);
B_487 = rconst(index,288)*var(index,116);
B_488 = rconst(index,289)*var(index,128);
B_489 = rconst(index,289)*var(index,116);
B_490 = rconst(index,290)*var(index,138);
B_491 = rconst(index,290)*var(index,83);
B_492 = rconst(index,291)*var(index,123);
B_493 = rconst(index,291)*var(index,118);
B_494 = rconst(index,292)*var(index,128);
B_495 = rconst(index,292)*var(index,105);
B_496 = rconst(index,293)*var(index,123);
B_497 = rconst(index,293)*var(index,116);
B_498 = rconst(index,294)*var(index,138);
B_499 = rconst(index,294)*var(index,105);
B_500 = rconst(index,295)*var(index,123);
B_501 = rconst(index,295)*var(index,112);
B_502 = rconst(index,296)*var(index,138);
B_503 = rconst(index,296)*var(index,118);
B_504 = rconst(index,297);
B_505 = 2.3e-10*var(index,120);
B_506 = 2.3e-10*var(index,15);
B_507 = rconst(index,299);
B_508 = 1.4e-10*var(index,120);
B_509 = 1.4e-10*var(index,16);
B_510 = rconst(index,301);
B_511 = rconst(index,302)*var(index,120);
B_512 = rconst(index,302)*var(index,17);
B_513 = rconst(index,303)*var(index,120);
B_514 = rconst(index,303)*var(index,17);
B_515 = rconst(index,304);
B_516 = 3e-10*var(index,120);
B_517 = 3e-10*var(index,18);
B_518 = rconst(index,306)*var(index,126);
B_519 = rconst(index,306)*var(index,18);
B_520 = rconst(index,307);
B_521 = rconst(index,308);
B_522 = rconst(index,309);
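 /* Accumulate the B_k terms into the nonzero Jacobian entries
    jcb(index,0..LU_NONZERO-1), using the same compressed sparsity ordering
    that Ghimj/kppDecomp operate on in ros_PrepareMatrix above. */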
jcb(index,0) = - B_469;
jcb(index,1) = - B_476;
jcb(index,2) = - B_474;
jcb(index,3) = - B_480;
jcb(index,4) = - B_504;
jcb(index,5) = - B_521;
jcb(index,6) = - B_522;
jcb(index,7) = B_476;
jcb(index,8) = B_474;
jcb(index,9) = 0;
jcb(index,10) = B_313+ B_462;
jcb(index,11) = B_327+ B_465;
jcb(index,12) = B_329+ B_464;
jcb(index,13) = B_370+ B_472;
jcb(index,14) = B_368+ B_473;
jcb(index,15) = B_390+ B_477;
jcb(index,16) = B_362;
jcb(index,17) = B_386+ B_478;
jcb(index,18) = B_388+ B_479;
jcb(index,19) = 2*B_319+ 2*B_321+ 2*B_463;
jcb(index,20) = 0.9*B_315+ B_317;
jcb(index,21) = B_314+ 0.9*B_316+ 2*B_320+ B_328+ B_330;
jcb(index,22) = B_318+ 2*B_322+ B_363+ B_369+ B_371+ B_387+ B_389+ B_391;
jcb(index,23) = 2*B_476;
jcb(index,24) = 3*B_474;
jcb(index,25) = 0;
jcb(index,26) = 2*B_327+ 2*B_465;
jcb(index,27) = B_329+ B_464;
jcb(index,28) = 2*B_328+ B_330;
jcb(index,29) = 0;
jcb(index,30) = B_465;
jcb(index,31) = 2*B_464;
jcb(index,32) = B_390;
jcb(index,33) = 2*B_386;
jcb(index,34) = B_388;
jcb(index,35) = 0.09*B_315;
jcb(index,36) = 0.09*B_316;
jcb(index,37) = 2*B_387+ B_389+ B_391;
jcb(index,38) = 0;
jcb(index,39) = B_405;
jcb(index,40) = 0.4*B_400;
jcb(index,41) = 0.4*B_401;
jcb(index,42) = B_406;
jcb(index,43) = 0;
jcb(index,44) = B_392;
jcb(index,45) = B_393;
jcb(index,46) = 0;
jcb(index,47) = 2*B_483;
jcb(index,48) = 0;
jcb(index,49) = 2*B_483;
jcb(index,50) = B_521;
jcb(index,51) = B_522;
jcb(index,52) = 0;
jcb(index,53) = B_507;
jcb(index,54) = B_510;
jcb(index,55) = B_513+ B_515;
jcb(index,56) = B_520;
jcb(index,57) = B_514;
jcb(index,58) = - B_505- B_507;
jcb(index,59) = - B_506;
jcb(index,60) = - B_508- B_510;
jcb(index,61) = - B_509;
jcb(index,62) = - B_511- B_513- B_515;
jcb(index,63) = - B_512- B_514;
jcb(index,64) = - B_516- B_518- B_520;
jcb(index,65) = - B_517;
jcb(index,66) = - B_519;
jcb(index,67) = B_504;
jcb(index,68) = 0;
jcb(index,69) = 0;
jcb(index,70) = B_22;
jcb(index,71) = B_23;
jcb(index,72) = 0;
jcb(index,73) = B_33;
jcb(index,74) = B_34;
jcb(index,75) = 0;
jcb(index,76) = 2*B_454;
jcb(index,77) = B_319;
jcb(index,78) = B_41+ B_43;
jcb(index,79) = B_315;
jcb(index,80) = B_481+ 3*B_483+ 2*B_490;
jcb(index,81) = B_93;
jcb(index,82) = B_100;
jcb(index,83) = B_79+ B_89+ B_91;
jcb(index,84) = B_12;
jcb(index,85) = B_108;
jcb(index,86) = B_134;
jcb(index,87) = B_498;
jcb(index,88) = B_254+ B_258;
jcb(index,89) = B_180+ B_184;
jcb(index,90) = B_226;
jcb(index,91) = B_457+ B_484+ B_500;
jcb(index,92) = B_486+ B_496;
jcb(index,93) = B_142;
jcb(index,94) = B_343+ B_468+ B_492+ B_502;
jcb(index,95) = B_150;
jcb(index,96) = 2*B_4+ B_13+ B_33+ B_42+ B_44+ B_109+ B_316+ B_320;
jcb(index,97) = B_162;
jcb(index,98) = B_10;
jcb(index,99) = B_493+ B_497+ B_501;
jcb(index,100) = 2*B_5+ 2*B_6+ B_11+ B_16+ B_22+ B_80+ B_94+ B_135+ B_181+ B_227+ B_255;
jcb(index,101) = B_118+ B_311+ B_360;
jcb(index,102) = B_14+ B_17+ B_288;
jcb(index,103) = B_34+ B_482;
jcb(index,104) = B_126;
jcb(index,105) = 2*B_7+ B_15+ B_20+ 2*B_49+ 2*B_273+ 2*B_333+ B_344;
jcb(index,106) = 2*B_334+ 2*B_335+ 2*B_336+ B_361+ B_378+ 2*B_380+ 2*B_382;
jcb(index,107) = 2*B_274+ 2*B_275+ 2*B_276+ B_277+ B_289+ B_312+ B_379+ 2*B_381+ 2*B_383;
jcb(index,108) = 2*B_50+ B_90+ B_92+ B_101;
jcb(index,109) = B_68+ B_119+ B_127+ B_143+ B_151+ B_163+ B_185+ B_259+ 2*B_422;
jcb(index,110) = B_21+ B_23+ B_69;
jcb(index,111) = B_485+ B_487+ 2*B_491+ B_499+ B_503;
jcb(index,112) = 0;
jcb(index,113) = 0.333333*B_498;
jcb(index,114) = 0.5*B_500;
jcb(index,115) = 0.333333*B_496;
jcb(index,116) = B_343+ B_468+ B_492+ 0.5*B_502;
jcb(index,117) = B_493+ 0.333333*B_497+ 0.5*B_501;
jcb(index,118) = B_360;
jcb(index,119) = 2*B_333+ B_344;
jcb(index,120) = 2*B_334+ 2*B_335+ 2*B_336+ B_361+ 0.5*B_378+ B_380+ B_382;
jcb(index,121) = 0.5*B_379+ B_381+ B_383;
jcb(index,122) = 0.333333*B_499+ 0.5*B_503;
jcb(index,123) = 0;
jcb(index,124) = 2*B_454;
jcb(index,125) = B_319;
jcb(index,126) = B_315;
jcb(index,127) = B_490;
jcb(index,128) = 0.333333*B_498;
jcb(index,129) = B_457+ B_484+ 0.5*B_500;
jcb(index,130) = 0.5*B_486+ 0.333333*B_496;
jcb(index,131) = 0.5*B_502;
jcb(index,132) = B_316+ B_320;
jcb(index,133) = 0.333333*B_497+ 0.5*B_501;
jcb(index,134) = B_311;
jcb(index,135) = B_288;
jcb(index,136) = 2*B_273;
jcb(index,137) = 0.5*B_378+ B_380+ B_382;
jcb(index,138) = 2*B_274+ 2*B_275+ 2*B_276+ B_277+ B_289+ B_312+ 0.5*B_379+ B_381+ B_383;
jcb(index,139) = B_485+ 0.5*B_487+ B_491+ 0.333333*B_499+ 0.5*B_503;
jcb(index,140) = 0;
jcb(index,141) = B_12;
jcb(index,142) = B_13;
jcb(index,143) = B_10;
jcb(index,144) = B_11+ B_16+ B_22;
jcb(index,145) = B_14+ B_17;
jcb(index,146) = B_15+ B_20;
jcb(index,147) = B_21+ B_23;
jcb(index,148) = 0;
jcb(index,149) = B_481+ 3*B_483+ B_490;
jcb(index,150) = B_93;
jcb(index,151) = B_100;
jcb(index,152) = B_79+ B_89+ B_91;
jcb(index,153) = 0.333333*B_498;
jcb(index,154) = 0.5*B_486+ 0.333333*B_496;
jcb(index,155) = 0.333333*B_497;
jcb(index,156) = B_80+ B_94;
jcb(index,157) = B_482;
jcb(index,158) = 2*B_49;
jcb(index,159) = 2*B_50+ B_90+ B_92+ B_101;
jcb(index,160) = B_68+ 2*B_422;
jcb(index,161) = B_69;
jcb(index,162) = 0.5*B_487+ B_491+ 0.333333*B_499;
jcb(index,163) = 0;
jcb(index,164) = B_41+ B_43;
jcb(index,165) = B_108;
jcb(index,166) = 2*B_4+ B_33+ B_42+ B_44+ B_109;
jcb(index,167) = 2*B_5+ 2*B_6;
jcb(index,168) = B_34;
jcb(index,169) = 2*B_7;
jcb(index,170) = 0;
jcb(index,171) = B_134;
jcb(index,172) = B_254+ B_258;
jcb(index,173) = B_180+ B_184;
jcb(index,174) = B_226;
jcb(index,175) = B_142;
jcb(index,176) = B_150;
jcb(index,177) = B_162;
jcb(index,178) = B_135+ B_181+ B_227+ B_255;
jcb(index,179) = B_118;
jcb(index,180) = B_126;
jcb(index,181) = B_119+ B_127+ B_143+ B_151+ B_163+ B_185+ B_259;
jcb(index,182) = 0;
jcb(index,183) = B_16;
jcb(index,184) = B_17;
jcb(index,185) = 0;
jcb(index,186) = B_62;
jcb(index,187) = B_63;
jcb(index,188) = B_476;
jcb(index,189) = B_474;
jcb(index,190) = 0;
jcb(index,191) = B_362+ B_471;
jcb(index,192) = B_363;
jcb(index,193) = B_476;
jcb(index,194) = 0;
jcb(index,195) = 4*B_313+ 4*B_462;
jcb(index,196) = 2*B_327+ 2*B_465;
jcb(index,197) = 3*B_329+ 3*B_464;
jcb(index,198) = 3*B_319+ 3*B_321+ 3*B_463;
jcb(index,199) = B_315+ B_317+ B_461;
jcb(index,200) = 4*B_314+ B_316+ 3*B_320+ 2*B_328+ 3*B_330;
jcb(index,201) = B_318+ 3*B_322;
jcb(index,202) = 0;
jcb(index,203) = B_116;
jcb(index,204) = B_117;
jcb(index,205) = B_469;
jcb(index,206) = 0;
jcb(index,207) = B_458;
jcb(index,208) = B_455;
jcb(index,209) = B_37+ B_47;
jcb(index,210) = B_418;
jcb(index,211) = 0.4*B_400;
jcb(index,212) = 0.333*B_426;
jcb(index,213) = B_70;
jcb(index,214) = B_188;
jcb(index,215) = B_204;
jcb(index,216) = B_245;
jcb(index,217) = B_345;
jcb(index,218) = B_72;
jcb(index,219) = B_222;
jcb(index,220) = B_262;
jcb(index,221) = B_232;
jcb(index,222) = B_394+ B_396+ B_407+ B_409;
jcb(index,223) = B_196;
jcb(index,224) = B_140;
jcb(index,225) = B_156+ B_158;
jcb(index,226) = B_28;
jcb(index,227) = B_116;
jcb(index,228) = B_71+ B_73+ B_346+ B_395+ B_397+ 0.4*B_401;
jcb(index,229) = B_284+ B_408;
jcb(index,230) = B_410;
jcb(index,231) = B_48+ B_62+ B_117+ B_141+ B_159+ B_189+ B_197+ B_205+ B_223+ B_233+ B_246+ B_263+ B_420;
jcb(index,232) = B_29+ B_63+ B_157+ B_285;
jcb(index,233) = 0;
jcb(index,234) = B_188;
jcb(index,235) = B_204;
jcb(index,236) = B_245;
jcb(index,237) = B_222;
jcb(index,238) = B_262;
jcb(index,239) = B_232;
jcb(index,240) = B_196;
jcb(index,241) = B_140;
jcb(index,242) = B_158;
jcb(index,243) = B_141+ B_159+ B_189+ B_197+ B_205+ B_223+ B_233+ B_246+ B_263;
jcb(index,244) = 0;
jcb(index,245) = 2*B_370+ 2*B_472;
jcb(index,246) = 3*B_368+ 3*B_473;
jcb(index,247) = B_390+ B_477;
jcb(index,248) = B_386+ B_478;
jcb(index,249) = 2*B_388+ 2*B_479;
jcb(index,250) = 3*B_369+ 2*B_371+ B_387+ 2*B_389+ B_391;
jcb(index,251) = 0;
jcb(index,252) = B_477;
jcb(index,253) = 2*B_478;
jcb(index,254) = B_479;
jcb(index,255) = - B_448;
jcb(index,256) = 0.8*B_247;
jcb(index,257) = 0.8*B_248;
jcb(index,258) = - B_279- B_454;
jcb(index,259) = B_278;
jcb(index,260) = - B_216;
jcb(index,261) = - B_217;
jcb(index,262) = - B_313- B_462;
jcb(index,263) = - B_314;
jcb(index,264) = - B_327- B_465;
jcb(index,265) = - B_328;
jcb(index,266) = - B_329- B_464;
jcb(index,267) = - B_330;
jcb(index,268) = - B_370- B_472;
jcb(index,269) = - B_371;
jcb(index,270) = - B_368- B_473;
jcb(index,271) = - B_369;
jcb(index,272) = - B_405;
jcb(index,273) = B_403;
jcb(index,274) = B_404;
jcb(index,275) = - B_406;
jcb(index,276) = - B_77;
jcb(index,277) = - B_78;
jcb(index,278) = - B_132;
jcb(index,279) = - B_133;
jcb(index,280) = - B_178;
jcb(index,281) = - B_179;
jcb(index,282) = - B_458;
jcb(index,283) = B_490;
jcb(index,284) = B_491;
jcb(index,285) = - B_455;
jcb(index,286) = B_378;
jcb(index,287) = B_277+ B_379;
jcb(index,288) = - B_390- B_477;
jcb(index,289) = - B_391;
jcb(index,290) = - B_362- B_471;
jcb(index,291) = - B_363;
jcb(index,292) = - B_386- B_478;
jcb(index,293) = - B_387;
jcb(index,294) = - B_388- B_479;
jcb(index,295) = - B_389;
jcb(index,296) = - B_392;
jcb(index,297) = 0.6*B_400;
jcb(index,298) = B_402;
jcb(index,299) = - B_393+ 0.6*B_401;
jcb(index,300) = - B_319- B_321- B_463;
jcb(index,301) = - B_320;
jcb(index,302) = - B_322;
jcb(index,303) = - B_173- B_435;
jcb(index,304) = B_269;
jcb(index,305) = - B_174+ B_270;
jcb(index,306) = - B_37- B_47- B_53;
jcb(index,307) = - B_48+ B_420;
jcb(index,308) = - B_54;
jcb(index,309) = B_53;
jcb(index,310) = - B_41- B_43- B_418;
jcb(index,311) = B_89;
jcb(index,312) = - B_42- B_44;
jcb(index,313) = 0;
jcb(index,314) = B_54+ B_90;
jcb(index,315) = - B_104;
jcb(index,316) = B_98;
jcb(index,317) = B_99;
jcb(index,318) = - B_105;
jcb(index,319) = - B_214- B_442;
jcb(index,320) = 0.04*B_188;
jcb(index,321) = - B_215;
jcb(index,322) = 0.04*B_189;
jcb(index,323) = - B_171- B_434;
jcb(index,324) = B_154;
jcb(index,325) = - B_172;
jcb(index,326) = B_155;
jcb(index,327) = - B_251- B_253- B_450;
jcb(index,328) = B_234;
jcb(index,329) = - B_252;
jcb(index,330) = B_235;
jcb(index,331) = - B_400;
jcb(index,332) = B_396+ B_411;
jcb(index,333) = B_397- B_401;
jcb(index,334) = B_412;
jcb(index,335) = - B_267- B_451;
jcb(index,336) = B_260;
jcb(index,337) = - B_268;
jcb(index,338) = B_261;
jcb(index,339) = - B_198;
jcb(index,340) = B_194;
jcb(index,341) = - B_199;
jcb(index,342) = B_195;
jcb(index,343) = - B_247- B_447;
jcb(index,344) = B_243;
jcb(index,345) = - B_248;
jcb(index,346) = B_244;
jcb(index,347) = - B_192- B_437;
jcb(index,348) = B_186;
jcb(index,349) = - B_193;
jcb(index,350) = B_187;
jcb(index,351) = B_104;
jcb(index,352) = - B_98- B_102;
jcb(index,353) = B_95;
jcb(index,354) = - B_99;
jcb(index,355) = - B_103+ B_105;
jcb(index,356) = - B_146- B_432;
jcb(index,357) = B_138;
jcb(index,358) = - B_147;
jcb(index,359) = B_139;
jcb(index,360) = - B_208- B_441;
jcb(index,361) = B_202;
jcb(index,362) = - B_209;
jcb(index,363) = B_203;
jcb(index,364) = - B_74- B_75- B_426;
jcb(index,365) = - B_76;
jcb(index,366) = B_66;
jcb(index,367) = B_67;
jcb(index,368) = - B_152;
jcb(index,369) = 0.18*B_168;
jcb(index,370) = B_156+ B_166+ 0.18*B_169;
jcb(index,371) = B_167;
jcb(index,372) = - B_153;
jcb(index,373) = B_157;
jcb(index,374) = - 0.9*B_315- B_317- B_461;
jcb(index,375) = - 0.9*B_316;
jcb(index,376) = - B_318;
jcb(index,377) = - B_70- B_424;
jcb(index,378) = B_100;
jcb(index,379) = B_60- B_71;
jcb(index,380) = B_61;
jcb(index,381) = B_101;
jcb(index,382) = - B_175- B_177- B_436;
jcb(index,383) = B_160;
jcb(index,384) = - B_176;
jcb(index,385) = B_161;
jcb(index,386) = - B_130;
jcb(index,387) = 0.23125*B_134;
jcb(index,388) = 0.28*B_254;
jcb(index,389) = 0.22*B_180;
jcb(index,390) = 0.45*B_226;
jcb(index,391) = 0.23125*B_135+ 0.22*B_181+ 0.45*B_227+ 0.28*B_255;
jcb(index,392) = - B_131;
jcb(index,393) = - B_224- B_443;
jcb(index,394) = B_220;
jcb(index,395) = - B_225;
jcb(index,396) = B_221;
jcb(index,397) = - B_374- B_453;
jcb(index,398) = B_384;
jcb(index,399) = B_484;
jcb(index,400) = B_303+ B_486;
jcb(index,401) = B_304+ B_385;
jcb(index,402) = - B_375;
jcb(index,403) = B_275;
jcb(index,404) = B_485+ B_487;
jcb(index,405) = - B_402- B_403;
jcb(index,406) = B_394+ B_398+ B_407+ B_409;
jcb(index,407) = - B_404;
jcb(index,408) = B_395;
jcb(index,409) = B_408;
jcb(index,410) = B_410;
jcb(index,411) = B_399;
jcb(index,412) = - B_239- B_445;
jcb(index,413) = B_230;
jcb(index,414) = - B_240;
jcb(index,415) = B_231;
jcb(index,416) = - B_59- B_423- B_481- B_483- B_490;
jcb(index,417) = - B_482;
jcb(index,418) = B_57;
jcb(index,419) = B_58;
jcb(index,420) = - B_491;
jcb(index,421) = - B_93- B_95;
jcb(index,422) = B_79+ B_81+ B_91;
jcb(index,423) = B_80- B_94;
jcb(index,424) = B_92;
jcb(index,425) = B_82;
jcb(index,426) = 0.85*B_224+ 0.67*B_443;
jcb(index,427) = - B_241- B_446;
jcb(index,428) = 0.88*B_218+ 0.56*B_222;
jcb(index,429) = B_249+ 0.67*B_449;
jcb(index,430) = 0.88*B_219;
jcb(index,431) = 0.85*B_225- B_242+ B_250;
jcb(index,432) = 0.56*B_223;
jcb(index,433) = 0;
jcb(index,434) = B_214+ B_442;
jcb(index,435) = 0.7*B_192+ B_437;
jcb(index,436) = - B_200- B_438;
jcb(index,437) = 0.96*B_188+ B_190;
jcb(index,438) = B_191;
jcb(index,439) = 0.7*B_193- B_201+ B_215;
jcb(index,440) = 0.96*B_189;
jcb(index,441) = 0;
jcb(index,442) = - B_98+ B_102;
jcb(index,443) = 0;
jcb(index,444) = - B_96- B_99- B_100- B_106;
jcb(index,445) = B_83;
jcb(index,446) = 0;
jcb(index,447) = - B_97+ B_103;
jcb(index,448) = - B_101;
jcb(index,449) = B_84;
jcb(index,450) = - B_35- B_286- B_417;
jcb(index,451) = 0.13875*B_134;
jcb(index,452) = 0.09*B_254;
jcb(index,453) = 0.13875*B_135+ 0.09*B_255;
jcb(index,454) = - B_36;
jcb(index,455) = - B_287;
jcb(index,456) = B_32;
jcb(index,457) = - B_112;
jcb(index,458) = 0.2*B_190;
jcb(index,459) = 0.5*B_206;
jcb(index,460) = 0.18*B_218;
jcb(index,461) = 0.03*B_180;
jcb(index,462) = 0.25*B_264;
jcb(index,463) = 0.25*B_236;
jcb(index,464) = 0.25*B_144;
jcb(index,465) = 0.03*B_181;
jcb(index,466) = B_121+ 0.25*B_145+ 0.2*B_191+ 0.5*B_207+ 0.18*B_219+ 0.25*B_237+ 0.25*B_265;
jcb(index,467) = - B_113;
jcb(index,468) = B_374;
jcb(index,469) = - B_372- B_384- B_475;
jcb(index,470) = B_376;
jcb(index,471) = B_498;
jcb(index,472) = B_500;
jcb(index,473) = B_496;
jcb(index,474) = B_502;
jcb(index,475) = B_497+ B_501;
jcb(index,476) = B_377- B_385;
jcb(index,477) = - B_373+ B_375;
jcb(index,478) = B_382;
jcb(index,479) = B_383;
jcb(index,480) = B_499+ B_503;
jcb(index,481) = - B_269- B_452;
jcb(index,482) = B_258;
jcb(index,483) = 0.044*B_262;
jcb(index,484) = - B_270;
jcb(index,485) = 0.044*B_263;
jcb(index,486) = B_259;
jcb(index,487) = B_77;
jcb(index,488) = B_93;
jcb(index,489) = - B_79- B_81- B_83- B_85- B_87- B_89- B_91;
jcb(index,490) = - B_80+ B_94;
jcb(index,491) = B_78;
jcb(index,492) = - B_86- B_88;
jcb(index,493) = - B_90- B_92;
jcb(index,494) = - B_82- B_84;
jcb(index,495) = 0.82*B_178;
jcb(index,496) = 0.3*B_192;
jcb(index,497) = - B_186- B_188- B_190;
jcb(index,498) = - B_191;
jcb(index,499) = 0.82*B_179+ 0.3*B_193;
jcb(index,500) = - B_189;
jcb(index,501) = - B_187;
jcb(index,502) = 0.3*B_208;
jcb(index,503) = B_200;
jcb(index,504) = 0;
jcb(index,505) = - B_202- B_204- B_206;
jcb(index,506) = - B_207;
jcb(index,507) = B_201+ 0.3*B_209;
jcb(index,508) = - B_205;
jcb(index,509) = - B_203;
jcb(index,510) = B_173+ B_435;
jcb(index,511) = B_175;
jcb(index,512) = 0.25*B_445;
jcb(index,513) = 0;
jcb(index,514) = - B_128;
jcb(index,515) = B_212+ B_440;
jcb(index,516) = B_431;
jcb(index,517) = 0.63*B_134;
jcb(index,518) = 0.14*B_254;
jcb(index,519) = 0.31*B_180;
jcb(index,520) = 0;
jcb(index,521) = 0.22*B_226+ B_444;
jcb(index,522) = 0.25*B_232+ 0.125*B_236+ 0.5*B_238;
jcb(index,523) = B_433;
jcb(index,524) = 0;
jcb(index,525) = 0.63*B_135+ 0.31*B_181+ 0.22*B_227+ 0.14*B_255;
jcb(index,526) = 0.125*B_237;
jcb(index,527) = B_124- B_129+ B_174+ B_176+ B_213;
jcb(index,528) = B_307;
jcb(index,529) = B_354;
jcb(index,530) = B_125+ B_126+ B_308+ B_355+ B_428+ B_429;
jcb(index,531) = 0.25*B_233;
jcb(index,532) = 0;
jcb(index,533) = B_127;
jcb(index,534) = 0;
jcb(index,535) = 0.7*B_208;
jcb(index,536) = 0.5*B_445;
jcb(index,537) = 0.5*B_206;
jcb(index,538) = - B_212- B_440;
jcb(index,539) = 0.04*B_180;
jcb(index,540) = B_210;
jcb(index,541) = 0.25*B_264;
jcb(index,542) = 0.9*B_226;
jcb(index,543) = 0.5*B_232+ 0.5*B_236+ B_238;
jcb(index,544) = 0.04*B_181+ 0.9*B_227;
jcb(index,545) = 0.5*B_207+ 0.5*B_237+ 0.25*B_265;
jcb(index,546) = 0.7*B_209+ B_211- B_213;
jcb(index,547) = 0.5*B_233;
jcb(index,548) = 0;
jcb(index,549) = - B_12- B_18- B_280;
jcb(index,550) = 0.05*B_108+ 0.69*B_431;
jcb(index,551) = - B_13+ 0.05*B_109;
jcb(index,552) = B_26;
jcb(index,553) = - B_19;
jcb(index,554) = - B_281;
jcb(index,555) = B_428;
jcb(index,556) = B_27;
jcb(index,557) = - B_108- B_110- B_305- B_431;
jcb(index,558) = 0.06*B_180;
jcb(index,559) = - B_109;
jcb(index,560) = 0.06*B_181;
jcb(index,561) = - B_111;
jcb(index,562) = - B_306;
jcb(index,563) = 0.2*B_247;
jcb(index,564) = B_241;
jcb(index,565) = - B_243- B_245;
jcb(index,566) = 0;
jcb(index,567) = 0;
jcb(index,568) = 0;
jcb(index,569) = B_242+ 0.2*B_248;
jcb(index,570) = - B_246;
jcb(index,571) = - B_244;
jcb(index,572) = B_372;
jcb(index,573) = - B_345- B_376- B_466;
jcb(index,574) = B_347;
jcb(index,575) = 0;
jcb(index,576) = 0;
jcb(index,577) = B_492;
jcb(index,578) = B_493;
jcb(index,579) = - B_346;
jcb(index,580) = - B_377;
jcb(index,581) = B_348+ B_373;
jcb(index,582) = B_336;
jcb(index,583) = 0;
jcb(index,584) = 0;
jcb(index,585) = 2*B_481+ B_490;
jcb(index,586) = - B_72- B_425;
jcb(index,587) = B_494+ B_498;
jcb(index,588) = B_398;
jcb(index,589) = B_486+ B_488+ B_496;
jcb(index,590) = B_150;
jcb(index,591) = B_497;
jcb(index,592) = B_64- B_73;
jcb(index,593) = 2*B_482+ B_489+ B_495;
jcb(index,594) = B_126;
jcb(index,595) = B_65;
jcb(index,596) = B_127+ B_151+ B_399;
jcb(index,597) = B_487+ B_491+ B_499;
jcb(index,598) = B_216;
jcb(index,599) = 0.15*B_224;
jcb(index,600) = - B_218- B_220- B_222;
jcb(index,601) = - B_219;
jcb(index,602) = B_217+ 0.15*B_225;
jcb(index,603) = - B_223;
jcb(index,604) = - B_221;
jcb(index,605) = - B_134- B_136- B_323- B_364;
jcb(index,606) = - B_135;
jcb(index,607) = - B_137;
jcb(index,608) = - B_324;
jcb(index,609) = - B_365;
jcb(index,610) = - B_122- B_309- B_356- B_427;
jcb(index,611) = B_114;
jcb(index,612) = - B_123;
jcb(index,613) = - B_310;
jcb(index,614) = - B_357;
jcb(index,615) = B_115;
jcb(index,616) = - B_347- B_353- B_470- B_494- B_498;
jcb(index,617) = - B_495;
jcb(index,618) = - B_348;
jcb(index,619) = B_351;
jcb(index,620) = B_352;
jcb(index,621) = - B_499;
jcb(index,622) = - B_254- B_256- B_258;
jcb(index,623) = - B_255;
jcb(index,624) = - B_257;
jcb(index,625) = - B_259;
jcb(index,626) = - B_180- B_182- B_184;
jcb(index,627) = - B_181;
jcb(index,628) = - B_183;
jcb(index,629) = - B_185;
jcb(index,630) = B_251+ B_450;
jcb(index,631) = 0.5*B_198;
jcb(index,632) = 0.25*B_445;
jcb(index,633) = B_269;
jcb(index,634) = 0.2*B_206;
jcb(index,635) = 0;
jcb(index,636) = - B_210- B_439;
jcb(index,637) = 0.25*B_264;
jcb(index,638) = 0.25*B_232+ 0.375*B_236+ B_238;
jcb(index,639) = 0;
jcb(index,640) = 0;
jcb(index,641) = 0.2*B_207+ 0.375*B_237+ 0.25*B_265;
jcb(index,642) = 0.5*B_199- B_211+ B_252+ B_270;
jcb(index,643) = 0.25*B_233;
jcb(index,644) = 0;
jcb(index,645) = 0;
jcb(index,646) = 0;
jcb(index,647) = B_256;
jcb(index,648) = - B_260- B_262- B_264- 2*B_266;
jcb(index,649) = 0;
jcb(index,650) = - B_265;
jcb(index,651) = B_257;
jcb(index,652) = - B_263;
jcb(index,653) = 0;
jcb(index,654) = - B_261;
jcb(index,655) = B_267+ B_451;
jcb(index,656) = B_452;
jcb(index,657) = 0.65*B_254;
jcb(index,658) = 0.956*B_262+ 0.5*B_264+ 2*B_266;
jcb(index,659) = - B_226- B_228- B_444;
jcb(index,660) = - B_227+ 0.65*B_255;
jcb(index,661) = 0.5*B_265;
jcb(index,662) = - B_229+ B_268;
jcb(index,663) = 0.956*B_263;
jcb(index,664) = 0;
jcb(index,665) = 0;
jcb(index,666) = 0.015*B_245;
jcb(index,667) = 0.16*B_222;
jcb(index,668) = B_184;
jcb(index,669) = - B_249- B_449;
jcb(index,670) = 0.02*B_196;
jcb(index,671) = 0;
jcb(index,672) = 0;
jcb(index,673) = - B_250;
jcb(index,674) = 0.02*B_197+ 0.16*B_223+ 0.015*B_246;
jcb(index,675) = B_185;
jcb(index,676) = 0;
jcb(index,677) = - B_294- B_457- B_484- B_500;
jcb(index,678) = B_488;
jcb(index,679) = - B_501;
jcb(index,680) = - B_295;
jcb(index,681) = B_489;
jcb(index,682) = B_290;
jcb(index,683) = B_291;
jcb(index,684) = - B_485;
jcb(index,685) = B_253;
jcb(index,686) = B_239;
jcb(index,687) = 0.1*B_254;
jcb(index,688) = B_228;
jcb(index,689) = - B_230- B_232- B_234- B_236- 2*B_238;
jcb(index,690) = 0.1*B_255;
jcb(index,691) = - B_237;
jcb(index,692) = B_229+ B_240;
jcb(index,693) = - B_233;
jcb(index,694) = - B_235;
jcb(index,695) = 0;
jcb(index,696) = - B_231;
jcb(index,697) = - B_394- B_396- B_398- B_407- B_409- B_411;
jcb(index,698) = - B_395- B_397;
jcb(index,699) = - B_408;
jcb(index,700) = - B_410;
jcb(index,701) = - B_412;
jcb(index,702) = - B_399;
jcb(index,703) = 0.5*B_198;
jcb(index,704) = 0.666667*B_136+ 0.666667*B_323+ 0.666667*B_364;
jcb(index,705) = B_182;
jcb(index,706) = - B_194- B_196;
jcb(index,707) = 0;
jcb(index,708) = 0.666667*B_137+ B_183+ 0.5*B_199;
jcb(index,709) = 0.666667*B_324;
jcb(index,710) = 0.666667*B_365;
jcb(index,711) = - B_197;
jcb(index,712) = 0;
jcb(index,713) = - B_195;
jcb(index,714) = - B_300- B_301- B_303- B_459- B_460- B_486- B_488- B_496;
jcb(index,715) = - B_497;
jcb(index,716) = - B_304;
jcb(index,717) = - B_489;
jcb(index,718) = - B_302;
jcb(index,719) = B_298;
jcb(index,720) = B_299;
jcb(index,721) = - B_487;
jcb(index,722) = B_132;
jcb(index,723) = 0.18*B_178;
jcb(index,724) = 0.3*B_146;
jcb(index,725) = 0.33*B_443;
jcb(index,726) = B_446;
jcb(index,727) = 0.12*B_218+ 0.28*B_222;
jcb(index,728) = 0.06*B_180;
jcb(index,729) = 0.33*B_449;
jcb(index,730) = 0;
jcb(index,731) = - B_138- B_140- B_142- B_144- B_168;
jcb(index,732) = - B_169;
jcb(index,733) = 0.06*B_181;
jcb(index,734) = - B_145+ 0.12*B_219;
jcb(index,735) = B_133+ 0.3*B_147+ 0.18*B_179;
jcb(index,736) = 0;
jcb(index,737) = 0;
jcb(index,738) = - B_141+ 0.28*B_223;
jcb(index,739) = - B_143;
jcb(index,740) = - B_139;
jcb(index,741) = B_345;
jcb(index,742) = B_494;
jcb(index,743) = 0;
jcb(index,744) = 0;
jcb(index,745) = - B_343- B_468- B_492- B_502;
jcb(index,746) = - B_493;
jcb(index,747) = B_358;
jcb(index,748) = B_346;
jcb(index,749) = 0;
jcb(index,750) = B_495;
jcb(index,751) = 0;
jcb(index,752) = - B_344;
jcb(index,753) = B_339+ B_359;
jcb(index,754) = 0;
jcb(index,755) = 0;
jcb(index,756) = B_340;
jcb(index,757) = - B_503;
jcb(index,758) = B_447;
jcb(index,759) = 0.7*B_146+ B_432;
jcb(index,760) = 0.33*B_443;
jcb(index,761) = 0.985*B_245;
jcb(index,762) = 0.12*B_218+ 0.28*B_222;
jcb(index,763) = 0.47*B_180;
jcb(index,764) = 0.33*B_449;
jcb(index,765) = 0.98*B_196;
jcb(index,766) = B_140+ B_142+ 0.75*B_144+ B_168;
jcb(index,767) = - B_148- B_150- B_325- B_366- B_433;
jcb(index,768) = B_169;
jcb(index,769) = 0.47*B_181;
jcb(index,770) = 0.75*B_145+ 0.12*B_219;
jcb(index,771) = 0.7*B_147- B_149;
jcb(index,772) = - B_326;
jcb(index,773) = - B_367;
jcb(index,774) = B_141+ 0.98*B_197+ 0.28*B_223+ 0.985*B_246;
jcb(index,775) = B_143- B_151;
jcb(index,776) = 0;
jcb(index,777) = - B_313;
jcb(index,778) = - B_327;
jcb(index,779) = - B_329;
jcb(index,780) = - B_319;
jcb(index,781) = - B_41- B_43+ B_418;
jcb(index,782) = - B_315;
jcb(index,783) = 0;
jcb(index,784) = - B_12;
jcb(index,785) = - B_108;
jcb(index,786) = 0;
jcb(index,787) = - B_0- B_4- B_13- B_33- B_39- B_42- B_44- B_109- B_314- B_316- B_320- B_328- B_330;
jcb(index,788) = 0;
jcb(index,789) = - B_5+ B_414;
jcb(index,790) = 0;
jcb(index,791) = 0;
jcb(index,792) = - B_34;
jcb(index,793) = 0;
jcb(index,794) = 0;
jcb(index,795) = 0;
jcb(index,796) = 0;
jcb(index,797) = 0;
jcb(index,798) = 2*B_448;
jcb(index,799) = B_171;
jcb(index,800) = B_447;
jcb(index,801) = B_441;
jcb(index,802) = B_177+ B_436;
jcb(index,803) = 0.25*B_445;
jcb(index,804) = B_446;
jcb(index,805) = B_438;
jcb(index,806) = 0;
jcb(index,807) = B_204+ 0.3*B_206;
jcb(index,808) = B_212+ B_440;
jcb(index,809) = 0.985*B_245;
jcb(index,810) = 0;
jcb(index,811) = 0.1*B_254;
jcb(index,812) = 0.23*B_180;
jcb(index,813) = B_439;
jcb(index,814) = 0;
jcb(index,815) = 0.1*B_226+ B_444;
jcb(index,816) = 0;
jcb(index,817) = 0.25*B_232+ 0.125*B_236;
jcb(index,818) = 0;
jcb(index,819) = - B_168;
jcb(index,820) = B_148+ B_150+ B_325+ B_366;
jcb(index,821) = - B_154- B_156- B_158- B_160- B_162- B_164- B_166- B_169- 2*B_170;
jcb(index,822) = 0.23*B_181+ 0.1*B_227+ 0.1*B_255;
jcb(index,823) = - B_165- B_167+ 0.3*B_207+ 0.125*B_237;
jcb(index,824) = B_149+ B_172+ B_213;
jcb(index,825) = B_326;
jcb(index,826) = B_367;
jcb(index,827) = - B_159+ B_205+ 0.25*B_233+ 0.985*B_246;
jcb(index,828) = - B_161;
jcb(index,829) = B_151- B_163;
jcb(index,830) = - B_155- B_157;
jcb(index,831) = 0.09*B_315;
jcb(index,832) = B_128;
jcb(index,833) = 0;
jcb(index,834) = B_12+ B_18+ B_280;
jcb(index,835) = 0.4*B_108+ 0.31*B_431;
jcb(index,836) = 0;
jcb(index,837) = 0;
jcb(index,838) = 0;
jcb(index,839) = 0;
jcb(index,840) = 0;
jcb(index,841) = 0;
jcb(index,842) = 0;
jcb(index,843) = 0;
jcb(index,844) = 0;
jcb(index,845) = B_13+ 0.4*B_109+ 0.09*B_316;
jcb(index,846) = 0;
jcb(index,847) = - B_8- B_10- B_24- B_26- B_28;
jcb(index,848) = - B_11;
jcb(index,849) = 0;
jcb(index,850) = B_14+ B_19+ B_129;
jcb(index,851) = B_281;
jcb(index,852) = B_416;
jcb(index,853) = 0;
jcb(index,854) = B_429;
jcb(index,855) = B_15;
jcb(index,856) = 0;
jcb(index,857) = 0;
jcb(index,858) = 0;
jcb(index,859) = - B_25- B_27- B_29;
jcb(index,860) = B_456;
jcb(index,861) = B_364;
jcb(index,862) = B_356;
jcb(index,863) = - B_500;
jcb(index,864) = B_409;
jcb(index,865) = - B_496;
jcb(index,866) = - B_492;
jcb(index,867) = B_366;
jcb(index,868) = 0;
jcb(index,869) = - B_341- B_493- B_497- B_501;
jcb(index,870) = 0;
jcb(index,871) = 0;
jcb(index,872) = - B_342;
jcb(index,873) = 0;
jcb(index,874) = 0;
jcb(index,875) = B_337+ B_354+ B_357+ B_365+ B_367+ B_410;
jcb(index,876) = B_355;
jcb(index,877) = 0;
jcb(index,878) = 0;
jcb(index,879) = 0;
jcb(index,880) = 0;
jcb(index,881) = 0;
jcb(index,882) = 0;
jcb(index,883) = B_338;
jcb(index,884) = 0;
jcb(index,885) = - B_403;
jcb(index,886) = - B_93;
jcb(index,887) = - B_79;
jcb(index,888) = - B_134;
jcb(index,889) = - B_254;
jcb(index,890) = - B_180;
jcb(index,891) = - B_226;
jcb(index,892) = 0;
jcb(index,893) = - B_4;
jcb(index,894) = B_156;
jcb(index,895) = - B_10;
jcb(index,896) = - B_5- B_6- B_11- B_16- B_22- B_45- B_51- B_80- B_94- B_135- B_181- B_227- B_255- B_271- B_331- B_404 - B_414- B_415;
jcb(index,897) = 0;
jcb(index,898) = - B_17;
jcb(index,899) = - B_272;
jcb(index,900) = 0;
jcb(index,901) = - B_332;
jcb(index,902) = 0;
jcb(index,903) = B_2- B_7;
jcb(index,904) = 0;
jcb(index,905) = - B_46;
jcb(index,906) = - B_52;
jcb(index,907) = 0;
jcb(index,908) = - B_23+ B_157;
jcb(index,909) = 0;
jcb(index,910) = B_480;
jcb(index,911) = B_471;
jcb(index,912) = B_434;
jcb(index,913) = 0.6*B_400;
jcb(index,914) = B_152;
jcb(index,915) = B_461;
jcb(index,916) = B_402;
jcb(index,917) = B_438;
jcb(index,918) = - B_190;
jcb(index,919) = - B_206;
jcb(index,920) = 0.75*B_108+ B_110+ B_305;
jcb(index,921) = - B_218;
jcb(index,922) = 0.7*B_122+ B_356;
jcb(index,923) = 0.08*B_254;
jcb(index,924) = 0.07*B_180;
jcb(index,925) = - B_264;
jcb(index,926) = - B_236;
jcb(index,927) = 0;
jcb(index,928) = - B_144+ 0.82*B_168;
jcb(index,929) = B_433;
jcb(index,930) = 0.75*B_109;
jcb(index,931) = B_158+ B_162- B_166+ 0.82*B_169+ 2*B_170;
jcb(index,932) = 0;
jcb(index,933) = 0.07*B_181+ 0.08*B_255;
jcb(index,934) = - B_114- B_116- B_118- 2*B_120- 2*B_121- B_145- B_167- B_191- B_207- B_219- B_237- B_265- B_311- B_358 - B_360;
jcb(index,935) = B_111+ 0.7*B_123+ B_153+ 0.6*B_401;
jcb(index,936) = B_306;
jcb(index,937) = 0;
jcb(index,938) = B_357;
jcb(index,939) = 0;
jcb(index,940) = 0;
jcb(index,941) = - B_359- B_361;
jcb(index,942) = - B_117+ B_159;
jcb(index,943) = - B_312;
jcb(index,944) = 0;
jcb(index,945) = - B_119+ B_163;
jcb(index,946) = - B_115;
jcb(index,947) = 0;
jcb(index,948) = - B_216;
jcb(index,949) = - B_370;
jcb(index,950) = - B_368;
jcb(index,951) = - B_77;
jcb(index,952) = - B_132;
jcb(index,953) = - B_178;
jcb(index,954) = - B_390;
jcb(index,955) = - B_362;
jcb(index,956) = - B_386;
jcb(index,957) = - B_388;
jcb(index,958) = - B_392;
jcb(index,959) = B_319- B_321;
jcb(index,960) = - B_173;
jcb(index,961) = - B_104;
jcb(index,962) = - B_214;
jcb(index,963) = - B_171+ B_434;
jcb(index,964) = - B_251;
jcb(index,965) = - B_400;
jcb(index,966) = B_451;
jcb(index,967) = - 0.5*B_198;
jcb(index,968) = - 0.2*B_247+ B_447;
jcb(index,969) = - 0.3*B_192+ B_437;
jcb(index,970) = - B_102;
jcb(index,971) = - 0.3*B_146+ B_432;
jcb(index,972) = - 0.3*B_208+ B_441;
jcb(index,973) = - B_75+ 0.333*B_426;
jcb(index,974) = - B_152;
jcb(index,975) = - B_317;
jcb(index,976) = - B_70+ B_424;
jcb(index,977) = - B_175;
jcb(index,978) = - B_130;
jcb(index,979) = - 0.15*B_224+ B_443;
jcb(index,980) = 0;
jcb(index,981) = - B_239+ B_445;
jcb(index,982) = 0;
jcb(index,983) = - B_241;
jcb(index,984) = - B_200;
jcb(index,985) = - B_96;
jcb(index,986) = - B_35+ 2*B_417;
jcb(index,987) = - B_112;
jcb(index,988) = - B_269;
jcb(index,989) = B_81+ B_85;
jcb(index,990) = 0;
jcb(index,991) = 0;
jcb(index,992) = - B_128;
jcb(index,993) = - B_212;
jcb(index,994) = B_12- B_18;
jcb(index,995) = 0.75*B_108- B_110;
jcb(index,996) = 0;
jcb(index,997) = - B_345;
jcb(index,998) = - B_72+ B_425;
jcb(index,999) = 0;
jcb(index,1000) = 0.13*B_134- B_136;
jcb(index,1001) = - 0.7*B_122+ B_309+ B_427;
jcb(index,1002) = 0;
jcb(index,1003) = 0.25*B_254- B_256;
jcb(index,1004) = 0.33*B_180- B_182;
jcb(index,1005) = - B_210;
jcb(index,1006) = 0;
jcb(index,1007) = 0.19*B_226- B_228;
jcb(index,1008) = - B_249;
jcb(index,1009) = - B_294+ B_457;
jcb(index,1010) = 0;
jcb(index,1011) = - B_394- B_396;
jcb(index,1012) = 0;
jcb(index,1013) = 0;
jcb(index,1014) = 0;
jcb(index,1015) = B_343+ B_468;
jcb(index,1016) = - B_148;
jcb(index,1017) = B_13+ 2*B_33+ 0.75*B_109+ B_320;
jcb(index,1018) = 0;
jcb(index,1019) = B_10+ 2*B_24;
jcb(index,1020) = - B_341;
jcb(index,1021) = B_11- B_16+ B_22+ 0.13*B_135+ 0.33*B_181+ 0.19*B_227+ 0.25*B_255;
jcb(index,1022) = 0;
jcb(index,1023) = - B_14- B_17- B_19- B_30- B_36- B_60- B_64- B_71- B_73- B_76- B_78- B_97- B_103- B_105- B_111- B_113- 0.7 *B_123- B_124- B_129- B_131- B_133- B_137- 0.3*B_147- B_149- B_153- B_172- B_174- B_176- B_179- B_183- 0.3 *B_193- 0.5*B_199- B_201- 0.3*B_209- B_211- B_213- B_215- B_217- 0.15*B_225- B_229- B_240- B_242- 0.2 *B_248- B_250- B_252- B_257- B_270- B_288- B_292- B_295- B_318- B_322- B_342- B_346- B_363- B_369- B_371 - B_387- B_389- B_391- B_393- B_395- B_397- B_401;
jcb(index,1024) = B_284+ B_310;
jcb(index,1025) = 2*B_34+ B_416;
jcb(index,1026) = 0;
jcb(index,1027) = - B_125;
jcb(index,1028) = - B_15+ B_20+ B_344;
jcb(index,1029) = 0;
jcb(index,1030) = - B_61+ B_62+ B_86;
jcb(index,1031) = - B_289;
jcb(index,1032) = - B_65;
jcb(index,1033) = B_68;
jcb(index,1034) = B_21+ B_23+ 2*B_25- B_31+ B_63+ B_69+ B_82+ B_285;
jcb(index,1035) = - B_293;
jcb(index,1036) = B_476;
jcb(index,1037) = 2*B_454;
jcb(index,1038) = 3*B_313+ 4*B_462;
jcb(index,1039) = B_327+ B_465;
jcb(index,1040) = 2*B_329+ B_464;
jcb(index,1041) = B_458;
jcb(index,1042) = B_477;
jcb(index,1043) = 2*B_478;
jcb(index,1044) = B_479;
jcb(index,1045) = 3*B_319+ 3*B_321+ 3*B_463;
jcb(index,1046) = 0.35*B_315+ B_317+ B_461;
jcb(index,1047) = B_374+ 2*B_453;
jcb(index,1048) = 0;
jcb(index,1049) = - B_286;
jcb(index,1050) = B_372- B_384+ B_475;
jcb(index,1051) = - B_280;
jcb(index,1052) = - B_305;
jcb(index,1053) = - B_376;
jcb(index,1054) = - B_323;
jcb(index,1055) = - B_309;
jcb(index,1056) = 0;
jcb(index,1057) = 0;
jcb(index,1058) = 0;
jcb(index,1059) = B_457;
jcb(index,1060) = - B_407;
jcb(index,1061) = - B_303+ B_459;
jcb(index,1062) = 0;
jcb(index,1063) = - B_325;
jcb(index,1064) = 3*B_314+ 0.35*B_316+ 3*B_320+ B_328+ 2*B_330;
jcb(index,1065) = 0;
jcb(index,1066) = 0;
jcb(index,1067) = 0;
jcb(index,1068) = - B_271;
jcb(index,1069) = B_311;
jcb(index,1070) = 0.94*B_288+ B_292+ B_318+ 3*B_322;
jcb(index,1071) = - B_272- B_281- B_282- B_284- B_287- B_304- B_306- B_307- B_310- B_324- B_326- B_377- B_385- B_408;
jcb(index,1072) = 0;
jcb(index,1073) = B_373+ B_375;
jcb(index,1074) = - B_308;
jcb(index,1075) = B_273;
jcb(index,1076) = B_380;
jcb(index,1077) = B_296;
jcb(index,1078) = B_274+ 2*B_276+ B_277+ 0.94*B_289+ B_297+ B_312+ B_381;
jcb(index,1079) = 0;
jcb(index,1080) = 0;
jcb(index,1081) = - B_283- B_285;
jcb(index,1082) = B_293+ B_456;
jcb(index,1083) = B_216;
jcb(index,1084) = B_370;
jcb(index,1085) = B_368;
jcb(index,1086) = B_77;
jcb(index,1087) = B_132;
jcb(index,1088) = B_178;
jcb(index,1089) = B_390;
jcb(index,1090) = B_362;
jcb(index,1091) = B_386;
jcb(index,1092) = B_388;
jcb(index,1093) = B_321;
jcb(index,1094) = B_104;
jcb(index,1095) = B_171;
jcb(index,1096) = B_198;
jcb(index,1097) = B_102;
jcb(index,1098) = B_75;
jcb(index,1099) = B_152;
jcb(index,1100) = B_317;
jcb(index,1101) = B_70;
jcb(index,1102) = B_175;
jcb(index,1103) = B_130;
jcb(index,1104) = 0.85*B_224;
jcb(index,1105) = - B_481;
jcb(index,1106) = 0;
jcb(index,1107) = B_200;
jcb(index,1108) = B_96;
jcb(index,1109) = B_35;
jcb(index,1110) = B_83+ B_87+ B_89;
jcb(index,1111) = 0;
jcb(index,1112) = B_18;
jcb(index,1113) = B_110+ 1.155*B_431;
jcb(index,1114) = B_72;
jcb(index,1115) = 0;
jcb(index,1116) = 0;
jcb(index,1117) = B_122;
jcb(index,1118) = - B_494;
jcb(index,1119) = 0;
jcb(index,1120) = 0;
jcb(index,1121) = 0;
jcb(index,1122) = B_249;
jcb(index,1123) = B_294+ B_484+ B_500;
jcb(index,1124) = 0;
jcb(index,1125) = 0;
jcb(index,1126) = - B_488;
jcb(index,1127) = 0;
jcb(index,1128) = B_492+ B_502;
jcb(index,1129) = B_148;
jcb(index,1130) = - B_33;
jcb(index,1131) = 0;
jcb(index,1132) = B_28;
jcb(index,1133) = B_341+ B_493+ B_501;
jcb(index,1134) = 0;
jcb(index,1135) = 0;
jcb(index,1136) = B_19+ B_30+ B_36+ B_71+ B_73+ B_76+ B_78+ B_97+ B_103+ B_105+ B_111+ B_123+ B_124+ B_131+ B_133+ B_149 + B_153+ B_172+ B_176+ B_179+ B_199+ B_201+ B_217+ 0.85*B_225+ B_250+ B_292+ B_295+ B_318+ B_322+ B_342 + B_363+ B_369+ B_371+ B_387+ B_389+ B_391;
jcb(index,1137) = 0;
jcb(index,1138) = - B_34- B_416- B_482- B_489- B_495;
jcb(index,1139) = 0;
jcb(index,1140) = B_125;
jcb(index,1141) = 0;
jcb(index,1142) = 0;
jcb(index,1143) = B_88;
jcb(index,1144) = 0;
jcb(index,1145) = B_90;
jcb(index,1146) = 0;
jcb(index,1147) = B_29+ B_31+ B_84;
jcb(index,1148) = B_293+ B_485+ B_503;
jcb(index,1149) = B_469;
jcb(index,1150) = B_476;
jcb(index,1151) = B_474;
jcb(index,1152) = 2*B_370+ 2*B_472;
jcb(index,1153) = 3*B_368+ 3*B_473;
jcb(index,1154) = B_390+ B_477;
jcb(index,1155) = B_362+ B_471;
jcb(index,1156) = B_386+ B_478;
jcb(index,1157) = 2*B_388+ 2*B_479;
jcb(index,1158) = - B_374;
jcb(index,1159) = - B_372+ B_384+ B_475;
jcb(index,1160) = B_345+ B_376+ 2*B_466;
jcb(index,1161) = - B_364;
jcb(index,1162) = - B_356;
jcb(index,1163) = - B_347+ 0.85*B_470;
jcb(index,1164) = 0;
jcb(index,1165) = - B_409+ B_411;
jcb(index,1166) = 0;
jcb(index,1167) = B_468;
jcb(index,1168) = - B_366;
jcb(index,1169) = 0;
jcb(index,1170) = B_341;
jcb(index,1171) = - B_331;
jcb(index,1172) = B_360;
jcb(index,1173) = B_342+ B_346+ B_363+ 3*B_369+ 2*B_371+ B_387+ 2*B_389+ B_391;
jcb(index,1174) = B_377+ B_385;
jcb(index,1175) = 0;
jcb(index,1176) = - B_332- B_337- B_348- B_354- B_357- B_365- B_367- B_373- B_375- B_410;
jcb(index,1177) = - B_355;
jcb(index,1178) = B_333;
jcb(index,1179) = B_334+ 2*B_335+ B_349+ B_361+ B_378+ B_380+ B_412+ B_467;
jcb(index,1180) = B_350;
jcb(index,1181) = B_379+ B_381;
jcb(index,1182) = 0;
jcb(index,1183) = 0;
jcb(index,1184) = - B_338;
jcb(index,1185) = 0;
jcb(index,1186) = B_173+ B_435;
jcb(index,1187) = B_400;
jcb(index,1188) = B_451;
jcb(index,1189) = B_441;
jcb(index,1190) = B_175;
jcb(index,1191) = 0.75*B_445;
jcb(index,1192) = B_112;
jcb(index,1193) = B_452;
jcb(index,1194) = 0.8*B_190;
jcb(index,1195) = B_204+ 0.8*B_206;
jcb(index,1196) = 0.25*B_108;
jcb(index,1197) = 0.68*B_218;
jcb(index,1198) = 1.13875*B_134;
jcb(index,1199) = 0.3*B_122+ B_309+ B_427;
jcb(index,1200) = 0.58*B_254;
jcb(index,1201) = 0.57*B_180;
jcb(index,1202) = B_439;
jcb(index,1203) = 0.956*B_262+ 1.25*B_264+ B_266;
jcb(index,1204) = B_444;
jcb(index,1205) = 0.75*B_232+ 1.125*B_236+ 0.5*B_238;
jcb(index,1206) = B_394+ B_398+ B_407+ B_409;
jcb(index,1207) = 0.98*B_196;
jcb(index,1208) = 0.75*B_144;
jcb(index,1209) = 0.25*B_109;
jcb(index,1210) = B_164+ B_166;
jcb(index,1211) = 0;
jcb(index,1212) = 1.13875*B_135+ 0.57*B_181+ 0.58*B_255;
jcb(index,1213) = B_116+ B_118+ 2*B_120+ B_121+ 0.75*B_145+ B_165+ B_167+ 0.8*B_191+ 0.8*B_207+ 0.68*B_219+ 1.125*B_237 + 1.25*B_265+ B_311+ B_358+ B_360;
jcb(index,1214) = B_113+ 0.3*B_123- B_124+ B_174+ B_176+ B_395+ B_401;
jcb(index,1215) = - B_307+ B_310+ B_408;
jcb(index,1216) = 0;
jcb(index,1217) = - B_354+ B_410;
jcb(index,1218) = - B_125- B_126- B_308- B_355- B_428- B_429;
jcb(index,1219) = 0;
jcb(index,1220) = B_359+ B_361;
jcb(index,1221) = B_117+ 0.98*B_197+ B_205+ 0.75*B_233+ 0.956*B_263;
jcb(index,1222) = B_312;
jcb(index,1223) = 0;
jcb(index,1224) = B_119- B_127+ B_399;
jcb(index,1225) = 0;
jcb(index,1226) = 0;
jcb(index,1227) = B_455;
jcb(index,1228) = B_37+ B_47+ B_53;
jcb(index,1229) = 0.1*B_315;
jcb(index,1230) = - B_301;
jcb(index,1231) = - B_343;
jcb(index,1232) = B_0+ B_39+ 0.1*B_316;
jcb(index,1233) = B_28;
jcb(index,1234) = 0;
jcb(index,1235) = - B_6+ B_415;
jcb(index,1236) = 0;
jcb(index,1237) = - B_14;
jcb(index,1238) = 0;
jcb(index,1239) = 0;
jcb(index,1240) = 0;
jcb(index,1241) = 0;
jcb(index,1242) = - B_2- B_7- B_15- B_20- B_49- B_273- B_302- B_333- B_344;
jcb(index,1243) = - B_334+ B_467;
jcb(index,1244) = B_48+ B_420;
jcb(index,1245) = - B_274;
jcb(index,1246) = - B_50+ B_54+ B_419;
jcb(index,1247) = B_421;
jcb(index,1248) = - B_21+ B_29;
jcb(index,1249) = 0;
jcb(index,1250) = B_353+ 0.15*B_470;
jcb(index,1251) = - B_411;
jcb(index,1252) = B_343;
jcb(index,1253) = 0;
jcb(index,1254) = B_331;
jcb(index,1255) = - B_358- B_360;
jcb(index,1256) = 0;
jcb(index,1257) = 0;
jcb(index,1258) = 0;
jcb(index,1259) = B_332;
jcb(index,1260) = 0;
jcb(index,1261) = - B_333+ B_344;
jcb(index,1262) = - B_334- 2*B_335- 2*B_336- B_339- B_349- B_351- B_359- B_361- B_378- B_380- B_382- B_412- B_467;
jcb(index,1263) = - B_350;
jcb(index,1264) = - B_379- B_381- B_383;
jcb(index,1265) = - B_352;
jcb(index,1266) = 0;
jcb(index,1267) = - B_340;
jcb(index,1268) = 0;
jcb(index,1269) = B_37- B_47;
jcb(index,1270) = 2*B_41;
jcb(index,1271) = B_98;
jcb(index,1272) = B_424;
jcb(index,1273) = 0;
jcb(index,1274) = B_96+ B_99+ B_100+ B_106;
jcb(index,1275) = - B_85- B_87+ B_91;
jcb(index,1276) = - B_188;
jcb(index,1277) = - B_204;
jcb(index,1278) = - B_245;
jcb(index,1279) = - B_222;
jcb(index,1280) = - B_262;
jcb(index,1281) = 0;
jcb(index,1282) = - B_232;
jcb(index,1283) = - B_196;
jcb(index,1284) = - B_140;
jcb(index,1285) = 2*B_42;
jcb(index,1286) = - B_158;
jcb(index,1287) = 0;
jcb(index,1288) = - B_45;
jcb(index,1289) = - B_116;
jcb(index,1290) = - B_60+ B_97;
jcb(index,1291) = 0;
jcb(index,1292) = 0;
jcb(index,1293) = 0;
jcb(index,1294) = 0;
jcb(index,1295) = B_49;
jcb(index,1296) = - B_349;
jcb(index,1297) = - B_46- B_48- B_55- B_61- B_62- B_86- B_88- B_117- B_141- B_159- B_189- B_197- B_205- B_223- B_233- B_246 - B_263- B_296- B_350- B_420;
jcb(index,1298) = - B_297;
jcb(index,1299) = B_50+ B_92+ B_101+ B_419;
jcb(index,1300) = - B_56+ B_422;
jcb(index,1301) = - B_63;
jcb(index,1302) = 0;
jcb(index,1303) = 2*B_279;
jcb(index,1304) = B_313;
jcb(index,1305) = B_327;
jcb(index,1306) = B_329;
jcb(index,1307) = B_455;
jcb(index,1308) = 0.46*B_315;
jcb(index,1309) = B_294;
jcb(index,1310) = B_300+ B_301+ B_460;
jcb(index,1311) = B_314+ 0.46*B_316+ B_328+ B_330;
jcb(index,1312) = 0;
jcb(index,1313) = 0;
jcb(index,1314) = B_271;
jcb(index,1315) = - B_311;
jcb(index,1316) = - B_288+ B_295;
jcb(index,1317) = B_272+ B_284;
jcb(index,1318) = 0;
jcb(index,1319) = 0;
jcb(index,1320) = 0;
jcb(index,1321) = - B_273+ B_302;
jcb(index,1322) = - B_378- B_380- B_382;
jcb(index,1323) = - B_296;
jcb(index,1324) = - B_274- 2*B_275- 2*B_276- 2*B_277- 2*B_278- B_289- B_290- B_297- B_298- B_312- B_379- B_381- B_383;
jcb(index,1325) = - B_299;
jcb(index,1326) = 0;
jcb(index,1327) = B_285- B_291;
jcb(index,1328) = 0;
jcb(index,1329) = B_469;
jcb(index,1330) = B_458;
jcb(index,1331) = B_173+ B_435;
jcb(index,1332) = - B_53;
jcb(index,1333) = B_214+ B_442;
jcb(index,1334) = B_251+ B_253+ B_450;
jcb(index,1335) = B_74+ B_75+ 0.667*B_426;
jcb(index,1336) = B_70;
jcb(index,1337) = B_175+ B_177+ B_436;
jcb(index,1338) = B_59+ B_423;
jcb(index,1339) = - B_100;
jcb(index,1340) = B_452;
jcb(index,1341) = - B_89- B_91;
jcb(index,1342) = 0.96*B_188;
jcb(index,1343) = B_204;
jcb(index,1344) = 0.985*B_245;
jcb(index,1345) = B_425;
jcb(index,1346) = 0.84*B_222;
jcb(index,1347) = B_353+ 0.15*B_470;
jcb(index,1348) = 0;
jcb(index,1349) = 0.956*B_262;
jcb(index,1350) = B_249+ B_449;
jcb(index,1351) = B_232- B_234;
jcb(index,1352) = 0;
jcb(index,1353) = 0.98*B_196;
jcb(index,1354) = B_300+ B_460;
jcb(index,1355) = B_140+ B_142;
jcb(index,1356) = 0;
jcb(index,1357) = B_158- B_160+ B_162;
jcb(index,1358) = 0;
jcb(index,1359) = B_45- B_51;
jcb(index,1360) = B_116+ B_118;
jcb(index,1361) = - B_64+ B_71+ B_76+ B_174+ B_176+ B_215+ B_250+ B_252;
jcb(index,1362) = 0;
jcb(index,1363) = 0;
jcb(index,1364) = 0;
jcb(index,1365) = 0;
jcb(index,1366) = - B_49;
jcb(index,1367) = B_349- B_351;
jcb(index,1368) = B_46+ 2*B_55+ B_62+ B_117+ B_141+ B_159+ 0.96*B_189+ 0.98*B_197+ B_205+ 0.84*B_223+ B_233+ 0.985*B_246 + 0.956*B_263+ B_296+ B_350;
jcb(index,1369) = B_297- B_298;
jcb(index,1370) = - B_50- B_52- B_54- B_57- B_65- B_66- B_90- B_92- B_101- B_161- B_235- B_299- B_352- B_419;
jcb(index,1371) = 2*B_56- B_58+ B_68+ B_119+ B_143+ B_163+ B_421;
jcb(index,1372) = B_63- B_67+ B_69;
jcb(index,1373) = 0;
jcb(index,1374) = 0.333*B_426;
jcb(index,1375) = B_59+ B_423;
jcb(index,1376) = B_72;
jcb(index,1377) = B_347+ 0.85*B_470;
jcb(index,1378) = - B_258;
jcb(index,1379) = - B_184;
jcb(index,1380) = - B_398;
jcb(index,1381) = B_301+ B_303+ B_459;
jcb(index,1382) = - B_142;
jcb(index,1383) = - B_150;
jcb(index,1384) = - B_162;
jcb(index,1385) = 0;
jcb(index,1386) = B_51;
jcb(index,1387) = - B_118;
jcb(index,1388) = B_73;
jcb(index,1389) = B_304;
jcb(index,1390) = 0;
jcb(index,1391) = B_348;
jcb(index,1392) = - B_126;
jcb(index,1393) = B_302;
jcb(index,1394) = 0;
jcb(index,1395) = - B_55;
jcb(index,1396) = 0;
jcb(index,1397) = B_52- B_57;
jcb(index,1398) = - B_56- B_58- B_68- B_119- B_127- B_143- B_151- B_163- B_185- B_259- B_399- B_421- B_422;
jcb(index,1399) = - B_69;
jcb(index,1400) = 0;
jcb(index,1401) = - B_405;
jcb(index,1402) = B_392;
jcb(index,1403) = B_442;
jcb(index,1404) = 0.4*B_400;
jcb(index,1405) = B_451;
jcb(index,1406) = B_437;
jcb(index,1407) = B_432;
jcb(index,1408) = B_74+ 0.667*B_426;
jcb(index,1409) = B_130;
jcb(index,1410) = 0.67*B_443;
jcb(index,1411) = 0;
jcb(index,1412) = 0.75*B_445;
jcb(index,1413) = B_106;
jcb(index,1414) = B_35+ B_286;
jcb(index,1415) = B_112;
jcb(index,1416) = B_452;
jcb(index,1417) = - B_81- B_83+ B_85;
jcb(index,1418) = - B_186+ 0.96*B_188+ 0.8*B_190;
jcb(index,1419) = - B_202+ 0.3*B_206;
jcb(index,1420) = B_440;
jcb(index,1421) = - B_243;
jcb(index,1422) = 1.23*B_218- B_220+ 0.56*B_222;
jcb(index,1423) = 0.13*B_134;
jcb(index,1424) = B_427;
jcb(index,1425) = 0.25*B_254;
jcb(index,1426) = 0.26*B_180;
jcb(index,1427) = B_210+ B_439;
jcb(index,1428) = - B_260+ 0.956*B_262+ B_264+ B_266;
jcb(index,1429) = 0.32*B_226+ B_444;
jcb(index,1430) = 0.67*B_449;
jcb(index,1431) = - B_230+ 0.75*B_232+ 0.875*B_236+ B_238;
jcb(index,1432) = B_396;
jcb(index,1433) = - B_194+ 0.98*B_196;
jcb(index,1434) = - B_138+ B_140+ B_142+ B_144+ 0.82*B_168;
jcb(index,1435) = B_433;
jcb(index,1436) = - B_154- B_156+ B_164+ 0.82*B_169;
jcb(index,1437) = B_8- B_24- B_26- B_28;
jcb(index,1438) = B_16- B_22+ 0.13*B_135+ 0.26*B_181+ 0.32*B_227+ 0.25*B_255;
jcb(index,1439) = - B_114+ B_116+ B_118+ 2*B_120+ B_145+ B_165+ 0.8*B_191+ 0.3*B_207+ 1.23*B_219+ 0.875*B_237+ B_265+ B_311 + B_360;
jcb(index,1440) = B_17- B_30+ B_36+ B_113+ B_124+ B_131+ B_211+ 0.94*B_288+ B_393+ B_397+ 0.4*B_401;
jcb(index,1441) = - B_282- B_284+ B_287+ B_307;
jcb(index,1442) = 0;
jcb(index,1443) = - B_337+ B_354;
jcb(index,1444) = B_125+ B_126+ B_308+ B_355+ B_429;
jcb(index,1445) = - B_20;
jcb(index,1446) = - B_339+ B_361;
jcb(index,1447) = - B_62+ B_86+ B_117+ B_141+ 0.96*B_189+ 0.98*B_197+ 0.56*B_223+ 0.75*B_233+ 0.956*B_263;
jcb(index,1448) = 0.94*B_289- B_290+ B_312;
jcb(index,1449) = - B_66;
jcb(index,1450) = - B_68+ B_119+ B_127+ B_143;
jcb(index,1451) = - B_21- B_23- B_25- B_27- B_29- B_31- 2*B_32- B_63- B_67- B_69- B_82- B_84- B_115- B_139- B_155- B_157 - B_187- B_195- B_203- B_221- B_231- B_244- B_261- B_283- B_285- B_291- B_338- B_340- B_406;
jcb(index,1452) = 0;
jcb(index,1453) = - B_490;
jcb(index,1454) = B_286;
jcb(index,1455) = B_280;
jcb(index,1456) = B_305;
jcb(index,1457) = B_323;
jcb(index,1458) = B_309;
jcb(index,1459) = - B_498;
jcb(index,1460) = 0;
jcb(index,1461) = 0;
jcb(index,1462) = - B_484;
jcb(index,1463) = B_407;
jcb(index,1464) = - B_486;
jcb(index,1465) = - B_502;
jcb(index,1466) = B_325;
jcb(index,1467) = 0;
jcb(index,1468) = 0;
jcb(index,1469) = 0;
jcb(index,1470) = 0;
jcb(index,1471) = 0;
jcb(index,1472) = 0;
jcb(index,1473) = 0.06*B_288- B_292;
jcb(index,1474) = B_281+ B_282+ B_287+ B_306+ B_307+ B_310+ B_324+ B_326+ B_408;
jcb(index,1475) = 0;
jcb(index,1476) = 0;
jcb(index,1477) = B_308;
jcb(index,1478) = 0;
jcb(index,1479) = 0;
jcb(index,1480) = 0;
jcb(index,1481) = 0.06*B_289;
jcb(index,1482) = 0;
jcb(index,1483) = 0;
jcb(index,1484) = B_283;
jcb(index,1485) = - B_293- B_456- B_485- B_487- B_491- B_499- B_503;
}
__device__ void Fun(double *var, const double * __restrict__ fix, const double * __restrict__ rconst, double *varDot, int &Nfun, const int VL_GLO){
int index = blockIdx.x*blockDim.x+threadIdx.x;
Nfun++;
double dummy, A_0, A_1, A_2, A_3, A_4, A_5, A_6, A_7, A_8, A_9, A_10, A_11, A_12, A_13, A_14, A_15, A_16, A_17, A_18, A_19, A_20, A_21, A_22, A_23, A_24, A_25, A_26, A_27, A_28, A_29, A_30, A_31, A_32, A_33, A_34, A_35, A_36, A_37, A_38, A_39, A_40, A_41, A_42, A_43, A_44, A_45, A_46, A_47, A_48, A_49, A_50, A_51, A_52, A_53, A_54, A_55, A_56, A_57, A_58, A_59, A_60, A_61, A_62, A_63, A_64, A_65, A_66, A_67, A_68, A_69, A_70, A_71, A_72, A_73, A_74, A_75, A_76, A_77, A_78, A_79, A_80, A_81, A_82, A_83, A_84, A_85, A_86, A_87, A_88, A_89, A_90, A_91, A_92, A_93, A_94, A_95, A_96, A_97, A_98, A_99, A_100, A_101, A_102, A_103, A_104, A_105, A_106, A_107, A_108, A_109, A_110, A_111, A_112, A_113, A_114, A_115, A_116, A_117, A_118, A_119, A_120, A_121, A_122, A_123, A_124, A_125, A_126, A_127, A_128, A_129, A_130, A_131, A_132, A_133, A_134, A_135, A_136, A_137, A_138, A_139, A_140, A_141, A_142, A_143, A_144, A_145, A_146, A_147, A_148, A_149, A_150, A_151, A_152, A_153, A_154, A_155, A_156, A_157, A_158, A_159, A_160, A_161, A_162, A_163, A_164, A_165, A_166, A_167, A_168, A_169, A_170, A_171, A_172, A_173, A_174, A_175, A_176, A_177, A_178, A_179, A_180, A_181, A_182, A_183, A_184, A_185, A_186, A_187, A_188, A_189, A_190, A_191, A_192, A_193, A_194, A_195, A_196, A_197, A_198, A_199, A_200, A_201, A_202, A_203, A_204, A_205, A_206, A_207, A_208, A_209, A_210, A_211, A_212, A_213, A_214, A_215, A_216, A_217, A_218, A_219, A_220, A_221, A_222, A_223, A_224, A_225, A_226, A_227, A_228, A_229, A_230, A_231, A_232, A_233, A_234, A_235, A_236, A_237, A_238, A_239, A_240, A_241, A_242, A_243, A_244, A_245, A_246, A_247, A_248, A_249, A_250, A_251, A_252, A_253, A_254, A_255, A_256, A_257, A_258, A_259, A_260, A_261, A_262, A_263, A_264, A_265, A_266, A_267, A_268, A_269, A_270, A_271, A_272, A_273, A_274, A_275, A_276, A_277, A_278, A_279, A_280, A_281, A_282, A_283, A_284, A_285, A_286, A_287, A_288, A_289, A_290, A_291, A_292, A_293, A_294, A_295, A_296, A_297, A_298, A_299, A_300, A_301, A_302, A_303, A_304, A_305, A_306, A_307, A_308, A_309;
{
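/* A_0 ... A_309 are the individual reaction rates: the rate coefficient
   rconst times the concentrations of the reacting species. The species
   tendencies varDot are assembled from these rates further below. */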
A_0 = rconst(index,0)*var(index,120)*fix(index,0);
A_1 = rconst(index,1)*var(index,131)*fix(index,0);
A_2 = 1.2e-10*var(index,120)*var(index,124);
A_3 = rconst(index,3)*var(index,124)*var(index,131);
A_4 = rconst(index,4)*var(index,122)*fix(index,0);
A_5 = rconst(index,5)*var(index,122)*var(index,124);
A_6 = 1.2e-10*var(index,97)*var(index,120);
A_7 = rconst(index,7)*var(index,126)*var(index,131);
A_8 = rconst(index,8)*var(index,124)*var(index,126);
A_9 = rconst(index,9)*var(index,97)*var(index,126);
A_10 = rconst(index,10)*var(index,131)*var(index,137);
A_11 = rconst(index,11)*var(index,124)*var(index,137);
A_12 = 7.2e-11*var(index,122)*var(index,137);
A_13 = 6.9e-12*var(index,122)*var(index,137);
A_14 = 1.6e-12*var(index,122)*var(index,137);
A_15 = rconst(index,15)*var(index,126)*var(index,137);
A_16 = rconst(index,16)*var(index,137)*var(index,137);
A_17 = rconst(index,17)*var(index,120)*var(index,128);
A_18 = 1.8e-12*var(index,88)*var(index,126);
A_19 = rconst(index,19)*var(index,59)*fix(index,0);
A_20 = rconst(index,20)*var(index,120)*fix(index,1);
A_21 = rconst(index,21)*var(index,60)*var(index,120);
A_22 = rconst(index,22)*var(index,60)*var(index,120);
A_23 = rconst(index,23)*var(index,124)*var(index,133);
A_24 = rconst(index,24)*var(index,59)*var(index,133);
A_25 = rconst(index,25)*var(index,131)*var(index,135);
A_26 = rconst(index,26)*var(index,124)*var(index,135);
A_27 = rconst(index,27)*var(index,59)*var(index,135);
A_28 = rconst(index,28)*var(index,133)*var(index,136);
A_29 = rconst(index,29)*var(index,135)*var(index,136);
A_30 = rconst(index,30)*var(index,83);
A_31 = rconst(index,31)*var(index,126)*var(index,133);
A_32 = rconst(index,32)*var(index,133)*var(index,137);
A_33 = rconst(index,33)*var(index,126)*var(index,135);
A_34 = rconst(index,34)*var(index,135)*var(index,137);
A_35 = 3.5e-12*var(index,136)*var(index,137);
A_36 = rconst(index,36)*var(index,76)*var(index,126);
A_37 = rconst(index,37)*var(index,101)*var(index,126);
A_38 = rconst(index,38)*var(index,73);
A_39 = rconst(index,39)*var(index,73)*var(index,126);
A_40 = rconst(index,40)*var(index,47)*var(index,126);
A_41 = rconst(index,41)*var(index,92)*var(index,124);
A_42 = rconst(index,42)*var(index,92)*var(index,137);
A_43 = rconst(index,43)*var(index,92)*var(index,137);
A_44 = rconst(index,44)*var(index,92)*var(index,133);
A_45 = rconst(index,45)*var(index,92)*var(index,133);
A_46 = rconst(index,46)*var(index,92)*var(index,135);
A_47 = rconst(index,47)*var(index,92)*var(index,135);
A_48 = 1.2e-14*var(index,84)*var(index,124);
A_49 = 1300*var(index,84);
A_50 = rconst(index,50)*var(index,87)*var(index,126);
A_51 = rconst(index,51)*var(index,70)*var(index,87);
A_52 = rconst(index,52)*var(index,87)*var(index,135);
A_53 = 1.66e-12*var(index,70)*var(index,126);
A_54 = rconst(index,54)*var(index,61)*var(index,126);
A_55 = rconst(index,55)*var(index,87)*fix(index,0);
A_56 = 1.75e-10*var(index,98)*var(index,120);
A_57 = rconst(index,57)*var(index,98)*var(index,126);
A_58 = rconst(index,58)*var(index,89)*var(index,126);
A_59 = rconst(index,59)*var(index,125)*var(index,137);
A_60 = rconst(index,60)*var(index,125)*var(index,133);
A_61 = 1.3e-12*var(index,125)*var(index,136);
A_62 = rconst(index,62)*var(index,125)*var(index,125);
A_63 = rconst(index,63)*var(index,125)*var(index,125);
A_64 = rconst(index,64)*var(index,104)*var(index,126);
A_65 = rconst(index,65)*var(index,126)*var(index,130);
A_66 = rconst(index,66)*var(index,130)*var(index,136);
A_67 = rconst(index,67)*var(index,95)*var(index,126);
A_68 = 4e-13*var(index,78)*var(index,126);
A_69 = rconst(index,69)*var(index,48)*var(index,126);
A_70 = rconst(index,70)*var(index,103)*var(index,124);
A_71 = rconst(index,71)*var(index,103)*var(index,126);
A_72 = rconst(index,72)*var(index,117)*var(index,137);
A_73 = rconst(index,73)*var(index,117)*var(index,133);
A_74 = 2.3e-12*var(index,117)*var(index,136);
A_75 = rconst(index,75)*var(index,117)*var(index,125);
A_76 = rconst(index,76)*var(index,71)*var(index,126);
A_77 = rconst(index,77)*var(index,119)*var(index,126);
A_78 = rconst(index,78)*var(index,119)*var(index,136);
A_79 = rconst(index,79)*var(index,74)*var(index,126);
A_80 = rconst(index,80)*var(index,121)*var(index,137);
A_81 = rconst(index,81)*var(index,121)*var(index,137);
A_82 = rconst(index,82)*var(index,121)*var(index,133);
A_83 = rconst(index,83)*var(index,121)*var(index,135);
A_84 = 4e-12*var(index,121)*var(index,136);
A_85 = rconst(index,85)*var(index,121)*var(index,125);
A_86 = rconst(index,86)*var(index,121)*var(index,125);
A_87 = rconst(index,87)*var(index,117)*var(index,121);
A_88 = rconst(index,88)*var(index,121)*var(index,121);
A_89 = rconst(index,89)*var(index,63)*var(index,126);
A_90 = rconst(index,90)*var(index,58)*var(index,126);
A_91 = rconst(index,91)*var(index,77)*var(index,126);
A_92 = rconst(index,92)*var(index,77);
A_93 = rconst(index,93)*var(index,49)*var(index,126);
A_94 = rconst(index,94)*var(index,107)*var(index,124);
A_95 = rconst(index,95)*var(index,107)*var(index,126);
A_96 = rconst(index,96)*var(index,107)*var(index,136);
A_97 = rconst(index,97)*var(index,93)*var(index,137);
A_98 = rconst(index,98)*var(index,93)*var(index,133);
A_99 = rconst(index,99)*var(index,93)*var(index,125);
A_100 = rconst(index,100)*var(index,69)*var(index,126);
A_101 = rconst(index,101)*var(index,115)*var(index,137);
A_102 = rconst(index,102)*var(index,115)*var(index,133);
A_103 = rconst(index,103)*var(index,67)*var(index,126);
A_104 = rconst(index,104)*var(index,86)*var(index,126);
A_105 = rconst(index,105)*var(index,94)*var(index,137);
A_106 = rconst(index,106)*var(index,94)*var(index,133);
A_107 = rconst(index,107)*var(index,94)*var(index,125);
A_108 = rconst(index,108)*var(index,72)*var(index,126);
A_109 = rconst(index,109)*var(index,108)*var(index,126);
A_110 = rconst(index,110)*var(index,96)*var(index,126);
A_111 = rconst(index,111)*var(index,62)*var(index,126);
A_112 = rconst(index,112)*var(index,40)*var(index,126);
A_113 = rconst(index,113)*var(index,102)*var(index,125);
A_114 = rconst(index,114)*var(index,102)*var(index,137);
A_115 = rconst(index,115)*var(index,102)*var(index,133);
A_116 = rconst(index,116)*var(index,79)*var(index,126);
A_117 = rconst(index,117)*var(index,110)*var(index,124);
A_118 = rconst(index,118)*var(index,110)*var(index,126);
A_119 = rconst(index,119)*var(index,113)*var(index,137);
A_120 = rconst(index,120)*var(index,113)*var(index,133);
A_121 = rconst(index,121)*var(index,113)*var(index,135);
A_122 = 2e-12*var(index,113)*var(index,125);
A_123 = 2e-12*var(index,113)*var(index,113);
A_124 = 3e-11*var(index,82)*var(index,126);
A_125 = rconst(index,125)*var(index,85)*var(index,126);
A_126 = rconst(index,126)*var(index,99)*var(index,137);
A_127 = rconst(index,127)*var(index,99)*var(index,133);
A_128 = rconst(index,128)*var(index,68)*var(index,126);
A_129 = 1.7e-12*var(index,111)*var(index,126);
A_130 = 3.2e-11*var(index,64)*var(index,126);
A_131 = rconst(index,131)*var(index,64);
A_132 = rconst(index,132)*var(index,106)*var(index,124);
A_133 = rconst(index,133)*var(index,106)*var(index,126);
A_134 = rconst(index,134)*var(index,106)*var(index,136);
A_135 = rconst(index,135)*var(index,109)*var(index,137);
A_136 = rconst(index,136)*var(index,109)*var(index,133);
A_137 = 2e-12*var(index,109)*var(index,125);
A_138 = 2e-12*var(index,109)*var(index,109);
A_139 = 1e-10*var(index,66)*var(index,126);
A_140 = 1.3e-11*var(index,91)*var(index,126);
A_141 = rconst(index,141)*var(index,124)*var(index,127);
A_142 = rconst(index,142)*var(index,131)*var(index,134);
A_143 = rconst(index,143)*var(index,134)*var(index,134);
A_144 = rconst(index,144)*var(index,134)*var(index,134);
A_145 = rconst(index,145)*var(index,134)*var(index,134);
A_146 = rconst(index,146)*var(index,134)*var(index,134);
A_147 = rconst(index,147)*var(index,39);
A_148 = rconst(index,148)*var(index,97)*var(index,127);
A_149 = rconst(index,149)*var(index,127)*var(index,137);
A_150 = rconst(index,150)*var(index,127)*var(index,137);
A_151 = rconst(index,151)*var(index,88)*var(index,127);
A_152 = rconst(index,152)*var(index,126)*var(index,134);
A_153 = rconst(index,153)*var(index,134)*var(index,137);
A_154 = rconst(index,154)*var(index,126)*var(index,138);
A_155 = rconst(index,155)*var(index,112)*var(index,126);
A_156 = rconst(index,156)*var(index,133)*var(index,134);
A_157 = rconst(index,157)*var(index,134)*var(index,135);
A_158 = rconst(index,158)*var(index,116);
A_159 = rconst(index,159)*var(index,116)*var(index,131);
A_160 = rconst(index,160)*var(index,116)*var(index,127);
A_161 = rconst(index,161)*var(index,98)*var(index,127);
A_162 = rconst(index,162)*var(index,127)*var(index,130);
A_163 = 5.9e-11*var(index,104)*var(index,127);
A_164 = rconst(index,164)*var(index,125)*var(index,134);
A_165 = 3.3e-10*var(index,41)*var(index,120);
A_166 = 1.65e-10*var(index,75)*var(index,120);
A_167 = rconst(index,167)*var(index,75)*var(index,126);
A_168 = 3.25e-10*var(index,57)*var(index,120);
A_169 = rconst(index,169)*var(index,57)*var(index,126);
A_170 = rconst(index,170)*var(index,103)*var(index,127);
A_171 = 8e-11*var(index,119)*var(index,127);
A_172 = 1.4e-10*var(index,42)*var(index,120);
A_173 = 2.3e-10*var(index,43)*var(index,120);
A_174 = rconst(index,174)*var(index,124)*var(index,129);
A_175 = rconst(index,175)*var(index,131)*var(index,132);
A_176 = 2.7e-12*var(index,132)*var(index,132);
A_177 = rconst(index,177)*var(index,132)*var(index,132);
A_178 = rconst(index,178)*var(index,129)*var(index,137);
A_179 = rconst(index,179)*var(index,132)*var(index,137);
A_180 = rconst(index,180)*var(index,123)*var(index,126);
A_181 = rconst(index,181)*var(index,118)*var(index,131);
A_182 = rconst(index,182)*var(index,100)*var(index,126);
A_183 = 4.9e-11*var(index,105)*var(index,129);
A_184 = rconst(index,184)*var(index,132)*var(index,133);
A_185 = rconst(index,185)*var(index,132)*var(index,135);
A_186 = rconst(index,186)*var(index,105);
A_187 = rconst(index,187)*var(index,129)*var(index,130);
A_188 = rconst(index,188)*var(index,104)*var(index,129);
A_189 = rconst(index,189)*var(index,125)*var(index,132);
A_190 = rconst(index,190)*var(index,125)*var(index,132);
A_191 = rconst(index,191)*var(index,53)*var(index,126);
A_192 = rconst(index,192)*var(index,103)*var(index,129);
A_193 = rconst(index,193)*var(index,119)*var(index,129);
A_194 = rconst(index,194)*var(index,45)*var(index,126);
A_195 = rconst(index,195)*var(index,44)*var(index,126);
A_196 = 3.32e-15*var(index,90)*var(index,129);
A_197 = 1.1e-15*var(index,80)*var(index,129);
A_198 = rconst(index,198)*var(index,100)*var(index,127);
A_199 = rconst(index,199)*var(index,132)*var(index,134);
A_200 = rconst(index,200)*var(index,132)*var(index,134);
A_201 = rconst(index,201)*var(index,132)*var(index,134);
A_202 = 1.45e-11*var(index,90)*var(index,127);
A_203 = rconst(index,203)*var(index,54)*var(index,126);
A_204 = rconst(index,204)*var(index,55)*var(index,126);
A_205 = rconst(index,205)*var(index,52)*var(index,126);
A_206 = rconst(index,206)*var(index,56)*var(index,126);
A_207 = rconst(index,207)*var(index,114)*var(index,126);
A_208 = rconst(index,208)*var(index,114)*var(index,126);
A_209 = rconst(index,209)*var(index,114)*var(index,136);
A_210 = 1e-10*var(index,65)*var(index,126);
A_211 = rconst(index,211)*var(index,81);
A_212 = 3e-13*var(index,81)*var(index,124);
A_213 = 5e-11*var(index,46)*var(index,137);
A_214 = 3.3e-10*var(index,114)*var(index,127);
A_215 = rconst(index,215)*var(index,114)*var(index,129);
A_216 = 4.4e-13*var(index,114)*var(index,132);
A_217 = rconst(index,217)*fix(index,0);
A_218 = rconst(index,218)*var(index,124);
A_219 = rconst(index,219)*var(index,124);
A_220 = rconst(index,220)*var(index,128);
A_221 = rconst(index,221)*var(index,88);
A_222 = rconst(index,222)*var(index,60);
A_223 = rconst(index,223)*var(index,135);
A_224 = rconst(index,224)*var(index,133);
A_225 = rconst(index,225)*var(index,136);
A_226 = rconst(index,226)*var(index,136);
A_227 = rconst(index,227)*var(index,83);
A_228 = rconst(index,228)*var(index,76);
A_229 = rconst(index,229)*var(index,101);
A_230 = rconst(index,230)*var(index,73);
A_231 = rconst(index,231)*var(index,104);
A_232 = rconst(index,232)*var(index,130);
A_233 = rconst(index,233)*var(index,130);
A_234 = rconst(index,234)*fix(index,2);
A_235 = rconst(index,235)*var(index,98);
A_236 = rconst(index,236)*var(index,71);
A_237 = rconst(index,237)*var(index,119);
A_238 = rconst(index,238)*var(index,63);
A_239 = rconst(index,239)*var(index,58);
A_240 = rconst(index,240)*var(index,77);
A_241 = rconst(index,241)*var(index,69);
A_242 = rconst(index,242)*var(index,86);
A_243 = rconst(index,243)*var(index,108);
A_244 = rconst(index,244)*var(index,96);
A_245 = rconst(index,245)*var(index,72);
A_246 = rconst(index,246)*var(index,62);
A_247 = rconst(index,247)*var(index,79);
A_248 = rconst(index,248)*var(index,110);
A_249 = rconst(index,249)*var(index,82);
A_250 = rconst(index,250)*var(index,85);
A_251 = rconst(index,251)*var(index,68);
A_252 = rconst(index,252)*var(index,38);
A_253 = rconst(index,253)*var(index,111);
A_254 = rconst(index,254)*var(index,64);
A_255 = rconst(index,255)*var(index,66);
A_256 = rconst(index,256)*var(index,91);
A_257 = rconst(index,257)*var(index,80);
A_258 = rconst(index,258)*var(index,39);
A_259 = rconst(index,259)*var(index,51);
A_260 = rconst(index,260)*var(index,138);
A_261 = rconst(index,261)*var(index,112);
A_262 = rconst(index,262)*var(index,50);
A_263 = rconst(index,263)*var(index,116);
A_264 = rconst(index,264)*var(index,116);
A_265 = rconst(index,265)*var(index,75);
A_266 = rconst(index,266)*var(index,41);
A_267 = rconst(index,267)*var(index,57);
A_268 = rconst(index,268)*var(index,43);
A_269 = rconst(index,269)*var(index,42);
A_270 = rconst(index,270)*var(index,100);
A_271 = rconst(index,271)*var(index,132);
A_272 = rconst(index,272)*var(index,118);
A_273 = rconst(index,273)*var(index,0);
A_274 = rconst(index,274)*var(index,105);
A_275 = rconst(index,275)*var(index,53);
A_276 = rconst(index,276)*var(index,44);
A_277 = rconst(index,277)*var(index,45);
A_278 = rconst(index,278)*var(index,2);
A_279 = rconst(index,279)*var(index,90);
A_280 = rconst(index,280)*var(index,1);
A_281 = rconst(index,281)*var(index,52);
A_282 = rconst(index,282)*var(index,54);
A_283 = rconst(index,283)*var(index,55);
A_284 = rconst(index,284)*var(index,3);
A_285 = rconst(index,285)*var(index,83)*var(index,128);
A_286 = rconst(index,286)*var(index,83);
A_287 = rconst(index,287)*var(index,112)*var(index,138);
A_288 = rconst(index,288)*var(index,116)*var(index,138);
A_289 = rconst(index,289)*var(index,116)*var(index,128);
A_290 = rconst(index,290)*var(index,83)*var(index,138);
A_291 = rconst(index,291)*var(index,118)*var(index,123);
A_292 = rconst(index,292)*var(index,105)*var(index,128);
A_293 = rconst(index,293)*var(index,116)*var(index,123);
A_294 = rconst(index,294)*var(index,105)*var(index,138);
A_295 = rconst(index,295)*var(index,112)*var(index,123);
A_296 = rconst(index,296)*var(index,118)*var(index,138);
A_297 = rconst(index,297)*var(index,4);
A_298 = 2.3e-10*var(index,15)*var(index,120);
A_299 = rconst(index,299)*var(index,15);
A_300 = 1.4e-10*var(index,16)*var(index,120);
A_301 = rconst(index,301)*var(index,16);
A_302 = rconst(index,302)*var(index,17)*var(index,120);
A_303 = rconst(index,303)*var(index,17)*var(index,120);
A_304 = rconst(index,304)*var(index,17);
A_305 = 3e-10*var(index,18)*var(index,120);
A_306 = rconst(index,306)*var(index,18)*var(index,126);
A_307 = rconst(index,307)*var(index,18);
A_308 = rconst(index,308)*var(index,5);
A_309 = rconst(index,309)*var(index,6);
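/* Species tendencies: each varDot entry is the stoichiometrically weighted
   sum of the reaction rates that produce (+) or consume (-) that species. */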
varDot(index,0) = - A_273;
varDot(index,1) = - A_280;
varDot(index,2) = - A_278;
varDot(index,3) = - A_284;
varDot(index,4) = - A_297;
varDot(index,5) = - A_308;
varDot(index,6) = - A_309;
varDot(index,7) = A_165+ 0.9*A_166+ A_167+ 2*A_168+ 2*A_169+ A_172+ A_173+ A_191+ A_194+ A_195+ A_203+ A_204+ A_205+ A_266+ 2 *A_267+ A_268+ A_269+ A_276+ A_277+ A_278+ A_280+ A_281+ A_282+ A_283;
varDot(index,8) = 2*A_172+ A_173+ A_268+ 2*A_269+ 3*A_278+ 2*A_280;
varDot(index,9) = 0.09*A_166+ 2*A_203+ A_204+ A_205+ 2*A_268+ A_269;
varDot(index,10) = 0.4*A_210+ A_213;
varDot(index,11) = A_206;
varDot(index,12) = 2*A_286;
varDot(index,13) = 2*A_286;
varDot(index,14) = A_299+ A_301+ A_303+ A_304+ A_307+ A_308+ A_309;
varDot(index,15) = - A_298- A_299;
varDot(index,16) = - A_300- A_301;
varDot(index,17) = - A_302- A_303- A_304;
varDot(index,18) = - A_305- A_306- A_307;
varDot(index,19) = A_297;
varDot(index,20) = A_11;
varDot(index,21) = A_17;
varDot(index,22) = 2*A_2+ 2*A_3+ A_5+ A_6+ A_7+ A_8+ A_10+ A_11+ A_17+ A_21+ A_22+ 2*A_25+ A_35+ A_41+ A_46+ A_47+ A_48+ A_52 + A_56+ A_61+ A_66+ A_70+ A_74+ A_78+ A_84+ A_94+ A_96+ A_117+ A_132+ A_134+ 2*A_142+ 2*A_143+ 2*A_144 + A_145+ A_152+ A_164+ A_166+ A_168+ 2*A_175+ 2*A_176+ 2*A_177+ A_181+ A_190+ A_199+ 2*A_200+ 2*A_201+ 2 *A_226+ 2*A_258+ A_261+ A_272+ A_285+ 3*A_286+ A_287+ A_288+ 2*A_290+ A_291+ A_293+ A_294+ A_295+ A_296;
varDot(index,23) = 2*A_175+ 2*A_176+ 2*A_177+ A_181+ A_190+ 0.5*A_199+ A_200+ A_201+ A_272+ A_291+ 0.333333*A_293+ 0.333333 *A_294+ 0.5*A_295+ 0.5*A_296;
varDot(index,24) = 2*A_142+ 2*A_143+ 2*A_144+ A_145+ A_152+ A_164+ A_166+ A_168+ 0.5*A_199+ A_200+ A_201+ 2*A_258+ A_261 + A_287+ 0.5*A_288+ A_290+ 0.333333*A_293+ 0.333333*A_294+ 0.5*A_295+ 0.5*A_296;
varDot(index,25) = A_5+ A_6+ A_7+ A_8+ A_10+ A_11;
varDot(index,26) = 2*A_25+ A_35+ A_41+ A_46+ A_47+ A_48+ A_52+ 2*A_226+ A_285+ 3*A_286+ 0.5*A_288+ A_290+ 0.333333*A_293 + 0.333333*A_294;
varDot(index,27) = 2*A_2+ 2*A_3+ A_17+ A_21+ A_22+ A_56;
varDot(index,28) = A_61+ A_66+ A_70+ A_74+ A_78+ A_84+ A_94+ A_96+ A_117+ A_132+ A_134;
varDot(index,29) = A_8;
varDot(index,30) = A_32;
varDot(index,31) = A_191+ A_275+ A_278+ A_280;
varDot(index,32) = 4*A_165+ A_166+ A_167+ 3*A_168+ 3*A_169+ 2*A_172+ 3*A_173+ A_265+ 4*A_266+ 3*A_267+ 3*A_268+ 2*A_269 + A_280;
varDot(index,33) = A_60;
varDot(index,34) = A_14+ A_19+ A_24+ A_32+ A_36+ A_37+ A_60+ A_73+ A_81+ A_82+ A_98+ A_102+ A_106+ A_115+ A_120+ A_127+ A_136 + A_150+ A_182+ A_207+ A_208+ 0.4*A_210+ A_214+ A_215+ 2*A_217+ A_222+ A_224+ 0.333*A_230+ A_234+ A_259 + A_262+ A_273;
varDot(index,35) = A_73+ A_82+ A_98+ A_102+ A_106+ A_115+ A_120+ A_127+ A_136;
varDot(index,36) = 3*A_194+ 2*A_195+ A_203+ 2*A_204+ A_205+ 2*A_276+ 3*A_277+ A_281+ A_282+ 2*A_283;
varDot(index,37) = A_281+ 2*A_282+ A_283;
varDot(index,38) = 0.8*A_128- A_252;
varDot(index,39) = A_146- A_147- A_258;
varDot(index,40) = - A_112;
varDot(index,41) = - A_165- A_266;
varDot(index,42) = - A_172- A_269;
varDot(index,43) = - A_173- A_268;
varDot(index,44) = - A_195- A_276;
varDot(index,45) = - A_194- A_277;
varDot(index,46) = A_212- A_213;
varDot(index,47) = - A_40;
varDot(index,48) = - A_69;
varDot(index,49) = - A_93;
varDot(index,50) = - A_262+ A_290;
varDot(index,51) = A_145+ A_199- A_259;
varDot(index,52) = - A_205- A_281;
varDot(index,53) = - A_191- A_275;
varDot(index,54) = - A_203- A_282;
varDot(index,55) = - A_204- A_283;
varDot(index,56) = - A_206+ 0.6*A_210+ A_211;
varDot(index,57) = - A_168- A_169- A_267;
varDot(index,58) = - A_90+ A_140- A_239;
varDot(index,59) = - A_19- A_24- A_27+ A_224;
varDot(index,60) = - A_21- A_22+ A_27+ A_46- A_222;
varDot(index,61) = A_51- A_54;
varDot(index,62) = 0.04*A_98- A_111- A_246;
varDot(index,63) = A_80- A_89- A_238;
varDot(index,64) = A_121- A_130- A_131- A_254;
varDot(index,65) = A_208- A_210+ A_216;
varDot(index,66) = A_135- A_139- A_255;
varDot(index,67) = A_101- A_103;
varDot(index,68) = A_126- A_128- A_251;
varDot(index,69) = A_97- A_100- A_241;
varDot(index,70) = A_49- A_51- A_53+ A_54;
varDot(index,71) = A_72- A_76- A_236;
varDot(index,72) = A_105- A_108- A_245;
varDot(index,73) = A_34- A_38- A_39- A_230;
varDot(index,74) = - A_79+ A_81+ A_86+ 0.18*A_87;
varDot(index,75) = - 0.9*A_166- A_167- A_265;
varDot(index,76) = A_31- A_36+ A_52- A_228;
varDot(index,77) = A_83- A_91- A_92- A_240;
varDot(index,78) = - A_68+ 0.23125*A_70+ 0.22*A_94+ 0.45*A_117+ 0.28*A_132;
varDot(index,79) = A_114- A_116- A_247;
varDot(index,80) = A_143+ A_160- A_197+ A_202- A_257+ A_287+ A_288;
varDot(index,81) = A_207+ A_209- A_211- A_212+ A_214+ A_215;
varDot(index,82) = A_119- A_124- A_249;
varDot(index,83) = A_29- A_30- A_227- A_285- A_286- A_290;
varDot(index,84) = A_41+ A_42+ A_47- A_48- A_49;
varDot(index,85) = 0.88*A_113+ 0.56*A_115+ 0.85*A_116- A_125+ A_129+ 0.67*A_247- A_250+ 0.67*A_253;
varDot(index,86) = 0.96*A_98+ A_99+ 0.7*A_100- A_104+ A_111+ A_241- A_242+ A_246;
varDot(index,87) = A_43- A_50- A_51- A_52+ A_53- A_55;
varDot(index,88) = A_16- A_18+ 0.13875*A_70+ 0.09*A_132- A_151- A_221;
varDot(index,89) = - A_58+ A_63+ 0.25*A_75+ 0.03*A_94+ 0.2*A_99+ 0.5*A_107+ 0.18*A_113+ 0.25*A_122+ 0.25*A_137;
varDot(index,90) = - A_196+ A_197+ A_198+ A_201- A_202- A_279+ A_293+ A_294+ A_295+ A_296;
varDot(index,91) = A_134+ 0.044*A_136- A_140- A_256;
varDot(index,92) = A_40- A_41- A_42- A_43- A_44- A_45- A_46- A_47+ A_48;
varDot(index,93) = 0.82*A_93- A_97- A_98- A_99+ 0.3*A_100;
varDot(index,94) = A_104- A_105- A_106- A_107+ 0.3*A_108;
varDot(index,95) = A_65+ A_66- A_67+ 0.63*A_70+ A_90+ A_91+ 0.31*A_94+ A_110+ 0.22*A_117+ 0.25*A_120+ 0.125*A_122+ 0.5*A_123 + 0.14*A_132+ A_162+ A_187+ A_232+ A_233+ A_234+ A_235+ A_237+ A_239+ A_244+ A_248+ 0.25*A_249;
varDot(index,96) = 0.04*A_94+ 0.5*A_107+ 0.7*A_108+ A_109- A_110+ 0.9*A_117+ 0.5*A_120+ 0.5*A_122+ A_123+ 0.25*A_137- A_244 + 0.5*A_249;
varDot(index,97) = - A_6- A_9+ A_13+ 0.05*A_56- A_148+ A_232+ 0.69*A_235;
varDot(index,98) = - A_56- A_57+ 0.06*A_94- A_161- A_235;
varDot(index,99) = A_125- A_126- A_127+ 0.2*A_128;
varDot(index,100) = A_177- A_182+ A_183+ A_196- A_198- A_270+ A_291;
varDot(index,101) = A_33- A_37+ A_66+ A_78+ A_209- A_229+ 2*A_285+ A_288+ A_289+ A_290+ A_292+ A_293+ A_294;
varDot(index,102) = A_112- A_113- A_114- A_115+ 0.15*A_116;
varDot(index,103) = - A_70- A_71- A_170- A_192;
varDot(index,104) = A_59- A_64- A_163- A_188- A_231;
varDot(index,105) = - A_183+ A_185- A_186- A_274- A_292- A_294;
varDot(index,106) = - A_132- A_133- A_134;
varDot(index,107) = - A_94- A_95- A_96;
varDot(index,108) = 0.5*A_103+ 0.2*A_107- A_109+ 0.25*A_120+ 0.375*A_122+ A_123+ A_130+ 0.25*A_137+ A_140- A_243+ 0.25*A_249 + A_254;
varDot(index,109) = A_133- A_135- A_136- A_137- 2*A_138;
varDot(index,110) = - A_117- A_118+ 0.65*A_132+ 0.956*A_136+ 0.5*A_137+ 2*A_138+ A_139- A_248+ A_255+ A_256;
varDot(index,111) = A_96+ 0.02*A_102+ 0.16*A_115+ 0.015*A_127- A_129- A_253;
varDot(index,112) = A_153- A_155- A_261- A_287+ A_289- A_295;
varDot(index,113) = A_118- A_119- A_120- A_121- A_122- 2*A_123+ A_124+ A_131+ 0.1*A_132;
varDot(index,114) = - A_207- A_208- A_209- A_214- A_215- A_216;
varDot(index,115) = 0.666667*A_71+ A_95- A_101- A_102+ 0.5*A_103+ 0.666667*A_170+ 0.666667*A_192;
varDot(index,116) = A_157- A_158- A_159- A_160- A_263- A_264- A_288- A_289- A_293;
varDot(index,117) = A_69- A_72- A_73- A_74- A_75+ 0.3*A_76- A_87+ 0.18*A_93+ 0.06*A_94+ 0.12*A_113+ 0.28*A_115+ 0.33*A_247 + A_250+ 0.33*A_253;
varDot(index,118) = A_179- A_181+ A_182+ A_189- A_272- A_291+ A_292- A_296;
varDot(index,119) = A_73+ A_74+ 0.75*A_75+ 0.7*A_76- A_77- A_78+ A_87+ 0.47*A_94+ 0.98*A_102+ 0.12*A_113+ 0.28*A_115+ 0.985 *A_127- A_171- A_193+ A_236- A_237+ 0.33*A_247+ A_251+ 0.33*A_253;
varDot(index,120) = - A_0- A_2- A_6- A_17- A_20- A_21- A_22- A_56- A_165- A_166- A_168- A_172- A_173+ A_218+ A_222;
varDot(index,121) = A_77+ A_78- A_80- A_81- A_82- A_83- A_84- A_85- A_86- A_87- 2*A_88+ A_89+ A_92+ 0.23*A_94+ A_106+ 0.3 *A_107+ A_110+ 0.1*A_117+ 0.25*A_120+ 0.125*A_122+ 0.985*A_127+ 0.1*A_132+ A_171+ A_193+ A_240+ A_242 + A_243+ A_244+ A_245+ A_248+ 0.25*A_249+ A_250+ A_251+ 2*A_252;
varDot(index,122) = - A_4- A_5+ A_6+ A_7+ A_9- A_12- A_13- A_14+ 0.4*A_56+ A_67+ A_148+ 0.09*A_166+ A_220+ A_233+ 0.31*A_235 + A_260;
varDot(index,123) = A_178- A_180+ A_187+ A_188+ A_192+ A_193+ A_215- A_291- A_293- A_295;
varDot(index,124) = A_1- A_2- A_3- A_5- A_8- A_11- A_23- A_26- A_41- A_48- A_70+ A_81- A_94- A_117- A_132- A_141- A_174 - A_212- A_218- A_219;
varDot(index,125) = 0.75*A_56+ A_57- A_59- A_60- A_61- 2*A_62- 2*A_63+ 0.7*A_64- A_75+ A_79+ A_82+ A_84- A_86+ 0.82*A_87+ 2 *A_88+ 0.07*A_94- A_99- A_107- A_113- A_122+ 0.08*A_132- A_137+ A_161- A_164+ A_188- A_189- A_190+ 0.6 *A_210+ A_211+ A_237+ A_238+ A_242+ A_265+ A_275+ A_284;
varDot(index,126) = A_5+ A_6- A_7- A_8- A_9+ A_10+ A_11+ 2*A_12- A_15+ 2*A_17- A_18- A_31+ A_32- A_33+ A_35- A_36- A_37 - A_39- A_40+ A_42+ A_44- A_50- A_53- A_54+ 0.75*A_56- A_57- A_58- 0.7*A_64- A_65- A_67- A_68- A_69+ 0.13 *A_70- A_71- 0.3*A_76- A_77- A_79- A_89- A_90- A_91- A_93+ 0.33*A_94- A_95- 0.3*A_100- 0.5*A_103- A_104 - 0.3*A_108- A_109- A_110- A_111- A_112- 0.15*A_116+ 0.19*A_117- A_118- A_124- A_125- 0.2*A_128- A_129 - A_130+ 0.25*A_132- A_133- A_140+ A_150- A_152- A_154- A_155+ A_163- A_167+ A_168- A_169- A_180+ A_181 - A_182- A_191- A_194- A_195- A_203- A_204- A_205- A_206- A_207- A_208- A_210+ A_220+ 2*A_221+ A_228+ A_229 + 0.333*A_230+ A_231+ A_236+ A_238+ A_241+ A_245+ A_247+ A_249+ A_251+ A_255+ A_261+ A_272;
varDot(index,127) = - A_141+ A_142+ 2*A_144+ A_145- A_148- A_149- A_150- A_151+ 0.94*A_152+ A_154+ A_156- A_160- A_161- A_162 - A_163+ A_164+ 3*A_165+ 0.35*A_166+ A_167+ 3*A_168+ 3*A_169- A_170- A_171+ A_172+ 2*A_173+ A_196+ A_197 - A_198+ A_200- A_202- A_214+ 2*A_257+ 2*A_258+ A_260+ A_261+ A_262+ A_263+ A_265+ 4*A_266+ 3*A_267+ A_268 + A_269+ A_279+ A_280+ A_281+ 2*A_282+ A_283;
varDot(index,128) = A_9+ A_14+ A_15- A_17+ A_18+ A_36+ A_37+ A_39+ A_40+ A_43+ A_45+ A_46+ A_50+ A_53+ A_54+ A_57+ A_64 + A_65+ A_68+ A_69+ A_77+ A_79+ A_89+ A_91+ A_93+ A_103+ A_104+ A_112+ 0.85*A_116+ A_129+ A_154+ A_155 + A_167+ A_169+ A_180+ A_191+ A_194+ A_195+ A_203+ A_204+ A_205- A_220+ 1.155*A_235- A_285+ A_287- A_289 + A_291- A_292+ A_295+ A_296;
varDot(index,129) = - A_174+ A_175+ 2*A_176- A_178+ A_180+ A_182- A_183+ A_184- A_187- A_188+ A_190+ A_191- A_192- A_193+ 3 *A_194+ 2*A_195- A_196- A_197+ A_198+ A_199+ A_200+ A_202+ A_203+ 2*A_204+ A_205- A_215+ A_216+ 2*A_270 + A_271+ A_272+ A_273+ 0.85*A_274+ A_275+ 2*A_276+ 3*A_277+ A_278+ A_279+ A_280+ A_281+ A_282+ 2*A_283;
varDot(index,130) = 0.25*A_56+ A_58+ A_60+ A_61+ 2*A_62+ A_63+ 0.3*A_64- A_65- A_66+ 1.13875*A_70+ 0.75*A_75+ A_85+ A_86 + A_90+ A_91+ 0.57*A_94+ 0.8*A_99+ 0.98*A_102+ A_106+ 0.8*A_107+ 0.68*A_113+ 0.75*A_120+ 1.125*A_122+ 0.5 *A_123+ 0.58*A_132+ 0.956*A_136+ 1.25*A_137+ A_138- A_162+ A_163+ A_164- A_187+ A_189+ A_190+ A_207+ A_209 + A_210+ A_214+ A_215+ A_231- A_232- A_233+ A_239+ A_243+ A_245+ A_248+ 0.75*A_249+ A_255+ A_256;
varDot(index,131) = A_0- A_1- A_3- A_7- A_10+ A_14+ A_19+ A_20+ A_24- A_25+ A_27- A_142- A_159+ 0.1*A_166- A_175- A_181+ 2 *A_217+ A_219+ A_223+ A_224+ A_225+ A_234+ A_259+ A_271;
varDot(index,132) = A_174- A_175- 2*A_176- 2*A_177- A_179+ A_181- A_184- A_185+ A_186- A_189- A_190- A_199- A_200- A_201 - A_216- A_271+ 0.15*A_274;
varDot(index,133) = A_19+ 2*A_21- A_23- A_24+ A_25- A_28- A_31- A_32- A_44- A_45+ A_47+ A_50+ A_51+ A_52+ A_55- A_60- A_73 - A_82- A_98- A_102- A_106- A_115- A_120- A_127- A_136- A_156- A_184+ A_223- A_224+ A_226+ A_228;
varDot(index,134) = A_141- A_142- 2*A_143- 2*A_144- 2*A_145- 2*A_146+ 2*A_147+ A_150- A_152- A_153+ A_155- A_156- A_157 + A_158+ A_159- A_164+ A_165+ 0.46*A_166+ A_172+ A_173- A_199- A_200- A_201+ A_259+ A_264;
varDot(index,135) = A_23- A_25- A_26- A_27+ 2*A_28- A_29+ A_30+ A_32- A_33- A_34+ A_35+ A_36+ A_38+ A_39- A_46- A_47- A_52 + A_60+ A_61+ A_73+ A_74+ A_82- A_83+ A_84+ A_90+ A_91+ A_92+ 0.96*A_98+ 0.98*A_102+ A_106+ A_111+ 0.84 *A_115+ A_120- A_121+ 0.985*A_127+ A_129+ A_130+ A_131+ 0.956*A_136+ A_156- A_157+ A_158+ A_184- A_185 + A_186- A_223+ A_225+ A_227+ A_229+ 0.667*A_230+ A_239+ A_240+ A_246+ A_253+ A_254+ A_256+ A_262+ A_264 + A_273+ 0.15*A_274;
varDot(index,136) = A_26- A_28- A_29+ A_30- A_35+ A_37- A_61- A_66- A_74- A_78- A_84- A_96- A_134+ A_159+ A_160+ A_183- A_209 - A_225- A_226+ A_227+ 0.333*A_230+ A_263+ 0.85*A_274;
varDot(index,137) = A_4+ A_8- A_10- A_11- A_12- A_13- A_14- A_15- 2*A_16+ A_18- A_32- A_34- A_35+ A_38- A_42- A_43+ A_44 + A_55+ A_58- A_59+ A_60+ A_61+ 2*A_62+ A_65+ A_66+ A_68+ 0.13*A_70- A_72+ A_73+ A_74+ A_75- A_80- A_81 + A_85+ 0.82*A_87+ 0.26*A_94- A_97+ 0.96*A_98+ 0.8*A_99- A_101+ 0.98*A_102- A_105+ 0.3*A_107+ A_109+ 1.23 *A_113- A_114+ 0.56*A_115+ 0.32*A_117- A_119+ 0.75*A_120+ 0.875*A_122+ A_123- A_126+ 0.25*A_132- A_135 + 0.956*A_136+ A_137+ A_138- A_149- A_150+ A_151+ 0.94*A_152- A_153+ A_162+ A_164- A_178- A_179+ A_187 + A_190+ A_206+ A_208+ 0.4*A_210- A_213+ 0.667*A_230+ A_231+ A_233+ A_236+ A_237+ A_241+ A_243+ A_244 + A_246+ 0.67*A_247+ A_248+ 0.75*A_249+ 0.67*A_253+ A_255+ A_256;
varDot(index,138) = A_148+ A_149+ A_151+ 0.06*A_152- A_154+ A_161+ A_162+ A_163+ A_170+ A_171+ A_214- A_260- A_287- A_288 - A_290- A_294- A_296;
}
}
__device__ void ros_FunTimeDerivative(const double T, double roundoff, double * __restrict__ var, const double * __restrict__ fix,
const double * __restrict__ rconst, double *dFdT, double *Fcn0, int &Nfun,
const double * __restrict__ khet_st, const double * __restrict__ khet_tr,
const double * __restrict__ jx,
const int VL_GLO)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
const double DELTAMIN = 1.0E-6;
double delta,one_over_delta;
delta = sqrt(roundoff)*fmax(DELTAMIN,fabs(T));
one_over_delta = 1.0/delta;
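// Finite-difference estimate of the explicit time derivative of F:
// dFdT ~= (Fun evaluated here - Fcn0)/delta, with delta scaled by
// sqrt(roundoff) to balance truncation and rounding error.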
Fun(var, fix, rconst, dFdT, Nfun, VL_GLO);
for (int i=0; i < NVAR; i++){
dFdT(index,i) = (dFdT(index,i) - Fcn0(index,i)) * one_over_delta;
}
}
__device__ static int ros_Integrator(double * __restrict__ var, const double * __restrict__ fix, const double Tstart, const double Tend, double &T,
// Rosenbrock method coefficients
const int ros_S, const double * __restrict__ ros_M, const double * __restrict__ ros_E, const double * __restrict__ ros_A, const double * __restrict__ ros_C,
const double * __restrict__ ros_Alpha, const double * __restrict__ ros_Gamma, const double ros_ELO, const int * ros_NewF,
// Integration parameters
const int autonomous, const int vectorTol, const int Max_no_steps,
const double roundoff, const double Hmin, const double Hmax, const double Hstart, double &Hexit,
const double FacMin, const double FacMax, const double FacRej, const double FacSafe,
// Status parameters
int &Nfun, int &Njac, int &Nstp, int &Nacc, int &Nrej, int &Ndec, int &Nsol, int &Nsng,
// cuda global mem buffers
const double * __restrict__ rconst, const double * __restrict__ absTol, const double * __restrict__ relTol, double * __restrict__ varNew, double * __restrict__ Fcn0,
double * __restrict__ K, double * __restrict__ dFdT, double * __restrict__ jac0, double * __restrict__ Ghimj, double * __restrict__ varErr,
// for update_rconst
const double * __restrict__ khet_st, const double * __restrict__ khet_tr,
const double * __restrict__ jx,
// VL_GLO
const int VL_GLO)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
double H, Hnew, HC, HG, Fac; // Tau - not used
double Err; //*varErr;
int direction;
int rejectLastH, rejectMoreH;
const double DELTAMIN = 1.0E-5;
// ~~~> Initial preparations
T = Tstart;
Hexit = 0.0;
H = fmin(Hstart,Hmax);
if (fabs(H) <= 10.0*roundoff)
H = DELTAMIN;
if (Tend >= Tstart)
{
direction = + 1;
}
else
{
direction = - 1;
}
rejectLastH=0;
rejectMoreH=0;
// ~~~> Time loop begins below
// TimeLoop:
 while(((direction > 0) && ((T- Tend)+ roundoff <= ZERO)) || ((direction < 0) && ((Tend-T)+ roundoff <= ZERO)))
{
if (Nstp > Max_no_steps) // Too many steps
return -6;
// Step size too small
if (H <= roundoff){ // Step size too small
//if (((T+ 0.1*H) == T) || (H <= roundoff)) {
return -7;
}
// ~~~> Limit H if necessary to avoid going beyond Tend
Hexit = H;
H = fmin(H,fabs(Tend-T));
// ~~~> Compute the function at current time
Fun(var, fix, rconst, Fcn0, Nfun, VL_GLO); /// VAR READ - Fcn0 Write
// ~~~> Compute the function derivative with respect to T
if (!autonomous)
ros_FunTimeDerivative(T, roundoff, var, fix, rconst, dFdT, Fcn0, Nfun, khet_st, khet_tr, jx, VL_GLO); /// VAR READ - fcn0 read
// ~~~> Compute the Jacobian at current time
Jac_sp(var, fix, rconst, jac0, Njac, VL_GLO); /// VAR READ
// ~~~> Repeat step calculation until current step accepted
// UntilAccepted:
while(1)
{
ros_PrepareMatrix(H, direction, ros_Gamma[0], jac0, Ghimj, Nsng, Ndec, VL_GLO);
// ~~~> Compute the stages
// Stage:
for (int istage=0; istage < ros_S; istage++)
{
// For the 1st istage the function has been computed previously
if (istage == 0)
{
for (int i=0; i<NVAR; i++){
varNew(index,i) = Fcn0(index,i); // FCN0 Read
}
}
else if(ros_NewF[istage])
{
for (int i=0; i<NVAR; i++){
varNew(index,i) = var(index,i);
}
for (int j=0; j < (istage); j++){
for (int i=0; i<NVAR; i++){
varNew(index,i) = K(index,j,i)*ros_A[(istage)*(istage-1)/2 + j] + varNew(index,i);
}
}
Fun(varNew, fix, rconst, varNew, Nfun,VL_GLO); // FCN <- varNew / not overlap
}
for (int i=0; i<NVAR; i++)
K(index,istage,i) = varNew(index,i);
for (int j=0; j<(istage); j++)
{
HC = ros_C[(istage)*(istage-1)/2 + j]/(direction*H);
for (int i=0; i<NVAR; i++){
double tmp = K(index,j,i);
K(index,istage,i) += tmp*HC;
}
}
if ((!autonomous) && (ros_Gamma[istage] ))
{
HG = direction*H*ros_Gamma[istage];
for (int i=0; i<NVAR; i++){
K(index,istage,i) += dFdT(index,i)*HG;
}
}
// R ,RW, RW, R, R
ros_Solve(Ghimj, K, Nsol, istage, ros_S);
} // Stage
// ~~~> Compute the new solution
for (int i=0; i<NVAR; i++){
double tmpNew = var(index,i); /// VAR READ
double tmpErr = ZERO;
for (int j=0; j<ros_S; j++){
double tmp = K(index,j,i);
#ifdef DEBUG
if (isnan(tmp)){
printf("Solver detected NAN!");
tmp = 0;
}
#endif
tmpNew += tmp*ros_M[j];
tmpErr += tmp*ros_E[j];
}
 varNew(index,i) = tmpNew; // varNew is reused here to hold the candidate solution
varErr(index,i) = tmpErr;
}
Err = ros_ErrorNorm(var, varNew, varErr, absTol, relTol, vectorTol); /// VAR-varNew READ
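 // Err is a scaled error norm: Err <= ONE means the step satisfies the
 // absolute/relative tolerances and is accepted below.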
// ~~~> New step size is bounded by FacMin <= Hnew/H <= FacMax
Fac = fmin(FacMax,fmax(FacMin,FacSafe/pow(Err,ONE/ros_ELO)));
Hnew = H*Fac;
// ~~~> Check the error magnitude and adjust step size
Nstp = Nstp+ 1;
if((Err <= ONE) || (H <= Hmin)) // ~~~> Accept step
{
Nacc = Nacc + 1;
for (int j=0; j<NVAR ; j++)
var(index,j) = fmax(varNew(index,j),ZERO); /////////// VAR WRITE - last VarNew read
T = T + direction*H;
Hnew = fmax(Hmin,fmin(Hnew,Hmax));
if (rejectLastH) // No step size increase after a rejected step
Hnew = fmin(Hnew,H);
rejectLastH = 0;
rejectMoreH = 0;
H = Hnew;
break; // EXIT THE LOOP: WHILE STEP NOT ACCEPTED
}
else // ~~~> Reject step
{
if (rejectMoreH)
Hnew = H*FacRej;
rejectMoreH = rejectLastH;
rejectLastH = 1;
H = Hnew;
if (Nacc >= 1)
Nrej += 1;
} // Err <= 1
} // UntilAccepted
} // TimeLoop
 // ~~~> Successful exit
return 0; // ~~~> The integration was successful
}
typedef struct {
double ros_A[15];
double ros_C[15];
int ros_NewF[8];
double ros_M[6];
double ros_E[6];
double ros_Alpha[6];
double ros_Gamma[6];
double ros_ELO;
int ros_S;
} ros_t;
/*
 * Lookup tables for the different Rosenbrock (ROS) methods. Indexing a
 * constant-memory table instead of branching on the chosen method is much
 * faster on the GPU.
*/
__device__ __constant__ ros_t ros[5] = {
{
{.58578643762690495119831127579030,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, /* ros_A */
{-1.17157287525380990239662255158060,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, /* ros_C */
{1,1,0,0,0,0,0,0}, /* ros_NewF */
{.87867965644035742679746691368545,.29289321881345247559915563789515,0,0,0,0}, /* ros_M */
{.29289321881345247559915563789515,.29289321881345247559915563789515,0,0,0,0}, /* ros_E */
{0,1.0,0,0,0,0}, /* ros_Alpha */
{1.70710678118654752440084436210485,-1.70710678118654752440084436210485,0,0,0,0}, /* ros_Gamma */
2.0, /* ros_ELO */
2, /* ros_S*/
}, /* Ros2 */
{
{1.0,1.0,0,0,0,0,0,0,0,0,0,0,0,0,0}, /* ros_A */
{-0.10156171083877702091975600115545E+01, 0.40759956452537699824805835358067E+01,0.92076794298330791242156818474003E+01,0,0,0,0,0,0,0,0,0,0,0,0}, /* ros_C */
{1,1,0,0,0,0,0,0}, /* ros_NewF */
{0.1E+01,0.61697947043828245592553615689730E+01,-0.42772256543218573326238373806514E+00,0,0,0}, /* ros_M */
{0.5E+00,- 0.29079558716805469821718236208017E+01,0.22354069897811569627360909276199E+00,0,0,0}, /* ros_E */
{0.0E+00,0.43586652150845899941601945119356E+00,0.43586652150845899941601945119356E+00,0,0,0}, /* ros_Alpha */
{0.43586652150845899941601945119356E+00,0.24291996454816804366592249683314E+00,0.21851380027664058511513169485832E+01,0,0,0}, /* ros_Gamma */
3.0, /* ros_ELO */
3
}, /* Ros3 */
{
{0.2000000000000000E+01, 0.1867943637803922E+01, 0.2344449711399156E+00, 0.1867943637803922E+01, 0.2344449711399156E+00,0,0,0,0,0,0,0,0,0,0}, /* ros_A */
{-0.7137615036412310E+01,0.2580708087951457E+01,0.6515950076447975E+00, - 0.2137148994382534E+01, - 0.3214669691237626E+00, - 0.6949742501781779E+00 ,0,0,0,0,0,0,0,0,0}, /* ros_C */
{1,1,1,0,0,0,0,0}, /* ros_NewF */
{0.2255570073418735E+01, 0.2870493262186792E+00, 0.4353179431840180E+00, 0.1093502252409163E+01,0,0}, /* ros_M */
{ -0.2815431932141155E+00, -0.7276199124938920E-01, -0.1082196201495311E+00, -0.1093502252409163E+01, 0, 0}, /* ros_E */
{0.0, 0.1145640000000000E+01, 0.6552168638155900E+00, 0.6552168638155900E+00,0,0}, /* ros_Alpha */
{ 0.5728200000000000E+00, -0.1769193891319233E+01, 0.7592633437920482E+00, -0.1049021087100450E+00,0,0}, /* ros_Gamma */
4.0, /* ros_ELO */
4
}, /* Ros4 */
{
{ 0.0E+00, 2.0E+00, 0.0E+00, 2.0E+00, 0.0E+00, 1.0E+00, 0,0,0,0,0,0,0,0,0}, /* ros_A */
{ 4.0E+00, 1.0E+00, - 1.0E+00, 1.0E+00, - 1.0E+00, - 2.66666666666666666666666666666666, 0,0,0,0,0,0,0,0,0}, /* ros_C */
{1,0,1,1,0,0,0,0}, /* ros_NewF */
{2.0,0,1.0,1.0,0,0}, /* ros_M */
{0,0,0,1.0,0,0}, /* ros_E */
{0,0,1.0,1.0,0,0}, /* ros_Alpha */
{0.5,1.5,0,0,0,0}, /* ros_Gamma */
3.0, /* ros_ELO */
4
}, /* Rodas3 */
{
{
0.1544000000000000E+01, 0.9466785280815826E+00, 0.2557011698983284E+00, 0.3314825187068521E+01,
0.2896124015972201E+01, 0.9986419139977817E+00, 0.1221224509226641E+01, 0.6019134481288629E+01,
0.1253708332932087E+02, -0.6878860361058950E+00, 0.1221224509226641E+01, 0.6019134481288629E+01,
0.1253708332932087E+02, -0.6878860361058950E+00, 1.0E+00}, /* ros_A */
{
-0.5668800000000000E+01, -0.2430093356833875E+01, -0.2063599157091915E+00, -0.1073529058151375E+00,
-0.9594562251023355E+01, -0.2047028614809616E+02, 0.7496443313967647E+01, -0.1024680431464352E+02,
-0.3399990352819905E+02, 0.1170890893206160E+02, 0.8083246795921522E+01, -0.7981132988064893E+01,
-0.3152159432874371E+02, 0.1631930543123136E+02, -0.6058818238834054E+01}, /* ros_C */
{1,1,1,1,1,1,0,0}, /* ros_NewF */
{0.1221224509226641E+01,0.6019134481288629E+01,0.1253708332932087E+02,- 0.6878860361058950E+00,1,1}, /* ros_M */
{0,0,0,0,0,1.0}, /* ros_E */
{0.000, 0.386, 0.210, 0.630, 1.000, 1.000}, /* ros_Alpha */
{0.2500000000000000E+00, -0.1043000000000000E+00, 0.1035000000000000E+00, 0.3620000000000023E-01, 0, 0}, /* ros_Gamma */
4.0, /* ros_ELO */
6
} /* Rodas4 */
};
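/* Each entry holds one method's coefficients in fixed-size arrays sized for
   the largest scheme (Rodas4, 6 stages); ros_S gives the number of stages
   actually used, so the trailing entries of smaller methods stay zero. */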
//__device__ double rconst_local[MAX_VL_GLO*NREACT];
/* Initialize rconst local */
//__device__ double * rconst_local;
__device__ double k_3rd(double temp, double cair, double k0_300K, double n, double kinf_300K, double m, double fc)
/*
*
* temp temperature [K]
* cair air concentration [molecules/cm3]
* k0_300K low pressure limit at 300 K
* n exponent for low pressure limit
* kinf_300K high pressure limit at 300 K
* m exponent for high pressure limit
* fc broadening factor (usually fc=0.6)
*
*/
{
double zt_help, k0_T, kinf_T, k_ratio, k_3rd_r;
zt_help = 300.0/temp;
k0_T = k0_300K *pow(zt_help,n) *cair;
kinf_T = kinf_300K *pow(zt_help,m);
k_ratio = k0_T/kinf_T;
k_3rd_r = k0_T/(1.0+ k_ratio)*pow(fc,1.0/(1.0+ pow(log10(k_ratio),2)));
return k_3rd_r;
}
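/*
 * This is the standard Troe fall-off expression:
 *   k = k0(T)*cair / (1 + k0(T)*cair/kinf(T)) * fc^(1/(1 + log10(k0(T)*cair/kinf(T))^2))
 * Example taken from update_rconst() below (NO2 + HO2 association):
 *   k_NO2_HO2 = k_3rd(temp_loc, cair_loc, 1.9E-31, 3.4, 4.0E-12, 0.3, 0.6);
 */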
__device__ double k_3rd_iupac(double temp, double cair, double k0_300K, double n, double kinf_300K, double m, double fc)
/*
*
* temp temperature [K]
* cair air concentration [molecules/cm3]
* k0_300K low pressure limit at 300 K
* n exponent for low pressure limit
* kinf_300K high pressure limit at 300 K
* m exponent for high pressure limit
* fc broadening factor (e.g. 0.45 or 0.6...)
 * nu broadening exponent N (computed internally from fc)
*
*/
{
double zt_help, k0_T, kinf_T, k_ratio, nu, k_3rd_iupac_r;
zt_help = 300.0/temp;
k0_T = k0_300K *pow(zt_help,n) *cair;
kinf_T = kinf_300K *pow(zt_help,m);
k_ratio = k0_T/kinf_T;
nu = 0.75- 1.27*log10(fc);
k_3rd_iupac_r = k0_T/(1.0+ k_ratio)*pow(fc,1.0/(1.0+ pow(log10(k_ratio)/nu,2)));
return k_3rd_iupac_r;
}
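/*
 * Same fall-off blending as k_3rd above, but with the IUPAC broadening
 * exponent nu = 0.75 - 1.27*log10(fc) dividing log10(k_ratio) in the
 * exponent of fc. Example from update_rconst() below (BrO + NO2 association):
 *   k_BrO_NO2 = k_3rd_iupac(temp_loc, cair_loc, 4.7E-31, 3.1, 1.8E-11, 0.0, 0.4);
 */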
double * temp_gpu;
double * press_gpu;
double * cair_gpu;
double * Ghimj;
double * K;
double * varNew;
double * Fcn0;
double * dFdT;
double * jac0;
double * varErr;
double * var;
double * fix;
double * rconst;
__device__ void update_rconst(const double * __restrict__ var,
const double * __restrict__ khet_st, const double * __restrict__ khet_tr,
const double * __restrict__ jx, double * __restrict__ rconst,
const double * __restrict__ temp_gpu,
const double * __restrict__ press_gpu,
const double * __restrict__ cair_gpu,
const int VL_GLO)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
/* Set local buffer */
{
const double temp_loc = temp_gpu[index];
const double press_loc = press_gpu[index];
const double cair_loc = cair_gpu[index];
double k_HO2_HO2, k_NO3_NO2, k_NO2_HO2, k_HNO3_OH, k_CH3OOH_OH, k_ClO_ClO, k_BrO_NO2, k_I_NO2, k_DMS_OH, k_CH2OO_SO2, k_O3s, beta_null_CH3NO3, beta_inf_CH3NO3, beta_CH3NO3, k_NO2_CH3O2, k_C6H5O2_NO2, k_CH2OO_NO2, beta_C2H5NO3, alpha_NO_HO2, beta_NO_HO2, k0_NO_HO2, k2d_NO_HO2, k1d_NO_HO2, k2w_NO_HO2, k1w_NO_HO2, k_PrO2_HO2, k_PrO2_NO, k_PrO2_CH3O2, G7402a_yield, k_CH3CO3_NO2, k_PAN_M, KRO2NO, KRO2HO2[12], KAPNO, KRO2NO3, KNO3AL, KAPHO2, k_CH3O2, k_RO2RCO3, k_RO2pRO2, k_RO2sRO2, k_RO2tRO2, k_RO2pORO2, k_RO2sORO2, k_RO2tORO2, k_RO2LISOPACO2, k_RO2ISOPBO2, k_RO2ISOPDO2, k_p, k_s, k_t, k_rohro, k_co2h, k_adp, k_ads, k_adt, KHSB, KHSD, K16HSZ14, K16HSZ41, K16HS, K15HSDHB, K14HSAL, K15HS24VYNAL, K15HS42VYNAL, KHYDEC, k_CH2CHOH_OH_HCOOH, k_CH2CHOH_OH_ALD, k_CH2CHOH_HCOOH, k_ALD_HCOOH, J_IC3H7NO3, J_ACETOL, J_HPALD, J_KETENE, RO2, k1_RO2RCO3, k1_RO2pRO2, k1_RO2sRO2, k1_RO2tRO2, k1_RO2pORO2, k1_RO2sORO2, k1_RO2tORO2, k1_RO2LISOPACO2, k1_RO2ISOPBO2, k1_RO2ISOPDO2;
k_HO2_HO2 = (3.0E-13 *exp(460. / temp_loc)+ 2.1E-33 *exp(920. / temp_loc) *cair_loc) * (1.+ 1.4E-21 *exp(2200. / temp_loc) *var(index,ind_H2O));
k_NO3_NO2 = k_3rd(temp_loc , cair_loc , 2.4E-30 , 3.0 , 1.6E-12 , - 0.1 , 0.6);
k_NO2_HO2 = k_3rd(temp_loc , cair_loc , 1.9E-31 , 3.4 , 4.0E-12 , 0.3 , 0.6);
k_HNO3_OH = 1.32E-14 *exp(527. / temp_loc) + 1. / (1. / (7.39E-32 *exp(453. / temp_loc) *cair_loc) + 1. / (9.73E-17 *exp(1910. / temp_loc)));
k_CH3OOH_OH = 5.3E-12 *exp(190. / temp_loc);
k_ClO_ClO = k_3rd(temp_loc , cair_loc , 1.9E-32 , 3.6 , 3.7E-12 , 1.6 , 0.6);
k_BrO_NO2 = k_3rd_iupac(temp_loc , cair_loc , 4.7E-31 , 3.1 , 1.8E-11 , 0.0 , 0.4);
k_I_NO2 = k_3rd_iupac(temp_loc , cair_loc , 3.0E-31 , 1.0 , 6.6E-11 , 0.0 , 0.63);
k_DMS_OH = 1.E-9 *exp(5820. / temp_loc) *var(index,ind_O2) / (1.E30+ 5. *exp(6280. / temp_loc) *var(index,ind_O2));
k_CH2OO_SO2 = 3.66E-11;
k_O3s = (1.7E-12 *exp(- 940. / temp_loc)) *var(index,ind_OH) + (1.E-14 *exp(- 490. / temp_loc)) *var(index,ind_HO2) + jx(index,ip_O1D) *2.2E-10 *var(index,ind_H2O) / (3.2E-11 *exp(70. / temp_loc) *var(index,ind_O2) + 1.8E-11 *exp(110. / temp_loc) *var(index,ind_N2) + 2.2E-10 *var(index,ind_H2O));
beta_null_CH3NO3 = 0.00295 + 5.15E-22 *cair_loc * pow(temp_loc / 298, 7.4);
beta_inf_CH3NO3 = 0.022;
beta_CH3NO3 = (beta_null_CH3NO3 *beta_inf_CH3NO3) / (beta_null_CH3NO3 + beta_inf_CH3NO3) / 10.;
k_NO2_CH3O2 = k_3rd(temp_loc , cair_loc , 1.0E-30 , 4.8 , 7.2E-12 , 2.1 , 0.6);
k_C6H5O2_NO2 = k_NO2_CH3O2;
k_CH2OO_NO2 = 4.25E-12;
beta_C2H5NO3 = (1- 1 / (1+ 1.E-2 *(3.88e-3 *cair_loc / 2.46e19 *760.+ .365) *(1+ 1500. *(1 / temp_loc - 1 / 298.))));
alpha_NO_HO2 = var(index,ind_H2O) *6.6E-27 *temp_loc *exp(3700. / temp_loc);
beta_NO_HO2 = max(((530. / temp_loc)+ (press_loc *4.8004E-6)- 1.73) *0.01 , 0.);
k0_NO_HO2 = 3.5E-12 *exp(250. / temp_loc);
k2d_NO_HO2 = (beta_NO_HO2 *k0_NO_HO2) / (1.+ beta_NO_HO2);
k1d_NO_HO2 = k0_NO_HO2 - k2d_NO_HO2;
k2w_NO_HO2 = (beta_NO_HO2 *k0_NO_HO2 *(1.+ 42. *alpha_NO_HO2))/ ((1.+ alpha_NO_HO2) *(1.+ beta_NO_HO2));
k1w_NO_HO2 = k0_NO_HO2 - k2w_NO_HO2;
k_PrO2_HO2 = 1.9E-13 *exp(1300. / temp_loc);
k_PrO2_NO = 2.7E-12 *exp(360. / temp_loc);
k_PrO2_CH3O2 = 9.46E-14 *exp(431. / temp_loc);
G7402a_yield = 0.8 / 1.1;
k_CH3CO3_NO2 = k_3rd(temp_loc , cair_loc , 9.7E-29 , 5.6 , 9.3E-12 , 1.5 , 0.6);
k_PAN_M = k_CH3CO3_NO2 / (9.0E-29 *exp(14000. / temp_loc));
KRO2NO = 2.54E-12 *exp(360. / temp_loc);
/*KRO2HO2(:) = 2.91E-13 *exp(1300. / temp_loc) *(1.- exp(- 0.245 *(nC(:))));*/
for (int ii=0;ii<12;ii++) {
KRO2HO2[ii] = 2.91E-13 *exp(1300. / temp_loc) * (1.- exp(- 0.245 *float(ii+1)));
}
KAPNO = 8.10E-12 *exp(270. / temp_loc);
KRO2NO3 = 2.50E-12;
KNO3AL = 1.4E-12 *exp(- 1900. / temp_loc);
KAPHO2 = 5.20E-13 *exp(980. / temp_loc) *1.865;
k_CH3O2 = 1.03E-13 *exp(365. / temp_loc);
k_RO2RCO3 = 2. *2.E-12 *exp(500. / temp_loc);
k_RO2pRO2 = 2. * pow(1.E-12 *k_CH3O2, .5);
k_RO2sRO2 = 2. * pow(1.6E-12 *exp(- 2200. / temp_loc) *k_CH3O2, .5);
k_RO2tRO2 = 2. *3.8E-13 *exp(- 1430. / temp_loc);
k_RO2pORO2 = 2. *7.5E-13 *exp(500. / temp_loc);
k_RO2sORO2 = 2. * pow(7.7E-15 *exp(1330. / temp_loc) *k_CH3O2, .5);
k_RO2tORO2 = 2. * pow(4.7E-13 *exp(- 1420. / temp_loc) *k_CH3O2, .5);
k_RO2LISOPACO2 = 2. * pow((2.8E-12+ 3.9E-12) / 2. *k_CH3O2, .5);
k_RO2ISOPBO2 = 2. * pow(6.9E-14 *k_CH3O2, .5);
k_RO2ISOPDO2 = 2. * pow(4.8E-12 *k_CH3O2, .5);
k_p = 4.49E-18 *temp_loc *temp_loc *exp(- 320. / temp_loc);
k_s = 4.50E-18 *temp_loc *temp_loc *exp(253. / temp_loc);
k_t = 2.12E-18 *temp_loc *temp_loc *exp(696. / temp_loc);
k_rohro = 2.1E-18 *temp_loc *temp_loc *exp(- 85. / temp_loc);
k_co2h = .7 *4.2E-14 *exp(850. / temp_loc);
k_adp = 4.5E-12 * pow(temp_loc / 300., - 0.85);
k_ads = .25 *(1.1E-11 *exp(485. / temp_loc)+ 1.0E-11 *exp(553. / temp_loc));
k_adt = 1.922E-11 *exp(450. / temp_loc) - k_ads;
KHSB = 1.52E11 *exp(- 9512. / temp_loc) *1.;
KHSD = 6.08E10 *exp(- 8893. / temp_loc) *1.;
K16HSZ14 = 2.28E9 *exp(- 6764 / temp_loc) *0.28;
K16HSZ41 = 1.23E9 *exp(- 6186 / temp_loc) *0.28;
K16HS = pow(K16HSZ14 *K16HSZ41, .5);
K15HSDHB = 5.;
K14HSAL = 2.9E7 *exp(- 1 *(5297+ 705) / temp_loc);
K15HS24VYNAL = K16HSZ14 *exp(- 3500 / (1.987 *temp_loc));
K15HS42VYNAL = K16HSZ41 *exp(- 3500 / (1.987 *temp_loc));
KHYDEC = 6.e14 *exp(- 16000. / (1.98588 *temp_loc));
k_CH2CHOH_OH_HCOOH = 4.3E-11;
k_CH2CHOH_OH_ALD = 2.4E-11;
k_CH2CHOH_HCOOH = 4.67E-26 * pow(temp_loc, 3.286 *exp(4509. / (1.987 *temp_loc)));
k_ALD_HCOOH = 1.17E-19 * pow(temp_loc, 1.209 *exp(- 556. / (1.987 *temp_loc)));
J_IC3H7NO3 = 3.7 *jx(index,ip_PAN);
J_ACETOL = 0.65 *0.11 *jx(index,ip_CHOH);
J_HPALD = (jx(index,ip_CH3OOH)+ jx(index,ip_MACR) / (2. *1.95E-3));
J_KETENE = jx(index,ip_MVK) / 0.004;
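/* RO2 is the organic peroxy-radical pool: the concentrations of all RO2-type
   species known to this mechanism are summed below, and the pool is then used
   to form the pseudo-first-order cross-reaction rates k1_RO2* after the sum. */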
RO2 = 0.;
if (ind_LISOPACO2>0) RO2 = RO2 + var(index,ind_LISOPACO2);
if (ind_LDISOPACO2>0) RO2 = RO2 + var(index,ind_LDISOPACO2);
if (ind_ISOPBO2>0) RO2 = RO2 + var(index,ind_ISOPBO2);
if (ind_ISOPDO2>0) RO2 = RO2 + var(index,ind_ISOPDO2);
if (ind_LISOPEFO2>0) RO2 = RO2 + var(index,ind_LISOPEFO2);
if (ind_NISOPO2>0) RO2 = RO2 + var(index,ind_NISOPO2);
if (ind_LHC4ACCO3>0) RO2 = RO2 + var(index,ind_LHC4ACCO3);
if (ind_LC578O2>0) RO2 = RO2 + var(index,ind_LC578O2);
if (ind_C59O2>0) RO2 = RO2 + var(index,ind_C59O2);
if (ind_LNISO3>0) RO2 = RO2 + var(index,ind_LNISO3);
if (ind_CH3O2>0) RO2 = RO2 + var(index,ind_CH3O2);
if (ind_HOCH2O2>0) RO2 = RO2 + var(index,ind_HOCH2O2);
if (ind_CH3CO3>0) RO2 = RO2 + var(index,ind_CH3CO3);
if (ind_C2H5O2>0) RO2 = RO2 + var(index,ind_C2H5O2);
if (ind_HOCH2CO3>0) RO2 = RO2 + var(index,ind_HOCH2CO3);
if (ind_HYPROPO2>0) RO2 = RO2 + var(index,ind_HYPROPO2);
if (ind_LBUT1ENO2>0) RO2 = RO2 + var(index,ind_LBUT1ENO2);
if (ind_BUT2OLO2>0) RO2 = RO2 + var(index,ind_BUT2OLO2);
if (ind_HCOCO3>0) RO2 = RO2 + var(index,ind_HCOCO3);
if (ind_CO2H3CO3>0) RO2 = RO2 + var(index,ind_CO2H3CO3);
if (ind_LHMVKABO2>0) RO2 = RO2 + var(index,ind_LHMVKABO2);
if (ind_MACO3>0) RO2 = RO2 + var(index,ind_MACO3);
if (ind_MACRO2>0) RO2 = RO2 + var(index,ind_MACRO2);
if (ind_PRONO3BO2>0) RO2 = RO2 + var(index,ind_PRONO3BO2);
if (ind_HOCH2CH2O2>0) RO2 = RO2 + var(index,ind_HOCH2CH2O2);
if (ind_CH3COCH2O2>0) RO2 = RO2 + var(index,ind_CH3COCH2O2);
if (ind_IC3H7O2>0) RO2 = RO2 + var(index,ind_IC3H7O2);
if (ind_NC3H7O2>0) RO2 = RO2 + var(index,ind_NC3H7O2);
if (ind_LC4H9O2>0) RO2 = RO2 + var(index,ind_LC4H9O2);
if (ind_TC4H9O2>0) RO2 = RO2 + var(index,ind_TC4H9O2);
if (ind_LMEKO2>0) RO2 = RO2 + var(index,ind_LMEKO2);
if (ind_HCOCH2O2>0) RO2 = RO2 + var(index,ind_HCOCH2O2);
if (ind_EZCH3CO2CHCHO>0) RO2 = RO2 + var(index,ind_EZCH3CO2CHCHO);
if (ind_EZCHOCCH3CHO2>0) RO2 = RO2 + var(index,ind_EZCHOCCH3CHO2);
if (ind_CH3COCHO2CHO>0) RO2 = RO2 + var(index,ind_CH3COCHO2CHO);
if (ind_HCOCO2CH3CHO>0) RO2 = RO2 + var(index,ind_HCOCO2CH3CHO);
if (ind_C1ODC3O2C4OOH>0) RO2 = RO2 + var(index,ind_C1ODC3O2C4OOH);
if (ind_C1OOHC2O2C4OD>0) RO2 = RO2 + var(index,ind_C1OOHC2O2C4OD);
if (ind_C1ODC2O2C4OD>0) RO2 = RO2 + var(index,ind_C1ODC2O2C4OD);
if (ind_ISOPBDNO3O2>0) RO2 = RO2 + var(index,ind_ISOPBDNO3O2);
if (ind_LISOPACNO3O2>0) RO2 = RO2 + var(index,ind_LISOPACNO3O2);
if (ind_DB1O2>0) RO2 = RO2 + var(index,ind_DB1O2);
if (ind_DB2O2>0) RO2 = RO2 + var(index,ind_DB2O2);
if (ind_LME3FURANO2>0) RO2 = RO2 + var(index,ind_LME3FURANO2);
if (ind_NO3CH2CO3>0) RO2 = RO2 + var(index,ind_NO3CH2CO3);
if (ind_CH3COCO3>0) RO2 = RO2 + var(index,ind_CH3COCO3);
if (ind_ZCO3C23DBCOD>0) RO2 = RO2 + var(index,ind_ZCO3C23DBCOD);
if (ind_IBUTOLBO2>0) RO2 = RO2 + var(index,ind_IBUTOLBO2);
if (ind_IPRCO3>0) RO2 = RO2 + var(index,ind_IPRCO3);
if (ind_IC4H9O2>0) RO2 = RO2 + var(index,ind_IC4H9O2);
if (ind_LMBOABO2>0) RO2 = RO2 + var(index,ind_LMBOABO2);
if (ind_IPRHOCO3>0) RO2 = RO2 + var(index,ind_IPRHOCO3);
if (ind_LNMBOABO2>0) RO2 = RO2 + var(index,ind_LNMBOABO2);
if (ind_NC4OHCO3>0) RO2 = RO2 + var(index,ind_NC4OHCO3);
if (ind_LAPINABO2>0) RO2 = RO2 + var(index,ind_LAPINABO2);
if (ind_C96O2>0) RO2 = RO2 + var(index,ind_C96O2);
if (ind_C97O2>0) RO2 = RO2 + var(index,ind_C97O2);
if (ind_C98O2>0) RO2 = RO2 + var(index,ind_C98O2);
if (ind_C85O2>0) RO2 = RO2 + var(index,ind_C85O2);
if (ind_C86O2>0) RO2 = RO2 + var(index,ind_C86O2);
if (ind_PINALO2>0) RO2 = RO2 + var(index,ind_PINALO2);
if (ind_C96CO3>0) RO2 = RO2 + var(index,ind_C96CO3);
if (ind_C89CO3>0) RO2 = RO2 + var(index,ind_C89CO3);
if (ind_C85CO3>0) RO2 = RO2 + var(index,ind_C85CO3);
if (ind_OHMENTHEN6ONEO2>0) RO2 = RO2 + var(index,ind_OHMENTHEN6ONEO2);
if (ind_C511O2>0) RO2 = RO2 + var(index,ind_C511O2);
if (ind_C106O2>0) RO2 = RO2 + var(index,ind_C106O2);
if (ind_CO235C6CO3>0) RO2 = RO2 + var(index,ind_CO235C6CO3);
if (ind_CHOC3COCO3>0) RO2 = RO2 + var(index,ind_CHOC3COCO3);
if (ind_CO235C6O2>0) RO2 = RO2 + var(index,ind_CO235C6O2);
if (ind_C716O2>0) RO2 = RO2 + var(index,ind_C716O2);
if (ind_C614O2>0) RO2 = RO2 + var(index,ind_C614O2);
if (ind_HCOCH2CO3>0) RO2 = RO2 + var(index,ind_HCOCH2CO3);
if (ind_BIACETO2>0) RO2 = RO2 + var(index,ind_BIACETO2);
if (ind_CO23C4CO3>0) RO2 = RO2 + var(index,ind_CO23C4CO3);
if (ind_C109O2>0) RO2 = RO2 + var(index,ind_C109O2);
if (ind_C811CO3>0) RO2 = RO2 + var(index,ind_C811CO3);
if (ind_C89O2>0) RO2 = RO2 + var(index,ind_C89O2);
if (ind_C812O2>0) RO2 = RO2 + var(index,ind_C812O2);
if (ind_C813O2>0) RO2 = RO2 + var(index,ind_C813O2);
if (ind_C721CO3>0) RO2 = RO2 + var(index,ind_C721CO3);
if (ind_C721O2>0) RO2 = RO2 + var(index,ind_C721O2);
if (ind_C722O2>0) RO2 = RO2 + var(index,ind_C722O2);
if (ind_C44O2>0) RO2 = RO2 + var(index,ind_C44O2);
if (ind_C512O2>0) RO2 = RO2 + var(index,ind_C512O2);
if (ind_C513O2>0) RO2 = RO2 + var(index,ind_C513O2);
if (ind_CHOC3COO2>0) RO2 = RO2 + var(index,ind_CHOC3COO2);
if (ind_C312COCO3>0) RO2 = RO2 + var(index,ind_C312COCO3);
if (ind_HOC2H4CO3>0) RO2 = RO2 + var(index,ind_HOC2H4CO3);
if (ind_LNAPINABO2>0) RO2 = RO2 + var(index,ind_LNAPINABO2);
if (ind_C810O2>0) RO2 = RO2 + var(index,ind_C810O2);
if (ind_C514O2>0) RO2 = RO2 + var(index,ind_C514O2);
if (ind_CHOCOCH2O2>0) RO2 = RO2 + var(index,ind_CHOCOCH2O2);
if (ind_ROO6R1O2>0) RO2 = RO2 + var(index,ind_ROO6R1O2);
if (ind_ROO6R3O2>0) RO2 = RO2 + var(index,ind_ROO6R3O2);
if (ind_RO6R1O2>0) RO2 = RO2 + var(index,ind_RO6R1O2);
if (ind_RO6R3O2>0) RO2 = RO2 + var(index,ind_RO6R3O2);
if (ind_BPINAO2>0) RO2 = RO2 + var(index,ind_BPINAO2);
if (ind_C8BCO2>0) RO2 = RO2 + var(index,ind_C8BCO2);
if (ind_NOPINDO2>0) RO2 = RO2 + var(index,ind_NOPINDO2);
if (ind_LNBPINABO2>0) RO2 = RO2 + var(index,ind_LNBPINABO2);
if (ind_BZBIPERO2>0) RO2 = RO2 + var(index,ind_BZBIPERO2);
if (ind_C6H5CH2O2>0) RO2 = RO2 + var(index,ind_C6H5CH2O2);
if (ind_TLBIPERO2>0) RO2 = RO2 + var(index,ind_TLBIPERO2);
if (ind_BZEMUCCO3>0) RO2 = RO2 + var(index,ind_BZEMUCCO3);
if (ind_BZEMUCO2>0) RO2 = RO2 + var(index,ind_BZEMUCO2);
if (ind_C5DIALO2>0) RO2 = RO2 + var(index,ind_C5DIALO2);
if (ind_NPHENO2>0) RO2 = RO2 + var(index,ind_NPHENO2);
if (ind_PHENO2>0) RO2 = RO2 + var(index,ind_PHENO2);
if (ind_CRESO2>0) RO2 = RO2 + var(index,ind_CRESO2);
if (ind_NCRESO2>0) RO2 = RO2 + var(index,ind_NCRESO2);
if (ind_TLEMUCCO3>0) RO2 = RO2 + var(index,ind_TLEMUCCO3);
if (ind_TLEMUCO2>0) RO2 = RO2 + var(index,ind_TLEMUCO2);
if (ind_C615CO2O2>0) RO2 = RO2 + var(index,ind_C615CO2O2);
if (ind_MALDIALCO3>0) RO2 = RO2 + var(index,ind_MALDIALCO3);
if (ind_EPXDLCO3>0) RO2 = RO2 + var(index,ind_EPXDLCO3);
if (ind_C3DIALO2>0) RO2 = RO2 + var(index,ind_C3DIALO2);
if (ind_MALDIALO2>0) RO2 = RO2 + var(index,ind_MALDIALO2);
if (ind_C6H5O2>0) RO2 = RO2 + var(index,ind_C6H5O2);
if (ind_C6H5CO3>0) RO2 = RO2 + var(index,ind_C6H5CO3);
if (ind_OXYL1O2>0) RO2 = RO2 + var(index,ind_OXYL1O2);
if (ind_C5CO14O2>0) RO2 = RO2 + var(index,ind_C5CO14O2);
if (ind_NBZFUO2>0) RO2 = RO2 + var(index,ind_NBZFUO2);
if (ind_BZFUO2>0) RO2 = RO2 + var(index,ind_BZFUO2);
if (ind_HCOCOHCO3>0) RO2 = RO2 + var(index,ind_HCOCOHCO3);
if (ind_CATEC1O2>0) RO2 = RO2 + var(index,ind_CATEC1O2);
if (ind_MCATEC1O2>0) RO2 = RO2 + var(index,ind_MCATEC1O2);
if (ind_C5DICARBO2>0) RO2 = RO2 + var(index,ind_C5DICARBO2);
if (ind_NTLFUO2>0) RO2 = RO2 + var(index,ind_NTLFUO2);
if (ind_TLFUO2>0) RO2 = RO2 + var(index,ind_TLFUO2);
if (ind_NPHEN1O2>0) RO2 = RO2 + var(index,ind_NPHEN1O2);
if (ind_NNCATECO2>0) RO2 = RO2 + var(index,ind_NNCATECO2);
if (ind_NCATECO2>0) RO2 = RO2 + var(index,ind_NCATECO2);
if (ind_NBZQO2>0) RO2 = RO2 + var(index,ind_NBZQO2);
if (ind_PBZQO2>0) RO2 = RO2 + var(index,ind_PBZQO2);
if (ind_NPTLQO2>0) RO2 = RO2 + var(index,ind_NPTLQO2);
if (ind_PTLQO2>0) RO2 = RO2 + var(index,ind_PTLQO2);
if (ind_NCRES1O2>0) RO2 = RO2 + var(index,ind_NCRES1O2);
if (ind_MNNCATECO2>0) RO2 = RO2 + var(index,ind_MNNCATECO2);
if (ind_MNCATECO2>0) RO2 = RO2 + var(index,ind_MNCATECO2);
if (ind_MECOACETO2>0) RO2 = RO2 + var(index,ind_MECOACETO2);
if (ind_CO2H3CO3>0) RO2 = RO2 + var(index,ind_CO2H3CO3);
if (ind_MALANHYO2>0) RO2 = RO2 + var(index,ind_MALANHYO2);
if (ind_NDNPHENO2>0) RO2 = RO2 + var(index,ind_NDNPHENO2);
if (ind_DNPHENO2>0) RO2 = RO2 + var(index,ind_DNPHENO2);
if (ind_NDNCRESO2>0) RO2 = RO2 + var(index,ind_NDNCRESO2);
if (ind_DNCRESO2>0) RO2 = RO2 + var(index,ind_DNCRESO2);
if (ind_C5CO2OHCO3>0) RO2 = RO2 + var(index,ind_C5CO2OHCO3);
if (ind_C6CO2OHCO3>0) RO2 = RO2 + var(index,ind_C6CO2OHCO3);
if (ind_MMALANHYO2>0) RO2 = RO2 + var(index,ind_MMALANHYO2);
if (ind_ACCOMECO3>0) RO2 = RO2 + var(index,ind_ACCOMECO3);
if (ind_C4CO2DBCO3>0) RO2 = RO2 + var(index,ind_C4CO2DBCO3);
if (ind_C5CO2DBCO3>0) RO2 = RO2 + var(index,ind_C5CO2DBCO3);
if (ind_NSTYRENO2>0) RO2 = RO2 + var(index,ind_NSTYRENO2);
if (ind_STYRENO2>0) RO2 = RO2 + var(index,ind_STYRENO2);
k1_RO2RCO3 = RO2 *k_RO2RCO3;
k1_RO2pRO2 = RO2 *k_RO2pRO2;
k1_RO2sRO2 = RO2 *k_RO2sRO2;
k1_RO2tRO2 = RO2 *k_RO2tRO2;
k1_RO2pORO2 = RO2 *k_RO2pORO2;
k1_RO2sORO2 = RO2 *k_RO2sORO2;
k1_RO2tORO2 = RO2 *k_RO2tORO2;
k1_RO2LISOPACO2 = RO2 *k_RO2LISOPACO2;
k1_RO2ISOPBO2 = RO2 *k_RO2ISOPBO2;
k1_RO2ISOPDO2 = RO2 *k_RO2ISOPDO2;
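/* Temperature/pressure-dependent rate coefficients follow, one assignment per
   reaction, indexed 0-based by reaction number. Gaps in the index sequence are
   filled by the block of constant coefficients at the end of this routine. */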
rconst(index,0) = (3.3E-11 *exp(55. / temp_loc));
rconst(index,1) = (6.0E-34 *( pow(temp_loc / 300., - 2.4) )*cair_loc);
rconst(index,3) = (8.0E-12 *exp(- 2060. / temp_loc));
rconst(index,4) = (k_3rd(temp_loc , cair_loc , 4.4E-32 , 1.3 , 7.5E-11 , - 0.2 , 0.6));
rconst(index,5) = (1.4E-10 *exp(- 470. / temp_loc));
rconst(index,7) = (1.8E-11 *exp(180. / temp_loc));
rconst(index,8) = (1.7E-12 *exp(- 940. / temp_loc));
rconst(index,9) = (2.8E-12 *exp(- 1800. / temp_loc));
rconst(index,10) = (3.E-11 *exp(200. / temp_loc));
rconst(index,11) = (1.E-14 *exp(- 490. / temp_loc));
rconst(index,15) = (4.8E-11 *exp(250. / temp_loc));
rconst(index,16) = (k_HO2_HO2);
rconst(index,17) = (1.63E-10 *exp(60. / temp_loc));
rconst(index,19) = (1.5E-11 *exp(- 3600. / temp_loc));
rconst(index,20) = (2.15E-11 *exp(110. / temp_loc));
rconst(index,21) = (7.259E-11 *exp(20. / temp_loc));
rconst(index,22) = (4.641E-11 *exp(20. / temp_loc));
rconst(index,23) = (3.0E-12 *exp(- 1500. / temp_loc));
rconst(index,24) = (2.1E-11 *exp(100. / temp_loc));
rconst(index,25) = (5.1E-12 *exp(210. / temp_loc));
rconst(index,26) = (1.2E-13 *exp(- 2450. / temp_loc));
rconst(index,27) = (5.8E-12 *exp(220. / temp_loc));
rconst(index,28) = (1.5E-11 *exp(170. / temp_loc));
rconst(index,29) = (k_NO3_NO2);
rconst(index,30) = (k_NO3_NO2 / (5.8E-27 *exp(10840. / temp_loc)));
rconst(index,31) = (k_3rd(temp_loc , cair_loc , 7.0E-31 , 2.6 , 3.6E-11 , 0.1 , 0.6));
rconst(index,32) = (3.3E-12 *exp(270. / temp_loc));
rconst(index,33) = (k_3rd(temp_loc , cair_loc , 1.8E-30 , 3.0 , 2.8E-11 , 0. , 0.6));
rconst(index,34) = (k_NO2_HO2);
rconst(index,36) = (1.8E-11 *exp(- 390. / temp_loc));
rconst(index,37) = (k_HNO3_OH);
rconst(index,38) = (k_NO2_HO2 / (2.1E-27 *exp(10900. / temp_loc)));
rconst(index,39) = (1.3E-12 *exp(380. / temp_loc));
rconst(index,40) = (1.7E-12 *exp(- 710. / temp_loc));
rconst(index,41) = (4.3E-12 *exp(- 930. / temp_loc));
rconst(index,42) = (4.8E-07 *exp(- 628. / temp_loc) * pow(temp_loc, - 1.32) );
rconst(index,43) = (9.4E-09 *exp(- 356. / temp_loc) * pow(temp_loc, - 1.12) );
rconst(index,44) = (1.92E-12 *( pow(temp_loc / 298., - 1.5) ));
rconst(index,45) = (1.41E-11 *( pow(temp_loc / 298., - 1.5) ));
rconst(index,46) = (1.2E-11 *( pow(temp_loc / 298., - 2.0) ));
rconst(index,47) = (0.8E-11 *( pow(temp_loc / 298., - 2.0) ));
rconst(index,50) = (8.0E-11 *exp(- 500. / temp_loc));
rconst(index,51) = (1.66E-12 *exp(- 1500. / temp_loc));
rconst(index,52) = (1.0E-12 *exp(- 1000. / temp_loc));
rconst(index,54) = (4.13E-11 *exp(- 2138. / temp_loc));
rconst(index,55) = (3.65E-14 *exp(- 4600. / temp_loc));
rconst(index,57) = (1.85E-20 *exp(2.82 *log(temp_loc)- 987. / temp_loc));
rconst(index,58) = (2.9E-12 *exp(- 345. / temp_loc));
rconst(index,59) = (4.1E-13 *exp(750. / temp_loc));
rconst(index,60) = (2.8E-12 *exp(300. / temp_loc));
rconst(index,62) = (9.5E-14 *exp(390. / temp_loc) / (1.+ 1. / 26.2 *exp(1130. / temp_loc)));
rconst(index,63) = (9.5E-14 *exp(390. / temp_loc) / (1.+ 26.2 *exp(- 1130. / temp_loc)));
rconst(index,64) = (k_CH3OOH_OH);
rconst(index,65) = (9.52E-18 *exp(2.03 *log(temp_loc)+ 636. / temp_loc));
rconst(index,66) = (3.4E-13 *exp(- 1900. / temp_loc));
rconst(index,67) = ((1.57E-13+ cair_loc *3.54E-33));
rconst(index,69) = (1.49E-17 *temp_loc *temp_loc *exp(- 499. / temp_loc));
rconst(index,70) = (1.2E-14 *exp(- 2630. / temp_loc));
rconst(index,71) = (k_3rd(temp_loc , cair_loc , 1.0E-28 , 4.5 , 7.5E-12 , 0.85 , 0.6));
rconst(index,72) = (7.5E-13 *exp(700. / temp_loc));
rconst(index,73) = (2.6E-12 *exp(365. / temp_loc));
rconst(index,75) = (1.6E-13 *exp(195. / temp_loc));
rconst(index,76) = (k_CH3OOH_OH);
rconst(index,77) = (4.4E-12 *exp(365. / temp_loc));
rconst(index,78) = (1.4E-12 *exp(- 1900. / temp_loc));
rconst(index,79) = (4.2E-14 *exp(855. / temp_loc));
rconst(index,80) = (4.3E-13 *exp(1040. / temp_loc) / (1.+ 1. / 37. *exp(660. / temp_loc)));
rconst(index,81) = (4.3E-13 *exp(1040. / temp_loc) / (1.+ 37. *exp(- 660. / temp_loc)));
rconst(index,82) = (8.1E-12 *exp(270. / temp_loc));
rconst(index,83) = (k_CH3CO3_NO2);
rconst(index,85) = (0.9 *2.0E-12 *exp(500. / temp_loc));
rconst(index,86) = (0.1 *2.0E-12 *exp(500. / temp_loc));
rconst(index,87) = (4.9E-12 *exp(211. / temp_loc));
rconst(index,88) = (2.5E-12 *exp(500. / temp_loc));
rconst(index,89) = (0.6 *k_CH3OOH_OH);
rconst(index,90) = (5.6E-12 *exp(270. / temp_loc));
rconst(index,91) = (9.50E-13 *exp(- 650. / temp_loc));
rconst(index,92) = (k_PAN_M);
rconst(index,93) = (1.65E-17 *temp_loc *temp_loc *exp(- 87. / temp_loc));
rconst(index,94) = (6.5E-15 *exp(- 1900. / temp_loc));
rconst(index,95) = (k_3rd(temp_loc , cair_loc , 8.E-27 , 3.5 , 3.E-11 , 0. , 0.5));
rconst(index,96) = (4.6E-13 *exp(- 1155. / temp_loc));
rconst(index,97) = (k_PrO2_HO2);
rconst(index,98) = (k_PrO2_NO);
rconst(index,99) = (k_PrO2_CH3O2);
rconst(index,100) = (k_CH3OOH_OH);
rconst(index,101) = (6.5E-13 *exp(650. / temp_loc));
rconst(index,102) = (4.2E-12 *exp(180. / temp_loc));
rconst(index,103) = (3.8E-12 *exp(200. / temp_loc));
rconst(index,104) = (1.33E-13+ 3.82E-11 *exp(- 2000. / temp_loc));
rconst(index,105) = (8.6E-13 *exp(700. / temp_loc));
rconst(index,106) = (2.9E-12 *exp(300. / temp_loc));
rconst(index,107) = (7.5E-13 *exp(500. / temp_loc));
rconst(index,108) = (k_CH3OOH_OH);
rconst(index,109) = (2.15E-12 *exp(305. / temp_loc));
rconst(index,110) = (8.4E-13 *exp(830. / temp_loc));
rconst(index,111) = (6.2E-13 *exp(- 230. / temp_loc));
rconst(index,112) = (1.81E-17 *temp_loc *temp_loc *exp(114. / temp_loc));
rconst(index,113) = (k_PrO2_CH3O2);
rconst(index,114) = (k_PrO2_HO2);
rconst(index,115) = (k_PrO2_NO);
rconst(index,116) = (k_CH3OOH_OH);
rconst(index,117) = (.5 *(1.36E-15 *exp(- 2112. / temp_loc)+ 7.51E-16 *exp(- 1521. / temp_loc)));
rconst(index,118) = (.5 *(4.1E-12 *exp(452. / temp_loc)+ 1.9E-11 *exp(175. / temp_loc)));
rconst(index,119) = (1.82E-13 *exp(1300. / temp_loc));
rconst(index,120) = (2.54E-12 *exp(360. / temp_loc));
rconst(index,121) = (.25 *k_3rd(temp_loc , cair_loc , 9.7E-29 , 5.6 , 9.3E-12 , 1.5 , 0.6));
rconst(index,125) = (1.3E-12 *exp(- 25. / temp_loc));
rconst(index,126) = (k_PrO2_HO2);
rconst(index,127) = (k_PrO2_NO);
rconst(index,128) = (k_CH3OOH_OH);
rconst(index,131) = (k_PAN_M);
rconst(index,132) = (7.86E-15 *exp(- 1913. / temp_loc));
rconst(index,133) = (2.54E-11 *exp(410. / temp_loc));
rconst(index,134) = (3.03E-12 *exp(- 446. / temp_loc));
rconst(index,135) = (2.22E-13 *exp(1300. / temp_loc));
rconst(index,136) = (2.54E-12 *exp(360. / temp_loc));
rconst(index,141) = (2.8E-11 *exp(- 250. / temp_loc));
rconst(index,142) = (2.5E-11 *exp(110. / temp_loc));
rconst(index,143) = (1.0E-12 *exp(- 1590. / temp_loc));
rconst(index,144) = (3.0E-11 *exp(- 2450. / temp_loc));
rconst(index,145) = (3.5E-13 *exp(- 1370. / temp_loc));
rconst(index,146) = (k_ClO_ClO);
rconst(index,147) = (k_ClO_ClO / (2.16E-27 *exp(8537. / temp_loc)));
rconst(index,148) = (3.9E-11 *exp(- 2310. / temp_loc));
rconst(index,149) = (4.4E-11- 7.5E-11 *exp(- 620. / temp_loc));
rconst(index,150) = (7.5E-11 *exp(- 620. / temp_loc));
rconst(index,151) = (1.1E-11 *exp(- 980. / temp_loc));
rconst(index,152) = (7.3E-12 *exp(300. / temp_loc));
rconst(index,153) = (2.2E-12 *exp(340. / temp_loc));
rconst(index,154) = (1.7E-12 *exp(- 230. / temp_loc));
rconst(index,155) = (3.0E-12 *exp(- 500. / temp_loc));
rconst(index,156) = (6.2E-12 *exp(295. / temp_loc));
rconst(index,157) = (k_3rd_iupac(temp_loc , cair_loc , 1.6E-31 , 3.4 , 7.E-11 , 0. , 0.4));
rconst(index,158) = (6.918E-7 *exp(- 10909. / temp_loc) *cair_loc);
rconst(index,159) = (4.5E-12 *exp(- 900. / temp_loc));
rconst(index,160) = (6.2E-12 *exp(145. / temp_loc));
rconst(index,161) = (6.6E-12 *exp(- 1240. / temp_loc));
rconst(index,162) = (8.1E-11 *exp(- 34. / temp_loc));
rconst(index,164) = (1.8E-12 *exp(- 600. / temp_loc));
rconst(index,167) = (1.96E-12 *exp(- 1200. / temp_loc));
rconst(index,169) = (1.64E-12 *exp(- 1520. / temp_loc));
rconst(index,170) = (k_3rd_iupac(temp_loc , cair_loc , 1.85E-29 , 3.3 , 6.0E-10 , 0.0 , 0.4));
rconst(index,174) = (1.7E-11 *exp(- 800. / temp_loc));
rconst(index,175) = (1.9E-11 *exp(230. / temp_loc));
rconst(index,177) = (2.9E-14 *exp(840. / temp_loc));
rconst(index,178) = (7.7E-12 *exp(- 450. / temp_loc));
rconst(index,179) = (4.5E-12 *exp(500. / temp_loc));
rconst(index,180) = (6.7E-12 *exp(155. / temp_loc));
rconst(index,181) = (1.2E-10 *exp(- 430. / temp_loc));
rconst(index,182) = (2.0E-11 *exp(240. / temp_loc));
rconst(index,184) = (8.7E-12 *exp(260. / temp_loc));
rconst(index,185) = (k_BrO_NO2);
rconst(index,186) = (k_BrO_NO2 / (5.44E-9 *exp(14192. / temp_loc) *1.E6 *R_gas *temp_loc / (atm2Pa *N_A)));
rconst(index,187) = (7.7E-12 *exp(- 580. / temp_loc));
rconst(index,188) = (2.6E-12 *exp(- 1600. / temp_loc));
rconst(index,189) = (G7402a_yield *5.7E-12);
rconst(index,190) = ((1.- G7402a_yield) *5.7E-12);
rconst(index,191) = (1.42E-12 *exp(- 1150. / temp_loc));
rconst(index,192) = (2.8E-13 *exp(224. / temp_loc) / (1.+ 1.13E24 *exp(- 3200. / temp_loc) / var(index,ind_O2)));
rconst(index,193) = (1.8e-11 *exp(- 460. / temp_loc));
rconst(index,194) = (9.0E-13 *exp(- 360. / temp_loc));
rconst(index,195) = (2.0E-12 *exp(- 840. / temp_loc));
rconst(index,198) = (2.3E-10 *exp(135. / temp_loc));
rconst(index,199) = (1.6E-12 *exp(430. / temp_loc));
rconst(index,200) = (2.9E-12 *exp(220. / temp_loc));
rconst(index,201) = (5.8E-13 *exp(170. / temp_loc));
rconst(index,203) = (2.0E-12 *exp(- 840. / temp_loc));
rconst(index,204) = (2.0E-12 *exp(- 840. / temp_loc));
rconst(index,205) = (2.1E-12 *exp(- 880. / temp_loc));
rconst(index,206) = (k_3rd(temp_loc , cair_loc , 3.3E-31 , 4.3 , 1.6E-12 , 0. , 0.6));
rconst(index,207) = (1.13E-11 *exp(- 253. / temp_loc));
rconst(index,208) = (k_DMS_OH);
rconst(index,209) = (1.9E-13 *exp(520. / temp_loc));
rconst(index,211) = (1.8E13 *exp(- 8661. / temp_loc));
rconst(index,215) = (9.E-11 *exp(- 2386. / temp_loc));
rconst(index,217) = (jx(index,ip_O2));
rconst(index,218) = (jx(index,ip_O1D));
rconst(index,219) = (jx(index,ip_O3P));
rconst(index,220) = (jx(index,ip_H2O));
rconst(index,221) = (jx(index,ip_H2O2));
rconst(index,222) = (jx(index,ip_N2O));
rconst(index,223) = (jx(index,ip_NO2));
rconst(index,224) = (jx(index,ip_NO));
rconst(index,225) = (jx(index,ip_NO2O));
rconst(index,226) = (jx(index,ip_NOO2));
rconst(index,227) = (jx(index,ip_N2O5));
rconst(index,228) = (jx(index,ip_HONO));
rconst(index,229) = (jx(index,ip_HNO3));
rconst(index,230) = (jx(index,ip_HNO4));
rconst(index,231) = (jx(index,ip_CH3OOH));
rconst(index,232) = (jx(index,ip_COH2));
rconst(index,233) = (jx(index,ip_CHOH));
rconst(index,234) = (jx(index,ip_CO2));
rconst(index,235) = (jx(index,ip_CH4));
rconst(index,236) = (jx(index,ip_CH3OOH));
rconst(index,237) = (jx(index,ip_CH3CHO));
rconst(index,238) = (jx(index,ip_CH3CO3H));
rconst(index,239) = (0.19 *jx(index,ip_CHOH));
rconst(index,240) = (jx(index,ip_PAN));
rconst(index,241) = (jx(index,ip_CH3OOH));
rconst(index,242) = (jx(index,ip_CH3COCH3));
rconst(index,243) = (0.074 *jx(index,ip_CHOH));
rconst(index,244) = (jx(index,ip_MGLYOX));
rconst(index,245) = (jx(index,ip_CH3OOH));
rconst(index,246) = (3.7 *jx(index,ip_PAN));
rconst(index,247) = (jx(index,ip_CH3OOH));
rconst(index,248) = (0.019 *jx(index,ip_COH2)+ .015 *jx(index,ip_MGLYOX));
rconst(index,249) = (jx(index,ip_CH3OOH));
rconst(index,250) = (0.42 *jx(index,ip_CHOH));
rconst(index,251) = (jx(index,ip_CH3OOH));
rconst(index,252) = (2.15 *jx(index,ip_MGLYOX));
rconst(index,253) = (3.7 *jx(index,ip_PAN));
rconst(index,254) = (jx(index,ip_PAN));
rconst(index,255) = (jx(index,ip_CH3OOH));
rconst(index,256) = (3.7 *jx(index,ip_PAN));
rconst(index,257) = (jx(index,ip_Cl2));
rconst(index,258) = (jx(index,ip_Cl2O2));
rconst(index,259) = (jx(index,ip_OClO));
rconst(index,260) = (jx(index,ip_HCl));
rconst(index,261) = (jx(index,ip_HOCl));
rconst(index,262) = (jx(index,ip_ClNO2));
rconst(index,263) = (jx(index,ip_ClNO3));
rconst(index,264) = (jx(index,ip_ClONO2));
rconst(index,265) = (jx(index,ip_CH3Cl));
rconst(index,266) = (jx(index,ip_CCl4));
rconst(index,267) = (jx(index,ip_CH3CCl3));
rconst(index,268) = (jx(index,ip_CFCl3));
rconst(index,269) = (jx(index,ip_CF2Cl2));
rconst(index,270) = (jx(index,ip_Br2));
rconst(index,271) = (jx(index,ip_BrO));
rconst(index,272) = (jx(index,ip_HOBr));
rconst(index,273) = (jx(index,ip_BrNO2));
rconst(index,274) = (jx(index,ip_BrNO3));
rconst(index,275) = (jx(index,ip_CH3Br));
rconst(index,276) = (jx(index,ip_CH2Br2));
rconst(index,277) = (jx(index,ip_CHBr3));
rconst(index,278) = (jx(index,ip_CF3Br));
rconst(index,279) = (jx(index,ip_BrCl));
rconst(index,280) = (jx(index,ip_CF2ClBr));
rconst(index,281) = (jx(index,ip_CH2ClBr));
rconst(index,282) = (jx(index,ip_CHCl2Br));
rconst(index,283) = (jx(index,ip_CHClBr2));
rconst(index,284) = (jx(index,ip_CH3I));
rconst(index,285) = (khet_st(index,ihs_N2O5_H2O));
rconst(index,286) = (khet_tr(index,iht_N2O5));
rconst(index,287) = (khet_st(index,ihs_HOCl_HCl));
rconst(index,288) = (khet_st(index,ihs_ClNO3_HCl));
rconst(index,289) = (khet_st(index,ihs_ClNO3_H2O));
rconst(index,290) = (khet_st(index,ihs_N2O5_HCl));
rconst(index,291) = (khet_st(index,ihs_HOBr_HBr));
rconst(index,292) = (khet_st(index,ihs_BrNO3_H2O));
rconst(index,293) = (khet_st(index,ihs_ClNO3_HBr));
rconst(index,294) = (khet_st(index,ihs_BrNO3_HCl));
rconst(index,295) = (khet_st(index,ihs_HOCl_HBr));
rconst(index,296) = (khet_st(index,ihs_HOBr_HCl));
rconst(index,297) = (k_O3s);
rconst(index,299) = (jx(index,ip_CFCl3));
rconst(index,301) = (jx(index,ip_CF2Cl2));
rconst(index,302) = (7.25E-11 *exp(20. / temp_loc));
rconst(index,303) = (4.63E-11 *exp(20. / temp_loc));
rconst(index,304) = (jx(index,ip_N2O));
rconst(index,306) = (1.64E-12 *exp(- 1520. / temp_loc));
rconst(index,307) = (jx(index,ip_CH3CCl3));
rconst(index,308) = (jx(index,ip_CF2ClBr));
rconst(index,309) = (jx(index,ip_CF3Br));
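/* Constant (temperature-independent) rate coefficients. The (n)-1 pattern keeps
   the original 1-based reaction numbering visible while writing into the 0-based
   rconst array, e.g. reaction 3 goes to rconst(index,2). */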
rconst(index,(3)-1) = 1.2e-10;
rconst(index,(7)-1) = 1.2e-10;
rconst(index,(13)-1) = 7.2e-11;
rconst(index,(14)-1) = 6.9e-12;
rconst(index,(15)-1) = 1.6e-12;
rconst(index,(19)-1) = 1.8e-12;
rconst(index,(36)-1) = 3.5e-12;
rconst(index,(49)-1) = 1.2e-14;
rconst(index,(50)-1) = 1300;
rconst(index,(54)-1) = 1.66e-12;
rconst(index,(57)-1) = 1.75e-10;
rconst(index,(62)-1) = 1.3e-12;
rconst(index,(69)-1) = 4e-13;
rconst(index,(75)-1) = 2.3e-12;
rconst(index,(85)-1) = 4e-12;
rconst(index,(123)-1) = 2e-12;
rconst(index,(124)-1) = 2e-12;
rconst(index,(125)-1) = 3e-11;
rconst(index,(130)-1) = 1.7e-12;
rconst(index,(131)-1) = 3.2e-11;
rconst(index,(138)-1) = 2e-12;
rconst(index,(139)-1) = 2e-12;
rconst(index,(140)-1) = 1e-10;
rconst(index,(141)-1) = 1.3e-11;
rconst(index,(164)-1) = 5.9e-11;
rconst(index,(166)-1) = 3.3e-10;
rconst(index,(167)-1) = 1.65e-10;
rconst(index,(169)-1) = 3.25e-10;
rconst(index,(172)-1) = 8e-11;
rconst(index,(173)-1) = 1.4e-10;
rconst(index,(174)-1) = 2.3e-10;
rconst(index,(177)-1) = 2.7e-12;
rconst(index,(184)-1) = 4.9e-11;
rconst(index,(197)-1) = 3.32e-15;
rconst(index,(198)-1) = 1.1e-15;
rconst(index,(203)-1) = 1.45e-11;
rconst(index,(211)-1) = 1e-10;
rconst(index,(213)-1) = 3e-13;
rconst(index,(214)-1) = 5e-11;
rconst(index,(215)-1) = 3.3e-10;
rconst(index,(217)-1) = 4.4e-13;
rconst(index,(299)-1) = 2.3e-10;
rconst(index,(301)-1) = 1.4e-10;
rconst(index,(306)-1) = 3e-10;
}
}
__global__
void Rosenbrock(double * __restrict__ conc, const double Tstart, const double Tend, double * __restrict__ rstatus, int * __restrict__ istatus,
// values calculated from icntrl and rcntrl at host
const int autonomous, const int vectorTol, const int UplimTol, const int method, const int Max_no_steps,
const double Hmin, const double Hmax, const double Hstart, const double FacMin, const double FacMax, const double FacRej, const double FacSafe, const double roundoff,
// cuda global mem buffers
const double * __restrict__ absTol, const double * __restrict__ relTol,
// for update_rconst
const double * __restrict__ khet_st, const double * __restrict__ khet_tr,
const double * __restrict__ jx,
// global input
const double * __restrict__ temp_gpu,
const double * __restrict__ press_gpu,
const double * __restrict__ cair_gpu,
// extra
const int VL_GLO)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
/* Temporary arrays allocated in stack */
/*
* Optimization NOTE: runs faster on Tesla/Fermi
 * when temporaries are allocated on the stack instead of the heap.
 * In theory accesses could be aggregated (coalesced), but because of
 * the algorithm each thread touches different parts of memory, which
 * makes such an optimization harder.
*
*/
double varNew_stack[NVAR];
double var_stack[NSPEC];
double varErr_stack[NVAR];
double fix_stack[NFIX];
double Fcn0_stack[NVAR];
double jac0_stack[LU_NONZERO];
double dFdT_stack[NVAR];
double Ghimj_stack[LU_NONZERO];
double K_stack[6*NVAR];
/* Allocated in Global mem */
double rconst_stack[NREACT];
/* Allocated in stack */
double *Ghimj = Ghimj_stack;
double *K = K_stack;
double *varNew = varNew_stack;
double *Fcn0 = Fcn0_stack;
double *dFdT = dFdT_stack;
double *jac0 = jac0_stack;
double *varErr = varErr_stack;
double *var = var_stack;
double *fix = fix_stack;
double *rconst = rconst_stack;
if (index < VL_GLO)
{
int Nfun,Njac,Nstp,Nacc,Nrej,Ndec,Nsol,Nsng;
double Texit, Hexit;
Nfun = 0;
Njac = 0;
Nstp = 0;
Nacc = 0;
Nrej = 0;
Ndec = 0;
Nsol = 0;
Nsng = 0;
/* FIXME: add check for method */
const double *ros_A = &ros[method-1].ros_A[0];
const double *ros_C = &ros[method-1].ros_C[0];
const double *ros_M = &ros[method-1].ros_M[0];
const double *ros_E = &ros[method-1].ros_E[0];
const double *ros_Alpha = &ros[method-1].ros_Alpha[0];
const double *ros_Gamma = &ros[method-1].ros_Gamma[0];
const int *ros_NewF = &ros[method-1].ros_NewF[0];
const int ros_S = ros[method-1].ros_S;
const double ros_ELO = ros[method-1].ros_ELO;
/* Copy data from global memory to temporary array */
/*
* Optimization note: if we ever have enough constant
* memory, we could use it for storing the data.
* In current architectures if we use constant memory
* only a few threads will be able to run on the fly.
*
*/
for (int i=0; i<NSPEC; i++)
var(index,i) = conc(index,i);
for (int i=0; i<NFIX; i++)
fix(index,i) = conc(index,NVAR+i);
update_rconst(var, khet_st, khet_tr, jx, rconst, temp_gpu, press_gpu, cair_gpu, VL_GLO);
ros_Integrator(var, fix, Tstart, Tend, Texit,
// Rosenbrock method coefficients
ros_S, ros_M, ros_E, ros_A, ros_C,
ros_Alpha, ros_Gamma, ros_ELO, ros_NewF,
// Integration parameters
autonomous, vectorTol, Max_no_steps,
roundoff, Hmin, Hmax, Hstart, Hexit,
FacMin, FacMax, FacRej, FacSafe,
// Status parameters
Nfun, Njac, Nstp, Nacc, Nrej, Ndec, Nsol, Nsng,
// cuda global mem buffers
rconst, absTol, relTol, varNew, Fcn0,
K, dFdT, jac0, Ghimj, varErr,
// For update rconst
khet_st, khet_tr, jx,
VL_GLO
);
for (int i=0; i<NVAR; i++)
conc(index,i) = var(index,i);
/* Statistics */
istatus(index,ifun) = Nfun;
istatus(index,ijac) = Njac;
istatus(index,istp) = Nstp;
istatus(index,iacc) = Nacc;
istatus(index,irej) = Nrej;
istatus(index,idec) = Ndec;
istatus(index,isol) = Nsol;
istatus(index,isng) = Nsng;
// Last T and H
rstatus(index,itexit) = Texit;
rstatus(index,ihexit) = Hexit;
}
}
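/*
 * The routines below are a ROS3-specialized variant of the generic integrator:
 * ros_Integrator_ros3 hard-codes its three-stage coefficients as literals
 * (gamma = 0.43586652150845899941601945119356, etc.), presumably the same
 * values held in the ros[] entry for the ros3 method, and its step-size
 * controller uses Err^(1/3), consistent with a third-order error estimate.
 */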
__device__ static int ros_Integrator_ros3(double * __restrict__ var, const double * __restrict__ fix, const double Tstart, const double Tend, double &T,
// Integration parameters
const int autonomous, const int vectorTol, const int Max_no_steps,
const double roundoff, const double Hmin, const double Hmax, const double Hstart, double &Hexit,
const double FacMin, const double FacMax, const double FacRej, const double FacSafe,
// Status parameters
int &Nfun, int &Njac, int &Nstp, int &Nacc, int &Nrej, int &Ndec, int &Nsol, int &Nsng,
// cuda global mem buffers
const double * __restrict__ rconst, const double * __restrict__ absTol, const double * __restrict__ relTol, double * __restrict__ varNew, double * __restrict__ Fcn0,
double * __restrict__ K, double * __restrict__ dFdT, double * __restrict__ jac0, double * __restrict__ Ghimj, double * __restrict__ varErr,
// for update_rconst
const double * __restrict__ khet_st, const double * __restrict__ khet_tr,
const double * __restrict__ jx,
// VL_GLO
const int VL_GLO)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
double H, Hnew, HC, HC0,HC1, HG, Fac; // Tau - not used
double Err; //*varErr;
int direction;
int rejectLastH, rejectMoreH;
const double DELTAMIN = 1.0E-5;
const int ros_S = 3;
// ~~~> Initial preparations
T = Tstart;
Hexit = 0.0;
H = fmin(Hstart,Hmax);
if (fabs(H) <= 10.0*roundoff)
H = DELTAMIN;
if (Tend >= Tstart)
{
direction = + 1;
}
else
{
direction = - 1;
}
rejectLastH=0;
rejectMoreH=0;
// TimeLoop:
while( ((direction > 0) && ((T - Tend) + roundoff <= ZERO)) || ((direction < 0) && ((Tend - T) + roundoff <= ZERO)) )
{
if (Nstp > Max_no_steps) // Too many steps
return -6;
// Step size too small
if (H <= roundoff){ // Step size too small
//if (((T+ 0.1*H) == T) || (H <= roundoff)) {
return -7;
}
// ~~~> Limit H if necessary to avoid going beyond Tend
Hexit = H;
H = fmin(H,fabs(Tend-T));
// ~~~> Compute the function at current time
Fun(var, fix, rconst, Fcn0, Nfun, VL_GLO);
// ~~~> Compute the function derivative with respect to T
if (!autonomous)
ros_FunTimeDerivative(T, roundoff, var, fix, rconst, dFdT, Fcn0, Nfun, khet_st, khet_tr, jx, VL_GLO); /// VAR READ - fcn0 read
// ~~~> Compute the Jacobian at current time
Jac_sp(var, fix, rconst, jac0, Njac, VL_GLO); /// VAR READ
// ~~~> Repeat step calculation until current step accepted
// UntilAccepted:
while(1)
{
ros_PrepareMatrix(H, direction, 0.43586652150845899941601945119356E+00 , jac0, Ghimj, Nsng, Ndec, VL_GLO);
{ // istage=0
for (int i=0; i<NVAR; i++){
K(index,0,i) = Fcn0(index,i); // FCN0 Read
}
if ((!autonomous))
{
HG = direction*H*0.43586652150845899941601945119356E+00;
for (int i=0; i<NVAR; i++){
K(index,0,i) += dFdT(index,i)*HG;
}
}
ros_Solve(Ghimj, K, Nsol, 0, ros_S);
} // Stage
{ // istage = 1
for (int i=0; i<NVAR; i++){
varNew(index,i) = K(index,0,i) + var(index,i);
}
Fun(varNew, fix, rconst, varNew, Nfun,VL_GLO); // FCN <- varNew / not overlap
HC = -0.10156171083877702091975600115545E+01/(direction*H);
for (int i=0; i<NVAR; i++){
double tmp = K(index,0,i);
K(index,1,i) = tmp*HC + varNew(index,i);
}
if ((!autonomous))
{
HG = direction*H*0.24291996454816804366592249683314E+00;
for (int i=0; i<NVAR; i++){
K(index,1,i) += dFdT(index,i)*HG;
}
}
// R ,RW, RW, R, R
ros_Solve(Ghimj, K, Nsol, 1, ros_S);
} // Stage
{
int istage = 2;
HC0 = 0.40759956452537699824805835358067E+01/(direction*H);
HC1 = 0.92076794298330791242156818474003E+01/(direction*H);
for (int i=0; i<NVAR; i++){
K(index,2,i) = K(index,1,i)*HC1 + K(index,0,i)*HC0 + varNew(index,i);
}
if ((!autonomous) )
{
HG = direction*H*0.21851380027664058511513169485832E+01;
for (int i=0; i<NVAR; i++){
K(index,istage,i) += dFdT(index,i)*HG;
}
}
ros_Solve(Ghimj, K, Nsol, istage, ros_S);
} // Stage
// ~~~> Compute the new solution
for (int i=0; i<NVAR; i++){
varNew(index,i) = K(index,0,i) + K(index,1,i)*0.61697947043828245592553615689730E+01 + K(index,2,i)*(-0.42772256543218573326238373806514) + var(index,i) ;
varErr(index,i) = K(index,0,i)/2 + K(index,1,i)*(-0.29079558716805469821718236208017E+01) + K(index,2,i)*(0.22354069897811569627360909276199);
}
Err = ros_ErrorNorm(var, varNew, varErr, absTol, relTol, vectorTol);
// ~~~> New step size is bounded by FacMin <= Hnew/H <= FacMax
Fac = fmin(FacMax,fmax(FacMin,FacSafe/pow(Err,ONE/3.0)));
Hnew = H*Fac;
// ~~~> Check the error magnitude and adjust step size
Nstp = Nstp+ 1;
if((Err <= ONE) || (H <= Hmin)) // ~~~> Accept step
{
Nacc = Nacc + 1;
for (int j=0; j<NVAR ; j++)
var(index,j) = fmax(varNew(index,j),ZERO); /////////// VAR WRITE - last VarNew read
T = T + direction*H;
Hnew = fmax(Hmin,fmin(Hnew,Hmax));
if (rejectLastH) // No step size increase after a rejected step
Hnew = fmin(Hnew,H);
rejectLastH = 0;
rejectMoreH = 0;
H = Hnew;
break; // EXIT THE LOOP: WHILE STEP NOT ACCEPTED
}
else // ~~~> Reject step
{
if (rejectMoreH)
Hnew = H*FacRej;
rejectMoreH = rejectLastH;
rejectLastH = 1;
H = Hnew;
if (Nacc >= 1)
Nrej += 1;
} // Err <= 1
} // UntilAccepted
} // TimeLoop
// ~~~> Successful exit
return 0; // ~~~> The integration was successful
}
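/*
 * Rosenbrock_ros3 below is the kernel wrapper for the specialized integrator.
 * Unlike the generic Rosenbrock kernel above, its per-cell scratch arrays
 * (Ghimj, K, jac0, var, fix, rconst, ...) are preallocated global-memory
 * buffers from init_first_time and are passed in as arguments; the host
 * wrapper dispatches to this kernel when method == 2 (ros3).
 */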
__global__
void Rosenbrock_ros3(double * __restrict__ conc, const double Tstart, const double Tend, double * __restrict__ rstatus, int * __restrict__ istatus,
const int autonomous, const int vectorTol, const int UplimTol, const int Max_no_steps,
const double Hmin, const double Hmax, const double Hstart, const double FacMin, const double FacMax, const double FacRej, const double FacSafe, const double roundoff,
const double * __restrict__ absTol, const double * __restrict__ relTol,
const double * __restrict__ khet_st, const double * __restrict__ khet_tr,
const double * __restrict__ jx,
const double * __restrict__ temp_gpu,
const double * __restrict__ press_gpu,
const double * __restrict__ cair_gpu,
const int VL_GLO,
double * __restrict__ Ghimj,
double * __restrict__ K,
double * __restrict__ varNew,
double * __restrict__ Fcn0,
double * __restrict__ dFdT,
double * __restrict__ jac0,
double * __restrict__ varErr,
double * __restrict__ var,
double * __restrict__ fix,
double * __restrict__ rconst)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
/* Temporary arrays allocated in stack */
/*
* Optimization NOTE: runs faster on Tesla/Fermi
 * when temporaries are allocated on the stack instead of the heap.
 * In theory accesses could be aggregated (coalesced), but because of
 * the algorithm each thread touches different parts of memory, which
 * makes such an optimization harder.
*
*/
// double varNew_stack[NVAR];
// double var_stack[NVAR];
// double varErr_stack[NVAR];
// double fix_stack[NFIX];
// double Fcn0_stack[NVAR];
// double jac0_stack[LU_NONZERO];
// double dFdT_stack[NVAR];
// double Ghimj_stack[LU_NONZERO];
// double K_stack[3*NVAR];
// double rconst_stack[NREACT];
// /* Allocated in stack */
// double *Ghimj = Ghimj_stack;
// double *K = K_stack;
// double *varNew = varNew_stack;
// double *Fcn0 = Fcn0_stack;
// double *dFdT = dFdT_stack;
// double *jac0 = jac0_stack;
// double *varErr = varErr_stack;
// double *var = var_stack;
// double *fix = fix_stack;
// double *rconst = rconst_stack;
const int method = 2;
if (index < VL_GLO)
{
int Nfun,Njac,Nstp,Nacc,Nrej,Ndec,Nsol,Nsng;
double Texit, Hexit;
Nfun = 0;
Njac = 0;
Nstp = 0;
Nacc = 0;
Nrej = 0;
Ndec = 0;
Nsol = 0;
Nsng = 0;
/* Copy data from global memory to temporary array */
/*
* Optimization note: if we ever have enough constant
* memory, we could use it for storing the data.
* In current architectures if we use constant memory
* only a few threads will be able to run on the fly.
*
*/
for (int i=0; i<NSPEC; i++)
var(index,i) = conc(index,i);
for (int i=0; i<NFIX; i++)
fix(index,i) = conc(index,NVAR+i);
//update_rconst(var, khet_st, khet_tr, jx, VL_GLO);
update_rconst(var, khet_st, khet_tr, jx, rconst, temp_gpu, press_gpu, cair_gpu, VL_GLO);
ros_Integrator_ros3(var, fix, Tstart, Tend, Texit,
// Integration parameters
autonomous, vectorTol, Max_no_steps,
roundoff, Hmin, Hmax, Hstart, Hexit,
FacMin, FacMax, FacRej, FacSafe,
// Status parameters
Nfun, Njac, Nstp, Nacc, Nrej, Ndec, Nsol, Nsng,
// cuda global mem buffers
rconst, absTol, relTol, varNew, Fcn0,
K, dFdT, jac0, Ghimj, varErr,
// For update rconst
khet_st, khet_tr, jx,
VL_GLO
);
for (int i=0; i<NVAR; i++)
conc(index,i) = var(index,i);
/* Statistics */
istatus(index,ifun) = Nfun;
istatus(index,ijac) = Njac;
istatus(index,istp) = Nstp;
istatus(index,iacc) = Nacc;
istatus(index,irej) = Nrej;
istatus(index,idec) = Ndec;
istatus(index,isol) = Nsol;
istatus(index,isng) = Nsng;
// Last T and H
rstatus(index,itexit) = Texit;
rstatus(index,ihexit) = Hexit;
}
}
// no int8 in CUDA :(
__global__ void reduce_istatus_1(int *istatus, int4 *tmp_out_1, int4 *tmp_out_2, int VL_GLO, int *xNacc, int *xNrej)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
int idx_1 = threadIdx.x;
int global_size = blockDim.x*gridDim.x;
int foo;
//no int8 in CUDA :(
int4 accumulator_1 = make_int4(0,0,0,0);
int4 accumulator_2 = make_int4(0,0,0,0);
while (index < VL_GLO)
{
accumulator_1.x += istatus(index,0);
accumulator_1.y += istatus(index,1);
accumulator_1.z += istatus(index,2);
//some dirty work on the side...
foo = istatus(index,3);
xNacc[index] = foo;
accumulator_1.w += foo;
foo = istatus(index,4);
xNrej[index] = foo;
accumulator_2.x += foo;
accumulator_2.y += istatus(index,5);
accumulator_2.z += istatus(index,6);
accumulator_2.w += istatus(index,7);
index += global_size;
}
//no int8 in CUDA :(
__shared__ int4 buffer_1[REDUCTION_SIZE_1];
__shared__ int4 buffer_2[REDUCTION_SIZE_1];
buffer_1[idx_1] = accumulator_1;
buffer_2[idx_1] = accumulator_2;
__syncthreads();
int idx_2, active_threads = blockDim.x;
int4 tmp_1, tmp_2;
while (active_threads != 1)
{
active_threads /= 2;
if (idx_1 < active_threads)
{
idx_2 = idx_1+active_threads;
tmp_1 = buffer_1[idx_1];
tmp_2 = buffer_1[idx_2];
tmp_1.x += tmp_2.x;
tmp_1.y += tmp_2.y;
tmp_1.z += tmp_2.z;
tmp_1.w += tmp_2.w;
buffer_1[idx_1] = tmp_1;
tmp_1 = buffer_2[idx_1];
tmp_2 = buffer_2[idx_2];
tmp_1.x += tmp_2.x;
tmp_1.y += tmp_2.y;
tmp_1.z += tmp_2.z;
tmp_1.w += tmp_2.w;
buffer_2[idx_1] = tmp_1;
}
__syncthreads();
}
if (idx_1 == 0)
{
tmp_out_1[blockIdx.x] = buffer_1[0];
tmp_out_2[blockIdx.x] = buffer_2[0];
}
}
__global__ void reduce_istatus_2(int4 *tmp_out_1, int4 *tmp_out_2, int *out)
{
int idx_1 = threadIdx.x;
//no int8 in CUDA :(
__shared__ int4 buffer_1[REDUCTION_SIZE_2];
__shared__ int4 buffer_2[REDUCTION_SIZE_2];
buffer_1[idx_1] = tmp_out_1[idx_1];
buffer_2[idx_1] = tmp_out_2[idx_1];
__syncthreads();
int idx_2, active_threads = blockDim.x;
int4 tmp_1, tmp_2;
while (active_threads != 1)
{
active_threads /= 2;
if (idx_1 < active_threads)
{
idx_2 = idx_1+active_threads;
tmp_1 = buffer_1[idx_1];
tmp_2 = buffer_1[idx_2];
tmp_1.x += tmp_2.x;
tmp_1.y += tmp_2.y;
tmp_1.z += tmp_2.z;
tmp_1.w += tmp_2.w;
buffer_1[idx_1] = tmp_1;
tmp_1 = buffer_2[idx_1];
tmp_2 = buffer_2[idx_2];
tmp_1.x += tmp_2.x;
tmp_1.y += tmp_2.y;
tmp_1.z += tmp_2.z;
tmp_1.w += tmp_2.w;
buffer_2[idx_1] = tmp_1;
}
__syncthreads();
}
if (idx_1 == 0)
{
tmp_1 = buffer_1[0];
tmp_2 = buffer_2[0];
out[0] = tmp_1.x;
out[1] = tmp_1.y;
out[2] = tmp_1.z;
out[3] = tmp_1.w;
out[4] = tmp_2.x;
out[5] = tmp_2.y;
out[6] = tmp_2.z;
out[7] = tmp_2.w;
}
}
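/*
 * The statistics reduction runs in two stages: reduce_istatus_1 has each block
 * accumulate the eight per-cell counters (packed into two int4 values) over a
 * grid-stride loop, tree-reduces them in shared memory, and writes one partial
 * int4 pair per block; it also copies the per-cell Nacc/Nrej counters out to
 * xNacc/xNrej on the way. reduce_istatus_2 then runs in a single block and
 * collapses those partials into the final 8-element output array.
 */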
/* Host-side global state; assumed to be instantiated once per process (PE) */
enum { TRUE=1, FALSE=0 } ;
double *d_conc, *d_temp, *d_press, *d_cair, *d_khet_st, *d_khet_tr, *d_jx;
int initialized = FALSE;
/* Device pointers pointing to GPU */
double *d_rstatus, *d_absTol, *d_relTol;
int *d_istatus, *d_istatus_rd, *d_xNacc, *d_xNrej;
int4 *d_tmp_out_1, *d_tmp_out_2;
/* Allocate arrays on device for Rosenbrock */
__host__ void init_first_time(int pe, int VL_GLO, int size_khet_st, int size_khet_tr, int size_jx ){
/* Select the proper GPU CARD */
int deviceCount, device;
gpuErrchk( hipGetDeviceCount(&deviceCount) );
device = pe % deviceCount;
gpuErrchk( hipSetDevice(device) );
printf("PE[%d]: selected %d of total %d\n",pe,device,deviceCount);
hipDeviceSetCacheConfig(hipFuncCachePreferL1);
gpuErrchk( hipMalloc ((void **) &d_conc , sizeof(double)*VL_GLO*(NSPEC)) );
gpuErrchk( hipMalloc ((void **) &d_khet_st, sizeof(double)*VL_GLO*size_khet_st) );
gpuErrchk( hipMalloc ((void **) &d_khet_tr, sizeof(double)*VL_GLO*size_khet_tr) );
gpuErrchk( hipMalloc ((void **) &d_jx , sizeof(double)*VL_GLO*size_jx) );
gpuErrchk( hipMalloc ((void **) &d_rstatus , sizeof(double)*VL_GLO*2) );
gpuErrchk( hipMalloc ((void **) &d_istatus , sizeof(int)*VL_GLO*8) );
gpuErrchk( hipMalloc ((void **) &d_absTol , sizeof(double)*NVAR) );
gpuErrchk( hipMalloc ((void **) &d_relTol , sizeof(double)*NVAR) );
/* Allocate input arrays */
gpuErrchk( hipMalloc ((void **) &temp_gpu , sizeof(double)*VL_GLO) );
gpuErrchk( hipMalloc ((void **) &press_gpu , sizeof(double)*VL_GLO) );
gpuErrchk( hipMalloc ((void **) &cair_gpu , sizeof(double)*VL_GLO) );
/* Allocate arrays on device for reduce_foo */
gpuErrchk( hipMalloc ((void **) &d_istatus_rd , sizeof(int)*8));
gpuErrchk( hipMalloc ((void **) &d_tmp_out_1 , sizeof(int4)*64));
gpuErrchk( hipMalloc ((void **) &d_tmp_out_2 , sizeof(int4)*64));
gpuErrchk( hipMalloc ((void **) &d_xNacc , sizeof(int)*VL_GLO));
gpuErrchk( hipMalloc ((void **) &d_xNrej , sizeof(int)*VL_GLO));
gpuErrchk( hipMalloc ((void **) &Ghimj, sizeof(double) * VL_GLO * LU_NONZERO));
gpuErrchk( hipMalloc ((void **) &K, sizeof(double) * VL_GLO * 6*NVAR));
gpuErrchk( hipMalloc ((void **) &varNew, sizeof(double) * VL_GLO * NVAR));
gpuErrchk( hipMalloc ((void **) &Fcn0, sizeof(double) * VL_GLO * NVAR));
gpuErrchk( hipMalloc ((void **) &dFdT, sizeof(double) * VL_GLO * NVAR));
gpuErrchk( hipMalloc ((void **) &jac0, sizeof(double) * VL_GLO * LU_NONZERO));
gpuErrchk( hipMalloc ((void **) &varErr, sizeof(double) * VL_GLO * NVAR));
gpuErrchk( hipMalloc ((void **) &var, sizeof(double) * VL_GLO * NSPEC));
gpuErrchk( hipMalloc ((void **) &fix, sizeof(double) * VL_GLO * NFIX));
gpuErrchk( hipMalloc ((void **) &rconst, sizeof(double) * VL_GLO * NREACT));
initialized = TRUE;
}
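/*
 * Rough (illustrative) footprint of the per-cell scratch buffers allocated above
 * for the ros3 path, using the sizes defined in this file (NVAR=139, NSPEC=142,
 * NFIX=3, NREACT=310, LU_NONZERO=1486):
 *   2*LU_NONZERO + 6*NVAR + 4*NVAR + NSPEC + NFIX + NREACT = 4817 doubles,
 * i.e. roughly 38 KB of global memory per grid cell, in addition to d_conc and
 * the other input arrays.
 */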
/*
 * TODO: We should call this at some point.
*/
extern "C" void finalize_cuda(){
/* Free memory on the device */
gpuErrchk( hipFree(d_conc ) );
gpuErrchk( hipFree(d_temp ) );
gpuErrchk( hipFree(d_press ) );
gpuErrchk( hipFree(d_cair ) );
gpuErrchk( hipFree(d_khet_st ) );
gpuErrchk( hipFree(d_khet_tr ) );
gpuErrchk( hipFree(d_jx ) );
gpuErrchk( hipFree(d_rstatus ) );
gpuErrchk( hipFree(d_istatus ) );
gpuErrchk( hipFree(d_absTol ) );
gpuErrchk( hipFree(d_relTol ) );
gpuErrchk( hipFree(d_istatus_rd ) );
gpuErrchk( hipFree(d_tmp_out_1 ) );
gpuErrchk( hipFree(d_tmp_out_2 ) );
gpuErrchk( hipFree(d_xNacc ) );
gpuErrchk( hipFree(d_xNrej ) );
gpuErrchk( hipFree(temp_gpu ) );
gpuErrchk( hipFree(press_gpu ) );
gpuErrchk( hipFree(cair_gpu ) );
}
extern "C" void kpp_integrate_cuda_( int *pe_p, int *sizes, double *time_step_len_p, double *conc, double *temp, double *press, double *cair,
double *khet_st, double *khet_tr, double *jx, double *absTol, double *relTol, int *ierr, int *istatus,
int *xNacc, int *xNrej, double *rndoff, int *icntrl=NULL, double *rcntrl=NULL
)
/* TODO
 * Parameters:
 * pe_p:   scalar int - processor element
 * VL_GLO: scalar int - vector length (number of cells), passed as sizes[0]
 * NSPEC:  number of species (compile-time constant)
 * NREACT: number of reactions (compile-time constant)
 * NVAR:   number of variable (integrated) species (compile-time constant)
 *
 * Input data:
 * conc:    2D array of doubles - size: vl_glo x NSPEC
 * temp:    1D array of doubles - size: vl_glo
 * press:   1D array of doubles - size: vl_glo
 * cair:    1D array of doubles - size: vl_glo
 * khet_st: 2D array of doubles - size: vl_glo x size_khet_st (sizes[1])
 * khet_tr: 2D array of doubles - size: vl_glo x size_khet_tr (sizes[2])
 * jx:      2D array of doubles - size: vl_glo x size_jx (sizes[3])
 * absTol:  1D array of doubles - size: NVAR
 * relTol:  1D array of doubles - size: NVAR
* Control:
* icntrl: 1D array of ints - size: 4
* sizes: 1D array of ints - size: 4
* rcntrl: 1D array of doubles - size: 7
*
*
*/
{
const double DELTAMIN = 1.0E-5;
int VL_GLO = sizes[0];
int size_khet_st = sizes[1];
int size_khet_tr = sizes[2];
int size_jx = sizes[3];
double roundoff = *rndoff;
double Tstart,Tend;
Tstart = ZERO;
Tend = *time_step_len_p;
int pe = *pe_p;
// variables from rcntrl and icntrl
int autonomous, vectorTol, UplimTol, method, Max_no_steps;
double Hmin, Hmax, Hstart, FacMin, FacMax, FacRej, FacSafe;
//int rcntrl_bool = 0, icntrl_bool=0;
if (rcntrl == NULL)
{
rcntrl = new double[7];
for (int i=0; i < 7; i++)
rcntrl[i] = 0.0;
}
if (icntrl == NULL)
{
icntrl = new int[4];
for (int i=0; i < 4; i++)
icntrl[i] = 0;
}
/* Allocate arrays on device for update_rconst kernel*/
if (initialized == FALSE) init_first_time(pe, VL_GLO, size_khet_st, size_khet_tr, size_jx);
/* Copy data from host memory to device memory */
gpuErrchk( hipMemcpy(d_conc , conc , sizeof(double)*VL_GLO*NSPEC , hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(temp_gpu , temp , sizeof(double)*VL_GLO , hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(press_gpu , press , sizeof(double)*VL_GLO , hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(cair_gpu , cair , sizeof(double)*VL_GLO , hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(d_khet_st, khet_st , sizeof(double)*VL_GLO*size_khet_st , hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(d_khet_tr, khet_tr , sizeof(double)*VL_GLO*size_khet_tr , hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(d_jx , jx , sizeof(double)*VL_GLO*size_jx , hipMemcpyHostToDevice) );
/* Copy arrays from host memory to device memory for Rosenbrock */
gpuErrchk( hipMemcpy(d_absTol, absTol, sizeof(double)*NVAR, hipMemcpyHostToDevice) );
gpuErrchk( hipMemcpy(d_relTol, relTol, sizeof(double)*NVAR, hipMemcpyHostToDevice) );
/* Compute execution configuration for update_rconst */
int block_size, grid_size;
block_size = BLOCKSIZE;
grid_size = (VL_GLO + block_size - 1)/block_size;
dim3 dimBlock(block_size);
dim3 dimGrid(grid_size);
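/* Launch configuration uses ceiling division so every cell gets a thread: for
   example, with BLOCKSIZE = 64 and VL_GLO = 1000, grid_size = (1000 + 63)/64 = 16
   blocks (1024 threads); the index < VL_GLO guard inside the kernels masks the
   extra threads. (Values are illustrative only.) */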
/* Execute the kernel */
//update_rconst<<<dimGrid,dimBlock>>>(d_conc, d_khet_st, d_khet_tr, d_jx, VL_GLO);
GPU_DEBUG();
// *------------------------------------------------------*
// | Default values vs input settings (icntrl, rcntrl) |
// *------------------------------------------------------*
int ierr_tmp=0;
{
// autonomous or time dependent ODE. Default is time dependent.
autonomous = !(icntrl[0] == 0);
// For Scalar tolerances (icntrl[1] != 0) the code uses absTol[0] and relTol[0]
// For Vector tolerances (icntrl[1] == 0) the code uses absTol[0..NVAR-1] and relTol[0..NVAR-1]
if (icntrl[1] == 0)
{
vectorTol = 1; //bool
UplimTol = NVAR;
}
else
{
vectorTol = 0;
UplimTol = 1;
}
// The particular Rosenbrock method chosen
if (icntrl[2] == 0)
{
method = 4;
}
else if ((icntrl[2] >= 1) && (icntrl[2] <= 5))
{
method = icntrl[2];
}
else
{
printf("User-selected Rosenbrock method: icntrl[2]=%d\n",method);
ierr_tmp = -2;
}
// The maximum number of steps admitted
if (icntrl[3] == 0)
{
Max_no_steps = 100000;
}
else if (icntrl[3] > 0)
{
Max_no_steps=icntrl[3];
}
else
{
printf("User-selected max no. of steps: icntrl[3]=%d\n",icntrl[3]);
ierr_tmp = -1;
}
// Unit roundoff (1+ roundoff>1)
roundoff = machine_eps_flt();
// Lower bound on the step size: (positive value)
if (rcntrl[0] == ZERO)
{
Hmin = ZERO;
}
else if (rcntrl[0] > ZERO)
{
Hmin = rcntrl[0];
}
else
{
printf("User-selected Hmin: rcntrl[0]=%f\n",rcntrl[0]);
ierr_tmp = -3;
}
// Upper bound on the step size: (positive value)
if (rcntrl[1] == ZERO)
{
Hmax = fabs(Tend-Tstart);
}
else if (rcntrl[1] > ZERO)
{
Hmax = fmin(fabs(rcntrl[1]),fabs(Tend-Tstart));
}
else
{
printf("User-selected Hmax: rcntrl[1]=%f\n",rcntrl[1]);
ierr_tmp = -3;
}
// Starting step size: (positive value)
if (rcntrl[2] == ZERO)
{
Hstart = fmax(Hmin,DELTAMIN);
}
else if (rcntrl[2] > ZERO)
{
Hstart = fmin(fabs(rcntrl[2]),fabs(Tend-Tstart));
}
else
{
printf("User-selected Hstart: rcntrl[2]=%f\n",rcntrl[2]);
ierr_tmp = -3;
}
// Step size can be changed s.t. FacMin < Hnew/Hexit < FacMax
if (rcntrl[3] == ZERO)
{
FacMin = 0.2;
}
else if (rcntrl[3] > ZERO)
{
FacMin = rcntrl[3];
}
else
{
printf("User-selected FacMin: rcntrl[3]=%f\n",rcntrl[3]);
ierr_tmp = -4;
}
if (rcntrl[4] == ZERO)
{
FacMax = 6.0;
}
else if (rcntrl[4] > ZERO)
{
FacMax = rcntrl[4];
}
else
{
printf("User-selected FacMax: rcntrl[4]=%f\n",rcntrl[4]);
ierr_tmp = -4;
}
// FacRej: Factor to decrease step after 2 successive rejections
if (rcntrl[5] == ZERO)
{
FacRej = 0.1;
}
else if (rcntrl[5] > ZERO)
{
FacRej = rcntrl[5];
}
else
{
printf("User-selected FacRej: rcntrl[5]=%f\n",rcntrl[5]);
ierr_tmp = -4;
}
// FacSafe: Safety Factor in the computation of new step size
if (rcntrl[6] == ZERO)
{
FacSafe = 0.9;
}
else if (rcntrl[6] > ZERO)
{
FacSafe = rcntrl[6];
}
else
{
printf("User-selected FacSafe: rcntrl[6]=%f\n",rcntrl[6]);
ierr_tmp = -4;
}
// Check if tolerances are reasonable
for (int i=0; i < UplimTol; i++)
{
if ((absTol[i] <= ZERO) || (relTol[i] <= 10.0*roundoff) || (relTol[i] >= 1.0))
{
printf("CCC absTol(%d) = %f \n",i,absTol[i]);
printf("CCC relTol(%d) = %f \n",i,relTol[i]);
ierr_tmp = -5;
}
}
}
switch (method){
case 2:
hipLaunchKernelGGL(( Rosenbrock_ros3), dim3(dimGrid),dim3(dimBlock), 0, 0, d_conc, Tstart, Tend, d_rstatus, d_istatus,
autonomous, vectorTol, UplimTol, Max_no_steps,
Hmin, Hmax, Hstart, FacMin, FacMax, FacRej, FacSafe, roundoff,
d_absTol, d_relTol,
d_khet_st, d_khet_tr, d_jx,
temp_gpu, press_gpu, cair_gpu,
VL_GLO,
Ghimj, K,varNew,Fcn0,dFdT,jac0,varErr,var,fix,rconst);
break;
default:
hipLaunchKernelGGL(( Rosenbrock), dim3(dimGrid),dim3(dimBlock), 0, 0, d_conc, Tstart, Tend, d_rstatus, d_istatus,
// values calculated from icntrl and rcntrl at host
autonomous, vectorTol, UplimTol, method, Max_no_steps,
Hmin, Hmax, Hstart, FacMin, FacMax, FacRej, FacSafe, roundoff,
// cuda global mem buffers
d_absTol, d_relTol,
d_khet_st, d_khet_tr, d_jx,
// Global input arrays
temp_gpu, press_gpu, cair_gpu,
// extra - vector length and processor
VL_GLO);
break;
}
GPU_DEBUG();
hipLaunchKernelGGL(( reduce_istatus_1), dim3(REDUCTION_SIZE_2),dim3(REDUCTION_SIZE_1), 0, 0, d_istatus, d_tmp_out_1, d_tmp_out_2, VL_GLO, d_xNacc, d_xNrej);
GPU_DEBUG();
hipLaunchKernelGGL(( reduce_istatus_2), dim3(1),dim3(REDUCTION_SIZE_2), 0, 0, d_tmp_out_1, d_tmp_out_2, d_istatus_rd);
GPU_DEBUG();
/* Copy the result back */
gpuErrchk( hipMemcpy( conc , d_conc , sizeof(double)*VL_GLO*NVAR, hipMemcpyDeviceToHost) );
gpuErrchk( hipMemcpy( xNacc , d_xNacc , sizeof(int)*VL_GLO , hipMemcpyDeviceToHost) );
gpuErrchk( hipMemcpy( xNrej , d_xNrej , sizeof(int)*VL_GLO , hipMemcpyDeviceToHost) );
return;
}
|
1cccfb2b6895a5c3dc90fa08df461760c0304e22.cu
|
/*************************************************************
*
* kpp_integrate_cuda_prototype.cu
* Prototype file for kpp CUDA kernel
*
* Copyright 2016 The Cyprus Institute
*
* Developers: Michail Alvanos - [email protected]
* Giannis Ashiotis
* Theodoros Christoudias - [email protected]
*
********************************************************************/
#include <stdio.h>
#include <unistd.h>
#include "cuda.h"
#define NSPEC 142
#define NVAR 139
#define NFIX 3
#define NREACT 310
#define LU_NONZERO 1486
#define NBSIZE 523
#define BLOCKSIZE 64
//#define MAX_VL_GLO 12288 /* elements that will pass in each call */
#define REDUCTION_SIZE_1 64
#define REDUCTION_SIZE_2 32
#define R_gas 8.3144621
#define N_A 6.02214129e+23
#define atm2Pa 101325.0
#define ip_O2 0
#define ip_O3P 1
#define ip_O1D 2
#define ip_H2O2 3
#define ip_NO2 4
#define ip_NO2O 5
#define ip_NOO2 6
#define ip_N2O5 7
#define ip_HNO3 8
#define ip_HNO4 9
#define ip_PAN 10
#define ip_HONO 11
#define ip_CH3OOH 12
#define ip_COH2 13
#define ip_CHOH 14
#define ip_CH3CO3H 15
#define ip_CH3CHO 16
#define ip_CH3COCH3 17
#define ip_MGLYOX 18
#define ip_HOCl 19
#define ip_OClO 20
#define ip_Cl2O2 21
#define ip_ClNO3 22
#define ip_ClNO2 23
#define ip_Cl2 24
#define ip_BrO 25
#define ip_HOBr 26
#define ip_BrCl 27
#define ip_BrNO3 28
#define ip_BrNO2 29
#define ip_Br2 30
#define ip_CCl4 31
#define ip_CH3Cl 32
#define ip_CH3CCl3 33
#define ip_CFCl3 34
#define ip_CF2Cl2 35
#define ip_CH3Br 36
#define ip_CF2ClBr 37
#define ip_CF3Br 38
#define ip_CH3I 39
#define ip_C3H7I 40
#define ip_CH2ClI 41
#define ip_CH2I2 42
#define ip_IO 43
#define ip_HOI 44
#define ip_I2 45
#define ip_ICl 46
#define ip_IBr 47
#define ip_INO2 48
#define ip_INO3 49
#define ip_SO2 50
#define ip_SO3 51
#define ip_OCS 52
#define ip_CS2 53
#define ip_H2O 54
#define ip_N2O 55
#define ip_NO 56
#define ip_CO2 57
#define ip_HCl 58
#define ip_CHCl2Br 59
#define ip_CHClBr2 60
#define ip_CH2ClBr 61
#define ip_CH2Br2 62
#define ip_CHBr3 63
#define ip_SF6 64
#define ip_NO3NOO 65
#define ip_ClONO2 66
#define ip_MACR 67
#define ip_MVK 68
#define ip_GLYOX 69
#define ip_HOCH2CHO 70
#define ip_CH4 71
#define ip_O2_b1b2 72
#define ip_O2_b1 73
#define ip_O2_b2 74
#define ip_O3PO1D 75
#define ip_O3Pp 76
#define ip_H2O1D 77
#define ip_N2 78
#define ip_N2_b1 79
#define ip_N2_b2 80
#define ip_N2_b3 81
#define ip_NN2D 82
#define ip_NOp 83
#define ip_Op_em 84
#define ip_O2p_em 85
#define ip_Op_O_em 86
#define ip_N2p_em 87
#define ip_Np_N_em 88
#define ip_Np_N2D_em 89
#define ip_N_N2D_em 90
#define ip_Op_em_b 91
#define ip_se_O2_b1 92
#define ip_se_O2_b2 93
#define ip_se_N2_b1 94
#define ip_se_N2_b2 95
#define ip_se_N2_b3 96
#define ip_se_N2_b4 97
#define ip_se_Op_em 98
#define ip_O2_aurq 99
#define ip_N2_aurq 100
#define ip_H2SO4 101
#define ip_C3O2 102
#define ip_CH3NO3 103
#define ip_CH3O2NO2 104
#define ip_CH3ONO 105
#define ip_CH3O2 106
#define ip_HCOOH 107
#define ip_HO2NO2 108
#define ip_OHNO3 109
#define ip_qqqdummy 110
#define ip_CH3OCl 111
#define ip_MEO2NO2 112
#define ip_CHF2Cl 113
#define ip_F113 114
#define ip_C2H5NO3 115
#define ip_NOA 116
#define ip_MEKNO3 117
#define ip_BENZAL 118
#define ip_HOPh3Me2NO2 119
#define ip_HOC6H4NO2 120
#define ip_CH3CHO2VINY 121
#define ip_CH3COCO2H 122
#define ip_IPRCHO2HCO 123
#define ip_C2H5CHO2HCO 124
#define ip_C2H5CHO2ENOL 125
#define ip_C3H7CHO2HCO 126
#define ip_C3H7CHO2VINY 127
#define ip_PeDIONE24 128
#define ip_PINAL2HCO 129
#define ip_PINAL2ENOL 130
#define ip_CF2ClCFCl2 131
#define ip_CH3CFCl2 132
#define ip_CF3CF2Cl 133
#define ip_CF2ClCF2Cl 134
#define ip_CHCl3 135
#define ip_CH2Cl2 136
#define ip_HO2 137
#define ip_ClO 138
#define ind_BrNO2 0
#define ind_CF2ClBr 1
#define ind_CF3Br 2
#define ind_CH3I 3
#define ind_O3s 4
#define ind_CF2ClBr_c 5
#define ind_CF3Br_c 6
#define ind_LCARBON 7
#define ind_LFLUORINE 8
#define ind_LCHLORINE 9
#define ind_CH3SO3H 10
#define ind_H2SO4 11
#define ind_NO3m_cs 12
#define ind_Hp_cs 13
#define ind_Dummy 14
#define ind_CFCl3_c 15
#define ind_CF2Cl2_c 16
#define ind_N2O_c 17
#define ind_CH3CCl3_c 18
#define ind_LO3s 19
#define ind_LossHO2 20
#define ind_LossO1D 21
#define ind_LossO3 22
#define ind_LossO3Br 23
#define ind_LossO3Cl 24
#define ind_LossO3H 25
#define ind_LossO3N 26
#define ind_LossO3O 27
#define ind_LossO3R 28
#define ind_LossOH 29
#define ind_ProdHO2 30
#define ind_ProdLBr 31
#define ind_ProdLCl 32
#define ind_ProdMeO2 33
#define ind_ProdO3 34
#define ind_ProdRO2 35
#define ind_ProdSBr 36
#define ind_ProdSCl 37
#define ind_BIACET 38
#define ind_Cl2O2 39
#define ind_NC4H10 40
#define ind_CCl4 41
#define ind_CF2Cl2 42
#define ind_CFCl3 43
#define ind_CH2Br2 44
#define ind_CHBr3 45
#define ind_CH3SO3 46
#define ind_NH3 47
#define ind_C2H6 48
#define ind_C3H8 49
#define ind_ClNO2 50
#define ind_OClO 51
#define ind_CH2ClBr 52
#define ind_CH3Br 53
#define ind_CHCl2Br 54
#define ind_CHClBr2 55
#define ind_SO2 56
#define ind_CH3CCl3 57
#define ind_NACA 58
#define ind_N 59
#define ind_N2O 60
#define ind_NH2OH 61
#define ind_IC3H7NO3 62
#define ind_CH3CO3H 63
#define ind_MPAN 64
#define ind_DMSO 65
#define ind_ISOOH 66
#define ind_LHOC3H6OOH 67
#define ind_LMEKOOH 68
#define ind_IC3H7OOH 69
#define ind_NHOH 70
#define ind_C2H5OOH 71
#define ind_HYPERACET 72
#define ind_HNO4 73
#define ind_CH3CO2H 74
#define ind_CH3Cl 75
#define ind_HONO 76
#define ind_PAN 77
#define ind_HCOOH 78
#define ind_LC4H9OOH 79
#define ind_Cl2 80
#define ind_CH3SO2 81
#define ind_MVKOOH 82
#define ind_N2O5 83
#define ind_NH2O 84
#define ind_MEK 85
#define ind_CH3COCH3 86
#define ind_HNO 87
#define ind_H2O2 88
#define ind_CH3OH 89
#define ind_BrCl 90
#define ind_ISON 91
#define ind_NH2 92
#define ind_IC3H7O2 93
#define ind_CH3COCH2O2 94
#define ind_CO 95
#define ind_MGLYOX 96
#define ind_H2 97
#define ind_CH4 98
#define ind_LMEKO2 99
#define ind_Br2 100
#define ind_HNO3 101
#define ind_LC4H9O2 102
#define ind_C2H4 103
#define ind_CH3OOH 104
#define ind_BrNO3 105
#define ind_C5H8 106
#define ind_C3H6 107
#define ind_ACETOL 108
#define ind_ISO2 109
#define ind_MVK 110
#define ind_LC4H9NO3 111
#define ind_HOCl 112
#define ind_MVKO2 113
#define ind_DMS 114
#define ind_LHOC3H6O2 115
#define ind_ClNO3 116
#define ind_C2H5O2 117
#define ind_HOBr 118
#define ind_CH3CHO 119
#define ind_O1D 120
#define ind_CH3CO3 121
#define ind_H 122
#define ind_HBr 123
#define ind_O3 124
#define ind_CH3O2 125
#define ind_OH 126
#define ind_Cl 127
#define ind_H2O 128
#define ind_Br 129
#define ind_HCHO 130
#define ind_O3P 131
#define ind_BrO 132
#define ind_NO 133
#define ind_ClO 134
#define ind_NO2 135
#define ind_NO3 136
#define ind_HO2 137
#define ind_HCl 138
#define ind_O2 139
#define ind_N2 140
#define ind_CO2 141
#define ind_H2OH2O -1
#define ind_N2D -1
#define ind_LNITROGEN -1
#define ind_CH2OO -1
#define ind_CH2OOA -1
#define ind_CH3 -1
#define ind_CH3O -1
#define ind_HOCH2O2 -1
#define ind_HOCH2OH -1
#define ind_HOCH2OOH -1
#define ind_CH3NO3 -1
#define ind_CH3O2NO2 -1
#define ind_CH3ONO -1
#define ind_CN -1
#define ind_HCN -1
#define ind_HOCH2O2NO2 -1
#define ind_NCO -1
#define ind_C2H2 -1
#define ind_C2H5OH -1
#define ind_CH2CHOH -1
#define ind_CH2CO -1
#define ind_CH3CHOHO2 -1
#define ind_CH3CHOHOOH -1
#define ind_CH3CO -1
#define ind_ETHGLY -1
#define ind_GLYOX -1
#define ind_HCOCH2O2 -1
#define ind_HCOCO -1
#define ind_HCOCO2H -1
#define ind_HCOCO3 -1
#define ind_HCOCO3H -1
#define ind_HOCH2CH2O -1
#define ind_HOCH2CH2O2 -1
#define ind_HOCH2CHO -1
#define ind_HOCH2CO -1
#define ind_HOCH2CO2H -1
#define ind_HOCH2CO3 -1
#define ind_HOCH2CO3H -1
#define ind_HOCHCHO -1
#define ind_HOOCH2CHO -1
#define ind_HOOCH2CO2H -1
#define ind_HOOCH2CO3 -1
#define ind_HOOCH2CO3H -1
#define ind_HYETHO2H -1
#define ind_C2H5NO3 -1
#define ind_C2H5O2NO2 -1
#define ind_CH3CN -1
#define ind_ETHOHNO3 -1
#define ind_NCCH2O2 -1
#define ind_NO3CH2CHO -1
#define ind_NO3CH2CO3 -1
#define ind_NO3CH2PAN -1
#define ind_PHAN -1
#define ind_ALCOCH2OOH -1
#define ind_C2H5CHO -1
#define ind_C2H5CO2H -1
#define ind_C2H5CO3 -1
#define ind_C2H5CO3H -1
#define ind_C33CO -1
#define ind_CH3CHCO -1
#define ind_CH3COCO2H -1
#define ind_CH3COCO3 -1
#define ind_CH3COCO3H -1
#define ind_CHOCOCH2O2 -1
#define ind_HCOCH2CHO -1
#define ind_HCOCH2CO2H -1
#define ind_HCOCH2CO3 -1
#define ind_HCOCH2CO3H -1
#define ind_HCOCOCH2OOH -1
#define ind_HOC2H4CO2H -1
#define ind_HOC2H4CO3 -1
#define ind_HOC2H4CO3H -1
#define ind_HOCH2COCH2O2 -1
#define ind_HOCH2COCH2OOH -1
#define ind_HOCH2COCHO -1
#define ind_HYPROPO2 -1
#define ind_HYPROPO2H -1
#define ind_IPROPOL -1
#define ind_NC3H7O2 -1
#define ind_NC3H7OOH -1
#define ind_NPROPOL -1
#define ind_PROPENOL -1
#define ind_C32OH13CO -1
#define ind_C3DIALO2 -1
#define ind_C3DIALOOH -1
#define ind_HCOCOHCO3 -1
#define ind_HCOCOHCO3H -1
#define ind_METACETHO -1
#define ind_C3PAN1 -1
#define ind_C3PAN2 -1
#define ind_CH3COCH2O2NO2 -1
#define ind_NC3H7NO3 -1
#define ind_NOA -1
#define ind_PPN -1
#define ind_PR2O2HNO3 -1
#define ind_PRONO3BO2 -1
#define ind_PROPOLNO3 -1
#define ind_HCOCOHPAN -1
#define ind_BIACETO2 -1
#define ind_BIACETOH -1
#define ind_BIACETOOH -1
#define ind_BUT1ENE -1
#define ind_BUT2OLO -1
#define ind_BUT2OLO2 -1
#define ind_BUT2OLOOH -1
#define ind_BUTENOL -1
#define ind_C312COCO3 -1
#define ind_C312COCO3H -1
#define ind_C3H7CHO -1
#define ind_C413COOOH -1
#define ind_C44O2 -1
#define ind_C44OOH -1
#define ind_C4CODIAL -1
#define ind_CBUT2ENE -1
#define ind_CH3COCHCO -1
#define ind_CH3COCHO2CHO -1
#define ind_CH3COCOCO2H -1
#define ind_CH3COOHCHCHO -1
#define ind_CHOC3COO2 -1
#define ind_CO23C3CHO -1
#define ind_CO2C3CHO -1
#define ind_CO2H3CHO -1
#define ind_CO2H3CO2H -1
#define ind_CO2H3CO3 -1
#define ind_CO2H3CO3H -1
#define ind_EZCH3CO2CHCHO -1
#define ind_EZCHOCCH3CHO2 -1
#define ind_HCOCCH3CHOOH -1
#define ind_HCOCCH3CO -1
#define ind_HCOCO2CH3CHO -1
#define ind_HMAC -1
#define ind_HO12CO3C4 -1
#define ind_HVMK -1
#define ind_IBUTALOH -1
#define ind_IBUTDIAL -1
#define ind_IBUTOLBO2 -1
#define ind_IBUTOLBOOH -1
#define ind_IC4H10 -1
#define ind_IC4H9O2 -1
#define ind_IC4H9OOH -1
#define ind_IPRCHO -1
#define ind_IPRCO3 -1
#define ind_IPRHOCO2H -1
#define ind_IPRHOCO3 -1
#define ind_IPRHOCO3H -1
#define ind_MACO2 -1
#define ind_MACO2H -1
#define ind_MACO3 -1
#define ind_MACO3H -1
#define ind_MACR -1
#define ind_MACRO -1
#define ind_MACRO2 -1
#define ind_MACROH -1
#define ind_MACROOH -1
#define ind_MBOOO -1
#define ind_MEPROPENE -1
#define ind_MPROPENOL -1
#define ind_PERIBUACID -1
#define ind_TBUT2ENE -1
#define ind_TC4H9O2 -1
#define ind_TC4H9OOH -1
#define ind_BZFUCO -1
#define ind_BZFUO2 -1
#define ind_BZFUONE -1
#define ind_BZFUOOH -1
#define ind_CO14O3CHO -1
#define ind_CO14O3CO2H -1
#define ind_CO2C4DIAL -1
#define ind_EPXC4DIAL -1
#define ind_EPXDLCO2H -1
#define ind_EPXDLCO3 -1
#define ind_EPXDLCO3H -1
#define ind_HOCOC4DIAL -1
#define ind_MALANHY -1
#define ind_MALANHYO2 -1
#define ind_MALANHYOOH -1
#define ind_MALDALCO2H -1
#define ind_MALDALCO3H -1
#define ind_MALDIAL -1
#define ind_MALDIALCO3 -1
#define ind_MALDIALO2 -1
#define ind_MALDIALOOH -1
#define ind_MALNHYOHCO -1
#define ind_MECOACEOOH -1
#define ind_MECOACETO2 -1
#define ind_BUT2OLNO3 -1
#define ind_C312COPAN -1
#define ind_C4PAN5 -1
#define ind_IBUTOLBNO3 -1
#define ind_IC4H9NO3 -1
#define ind_MACRN -1
#define ind_MVKNO3 -1
#define ind_PIPN -1
#define ind_TC4H9NO3 -1
#define ind_EPXDLPAN -1
#define ind_MALDIALPAN -1
#define ind_NBZFUO2 -1
#define ind_NBZFUONE -1
#define ind_NBZFUOOH -1
#define ind_NC4DCO2H -1
#define ind_LBUT1ENO2 -1
#define ind_LBUT1ENOOH -1
#define ind_LHMVKABO2 -1
#define ind_LHMVKABOOH -1
#define ind_LBUT1ENNO3 -1
#define ind_LMEKNO3 -1
#define ind_C1ODC2O2C4OD -1
#define ind_C1ODC2O2C4OOH -1
#define ind_C1ODC2OOHC4OD -1
#define ind_C1ODC3O2C4OOH -1
#define ind_C1OOHC2O2C4OD -1
#define ind_C1OOHC2OOHC4OD -1
#define ind_C1OOHC3O2C4OD -1
#define ind_C4MDIAL -1
#define ind_C511O2 -1
#define ind_C511OOH -1
#define ind_C512O2 -1
#define ind_C512OOH -1
#define ind_C513CO -1
#define ind_C513O2 -1
#define ind_C513OOH -1
#define ind_C514O2 -1
#define ind_C514OOH -1
#define ind_C59O2 -1
#define ind_C59OOH -1
#define ind_CHOC3COCO3 -1
#define ind_CHOC3COOOH -1
#define ind_CO13C4CHO -1
#define ind_CO23C4CHO -1
#define ind_CO23C4CO3 -1
#define ind_CO23C4CO3H -1
#define ind_DB1O -1
#define ind_DB1O2 -1
#define ind_DB1OOH -1
#define ind_DB2O2 -1
#define ind_DB2OOH -1
#define ind_HCOC5 -1
#define ind_ISOPAB -1
#define ind_ISOPAOH -1
#define ind_ISOPBO2 -1
#define ind_ISOPBOH -1
#define ind_ISOPBOOH -1
#define ind_ISOPCD -1
#define ind_ISOPDO2 -1
#define ind_ISOPDOH -1
#define ind_ISOPDOOH -1
#define ind_MBO -1
#define ind_MBOACO -1
#define ind_MBOCOCO -1
#define ind_ME3FURAN -1
#define ind_ZCO3C23DBCOD -1
#define ind_ZCODC23DBCOOH -1
#define ind_ACCOMECHO -1
#define ind_ACCOMECO3 -1
#define ind_ACCOMECO3H -1
#define ind_C24O3CCO2H -1
#define ind_C4CO2DBCO3 -1
#define ind_C4CO2DCO3H -1
#define ind_C5134CO2OH -1
#define ind_C54CO -1
#define ind_C5CO14O2 -1
#define ind_C5CO14OH -1
#define ind_C5CO14OOH -1
#define ind_C5DIALCO -1
#define ind_C5DIALO2 -1
#define ind_C5DIALOOH -1
#define ind_C5DICARB -1
#define ind_C5DICARBO2 -1
#define ind_C5DICAROOH -1
#define ind_MC3ODBCO2H -1
#define ind_MMALANHY -1
#define ind_MMALANHYO2 -1
#define ind_MMALNHYOOH -1
#define ind_TLFUO2 -1
#define ind_TLFUONE -1
#define ind_TLFUOOH -1
#define ind_C4MCONO3OH -1
#define ind_C514NO3 -1
#define ind_C5PAN9 -1
#define ind_CHOC3COPAN -1
#define ind_DB1NO3 -1
#define ind_ISOPBDNO3O2 -1
#define ind_ISOPBNO3 -1
#define ind_ISOPDNO3 -1
#define ind_NC4CHO -1
#define ind_NC4OHCO3 -1
#define ind_NC4OHCO3H -1
#define ind_NC4OHCPAN -1
#define ind_NISOPO2 -1
#define ind_NISOPOOH -1
#define ind_NMBOBCO -1
#define ind_ZCPANC23DBCOD -1
#define ind_ACCOMEPAN -1
#define ind_C4CO2DBPAN -1
#define ind_C5COO2NO2 -1
#define ind_NC4MDCO2H -1
#define ind_NTLFUO2 -1
#define ind_NTLFUOOH -1
#define ind_LC578O2 -1
#define ind_LC578OOH -1
#define ind_LDISOPACO -1
#define ind_LDISOPACO2 -1
#define ind_LHC4ACCHO -1
#define ind_LHC4ACCO2H -1
#define ind_LHC4ACCO3 -1
#define ind_LHC4ACCO3H -1
#define ind_LIEPOX -1
#define ind_LISOPACO -1
#define ind_LISOPACO2 -1
#define ind_LISOPACOOH -1
#define ind_LISOPEFO -1
#define ind_LISOPEFO2 -1
#define ind_LMBOABO2 -1
#define ind_LMBOABOOH -1
#define ind_LME3FURANO2 -1
#define ind_LZCO3HC23DBCOD -1
#define ind_LC5PAN1719 -1
#define ind_LISOPACNO3 -1
#define ind_LISOPACNO3O2 -1
#define ind_LMBOABNO3 -1
#define ind_LNISO3 -1
#define ind_LNISOOH -1
#define ind_LNMBOABO2 -1
#define ind_LNMBOABOOH -1
#define ind_C614CO -1
#define ind_C614O2 -1
#define ind_C614OOH -1
#define ind_CO235C5CHO -1
#define ind_CO235C6O2 -1
#define ind_CO235C6OOH -1
#define ind_BENZENE -1
#define ind_BZBIPERO2 -1
#define ind_BZBIPEROOH -1
#define ind_BZEMUCCO -1
#define ind_BZEMUCCO2H -1
#define ind_BZEMUCCO3 -1
#define ind_BZEMUCCO3H -1
#define ind_BZEMUCO2 -1
#define ind_BZEMUCOOH -1
#define ind_BZEPOXMUC -1
#define ind_BZOBIPEROH -1
#define ind_C5CO2DBCO3 -1
#define ind_C5CO2DCO3H -1
#define ind_C5CO2OHCO3 -1
#define ind_C5COOHCO3H -1
#define ind_C6125CO -1
#define ind_C615CO2O2 -1
#define ind_C615CO2OOH -1
#define ind_C6CO4DB -1
#define ind_C6H5O -1
#define ind_C6H5O2 -1
#define ind_C6H5OOH -1
#define ind_CATEC1O -1
#define ind_CATEC1O2 -1
#define ind_CATEC1OOH -1
#define ind_CATECHOL -1
#define ind_CPDKETENE -1
#define ind_PBZQCO -1
#define ind_PBZQO2 -1
#define ind_PBZQONE -1
#define ind_PBZQOOH -1
#define ind_PHENO2 -1
#define ind_PHENOL -1
#define ind_PHENOOH -1
#define ind_C614NO3 -1
#define ind_BZBIPERNO3 -1
#define ind_BZEMUCNO3 -1
#define ind_BZEMUCPAN -1
#define ind_C5CO2DBPAN -1
#define ind_C5CO2OHPAN -1
#define ind_DNPHEN -1
#define ind_DNPHENO2 -1
#define ind_DNPHENOOH -1
#define ind_HOC6H4NO2 -1
#define ind_NBZQO2 -1
#define ind_NBZQOOH -1
#define ind_NCATECHOL -1
#define ind_NCATECO2 -1
#define ind_NCATECOOH -1
#define ind_NCPDKETENE -1
#define ind_NDNPHENO2 -1
#define ind_NDNPHENOOH -1
#define ind_NNCATECO2 -1
#define ind_NNCATECOOH -1
#define ind_NPHEN1O -1
#define ind_NPHEN1O2 -1
#define ind_NPHEN1OOH -1
#define ind_NPHENO2 -1
#define ind_NPHENOOH -1
#define ind_C235C6CO3H -1
#define ind_C716O2 -1
#define ind_C716OOH -1
#define ind_C721O2 -1
#define ind_C721OOH -1
#define ind_C722O2 -1
#define ind_C722OOH -1
#define ind_CO235C6CHO -1
#define ind_CO235C6CO3 -1
#define ind_MCPDKETENE -1
#define ind_ROO6R3O -1
#define ind_ROO6R3O2 -1
#define ind_ROO6R5O2 -1
#define ind_BENZAL -1
#define ind_C6CO2OHCO3 -1
#define ind_C6COOHCO3H -1
#define ind_C6H5CH2O2 -1
#define ind_C6H5CH2OOH -1
#define ind_C6H5CO3 -1
#define ind_C6H5CO3H -1
#define ind_C7CO4DB -1
#define ind_CRESO2 -1
#define ind_CRESOL -1
#define ind_CRESOOH -1
#define ind_MCATEC1O -1
#define ind_MCATEC1O2 -1
#define ind_MCATEC1OOH -1
#define ind_MCATECHOL -1
#define ind_OXYL1O2 -1
#define ind_OXYL1OOH -1
#define ind_PHCOOH -1
#define ind_PTLQCO -1
#define ind_PTLQO2 -1
#define ind_PTLQONE -1
#define ind_PTLQOOH -1
#define ind_TLBIPERO2 -1
#define ind_TLBIPEROOH -1
#define ind_TLEMUCCO -1
#define ind_TLEMUCCO2H -1
#define ind_TLEMUCCO3 -1
#define ind_TLEMUCCO3H -1
#define ind_TLEMUCO2 -1
#define ind_TLEMUCOOH -1
#define ind_TLEPOXMUC -1
#define ind_TLOBIPEROH -1
#define ind_TOL1O -1
#define ind_TOLUENE -1
#define ind_C7PAN3 -1
#define ind_C6CO2OHPAN -1
#define ind_C6H5CH2NO3 -1
#define ind_DNCRES -1
#define ind_DNCRESO2 -1
#define ind_DNCRESOOH -1
#define ind_MNCATECH -1
#define ind_MNCATECO2 -1
#define ind_MNCATECOOH -1
#define ind_MNCPDKETENE -1
#define ind_MNNCATCOOH -1
#define ind_MNNCATECO2 -1
#define ind_NCRES1O -1
#define ind_NCRES1O2 -1
#define ind_NCRES1OOH -1
#define ind_NCRESO2 -1
#define ind_NCRESOOH -1
#define ind_NDNCRESO2 -1
#define ind_NDNCRESOOH -1
#define ind_NPTLQO2 -1
#define ind_NPTLQOOH -1
#define ind_PBZN -1
#define ind_TLBIPERNO3 -1
#define ind_TLEMUCNO3 -1
#define ind_TLEMUCPAN -1
#define ind_TOL1OHNO2 -1
#define ind_C721CHO -1
#define ind_C721CO3 -1
#define ind_C721CO3H -1
#define ind_C810O2 -1
#define ind_C810OOH -1
#define ind_C811O2 -1
#define ind_C812O2 -1
#define ind_C812OOH -1
#define ind_C813O2 -1
#define ind_C813OOH -1
#define ind_C85O2 -1
#define ind_C85OOH -1
#define ind_C86O2 -1
#define ind_C86OOH -1
#define ind_C89O2 -1
#define ind_C89OOH -1
#define ind_C8BC -1
#define ind_C8BCCO -1
#define ind_C8BCO2 -1
#define ind_C8BCOOH -1
#define ind_NORPINIC -1
#define ind_EBENZ -1
#define ind_LXYL -1
#define ind_STYRENE -1
#define ind_STYRENO2 -1
#define ind_STYRENOOH -1
#define ind_C721PAN -1
#define ind_C810NO3 -1
#define ind_C89NO3 -1
#define ind_C8BCNO3 -1
#define ind_NSTYRENO2 -1
#define ind_NSTYRENOOH -1
#define ind_C811CO3 -1
#define ind_C811CO3H -1
#define ind_C85CO3 -1
#define ind_C85CO3H -1
#define ind_C89CO2H -1
#define ind_C89CO3 -1
#define ind_C89CO3H -1
#define ind_C96O2 -1
#define ind_C96OOH -1
#define ind_C97O2 -1
#define ind_C97OOH -1
#define ind_C98O2 -1
#define ind_C98OOH -1
#define ind_NOPINDCO -1
#define ind_NOPINDO2 -1
#define ind_NOPINDOOH -1
#define ind_NOPINONE -1
#define ind_NOPINOO -1
#define ind_NORPINAL -1
#define ind_NORPINENOL -1
#define ind_PINIC -1
#define ind_RO6R3P -1
#define ind_C811PAN -1
#define ind_C89PAN -1
#define ind_C96NO3 -1
#define ind_C9PAN2 -1
#define ind_LTMB -1
#define ind_APINAOO -1
#define ind_APINBOO -1
#define ind_APINENE -1
#define ind_BPINAO2 -1
#define ind_BPINAOOH -1
#define ind_BPINENE -1
#define ind_C106O2 -1
#define ind_C106OOH -1
#define ind_C109CO -1
#define ind_C109O2 -1
#define ind_C109OOH -1
#define ind_C96CO3 -1
#define ind_CAMPHENE -1
#define ind_CARENE -1
#define ind_MENTHEN6ONE -1
#define ind_OH2MENTHEN6ONE -1
#define ind_OHMENTHEN6ONEO2 -1
#define ind_PERPINONIC -1
#define ind_PINAL -1
#define ind_PINALO2 -1
#define ind_PINALOOH -1
#define ind_PINENOL -1
#define ind_PINONIC -1
#define ind_RO6R1O2 -1
#define ind_RO6R3O2 -1
#define ind_RO6R3OOH -1
#define ind_ROO6R1O2 -1
#define ind_SABINENE -1
#define ind_BPINANO3 -1
#define ind_C106NO3 -1
#define ind_C10PAN2 -1
#define ind_PINALNO3 -1
#define ind_RO6R1NO3 -1
#define ind_RO6R3NO3 -1
#define ind_ROO6R1NO3 -1
#define ind_LAPINABNO3 -1
#define ind_LAPINABO2 -1
#define ind_LAPINABOOH -1
#define ind_LNAPINABO2 -1
#define ind_LNAPINABOOH -1
#define ind_LNBPINABO2 -1
#define ind_LNBPINABOOH -1
#define ind_LHAROM -1
#define ind_CHF3 -1
#define ind_CHF2CF3 -1
#define ind_CH3CF3 -1
#define ind_CH2F2 -1
#define ind_CH3CHF2 -1
#define ind_CF2ClCF2Cl -1
#define ind_CF2ClCFCl2 -1
#define ind_CF3CF2Cl -1
#define ind_CH2Cl2 -1
#define ind_CH2FCF3 -1
#define ind_CH3CFCl2 -1
#define ind_CHCl3 -1
#define ind_CHF2Cl -1
#define ind_LBROMINE -1
#define ind_C3H7I -1
#define ind_CH2ClI -1
#define ind_CH2I2 -1
#define ind_HI -1
#define ind_HIO3 -1
#define ind_HOI -1
#define ind_I -1
#define ind_I2 -1
#define ind_I2O2 -1
#define ind_IBr -1
#define ind_ICl -1
#define ind_INO2 -1
#define ind_INO3 -1
#define ind_IO -1
#define ind_IPART -1
#define ind_OIO -1
#define ind_OCS -1
#define ind_S -1
#define ind_SF6 -1
#define ind_SH -1
#define ind_SO -1
#define ind_SO3 -1
#define ind_LSULFUR -1
#define ind_Hg -1
#define ind_HgO -1
#define ind_HgCl -1
#define ind_HgCl2 -1
#define ind_HgBr -1
#define ind_HgBr2 -1
#define ind_ClHgBr -1
#define ind_BrHgOBr -1
#define ind_ClHgOBr -1
#define ind_RGM_cs -1
#define ind_PRODUCTS -1
#define ind_M -1
#define ind_Op -1
#define ind_O2p -1
#define ind_Np -1
#define ind_N2p -1
#define ind_NOp -1
#define ind_em -1
#define ind_kJmol -1
#define ind_O4Sp -1
#define ind_O2Dp -1
#define ind_O2Pp -1
#define ind_LTERP -1
#define ind_LALK4 -1
#define ind_LALK5 -1
#define ind_LARO1 -1
#define ind_LARO2 -1
#define ind_LOLE1 -1
#define ind_LOLE2 -1
#define ind_LfPOG02 -1
#define ind_LfPOG03 -1
#define ind_LfPOG04 -1
#define ind_LfPOG05 -1
#define ind_LbbPOG02 -1
#define ind_LbbPOG03 -1
#define ind_LbbPOG04 -1
#define ind_LfSOGsv01 -1
#define ind_LfSOGsv02 -1
#define ind_LbbSOGsv01 -1
#define ind_LbbSOGsv02 -1
#define ind_LfSOGiv01 -1
#define ind_LfSOGiv02 -1
#define ind_LfSOGiv03 -1
#define ind_LfSOGiv04 -1
#define ind_LbbSOGiv01 -1
#define ind_LbbSOGiv02 -1
#define ind_LbbSOGiv03 -1
#define ind_LbSOGv01 -1
#define ind_LbSOGv02 -1
#define ind_LbSOGv03 -1
#define ind_LbSOGv04 -1
#define ind_LbOSOGv01 -1
#define ind_LbOSOGv02 -1
#define ind_LbOSOGv03 -1
#define ind_LaSOGv01 -1
#define ind_LaSOGv02 -1
#define ind_LaSOGv03 -1
#define ind_LaSOGv04 -1
#define ind_LaOSOGv01 -1
#define ind_LaOSOGv02 -1
#define ind_LaOSOGv03 -1
#define ind_ACBZO2 -1
#define ind_ALKNO3 -1
#define ind_ALKO2 -1
#define ind_ALKOH -1
#define ind_ALKOOH -1
#define ind_BCARY -1
#define ind_BENZO2 -1
#define ind_BENZOOH -1
#define ind_BEPOMUC -1
#define ind_BIGALD1 -1
#define ind_BIGALD2 -1
#define ind_BIGALD3 -1
#define ind_BIGALD4 -1
#define ind_BIGALKANE -1
#define ind_BIGENE -1
#define ind_BrONO -1
#define ind_BZALD -1
#define ind_BZOO -1
#define ind_BZOOH -1
#define ind_C3H7O2 -1
#define ind_C3H7OOH -1
#define ind_CFC113 -1
#define ind_CFC114 -1
#define ind_CFC115 -1
#define ind_COF2 -1
#define ind_COFCL -1
#define ind_DICARBO2 -1
#define ind_ELVOC -1
#define ind_ENEO2 -1
#define ind_EOOH -1
#define ind_F -1
#define ind_H1202 -1
#define ind_H2402 -1
#define ind_HCFC141B -1
#define ind_HCFC142B -1
#define ind_HCFC22 -1
#define ind_HF -1
#define ind_HOCH2OO -1
#define ind_HPALD -1
#define ind_IEC1O2 -1
#define ind_LIECHO -1
#define ind_LIECO3 -1
#define ind_LIECO3H -1
#define ind_LIMON -1
#define ind_LISOPNO3NO3 -1
#define ind_LISOPNO3O2 -1
#define ind_LISOPNO3OOH -1
#define ind_LISOPOOHO2 -1
#define ind_LISOPOOHOOH -1
#define ind_MALO2 -1
#define ind_MBONO3O2 -1
#define ind_MBOO2 -1
#define ind_MBOOOH -1
#define ind_MDIALO2 -1
#define ind_MEKNO3 -1
#define ind_MVKN -1
#define ind_MYRC -1
#define ind_NTERPNO3 -1
#define ind_NTERPO2 -1
#define ind_PACALD -1
#define ind_PBZNIT -1
#define ind_TEPOMUC -1
#define ind_TERP2O2 -1
#define ind_TERP2OOH -1
#define ind_TERPNO3 -1
#define ind_TERPO2 -1
#define ind_TERPOOH -1
#define ind_TERPROD1 -1
#define ind_TERPROD2 -1
#define ind_TOLO2 -1
#define ind_TOLOOH -1
#define ind_XYLENO2 -1
#define ind_XYLENOOH -1
#define ind_XYLOL -1
#define ind_XYLOLO2 -1
#define ind_XYLOLOOH -1
#define ind_O2_1D -1
#define ind_O2_1S -1
#define ind_ONIT -1
#define ind_C4H8 -1
#define ind_C4H9O3 -1
#define ind_C5H12 -1
#define ind_C5H11O2 -1
#define ind_C5H6O2 -1
#define ind_HYDRALD -1
#define ind_ISOPO2 -1
#define ind_C5H9O3 -1
#define ind_ISOPOOH -1
#define ind_C5H12O2 -1
#define ind_ONITR -1
#define ind_C5H10O4 -1
#define ind_ROO6R5P -1
#define ind_NH4 -1
#define ind_SO4 -1
#define ind_HCO -1
#define ind_ISPD -1
#define ind_ClOO -1
#define ind_Rn -1
#define ind_Pb -1
#define ind_XO2 -1
#define ind_XO2N -1
#define ind_ROOH -1
#define ind_OLE -1
#define ind_ROR -1
#define ind_ORGNTR -1
#define ind_ACO2 -1
#define ind_PAR -1
#define ind_RXPAR -1
#define ind_OHv0 -1
#define ind_OHv1 -1
#define ind_OHv2 -1
#define ind_OHv3 -1
#define ind_OHv4 -1
#define ind_OHv5 -1
#define ind_OHv6 -1
#define ind_OHv7 -1
#define ind_OHv8 -1
#define ind_OHv9 -1
#define ind_O1S -1
#define ind_O21d -1
#define ind_O2b1s -1
#define ind_O2c1s -1
#define ind_O2x -1
#define ind_O2A3D -1
#define ind_O2A3S -1
#define ind_O25P -1
#define ind_O2_a01 -1
#define ind_O3_a01 -1
#define ind_OH_a01 -1
#define ind_HO2_a01 -1
#define ind_H2O_a01 -1
#define ind_H2O2_a01 -1
#define ind_NH3_a01 -1
#define ind_NO_a01 -1
#define ind_NO2_a01 -1
#define ind_NO3_a01 -1
#define ind_HONO_a01 -1
#define ind_HNO3_a01 -1
#define ind_HNO4_a01 -1
#define ind_CH3OH_a01 -1
#define ind_HCOOH_a01 -1
#define ind_HCHO_a01 -1
#define ind_CH3O2_a01 -1
#define ind_CH3OOH_a01 -1
#define ind_CO2_a01 -1
#define ind_CH3CO2H_a01 -1
#define ind_PAN_a01 -1
#define ind_CH3CHO_a01 -1
#define ind_CH3COCH3_a01 -1
#define ind_Cl_a01 -1
#define ind_Cl2_a01 -1
#define ind_HCl_a01 -1
#define ind_HOCl_a01 -1
#define ind_Br_a01 -1
#define ind_Br2_a01 -1
#define ind_HBr_a01 -1
#define ind_HOBr_a01 -1
#define ind_BrCl_a01 -1
#define ind_I2_a01 -1
#define ind_IO_a01 -1
#define ind_HOI_a01 -1
#define ind_ICl_a01 -1
#define ind_IBr_a01 -1
#define ind_SO2_a01 -1
#define ind_H2SO4_a01 -1
#define ind_DMS_a01 -1
#define ind_DMSO_a01 -1
#define ind_Hg_a01 -1
#define ind_HgO_a01 -1
#define ind_HgOHOH_a01 -1
#define ind_HgOHCl_a01 -1
#define ind_HgCl2_a01 -1
#define ind_HgBr2_a01 -1
#define ind_HgSO3_a01 -1
#define ind_ClHgBr_a01 -1
#define ind_BrHgOBr_a01 -1
#define ind_ClHgOBr_a01 -1
#define ind_FeOH3_a01 -1
#define ind_FeCl3_a01 -1
#define ind_FeF3_a01 -1
#define ind_O2m_a01 -1
#define ind_OHm_a01 -1
#define ind_HO2m_a01 -1
#define ind_O2mm_a01 -1
#define ind_Hp_a01 -1
#define ind_NH4p_a01 -1
#define ind_NO2m_a01 -1
#define ind_NO3m_a01 -1
#define ind_NO4m_a01 -1
#define ind_CO3m_a01 -1
#define ind_HCOOm_a01 -1
#define ind_HCO3m_a01 -1
#define ind_CH3COOm_a01 -1
#define ind_Clm_a01 -1
#define ind_Cl2m_a01 -1
#define ind_ClOm_a01 -1
#define ind_ClOHm_a01 -1
#define ind_Brm_a01 -1
#define ind_Br2m_a01 -1
#define ind_BrOm_a01 -1
#define ind_BrOHm_a01 -1
#define ind_BrCl2m_a01 -1
#define ind_Br2Clm_a01 -1
#define ind_Im_a01 -1
#define ind_IO2m_a01 -1
#define ind_IO3m_a01 -1
#define ind_ICl2m_a01 -1
#define ind_IBr2m_a01 -1
#define ind_SO3m_a01 -1
#define ind_SO3mm_a01 -1
#define ind_SO4m_a01 -1
#define ind_SO4mm_a01 -1
#define ind_SO5m_a01 -1
#define ind_HSO3m_a01 -1
#define ind_HSO4m_a01 -1
#define ind_HSO5m_a01 -1
#define ind_CH3SO3m_a01 -1
#define ind_CH2OHSO3m_a01 -1
#define ind_Hgp_a01 -1
#define ind_Hgpp_a01 -1
#define ind_HgOHp_a01 -1
#define ind_HgClp_a01 -1
#define ind_HgBrp_a01 -1
#define ind_HgSO32mm_a01 -1
#define ind_Fepp_a01 -1
#define ind_FeOpp_a01 -1
#define ind_FeOHp_a01 -1
#define ind_FeOH2p_a01 -1
#define ind_FeClp_a01 -1
#define ind_Feppp_a01 -1
#define ind_FeHOpp_a01 -1
#define ind_FeHO2pp_a01 -1
#define ind_FeOHpp_a01 -1
#define ind_FeOH4m_a01 -1
#define ind_FeOHHO2p_a01 -1
#define ind_FeClpp_a01 -1
#define ind_FeCl2p_a01 -1
#define ind_FeBrpp_a01 -1
#define ind_FeBr2p_a01 -1
#define ind_FeFpp_a01 -1
#define ind_FeF2p_a01 -1
#define ind_FeSO3p_a01 -1
#define ind_FeSO4p_a01 -1
#define ind_FeSO42m_a01 -1
#define ind_FeOH2Fepppp_a01 -1
#define ind_D1O_a01 -1
#define ind_Nap_a01 -1
#define ind_LossO3Su -1
#define ihs_N2O5_H2O 0
#define ihs_HOCl_HCl 1
#define ihs_ClNO3_HCl 2
#define ihs_ClNO3_H2O 3
#define ihs_N2O5_HCl 4
#define ihs_ClNO3_HBr 5
#define ihs_BrNO3_HCl 6
#define ihs_HOCl_HBr 7
#define ihs_HOBr_HCl 8
#define ihs_HOBr_HBr 9
#define ihs_BrNO3_H2O 10
#define ihs_Hg 11
#define ihs_RGM 12
#define iht_N2O5 0
#define iht_HNO3 1
#define iht_Hg 2
#define iht_RGM 3
#define k_C6H5O_NO2 (2.08E-12)
#define k_C6H5O_O3 (2.86E-13)
#define k_adsecprim (3.0E-11)
#define k_adtertprim (5.7E-11 )
#define f_soh (3.44)
#define f_toh (2.68)
#define f_sooh (7.)
#define f_tooh (7.)
#define f_ono2 (0.04 )
#define f_ch2ono2 (0.2)
#define f_cpan (.25)
#define f_allyl (3.6)
#define f_alk (1.23)
#define f_cho (0.55)
#define f_co2h (1.67)
#define f_co (0.73)
#define f_o (8.15)
#define f_pch2oh (1.29)
#define f_tch2oh (0.53)
#define a_pan (0.56 )
#define a_cho (0.31 )
#define a_coch3 (0.76 )
#define a_ch2ono2 (0.64 )
#define a_ch2oh (1.7 )
#define a_ch2ooh (1.7 )
#define a_coh (2.2 )
#define a_cooh (2.2 )
#define a_co2h (0.25)
#define ifun 0
#define ijac 1
#define istp 2
#define iacc 3
#define irej 4
#define idec 5
#define isol 6
#define isng 7
#define itexit 0
#define ihexit 1
#define ZERO 0.0
#define ONE 1.0
#define HALF 0.5
/*
* Fortran to C macros
 * GPU-friendly array definition
* i:VL_GLO, j:NVAR
*
*/
#define conc(i,j) conc[(j)*VL_GLO+(i)]
#define khet_st(i,j) khet_st[(j)*VL_GLO+(i)]
#define khet_tr(i,j) khet_tr[(j)*VL_GLO+(i)]
#define jx(i,j) jx[(j)*VL_GLO+(i)]
#define istatus(i,j) istatus[(j)*(VL_GLO)+(i)]
#define rstatus(i,j) rstatus[(j)*(VL_GLO)+(i)]
#define ROUND128(X) (((X) + (128 - 1)) & ~(128 - 1))
//#define rconst(i,j) rconst[(j)]
//3968 should be VL_GLO
#define rconst(i,j) rconst[(j)*3968 + (i)]
/* Temporary arrays allocated on the stack */
// #define var(i,j) var[(j)]
// #define fix(i,j) fix[(j)]
// #define jcb(i,j) jcb[(j)]
// #define varDot(i,j) varDot[j]
// #define varNew(i,j) varNew[(j)]
// #define Fcn0(i,j) Fcn0[(j)]
// #define Fcn(i,j) Fcn[(j)]
// #define Fcn(i,j) Fcn[(j)]
// #define dFdT(i,j) dFdT[(j)]
// #define varErr(i,j) varErr[(j)]
// #define K(i,j,k) K[(j)*(NVAR)+(k)]
// #define jac0(i,j) jac0[(j)]
// #define Ghimj(i,j) Ghimj[(j)]
//3968 should be VL_GLO
#define var(i,j) var[(j)* 3968 + (i)]
#define fix(i,j) fix[(j)* 3968 + (i)]
#define jcb(i,j) jcb[(j)* 3968 + (i)]
#define varDot(i,j) varDot[j* 3968 + (i)]
#define varNew(i,j) varNew[(j)* 3968 + (i)]
#define Fcn0(i,j) Fcn0[(j)* 3968 + (i)]
#define Fcn(i,j) Fcn[(j)* 3968 + (i)]
#define Fcn(i,j) Fcn[(j)* 3968 + (i)]
#define dFdT(i,j) dFdT[(j)* 3968 + (i)]
#define varErr(i,j) varErr[(j)* 3968 + (i)]
#define K(i,j,k) K[((j)*(NVAR)+(k)) * 3968 + (i)]
#define jac0(i,j) jac0[(j)* 3968 + (i)]
#define Ghimj(i,j) Ghimj[(j)* 3968 + (i)]
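/* Illustrative sketch (not part of the original prototype): the i-fastest
 * layout produced by the macros above means that, for a fixed species j,
 * consecutive threads of a warp touch consecutive global-memory addresses,
 * so loads and stores coalesce.  The kernel name and arguments below are
 * made up purely to demonstrate the access pattern implied by conc(i,j). */
__global__ void example_scale_species(double *conc, const double factor,
                                      const int j, const int VL_GLO)
{
    int index = blockIdx.x*blockDim.x+threadIdx.x;  /* one thread per grid cell i */
    if (index < VL_GLO)
        conc(index,j) *= factor;                    /* expands to conc[(j)*VL_GLO+(index)] */
}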
/* Enable debug flags for GPU */
//#define DEBUG
#ifdef DEBUG
#define GPU_DEBUG()\
gpuErrchk( cudaPeekAtLastError() ); \
gpuErrchk( cudaDeviceSynchronize() );
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
static inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
#else
/* If debug flags are disabled */
#define GPU_DEBUG()
#define gpuErrchk(ans) ans
#endif
/** prefetches a global-memory address into the L1 cache */
__device__ inline void prefetch_gl1(const void *p) {
#if __CUDA_ARCH__ <= 300
asm("prefetch.global.L1 [%0];": :"l"(p));
#endif
}
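/** prefetches a local-memory address into the L1 cache */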
__device__ inline void prefetch_ll1(const void *p) {
#if __CUDA_ARCH__ <= 300
asm("prefetch.local.L1 [%0];": :"l"(p));
#endif
}
/** prefetches a global-memory address into the L2 cache */
__device__ inline void prefetch_gl2(const void *p) {
#if __CUDA_ARCH__ <= 300
asm("prefetch.global.L2 [%0];": :"l"(p));
#endif
}
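/** prefetches a local-memory address into the L2 cache */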
__device__ inline void prefetch_ll2(const void *p) {
#if __CUDA_ARCH__ <= 300
asm("prefetch.local.L2 [%0];": :"l"(p));
#endif
}
__device__ void update_rconst(const double * __restrict__ var,
const double * __restrict__ khet_st, const double * __restrict__ khet_tr,
const double * __restrict__ jx, double * __restrict__ rconst,
const double * __restrict__ temp_gpu,
const double * __restrict__ press_gpu,
const double * __restrict__ cair_gpu,
const int VL_GLO);
/* This runs on CPU */
double machine_eps_flt()
{
double machEps = 1.0f;
do
{
machEps /= 2.0f;
// If next epsilon yields 1, then break, because current
// epsilon is the machine epsilon.
}
while ((double)(1.0 + (machEps/2.0)) != 1.0);
return machEps;
}
/* This runs on GPU */
__device__ double machine_eps_flt_cuda()
{
typedef union
{
long i64;
double f64;
} flt_64;
flt_64 s;
s.f64 = 1.;
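    /* Reinterpreting the bits of 1.0 as a 64-bit integer and adding 1 gives
       the next representable double, so the difference returned below is one
       ULP at 1.0, i.e. the machine epsilon. */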
s.i64++;
return (s.f64 - 1.);
}
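/* alpha_AN: Troe-type parameterisation of the organic-nitrate yield from
 * RO2 + NO, as a function of the RO2 size parameter n, temperature and air
 * number density (see the references cited in the comments below). */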
__device__ static double alpha_AN(const int n, const int ro2type, const double temp, const double cair){
double alpha=2.E-22, beta=1.0, Yinf_298K=0.43, F=0.41, m0=0., minf=8.0;
double Y0_298K, Y0_298K_tp, Yinf_298K_t, zeta, k_ratio, alpha_a;
/* IF (ro2type = 1) THEN m = 0.4 ! primary RO2
ELSE IF (ro2type = 2) THEN m = 1. ! secondary RO2
ELSE IF (ro2type = 3) THEN m = 0.3 ! tertiary RO2
ELSE m = 1.
*/
double m = 1.;
Y0_298K = alpha*exp(beta*n);
Y0_298K_tp = Y0_298K *cair *pow((temp/298.),(- m0));
Yinf_298K_t = Yinf_298K * pow((temp/298.),(- minf));
zeta = 1/(1+ pow(log10(Y0_298K_tp/Yinf_298K_t),2));
k_ratio = (Y0_298K_tp/(1+ Y0_298K_tp/Yinf_298K_t))*pow(F,zeta);
alpha_a = k_ratio/(1+ k_ratio) *m;
return alpha_a;
}
__device__ static double alpha_AN(const int n, const int ro2type, const int bcarb, const int gcarb, const int abic, const double temp, const double cair){
double alpha=2.E-22, beta=1.0, Yinf_298K=0.43, F=0.41, m0=0., minf=8.0;
double Y0_298K, Y0_298K_tp, Yinf_298K_t, zeta, k_ratio, alpha_a;
double bcf=1., gcf=1., abf=1.;
double m = 1.; //According to Teng, ref3189
if (bcarb == 1) { bcf = 0.19; }// derived from Praske, ref3190: alpha_AN = 0.03 for the secondary HMKO2 relative to alpha_AN for 6C RO2 (0.16)
if (gcarb == 1) {gcf = 0.44; }// derived from Praske, ref3190: alpha_AN = 0.07 for the primary HMKO2 relative to alpha_AN for 6C RO2 (0.16)
    if (abic == 1) { abf = 0.24; }// derived from the ratio of AN- yield for toluene from Elrod et al. (ref3180), 5.5 % at 298 K &
                                  // 200 torr, and this SAR for linear alkyl RO2 with 9 heavy atoms, 23.3%
Y0_298K = alpha*exp(beta*n);
Y0_298K_tp = Y0_298K *cair *pow((temp/298.),(- m0));
Yinf_298K_t = Yinf_298K * pow((temp/298.),(- minf));
zeta = 1/(1+ pow(log10(Y0_298K_tp/Yinf_298K_t),2));
k_ratio = (Y0_298K_tp/(1+ Y0_298K_tp/Yinf_298K_t))*pow(F,zeta);
alpha_a = k_ratio/(1+ k_ratio) *m*bcf*gcf*abf;
return alpha_a;
}
__device__ static double k_RO2_HO2(const double temp, const int nC){
return 2.91e-13*exp(1300./temp)*(1.-exp(-0.245*nC)); // ref1630
}
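/* ros_ErrorNorm returns the scaled root-mean-square error norm used by the
 * Rosenbrock step-size controller:
 *   err = sqrt( (1/NVAR) * sum_i ( varErr_i / (absTol_i + relTol_i*max(|var_i|,|varNew_i|)) )^2 )
 * With vectorTol == 0 the single pair absTol[0]/relTol[0] is applied to all
 * species; the prefetch loops only warm the L1 cache and do not change the
 * result. */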
__device__ double ros_ErrorNorm(double * __restrict__ var, double * __restrict__ varNew, double * __restrict__ varErr,
const double * __restrict__ absTol, const double * __restrict__ relTol,
const int vectorTol )
{
double err, scale, varMax;
int index = blockIdx.x*blockDim.x+threadIdx.x;
err = ZERO;
if (vectorTol){
for (int i=0;i<NVAR - 16;i+=16){
prefetch_ll1(&varErr(index,i));
prefetch_ll1(&absTol[i]);
prefetch_ll1(&relTol[i]);
prefetch_ll1(&var(index,i));
prefetch_ll1(&varNew(index,i));
}
for (int i=0; i<NVAR; i++)
{
varMax = fmax(fabs(var(index,i)),fabs(varNew(index,i)));
scale = absTol[i]+ relTol[i]*varMax;
err += pow((double)varErr(index,i)/scale,2.0);
}
err = sqrt((double) err/NVAR);
}else{
for (int i=0;i<NVAR - 16;i+=16){
prefetch_ll1(&varErr(index,i));
prefetch_ll1(&var(index,i));
prefetch_ll1(&varNew(index,i));
}
for (int i=0; i<NVAR; i++)
{
varMax = fmax(fabs(var(index,i)),fabs(varNew(index,i)));
scale = absTol[0]+ relTol[0]*varMax;
err += pow((double)varErr(index,i)/scale,2.0);
}
err = sqrt((double) err/NVAR);
}
return err;
}
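/* kppSolve performs the sparse triangular solves for one stage vector
 * K(:,istage,:) using the LU factors stored in Ghimj.  The sparsity pattern
 * of the mechanism's Jacobian is fully unrolled at code-generation time, so
 * only the LU_NONZERO non-zero entries are referenced. */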
__device__ void kppSolve(const double * __restrict__ Ghimj, double * __restrict__ K,
const int istage, const int ros_S ){
int index = blockIdx.x*blockDim.x+threadIdx.x;
//K = &K[istage*NVAR];
K(index,istage,7) = K(index,istage,7)- Ghimj(index,7)*K(index,istage,1)- Ghimj(index,8)*K(index,istage,2);
K(index,istage,8) = K(index,istage,8)- Ghimj(index,23)*K(index,istage,1)- Ghimj(index,24)*K(index,istage,2);
K(index,istage,14) = K(index,istage,14)- Ghimj(index,50)*K(index,istage,5)- Ghimj(index,51)*K(index,istage,6);
K(index,istage,19) = K(index,istage,19)- Ghimj(index,67)*K(index,istage,4);
K(index,istage,31) = K(index,istage,31)- Ghimj(index,188)*K(index,istage,1)- Ghimj(index,189)*K(index,istage,2);
K(index,istage,32) = K(index,istage,32)- Ghimj(index,193)*K(index,istage,1);
K(index,istage,34) = K(index,istage,34)- Ghimj(index,205)*K(index,istage,0);
K(index,istage,60) = K(index,istage,60)- Ghimj(index,309)*K(index,istage,59);
K(index,istage,70) = K(index,istage,70)- Ghimj(index,351)*K(index,istage,61);
K(index,istage,85) = K(index,istage,85)- Ghimj(index,426)*K(index,istage,79);
K(index,istage,86) = K(index,istage,86)- Ghimj(index,434)*K(index,istage,62)- Ghimj(index,435)*K(index,istage,69);
K(index,istage,87) = K(index,istage,87)- Ghimj(index,442)*K(index,istage,70)- Ghimj(index,443)*K(index,istage,84);
K(index,istage,90) = K(index,istage,90)- Ghimj(index,468)*K(index,istage,80);
K(index,istage,92) = K(index,istage,92)- Ghimj(index,487)*K(index,istage,47)- Ghimj(index,488)*K(index,istage,84);
K(index,istage,93) = K(index,istage,93)- Ghimj(index,495)*K(index,istage,49)- Ghimj(index,496)*K(index,istage,69);
K(index,istage,94) = K(index,istage,94)- Ghimj(index,502)*K(index,istage,72)- Ghimj(index,503)*K(index,istage,86)- Ghimj(index,504)*K(index,istage,93);
K(index,istage,95) = K(index,istage,95)- Ghimj(index,510)*K(index,istage,58)- Ghimj(index,511)*K(index,istage,77)- Ghimj(index,512)*K(index,istage,82)- Ghimj(index,513)*K(index,istage,91);
K(index,istage,96) = K(index,istage,96)- Ghimj(index,535)*K(index,istage,72)- Ghimj(index,536)*K(index,istage,82)- Ghimj(index,537)*K(index,istage,94);
K(index,istage,99) = K(index,istage,99)- Ghimj(index,563)*K(index,istage,68)- Ghimj(index,564)*K(index,istage,85);
K(index,istage,100) = K(index,istage,100)- Ghimj(index,572)*K(index,istage,90);
K(index,istage,101) = K(index,istage,101)- Ghimj(index,585)*K(index,istage,83);
K(index,istage,102) = K(index,istage,102)- Ghimj(index,598)*K(index,istage,40)- Ghimj(index,599)*K(index,istage,79);
K(index,istage,108) = K(index,istage,108)- Ghimj(index,630)*K(index,istage,64)- Ghimj(index,631)*K(index,istage,67)- Ghimj(index,632)*K(index,istage,82)- Ghimj(index,633)*K(index,istage,91)- Ghimj(index,634)*K(index,istage,94)- Ghimj(index,635)*K(index,istage,106);
K(index,istage,109) = K(index,istage,109)- Ghimj(index,647)*K(index,istage,106);
K(index,istage,110) = K(index,istage,110)- Ghimj(index,655)*K(index,istage,66)- Ghimj(index,656)*K(index,istage,91)- Ghimj(index,657)*K(index,istage,106)- Ghimj(index,658)*K(index,istage,109);
K(index,istage,111) = K(index,istage,111)- Ghimj(index,666)*K(index,istage,99)- Ghimj(index,667)*K(index,istage,102)- Ghimj(index,668)*K(index,istage,107);
K(index,istage,113) = K(index,istage,113)- Ghimj(index,685)*K(index,istage,64)- Ghimj(index,686)*K(index,istage,82)- Ghimj(index,687)*K(index,istage,106)- Ghimj(index,688)*K(index,istage,110);
K(index,istage,115) = K(index,istage,115)- Ghimj(index,703)*K(index,istage,67)- Ghimj(index,704)*K(index,istage,103)- Ghimj(index,705)*K(index,istage,107);
K(index,istage,117) = K(index,istage,117)- Ghimj(index,722)*K(index,istage,48)- Ghimj(index,723)*K(index,istage,49)- Ghimj(index,724)*K(index,istage,71)- Ghimj(index,725)*K(index,istage,79)- Ghimj(index,726)*K(index,istage,85)- Ghimj(index,727)*K(index,istage,102)- Ghimj(index,728) *K(index,istage,107)- Ghimj(index,729)*K(index,istage,111)- Ghimj(index,730)*K(index,istage,115);
K(index,istage,118) = K(index,istage,118)- Ghimj(index,741)*K(index,istage,100)- Ghimj(index,742)*K(index,istage,105)- Ghimj(index,743)*K(index,istage,112)- Ghimj(index,744)*K(index,istage,116);
K(index,istage,119) = K(index,istage,119)- Ghimj(index,758)*K(index,istage,68)- Ghimj(index,759)*K(index,istage,71)- Ghimj(index,760)*K(index,istage,79)- Ghimj(index,761)*K(index,istage,99)- Ghimj(index,762)*K(index,istage,102)- Ghimj(index,763)*K(index,istage,107)- Ghimj(index,764) *K(index,istage,111)- Ghimj(index,765)*K(index,istage,115)- Ghimj(index,766)*K(index,istage,117);
K(index,istage,120) = K(index,istage,120)- Ghimj(index,777)*K(index,istage,41)- Ghimj(index,778)*K(index,istage,42)- Ghimj(index,779)*K(index,istage,43)- Ghimj(index,780)*K(index,istage,57)- Ghimj(index,781)*K(index,istage,60)- Ghimj(index,782)*K(index,istage,75)- Ghimj(index,783) *K(index,istage,92)- Ghimj(index,784)*K(index,istage,97)- Ghimj(index,785)*K(index,istage,98)- Ghimj(index,786)*K(index,istage,107);
K(index,istage,121) = K(index,istage,121)- Ghimj(index,798)*K(index,istage,38)- Ghimj(index,799)*K(index,istage,63)- Ghimj(index,800)*K(index,istage,68)- Ghimj(index,801)*K(index,istage,72)- Ghimj(index,802)*K(index,istage,77)- Ghimj(index,803)*K(index,istage,82)- Ghimj(index,804) *K(index,istage,85)- Ghimj(index,805)*K(index,istage,86)- Ghimj(index,806)*K(index,istage,93)- Ghimj(index,807)*K(index,istage,94)- Ghimj(index,808)*K(index,istage,96)- Ghimj(index,809)*K(index,istage,99)- Ghimj(index,810)*K(index,istage,102)- Ghimj(index,811) *K(index,istage,106)- Ghimj(index,812)*K(index,istage,107)- Ghimj(index,813)*K(index,istage,108)- Ghimj(index,814)*K(index,istage,109)- Ghimj(index,815)*K(index,istage,110)- Ghimj(index,816)*K(index,istage,111)- Ghimj(index,817)*K(index,istage,113) - Ghimj(index,818)*K(index,istage,115)- Ghimj(index,819)*K(index,istage,117)- Ghimj(index,820)*K(index,istage,119);
K(index,istage,122) = K(index,istage,122)- Ghimj(index,831)*K(index,istage,75)- Ghimj(index,832)*K(index,istage,95)- Ghimj(index,833)*K(index,istage,96)- Ghimj(index,834)*K(index,istage,97)- Ghimj(index,835)*K(index,istage,98)- Ghimj(index,836)*K(index,istage,103)- Ghimj(index,837) *K(index,istage,106)- Ghimj(index,838)*K(index,istage,107)- Ghimj(index,839)*K(index,istage,108)- Ghimj(index,840)*K(index,istage,109)- Ghimj(index,841)*K(index,istage,110)- Ghimj(index,842)*K(index,istage,113)- Ghimj(index,843)*K(index,istage,115) - Ghimj(index,844)*K(index,istage,119)- Ghimj(index,845)*K(index,istage,120)- Ghimj(index,846)*K(index,istage,121);
K(index,istage,123) = K(index,istage,123)- Ghimj(index,861)*K(index,istage,103)- Ghimj(index,862)*K(index,istage,104)- Ghimj(index,863)*K(index,istage,112)- Ghimj(index,864)*K(index,istage,114)- Ghimj(index,865)*K(index,istage,116)- Ghimj(index,866)*K(index,istage,118) - Ghimj(index,867)*K(index,istage,119)- Ghimj(index,868)*K(index,istage,121);
K(index,istage,124) = K(index,istage,124)- Ghimj(index,885)*K(index,istage,81)- Ghimj(index,886)*K(index,istage,84)- Ghimj(index,887)*K(index,istage,92)- Ghimj(index,888)*K(index,istage,103)- Ghimj(index,889)*K(index,istage,106)- Ghimj(index,890)*K(index,istage,107)- Ghimj(index,891) *K(index,istage,110)- Ghimj(index,892)*K(index,istage,114)- Ghimj(index,893)*K(index,istage,120)- Ghimj(index,894)*K(index,istage,121)- Ghimj(index,895)*K(index,istage,122);
K(index,istage,125) = K(index,istage,125)- Ghimj(index,910)*K(index,istage,3)- Ghimj(index,911)*K(index,istage,53)- Ghimj(index,912)*K(index,istage,63)- Ghimj(index,913)*K(index,istage,65)- Ghimj(index,914)*K(index,istage,74)- Ghimj(index,915)*K(index,istage,75)- Ghimj(index,916) *K(index,istage,81)- Ghimj(index,917)*K(index,istage,86)- Ghimj(index,918)*K(index,istage,93)- Ghimj(index,919)*K(index,istage,94)- Ghimj(index,920)*K(index,istage,98)- Ghimj(index,921)*K(index,istage,102)- Ghimj(index,922)*K(index,istage,104)- Ghimj(index,923) *K(index,istage,106)- Ghimj(index,924)*K(index,istage,107)- Ghimj(index,925)*K(index,istage,109)- Ghimj(index,926)*K(index,istage,113)- Ghimj(index,927)*K(index,istage,114)- Ghimj(index,928)*K(index,istage,117)- Ghimj(index,929)*K(index,istage,119) - Ghimj(index,930)*K(index,istage,120)- Ghimj(index,931)*K(index,istage,121)- Ghimj(index,932)*K(index,istage,122)- Ghimj(index,933)*K(index,istage,124);
K(index,istage,126) = K(index,istage,126)- Ghimj(index,948)*K(index,istage,40)- Ghimj(index,949)*K(index,istage,44)- Ghimj(index,950)*K(index,istage,45)- Ghimj(index,951)*K(index,istage,47)- Ghimj(index,952)*K(index,istage,48)- Ghimj(index,953)*K(index,istage,49)- Ghimj(index,954) *K(index,istage,52)- Ghimj(index,955)*K(index,istage,53)- Ghimj(index,956)*K(index,istage,54)- Ghimj(index,957)*K(index,istage,55)- Ghimj(index,958)*K(index,istage,56)- Ghimj(index,959)*K(index,istage,57)- Ghimj(index,960)*K(index,istage,58)- Ghimj(index,961) *K(index,istage,61)- Ghimj(index,962)*K(index,istage,62)- Ghimj(index,963)*K(index,istage,63)- Ghimj(index,964)*K(index,istage,64)- Ghimj(index,965)*K(index,istage,65)- Ghimj(index,966)*K(index,istage,66)- Ghimj(index,967)*K(index,istage,67)- Ghimj(index,968) *K(index,istage,68)- Ghimj(index,969)*K(index,istage,69)- Ghimj(index,970)*K(index,istage,70)- Ghimj(index,971)*K(index,istage,71)- Ghimj(index,972)*K(index,istage,72)- Ghimj(index,973)*K(index,istage,73)- Ghimj(index,974)*K(index,istage,74)- Ghimj(index,975) *K(index,istage,75)- Ghimj(index,976)*K(index,istage,76)- Ghimj(index,977)*K(index,istage,77)- Ghimj(index,978)*K(index,istage,78)- Ghimj(index,979)*K(index,istage,79)- Ghimj(index,980)*K(index,istage,81)- Ghimj(index,981)*K(index,istage,82)- Ghimj(index,982) *K(index,istage,84)- Ghimj(index,983)*K(index,istage,85)- Ghimj(index,984)*K(index,istage,86)- Ghimj(index,985)*K(index,istage,87)- Ghimj(index,986)*K(index,istage,88)- Ghimj(index,987)*K(index,istage,89)- Ghimj(index,988)*K(index,istage,91)- Ghimj(index,989) *K(index,istage,92)- Ghimj(index,990)*K(index,istage,93)- Ghimj(index,991)*K(index,istage,94)- Ghimj(index,992)*K(index,istage,95)- Ghimj(index,993)*K(index,istage,96)- Ghimj(index,994)*K(index,istage,97)- Ghimj(index,995)*K(index,istage,98)- Ghimj(index,996) *K(index,istage,99)- Ghimj(index,997)*K(index,istage,100)- Ghimj(index,998)*K(index,istage,101)- Ghimj(index,999)*K(index,istage,102)- Ghimj(index,1000)*K(index,istage,103)- Ghimj(index,1001)*K(index,istage,104)- Ghimj(index,1002)*K(index,istage,105) - Ghimj(index,1003)*K(index,istage,106)- Ghimj(index,1004)*K(index,istage,107)- Ghimj(index,1005)*K(index,istage,108)- Ghimj(index,1006)*K(index,istage,109)- Ghimj(index,1007)*K(index,istage,110)- Ghimj(index,1008)*K(index,istage,111) - Ghimj(index,1009)*K(index,istage,112)- Ghimj(index,1010)*K(index,istage,113)- Ghimj(index,1011)*K(index,istage,114)- Ghimj(index,1012)*K(index,istage,115)- Ghimj(index,1013)*K(index,istage,116)- Ghimj(index,1014)*K(index,istage,117) - Ghimj(index,1015)*K(index,istage,118)- Ghimj(index,1016)*K(index,istage,119)- Ghimj(index,1017)*K(index,istage,120)- Ghimj(index,1018)*K(index,istage,121)- Ghimj(index,1019)*K(index,istage,122)- Ghimj(index,1020)*K(index,istage,123) - Ghimj(index,1021)*K(index,istage,124)- Ghimj(index,1022)*K(index,istage,125);
K(index,istage,127) = K(index,istage,127)- Ghimj(index,1036)*K(index,istage,1)- Ghimj(index,1037)*K(index,istage,39)- Ghimj(index,1038)*K(index,istage,41)- Ghimj(index,1039)*K(index,istage,42)- Ghimj(index,1040)*K(index,istage,43)- Ghimj(index,1041)*K(index,istage,50) - Ghimj(index,1042)*K(index,istage,52)- Ghimj(index,1043)*K(index,istage,54)- Ghimj(index,1044)*K(index,istage,55)- Ghimj(index,1045)*K(index,istage,57)- Ghimj(index,1046)*K(index,istage,75)- Ghimj(index,1047)*K(index,istage,80)- Ghimj(index,1048) *K(index,istage,83)- Ghimj(index,1049)*K(index,istage,88)- Ghimj(index,1050)*K(index,istage,90)- Ghimj(index,1051)*K(index,istage,97)- Ghimj(index,1052)*K(index,istage,98)- Ghimj(index,1053)*K(index,istage,100)- Ghimj(index,1054)*K(index,istage,103) - Ghimj(index,1055)*K(index,istage,104)- Ghimj(index,1056)*K(index,istage,105)- Ghimj(index,1057)*K(index,istage,106)- Ghimj(index,1058)*K(index,istage,107)- Ghimj(index,1059)*K(index,istage,112)- Ghimj(index,1060)*K(index,istage,114) - Ghimj(index,1061)*K(index,istage,116)- Ghimj(index,1062)*K(index,istage,118)- Ghimj(index,1063)*K(index,istage,119)- Ghimj(index,1064)*K(index,istage,120)- Ghimj(index,1065)*K(index,istage,121)- Ghimj(index,1066)*K(index,istage,122) - Ghimj(index,1067)*K(index,istage,123)- Ghimj(index,1068)*K(index,istage,124)- Ghimj(index,1069)*K(index,istage,125)- Ghimj(index,1070)*K(index,istage,126);
K(index,istage,128) = K(index,istage,128)- Ghimj(index,1083)*K(index,istage,40)- Ghimj(index,1084)*K(index,istage,44)- Ghimj(index,1085)*K(index,istage,45)- Ghimj(index,1086)*K(index,istage,47)- Ghimj(index,1087)*K(index,istage,48)- Ghimj(index,1088)*K(index,istage,49) - Ghimj(index,1089)*K(index,istage,52)- Ghimj(index,1090)*K(index,istage,53)- Ghimj(index,1091)*K(index,istage,54)- Ghimj(index,1092)*K(index,istage,55)- Ghimj(index,1093)*K(index,istage,57)- Ghimj(index,1094)*K(index,istage,61)- Ghimj(index,1095) *K(index,istage,63)- Ghimj(index,1096)*K(index,istage,67)- Ghimj(index,1097)*K(index,istage,70)- Ghimj(index,1098)*K(index,istage,73)- Ghimj(index,1099)*K(index,istage,74)- Ghimj(index,1100)*K(index,istage,75)- Ghimj(index,1101)*K(index,istage,76) - Ghimj(index,1102)*K(index,istage,77)- Ghimj(index,1103)*K(index,istage,78)- Ghimj(index,1104)*K(index,istage,79)- Ghimj(index,1105)*K(index,istage,83)- Ghimj(index,1106)*K(index,istage,84)- Ghimj(index,1107)*K(index,istage,86)- Ghimj(index,1108) *K(index,istage,87)- Ghimj(index,1109)*K(index,istage,88)- Ghimj(index,1110)*K(index,istage,92)- Ghimj(index,1111)*K(index,istage,93)- Ghimj(index,1112)*K(index,istage,97)- Ghimj(index,1113)*K(index,istage,98)- Ghimj(index,1114)*K(index,istage,101) - Ghimj(index,1115)*K(index,istage,102)- Ghimj(index,1116)*K(index,istage,103)- Ghimj(index,1117)*K(index,istage,104)- Ghimj(index,1118)*K(index,istage,105)- Ghimj(index,1119)*K(index,istage,106)- Ghimj(index,1120)*K(index,istage,107) - Ghimj(index,1121)*K(index,istage,110)- Ghimj(index,1122)*K(index,istage,111)- Ghimj(index,1123)*K(index,istage,112)- Ghimj(index,1124)*K(index,istage,114)- Ghimj(index,1125)*K(index,istage,115)- Ghimj(index,1126)*K(index,istage,116) - Ghimj(index,1127)*K(index,istage,117)- Ghimj(index,1128)*K(index,istage,118)- Ghimj(index,1129)*K(index,istage,119)- Ghimj(index,1130)*K(index,istage,120)- Ghimj(index,1131)*K(index,istage,121)- Ghimj(index,1132)*K(index,istage,122) - Ghimj(index,1133)*K(index,istage,123)- Ghimj(index,1134)*K(index,istage,124)- Ghimj(index,1135)*K(index,istage,125)- Ghimj(index,1136)*K(index,istage,126)- Ghimj(index,1137)*K(index,istage,127);
K(index,istage,129) = K(index,istage,129)- Ghimj(index,1149)*K(index,istage,0)- Ghimj(index,1150)*K(index,istage,1)- Ghimj(index,1151)*K(index,istage,2)- Ghimj(index,1152)*K(index,istage,44)- Ghimj(index,1153)*K(index,istage,45)- Ghimj(index,1154)*K(index,istage,52)- Ghimj(index,1155) *K(index,istage,53)- Ghimj(index,1156)*K(index,istage,54)- Ghimj(index,1157)*K(index,istage,55)- Ghimj(index,1158)*K(index,istage,80)- Ghimj(index,1159)*K(index,istage,90)- Ghimj(index,1160)*K(index,istage,100)- Ghimj(index,1161)*K(index,istage,103) - Ghimj(index,1162)*K(index,istage,104)- Ghimj(index,1163)*K(index,istage,105)- Ghimj(index,1164)*K(index,istage,112)- Ghimj(index,1165)*K(index,istage,114)- Ghimj(index,1166)*K(index,istage,116)- Ghimj(index,1167)*K(index,istage,118) - Ghimj(index,1168)*K(index,istage,119)- Ghimj(index,1169)*K(index,istage,121)- Ghimj(index,1170)*K(index,istage,123)- Ghimj(index,1171)*K(index,istage,124)- Ghimj(index,1172)*K(index,istage,125)- Ghimj(index,1173)*K(index,istage,126) - Ghimj(index,1174)*K(index,istage,127)- Ghimj(index,1175)*K(index,istage,128);
K(index,istage,130) = K(index,istage,130)- Ghimj(index,1186)*K(index,istage,58)- Ghimj(index,1187)*K(index,istage,65)- Ghimj(index,1188)*K(index,istage,66)- Ghimj(index,1189)*K(index,istage,72)- Ghimj(index,1190)*K(index,istage,77)- Ghimj(index,1191)*K(index,istage,82) - Ghimj(index,1192)*K(index,istage,89)- Ghimj(index,1193)*K(index,istage,91)- Ghimj(index,1194)*K(index,istage,93)- Ghimj(index,1195)*K(index,istage,94)- Ghimj(index,1196)*K(index,istage,98)- Ghimj(index,1197)*K(index,istage,102)- Ghimj(index,1198) *K(index,istage,103)- Ghimj(index,1199)*K(index,istage,104)- Ghimj(index,1200)*K(index,istage,106)- Ghimj(index,1201)*K(index,istage,107)- Ghimj(index,1202)*K(index,istage,108)- Ghimj(index,1203)*K(index,istage,109)- Ghimj(index,1204)*K(index,istage,110) - Ghimj(index,1205)*K(index,istage,113)- Ghimj(index,1206)*K(index,istage,114)- Ghimj(index,1207)*K(index,istage,115)- Ghimj(index,1208)*K(index,istage,117)- Ghimj(index,1209)*K(index,istage,120)- Ghimj(index,1210)*K(index,istage,121) - Ghimj(index,1211)*K(index,istage,122)- Ghimj(index,1212)*K(index,istage,124)- Ghimj(index,1213)*K(index,istage,125)- Ghimj(index,1214)*K(index,istage,126)- Ghimj(index,1215)*K(index,istage,127)- Ghimj(index,1216)*K(index,istage,128) - Ghimj(index,1217)*K(index,istage,129);
K(index,istage,131) = K(index,istage,131)- Ghimj(index,1227)*K(index,istage,51)- Ghimj(index,1228)*K(index,istage,59)- Ghimj(index,1229)*K(index,istage,75)- Ghimj(index,1230)*K(index,istage,116)- Ghimj(index,1231)*K(index,istage,118)- Ghimj(index,1232)*K(index,istage,120) - Ghimj(index,1233)*K(index,istage,122)- Ghimj(index,1234)*K(index,istage,123)- Ghimj(index,1235)*K(index,istage,124)- Ghimj(index,1236)*K(index,istage,125)- Ghimj(index,1237)*K(index,istage,126)- Ghimj(index,1238)*K(index,istage,127) - Ghimj(index,1239)*K(index,istage,128)- Ghimj(index,1240)*K(index,istage,129)- Ghimj(index,1241)*K(index,istage,130);
K(index,istage,132) = K(index,istage,132)- Ghimj(index,1250)*K(index,istage,105)- Ghimj(index,1251)*K(index,istage,114)- Ghimj(index,1252)*K(index,istage,118)- Ghimj(index,1253)*K(index,istage,123)- Ghimj(index,1254)*K(index,istage,124)- Ghimj(index,1255)*K(index,istage,125) - Ghimj(index,1256)*K(index,istage,126)- Ghimj(index,1257)*K(index,istage,127)- Ghimj(index,1258)*K(index,istage,128)- Ghimj(index,1259)*K(index,istage,129)- Ghimj(index,1260)*K(index,istage,130)- Ghimj(index,1261)*K(index,istage,131);
K(index,istage,133) = K(index,istage,133)- Ghimj(index,1269)*K(index,istage,59)- Ghimj(index,1270)*K(index,istage,60)- Ghimj(index,1271)*K(index,istage,70)- Ghimj(index,1272)*K(index,istage,76)- Ghimj(index,1273)*K(index,istage,84)- Ghimj(index,1274)*K(index,istage,87) - Ghimj(index,1275)*K(index,istage,92)- Ghimj(index,1276)*K(index,istage,93)- Ghimj(index,1277)*K(index,istage,94)- Ghimj(index,1278)*K(index,istage,99)- Ghimj(index,1279)*K(index,istage,102)- Ghimj(index,1280)*K(index,istage,109)- Ghimj(index,1281) *K(index,istage,111)- Ghimj(index,1282)*K(index,istage,113)- Ghimj(index,1283)*K(index,istage,115)- Ghimj(index,1284)*K(index,istage,117)- Ghimj(index,1285)*K(index,istage,120)- Ghimj(index,1286)*K(index,istage,121)- Ghimj(index,1287)*K(index,istage,122) - Ghimj(index,1288)*K(index,istage,124)- Ghimj(index,1289)*K(index,istage,125)- Ghimj(index,1290)*K(index,istage,126)- Ghimj(index,1291)*K(index,istage,127)- Ghimj(index,1292)*K(index,istage,128)- Ghimj(index,1293)*K(index,istage,129) - Ghimj(index,1294)*K(index,istage,130)- Ghimj(index,1295)*K(index,istage,131)- Ghimj(index,1296)*K(index,istage,132);
K(index,istage,134) = K(index,istage,134)- Ghimj(index,1303)*K(index,istage,39)- Ghimj(index,1304)*K(index,istage,41)- Ghimj(index,1305)*K(index,istage,42)- Ghimj(index,1306)*K(index,istage,43)- Ghimj(index,1307)*K(index,istage,51)- Ghimj(index,1308)*K(index,istage,75) - Ghimj(index,1309)*K(index,istage,112)- Ghimj(index,1310)*K(index,istage,116)- Ghimj(index,1311)*K(index,istage,120)- Ghimj(index,1312)*K(index,istage,122)- Ghimj(index,1313)*K(index,istage,123)- Ghimj(index,1314)*K(index,istage,124) - Ghimj(index,1315)*K(index,istage,125)- Ghimj(index,1316)*K(index,istage,126)- Ghimj(index,1317)*K(index,istage,127)- Ghimj(index,1318)*K(index,istage,128)- Ghimj(index,1319)*K(index,istage,129)- Ghimj(index,1320)*K(index,istage,130) - Ghimj(index,1321)*K(index,istage,131)- Ghimj(index,1322)*K(index,istage,132)- Ghimj(index,1323)*K(index,istage,133);
K(index,istage,135) = K(index,istage,135)- Ghimj(index,1329)*K(index,istage,0)- Ghimj(index,1330)*K(index,istage,50)- Ghimj(index,1331)*K(index,istage,58)- Ghimj(index,1332)*K(index,istage,59)- Ghimj(index,1333)*K(index,istage,62)- Ghimj(index,1334)*K(index,istage,64) - Ghimj(index,1335)*K(index,istage,73)- Ghimj(index,1336)*K(index,istage,76)- Ghimj(index,1337)*K(index,istage,77)- Ghimj(index,1338)*K(index,istage,83)- Ghimj(index,1339)*K(index,istage,87)- Ghimj(index,1340)*K(index,istage,91)- Ghimj(index,1341) *K(index,istage,92)- Ghimj(index,1342)*K(index,istage,93)- Ghimj(index,1343)*K(index,istage,94)- Ghimj(index,1344)*K(index,istage,99)- Ghimj(index,1345)*K(index,istage,101)- Ghimj(index,1346)*K(index,istage,102)- Ghimj(index,1347)*K(index,istage,105) - Ghimj(index,1348)*K(index,istage,106)- Ghimj(index,1349)*K(index,istage,109)- Ghimj(index,1350)*K(index,istage,111)- Ghimj(index,1351)*K(index,istage,113)- Ghimj(index,1352)*K(index,istage,114)- Ghimj(index,1353)*K(index,istage,115) - Ghimj(index,1354)*K(index,istage,116)- Ghimj(index,1355)*K(index,istage,117)- Ghimj(index,1356)*K(index,istage,119)- Ghimj(index,1357)*K(index,istage,121)- Ghimj(index,1358)*K(index,istage,123)- Ghimj(index,1359)*K(index,istage,124) - Ghimj(index,1360)*K(index,istage,125)- Ghimj(index,1361)*K(index,istage,126)- Ghimj(index,1362)*K(index,istage,127)- Ghimj(index,1363)*K(index,istage,128)- Ghimj(index,1364)*K(index,istage,129)- Ghimj(index,1365)*K(index,istage,130) - Ghimj(index,1366)*K(index,istage,131)- Ghimj(index,1367)*K(index,istage,132)- Ghimj(index,1368)*K(index,istage,133)- Ghimj(index,1369)*K(index,istage,134);
K(index,istage,136) = K(index,istage,136)- Ghimj(index,1374)*K(index,istage,73)- Ghimj(index,1375)*K(index,istage,83)- Ghimj(index,1376)*K(index,istage,101)- Ghimj(index,1377)*K(index,istage,105)- Ghimj(index,1378)*K(index,istage,106)- Ghimj(index,1379)*K(index,istage,107) - Ghimj(index,1380)*K(index,istage,114)- Ghimj(index,1381)*K(index,istage,116)- Ghimj(index,1382)*K(index,istage,117)- Ghimj(index,1383)*K(index,istage,119)- Ghimj(index,1384)*K(index,istage,121)- Ghimj(index,1385)*K(index,istage,123) - Ghimj(index,1386)*K(index,istage,124)- Ghimj(index,1387)*K(index,istage,125)- Ghimj(index,1388)*K(index,istage,126)- Ghimj(index,1389)*K(index,istage,127)- Ghimj(index,1390)*K(index,istage,128)- Ghimj(index,1391)*K(index,istage,129) - Ghimj(index,1392)*K(index,istage,130)- Ghimj(index,1393)*K(index,istage,131)- Ghimj(index,1394)*K(index,istage,132)- Ghimj(index,1395)*K(index,istage,133)- Ghimj(index,1396)*K(index,istage,134)- Ghimj(index,1397)*K(index,istage,135);
K(index,istage,137) = K(index,istage,137)- Ghimj(index,1401)*K(index,istage,46)- Ghimj(index,1402)*K(index,istage,56)- Ghimj(index,1403)*K(index,istage,62)- Ghimj(index,1404)*K(index,istage,65)- Ghimj(index,1405)*K(index,istage,66)- Ghimj(index,1406)*K(index,istage,69) - Ghimj(index,1407)*K(index,istage,71)- Ghimj(index,1408)*K(index,istage,73)- Ghimj(index,1409)*K(index,istage,78)- Ghimj(index,1410)*K(index,istage,79)- Ghimj(index,1411)*K(index,istage,81)- Ghimj(index,1412)*K(index,istage,82)- Ghimj(index,1413) *K(index,istage,87)- Ghimj(index,1414)*K(index,istage,88)- Ghimj(index,1415)*K(index,istage,89)- Ghimj(index,1416)*K(index,istage,91)- Ghimj(index,1417)*K(index,istage,92)- Ghimj(index,1418)*K(index,istage,93)- Ghimj(index,1419)*K(index,istage,94) - Ghimj(index,1420)*K(index,istage,96)- Ghimj(index,1421)*K(index,istage,99)- Ghimj(index,1422)*K(index,istage,102)- Ghimj(index,1423)*K(index,istage,103)- Ghimj(index,1424)*K(index,istage,104)- Ghimj(index,1425)*K(index,istage,106) - Ghimj(index,1426)*K(index,istage,107)- Ghimj(index,1427)*K(index,istage,108)- Ghimj(index,1428)*K(index,istage,109)- Ghimj(index,1429)*K(index,istage,110)- Ghimj(index,1430)*K(index,istage,111)- Ghimj(index,1431)*K(index,istage,113) - Ghimj(index,1432)*K(index,istage,114)- Ghimj(index,1433)*K(index,istage,115)- Ghimj(index,1434)*K(index,istage,117)- Ghimj(index,1435)*K(index,istage,119)- Ghimj(index,1436)*K(index,istage,121)- Ghimj(index,1437)*K(index,istage,122) - Ghimj(index,1438)*K(index,istage,124)- Ghimj(index,1439)*K(index,istage,125)- Ghimj(index,1440)*K(index,istage,126)- Ghimj(index,1441)*K(index,istage,127)- Ghimj(index,1442)*K(index,istage,128)- Ghimj(index,1443)*K(index,istage,129) - Ghimj(index,1444)*K(index,istage,130)- Ghimj(index,1445)*K(index,istage,131)- Ghimj(index,1446)*K(index,istage,132)- Ghimj(index,1447)*K(index,istage,133)- Ghimj(index,1448)*K(index,istage,134)- Ghimj(index,1449)*K(index,istage,135) - Ghimj(index,1450)*K(index,istage,136);
K(index,istage,138) = K(index,istage,138)- Ghimj(index,1453)*K(index,istage,83)- Ghimj(index,1454)*K(index,istage,88)- Ghimj(index,1455)*K(index,istage,97)- Ghimj(index,1456)*K(index,istage,98)- Ghimj(index,1457)*K(index,istage,103)- Ghimj(index,1458)*K(index,istage,104) - Ghimj(index,1459)*K(index,istage,105)- Ghimj(index,1460)*K(index,istage,106)- Ghimj(index,1461)*K(index,istage,107)- Ghimj(index,1462)*K(index,istage,112)- Ghimj(index,1463)*K(index,istage,114)- Ghimj(index,1464)*K(index,istage,116) - Ghimj(index,1465)*K(index,istage,118)- Ghimj(index,1466)*K(index,istage,119)- Ghimj(index,1467)*K(index,istage,120)- Ghimj(index,1468)*K(index,istage,121)- Ghimj(index,1469)*K(index,istage,122)- Ghimj(index,1470)*K(index,istage,123) - Ghimj(index,1471)*K(index,istage,124)- Ghimj(index,1472)*K(index,istage,125)- Ghimj(index,1473)*K(index,istage,126)- Ghimj(index,1474)*K(index,istage,127)- Ghimj(index,1475)*K(index,istage,128)- Ghimj(index,1476)*K(index,istage,129) - Ghimj(index,1477)*K(index,istage,130)- Ghimj(index,1478)*K(index,istage,131)- Ghimj(index,1479)*K(index,istage,132)- Ghimj(index,1480)*K(index,istage,133)- Ghimj(index,1481)*K(index,istage,134)- Ghimj(index,1482)*K(index,istage,135) - Ghimj(index,1483)*K(index,istage,136)- Ghimj(index,1484)*K(index,istage,137);
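 /* Backward substitution: starting from the last species (138), divide by the
    diagonal of the factored matrix and eliminate the upper-triangular
    contributions, working down to species 0. */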
K(index,istage,138) = K(index,istage,138)/ Ghimj(index,1485);
K(index,istage,137) = (K(index,istage,137)- Ghimj(index,1452)*K(index,istage,138))/(Ghimj(index,1451));
K(index,istage,136) = (K(index,istage,136)- Ghimj(index,1399)*K(index,istage,137)- Ghimj(index,1400)*K(index,istage,138))/(Ghimj(index,1398));
K(index,istage,135) = (K(index,istage,135)- Ghimj(index,1371)*K(index,istage,136)- Ghimj(index,1372)*K(index,istage,137)- Ghimj(index,1373)*K(index,istage,138))/(Ghimj(index,1370));
K(index,istage,134) = (K(index,istage,134)- Ghimj(index,1325)*K(index,istage,135)- Ghimj(index,1326)*K(index,istage,136)- Ghimj(index,1327)*K(index,istage,137)- Ghimj(index,1328)*K(index,istage,138))/(Ghimj(index,1324));
K(index,istage,133) = (K(index,istage,133)- Ghimj(index,1298)*K(index,istage,134)- Ghimj(index,1299)*K(index,istage,135)- Ghimj(index,1300)*K(index,istage,136)- Ghimj(index,1301)*K(index,istage,137)- Ghimj(index,1302)*K(index,istage,138))/(Ghimj(index,1297));
K(index,istage,132) = (K(index,istage,132)- Ghimj(index,1263)*K(index,istage,133)- Ghimj(index,1264)*K(index,istage,134)- Ghimj(index,1265)*K(index,istage,135)- Ghimj(index,1266)*K(index,istage,136)- Ghimj(index,1267)*K(index,istage,137)- Ghimj(index,1268) *K(index,istage,138))/(Ghimj(index,1262));
K(index,istage,131) = (K(index,istage,131)- Ghimj(index,1243)*K(index,istage,132)- Ghimj(index,1244)*K(index,istage,133)- Ghimj(index,1245)*K(index,istage,134)- Ghimj(index,1246)*K(index,istage,135)- Ghimj(index,1247)*K(index,istage,136)- Ghimj(index,1248)*K(index,istage,137) - Ghimj(index,1249)*K(index,istage,138))/(Ghimj(index,1242));
K(index,istage,130) = (K(index,istage,130)- Ghimj(index,1219)*K(index,istage,131)- Ghimj(index,1220)*K(index,istage,132)- Ghimj(index,1221)*K(index,istage,133)- Ghimj(index,1222)*K(index,istage,134)- Ghimj(index,1223)*K(index,istage,135)- Ghimj(index,1224)*K(index,istage,136) - Ghimj(index,1225)*K(index,istage,137)- Ghimj(index,1226)*K(index,istage,138))/(Ghimj(index,1218));
K(index,istage,129) = (K(index,istage,129)- Ghimj(index,1177)*K(index,istage,130)- Ghimj(index,1178)*K(index,istage,131)- Ghimj(index,1179)*K(index,istage,132)- Ghimj(index,1180)*K(index,istage,133)- Ghimj(index,1181)*K(index,istage,134)- Ghimj(index,1182)*K(index,istage,135) - Ghimj(index,1183)*K(index,istage,136)- Ghimj(index,1184)*K(index,istage,137)- Ghimj(index,1185)*K(index,istage,138))/(Ghimj(index,1176));
K(index,istage,128) = (K(index,istage,128)- Ghimj(index,1139)*K(index,istage,129)- Ghimj(index,1140)*K(index,istage,130)- Ghimj(index,1141)*K(index,istage,131)- Ghimj(index,1142)*K(index,istage,132)- Ghimj(index,1143)*K(index,istage,133)- Ghimj(index,1144)*K(index,istage,134) - Ghimj(index,1145)*K(index,istage,135)- Ghimj(index,1146)*K(index,istage,136)- Ghimj(index,1147)*K(index,istage,137)- Ghimj(index,1148)*K(index,istage,138))/(Ghimj(index,1138));
K(index,istage,127) = (K(index,istage,127)- Ghimj(index,1072)*K(index,istage,128)- Ghimj(index,1073)*K(index,istage,129)- Ghimj(index,1074)*K(index,istage,130)- Ghimj(index,1075)*K(index,istage,131)- Ghimj(index,1076)*K(index,istage,132)- Ghimj(index,1077)*K(index,istage,133) - Ghimj(index,1078)*K(index,istage,134)- Ghimj(index,1079)*K(index,istage,135)- Ghimj(index,1080)*K(index,istage,136)- Ghimj(index,1081)*K(index,istage,137)- Ghimj(index,1082)*K(index,istage,138))/(Ghimj(index,1071));
K(index,istage,126) = (K(index,istage,126)- Ghimj(index,1024)*K(index,istage,127)- Ghimj(index,1025)*K(index,istage,128)- Ghimj(index,1026)*K(index,istage,129)- Ghimj(index,1027)*K(index,istage,130)- Ghimj(index,1028)*K(index,istage,131)- Ghimj(index,1029)*K(index,istage,132) - Ghimj(index,1030)*K(index,istage,133)- Ghimj(index,1031)*K(index,istage,134)- Ghimj(index,1032)*K(index,istage,135)- Ghimj(index,1033)*K(index,istage,136)- Ghimj(index,1034)*K(index,istage,137)- Ghimj(index,1035)*K(index,istage,138)) /(Ghimj(index,1023));
K(index,istage,125) = (K(index,istage,125)- Ghimj(index,935)*K(index,istage,126)- Ghimj(index,936)*K(index,istage,127)- Ghimj(index,937)*K(index,istage,128)- Ghimj(index,938)*K(index,istage,129)- Ghimj(index,939)*K(index,istage,130)- Ghimj(index,940)*K(index,istage,131) - Ghimj(index,941)*K(index,istage,132)- Ghimj(index,942)*K(index,istage,133)- Ghimj(index,943)*K(index,istage,134)- Ghimj(index,944)*K(index,istage,135)- Ghimj(index,945)*K(index,istage,136)- Ghimj(index,946)*K(index,istage,137)- Ghimj(index,947) *K(index,istage,138))/(Ghimj(index,934));
K(index,istage,124) = (K(index,istage,124)- Ghimj(index,897)*K(index,istage,125)- Ghimj(index,898)*K(index,istage,126)- Ghimj(index,899)*K(index,istage,127)- Ghimj(index,900)*K(index,istage,128)- Ghimj(index,901)*K(index,istage,129)- Ghimj(index,902)*K(index,istage,130) - Ghimj(index,903)*K(index,istage,131)- Ghimj(index,904)*K(index,istage,132)- Ghimj(index,905)*K(index,istage,133)- Ghimj(index,906)*K(index,istage,135)- Ghimj(index,907)*K(index,istage,136)- Ghimj(index,908)*K(index,istage,137)- Ghimj(index,909) *K(index,istage,138))/(Ghimj(index,896));
K(index,istage,123) = (K(index,istage,123)- Ghimj(index,870)*K(index,istage,124)- Ghimj(index,871)*K(index,istage,125)- Ghimj(index,872)*K(index,istage,126)- Ghimj(index,873)*K(index,istage,127)- Ghimj(index,874)*K(index,istage,128)- Ghimj(index,875)*K(index,istage,129) - Ghimj(index,876)*K(index,istage,130)- Ghimj(index,877)*K(index,istage,131)- Ghimj(index,878)*K(index,istage,132)- Ghimj(index,879)*K(index,istage,133)- Ghimj(index,880)*K(index,istage,134)- Ghimj(index,881)*K(index,istage,135)- Ghimj(index,882) *K(index,istage,136)- Ghimj(index,883)*K(index,istage,137)- Ghimj(index,884)*K(index,istage,138))/(Ghimj(index,869));
K(index,istage,122) = (K(index,istage,122)- Ghimj(index,848)*K(index,istage,124)- Ghimj(index,849)*K(index,istage,125)- Ghimj(index,850)*K(index,istage,126)- Ghimj(index,851)*K(index,istage,127)- Ghimj(index,852)*K(index,istage,128)- Ghimj(index,853)*K(index,istage,129) - Ghimj(index,854)*K(index,istage,130)- Ghimj(index,855)*K(index,istage,131)- Ghimj(index,856)*K(index,istage,133)- Ghimj(index,857)*K(index,istage,135)- Ghimj(index,858)*K(index,istage,136)- Ghimj(index,859)*K(index,istage,137)- Ghimj(index,860) *K(index,istage,138))/(Ghimj(index,847));
K(index,istage,121) = (K(index,istage,121)- Ghimj(index,822)*K(index,istage,124)- Ghimj(index,823)*K(index,istage,125)- Ghimj(index,824)*K(index,istage,126)- Ghimj(index,825)*K(index,istage,127)- Ghimj(index,826)*K(index,istage,129)- Ghimj(index,827)*K(index,istage,133) - Ghimj(index,828)*K(index,istage,135)- Ghimj(index,829)*K(index,istage,136)- Ghimj(index,830)*K(index,istage,137))/(Ghimj(index,821));
K(index,istage,120) = (K(index,istage,120)- Ghimj(index,788)*K(index,istage,122)- Ghimj(index,789)*K(index,istage,124)- Ghimj(index,790)*K(index,istage,126)- Ghimj(index,791)*K(index,istage,127)- Ghimj(index,792)*K(index,istage,128)- Ghimj(index,793)*K(index,istage,130) - Ghimj(index,794)*K(index,istage,133)- Ghimj(index,795)*K(index,istage,135)- Ghimj(index,796)*K(index,istage,136)- Ghimj(index,797)*K(index,istage,137))/(Ghimj(index,787));
K(index,istage,119) = (K(index,istage,119)- Ghimj(index,768)*K(index,istage,121)- Ghimj(index,769)*K(index,istage,124)- Ghimj(index,770)*K(index,istage,125)- Ghimj(index,771)*K(index,istage,126)- Ghimj(index,772)*K(index,istage,127)- Ghimj(index,773)*K(index,istage,129) - Ghimj(index,774)*K(index,istage,133)- Ghimj(index,775)*K(index,istage,136)- Ghimj(index,776)*K(index,istage,137))/(Ghimj(index,767));
K(index,istage,118) = (K(index,istage,118)- Ghimj(index,746)*K(index,istage,123)- Ghimj(index,747)*K(index,istage,125)- Ghimj(index,748)*K(index,istage,126)- Ghimj(index,749)*K(index,istage,127)- Ghimj(index,750)*K(index,istage,128)- Ghimj(index,751)*K(index,istage,129) - Ghimj(index,752)*K(index,istage,131)- Ghimj(index,753)*K(index,istage,132)- Ghimj(index,754)*K(index,istage,134)- Ghimj(index,755)*K(index,istage,135)- Ghimj(index,756)*K(index,istage,137)- Ghimj(index,757)*K(index,istage,138))/(Ghimj(index,745));
K(index,istage,117) = (K(index,istage,117)- Ghimj(index,732)*K(index,istage,121)- Ghimj(index,733)*K(index,istage,124)- Ghimj(index,734)*K(index,istage,125)- Ghimj(index,735)*K(index,istage,126)- Ghimj(index,736)*K(index,istage,127)- Ghimj(index,737)*K(index,istage,129) - Ghimj(index,738)*K(index,istage,133)- Ghimj(index,739)*K(index,istage,136)- Ghimj(index,740)*K(index,istage,137))/(Ghimj(index,731));
K(index,istage,116) = (K(index,istage,116)- Ghimj(index,715)*K(index,istage,123)- Ghimj(index,716)*K(index,istage,127)- Ghimj(index,717)*K(index,istage,128)- Ghimj(index,718)*K(index,istage,131)- Ghimj(index,719)*K(index,istage,134)- Ghimj(index,720)*K(index,istage,135) - Ghimj(index,721)*K(index,istage,138))/(Ghimj(index,714));
K(index,istage,115) = (K(index,istage,115)- Ghimj(index,707)*K(index,istage,124)- Ghimj(index,708)*K(index,istage,126)- Ghimj(index,709)*K(index,istage,127)- Ghimj(index,710)*K(index,istage,129)- Ghimj(index,711)*K(index,istage,133)- Ghimj(index,712)*K(index,istage,136) - Ghimj(index,713)*K(index,istage,137))/(Ghimj(index,706));
K(index,istage,114) = (K(index,istage,114)- Ghimj(index,698)*K(index,istage,126)- Ghimj(index,699)*K(index,istage,127)- Ghimj(index,700)*K(index,istage,129)- Ghimj(index,701)*K(index,istage,132)- Ghimj(index,702)*K(index,istage,136))/(Ghimj(index,697));
K(index,istage,113) = (K(index,istage,113)- Ghimj(index,690)*K(index,istage,124)- Ghimj(index,691)*K(index,istage,125)- Ghimj(index,692)*K(index,istage,126)- Ghimj(index,693)*K(index,istage,133)- Ghimj(index,694)*K(index,istage,135)- Ghimj(index,695)*K(index,istage,136) - Ghimj(index,696)*K(index,istage,137))/(Ghimj(index,689));
K(index,istage,112) = (K(index,istage,112)- Ghimj(index,678)*K(index,istage,116)- Ghimj(index,679)*K(index,istage,123)- Ghimj(index,680)*K(index,istage,126)- Ghimj(index,681)*K(index,istage,128)- Ghimj(index,682)*K(index,istage,134)- Ghimj(index,683)*K(index,istage,137) - Ghimj(index,684)*K(index,istage,138))/(Ghimj(index,677));
K(index,istage,111) = (K(index,istage,111)- Ghimj(index,670)*K(index,istage,115)- Ghimj(index,671)*K(index,istage,124)- Ghimj(index,672)*K(index,istage,125)- Ghimj(index,673)*K(index,istage,126)- Ghimj(index,674)*K(index,istage,133)- Ghimj(index,675)*K(index,istage,136) - Ghimj(index,676)*K(index,istage,137))/(Ghimj(index,669));
K(index,istage,110) = (K(index,istage,110)- Ghimj(index,660)*K(index,istage,124)- Ghimj(index,661)*K(index,istage,125)- Ghimj(index,662)*K(index,istage,126)- Ghimj(index,663)*K(index,istage,133)- Ghimj(index,664)*K(index,istage,136)- Ghimj(index,665)*K(index,istage,137)) /(Ghimj(index,659));
K(index,istage,109) = (K(index,istage,109)- Ghimj(index,649)*K(index,istage,124)- Ghimj(index,650)*K(index,istage,125)- Ghimj(index,651)*K(index,istage,126)- Ghimj(index,652)*K(index,istage,133)- Ghimj(index,653)*K(index,istage,136)- Ghimj(index,654)*K(index,istage,137)) /(Ghimj(index,648));
K(index,istage,108) = (K(index,istage,108)- Ghimj(index,637)*K(index,istage,109)- Ghimj(index,638)*K(index,istage,113)- Ghimj(index,639)*K(index,istage,115)- Ghimj(index,640)*K(index,istage,124)- Ghimj(index,641)*K(index,istage,125)- Ghimj(index,642)*K(index,istage,126) - Ghimj(index,643)*K(index,istage,133)- Ghimj(index,644)*K(index,istage,135)- Ghimj(index,645)*K(index,istage,136)- Ghimj(index,646)*K(index,istage,137))/(Ghimj(index,636));
K(index,istage,107) = (K(index,istage,107)- Ghimj(index,627)*K(index,istage,124)- Ghimj(index,628)*K(index,istage,126)- Ghimj(index,629)*K(index,istage,136))/(Ghimj(index,626));
K(index,istage,106) = (K(index,istage,106)- Ghimj(index,623)*K(index,istage,124)- Ghimj(index,624)*K(index,istage,126)- Ghimj(index,625)*K(index,istage,136))/(Ghimj(index,622));
K(index,istage,105) = (K(index,istage,105)- Ghimj(index,617)*K(index,istage,128)- Ghimj(index,618)*K(index,istage,129)- Ghimj(index,619)*K(index,istage,132)- Ghimj(index,620)*K(index,istage,135)- Ghimj(index,621)*K(index,istage,138))/(Ghimj(index,616));
K(index,istage,104) = (K(index,istage,104)- Ghimj(index,611)*K(index,istage,125)- Ghimj(index,612)*K(index,istage,126)- Ghimj(index,613)*K(index,istage,127)- Ghimj(index,614)*K(index,istage,129)- Ghimj(index,615)*K(index,istage,137))/(Ghimj(index,610));
K(index,istage,103) = (K(index,istage,103)- Ghimj(index,606)*K(index,istage,124)- Ghimj(index,607)*K(index,istage,126)- Ghimj(index,608)*K(index,istage,127)- Ghimj(index,609)*K(index,istage,129))/(Ghimj(index,605));
K(index,istage,102) = (K(index,istage,102)- Ghimj(index,601)*K(index,istage,125)- Ghimj(index,602)*K(index,istage,126)- Ghimj(index,603)*K(index,istage,133)- Ghimj(index,604)*K(index,istage,137))/(Ghimj(index,600));
K(index,istage,101) = (K(index,istage,101)- Ghimj(index,587)*K(index,istage,105)- Ghimj(index,588)*K(index,istage,114)- Ghimj(index,589)*K(index,istage,116)- Ghimj(index,590)*K(index,istage,119)- Ghimj(index,591)*K(index,istage,123)- Ghimj(index,592)*K(index,istage,126) - Ghimj(index,593)*K(index,istage,128)- Ghimj(index,594)*K(index,istage,130)- Ghimj(index,595)*K(index,istage,135)- Ghimj(index,596)*K(index,istage,136)- Ghimj(index,597)*K(index,istage,138))/(Ghimj(index,586));
K(index,istage,100) = (K(index,istage,100)- Ghimj(index,574)*K(index,istage,105)- Ghimj(index,575)*K(index,istage,112)- Ghimj(index,576)*K(index,istage,116)- Ghimj(index,577)*K(index,istage,118)- Ghimj(index,578)*K(index,istage,123)- Ghimj(index,579)*K(index,istage,126) - Ghimj(index,580)*K(index,istage,127)- Ghimj(index,581)*K(index,istage,129)- Ghimj(index,582)*K(index,istage,132)- Ghimj(index,583)*K(index,istage,134)- Ghimj(index,584)*K(index,istage,138))/(Ghimj(index,573));
K(index,istage,99) = (K(index,istage,99)- Ghimj(index,566)*K(index,istage,102)- Ghimj(index,567)*K(index,istage,111)- Ghimj(index,568)*K(index,istage,125)- Ghimj(index,569)*K(index,istage,126)- Ghimj(index,570)*K(index,istage,133)- Ghimj(index,571)*K(index,istage,137)) /(Ghimj(index,565));
K(index,istage,98) = (K(index,istage,98)- Ghimj(index,558)*K(index,istage,107)- Ghimj(index,559)*K(index,istage,120)- Ghimj(index,560)*K(index,istage,124)- Ghimj(index,561)*K(index,istage,126)- Ghimj(index,562)*K(index,istage,127))/(Ghimj(index,557));
K(index,istage,97) = (K(index,istage,97)- Ghimj(index,550)*K(index,istage,98)- Ghimj(index,551)*K(index,istage,120)- Ghimj(index,552)*K(index,istage,122)- Ghimj(index,553)*K(index,istage,126)- Ghimj(index,554)*K(index,istage,127)- Ghimj(index,555)*K(index,istage,130)- Ghimj(index,556) *K(index,istage,137))/(Ghimj(index,549));
K(index,istage,96) = (K(index,istage,96)- Ghimj(index,539)*K(index,istage,107)- Ghimj(index,540)*K(index,istage,108)- Ghimj(index,541)*K(index,istage,109)- Ghimj(index,542)*K(index,istage,110)- Ghimj(index,543)*K(index,istage,113)- Ghimj(index,544)*K(index,istage,124) - Ghimj(index,545)*K(index,istage,125)- Ghimj(index,546)*K(index,istage,126)- Ghimj(index,547)*K(index,istage,133)- Ghimj(index,548)*K(index,istage,137))/(Ghimj(index,538));
K(index,istage,95) = (K(index,istage,95)- Ghimj(index,515)*K(index,istage,96)- Ghimj(index,516)*K(index,istage,98)- Ghimj(index,517)*K(index,istage,103)- Ghimj(index,518)*K(index,istage,106)- Ghimj(index,519)*K(index,istage,107)- Ghimj(index,520)*K(index,istage,109)- Ghimj(index,521) *K(index,istage,110)- Ghimj(index,522)*K(index,istage,113)- Ghimj(index,523)*K(index,istage,119)- Ghimj(index,524)*K(index,istage,121)- Ghimj(index,525)*K(index,istage,124)- Ghimj(index,526)*K(index,istage,125)- Ghimj(index,527)*K(index,istage,126) - Ghimj(index,528)*K(index,istage,127)- Ghimj(index,529)*K(index,istage,129)- Ghimj(index,530)*K(index,istage,130)- Ghimj(index,531)*K(index,istage,133)- Ghimj(index,532)*K(index,istage,135)- Ghimj(index,533)*K(index,istage,136)- Ghimj(index,534) *K(index,istage,137))/(Ghimj(index,514));
K(index,istage,94) = (K(index,istage,94)- Ghimj(index,506)*K(index,istage,125)- Ghimj(index,507)*K(index,istage,126)- Ghimj(index,508)*K(index,istage,133)- Ghimj(index,509)*K(index,istage,137))/(Ghimj(index,505));
K(index,istage,93) = (K(index,istage,93)- Ghimj(index,498)*K(index,istage,125)- Ghimj(index,499)*K(index,istage,126)- Ghimj(index,500)*K(index,istage,133)- Ghimj(index,501)*K(index,istage,137))/(Ghimj(index,497));
K(index,istage,92) = (K(index,istage,92)- Ghimj(index,490)*K(index,istage,124)- Ghimj(index,491)*K(index,istage,126)- Ghimj(index,492)*K(index,istage,133)- Ghimj(index,493)*K(index,istage,135)- Ghimj(index,494)*K(index,istage,137))/(Ghimj(index,489));
K(index,istage,91) = (K(index,istage,91)- Ghimj(index,482)*K(index,istage,106)- Ghimj(index,483)*K(index,istage,109)- Ghimj(index,484)*K(index,istage,126)- Ghimj(index,485)*K(index,istage,133)- Ghimj(index,486)*K(index,istage,136))/(Ghimj(index,481));
K(index,istage,90) = (K(index,istage,90)- Ghimj(index,470)*K(index,istage,100)- Ghimj(index,471)*K(index,istage,105)- Ghimj(index,472)*K(index,istage,112)- Ghimj(index,473)*K(index,istage,116)- Ghimj(index,474)*K(index,istage,118)- Ghimj(index,475)*K(index,istage,123) - Ghimj(index,476)*K(index,istage,127)- Ghimj(index,477)*K(index,istage,129)- Ghimj(index,478)*K(index,istage,132)- Ghimj(index,479)*K(index,istage,134)- Ghimj(index,480)*K(index,istage,138))/(Ghimj(index,469));
K(index,istage,89) = (K(index,istage,89)- Ghimj(index,458)*K(index,istage,93)- Ghimj(index,459)*K(index,istage,94)- Ghimj(index,460)*K(index,istage,102)- Ghimj(index,461)*K(index,istage,107)- Ghimj(index,462)*K(index,istage,109)- Ghimj(index,463)*K(index,istage,113)- Ghimj(index,464) *K(index,istage,117)- Ghimj(index,465)*K(index,istage,124)- Ghimj(index,466)*K(index,istage,125)- Ghimj(index,467)*K(index,istage,126))/(Ghimj(index,457));
K(index,istage,88) = (K(index,istage,88)- Ghimj(index,451)*K(index,istage,103)- Ghimj(index,452)*K(index,istage,106)- Ghimj(index,453)*K(index,istage,124)- Ghimj(index,454)*K(index,istage,126)- Ghimj(index,455)*K(index,istage,127)- Ghimj(index,456)*K(index,istage,137)) /(Ghimj(index,450));
K(index,istage,87) = (K(index,istage,87)- Ghimj(index,445)*K(index,istage,92)- Ghimj(index,446)*K(index,istage,124)- Ghimj(index,447)*K(index,istage,126)- Ghimj(index,448)*K(index,istage,135)- Ghimj(index,449)*K(index,istage,137))/(Ghimj(index,444));
K(index,istage,86) = (K(index,istage,86)- Ghimj(index,437)*K(index,istage,93)- Ghimj(index,438)*K(index,istage,125)- Ghimj(index,439)*K(index,istage,126)- Ghimj(index,440)*K(index,istage,133)- Ghimj(index,441)*K(index,istage,137))/(Ghimj(index,436));
K(index,istage,85) = (K(index,istage,85)- Ghimj(index,428)*K(index,istage,102)- Ghimj(index,429)*K(index,istage,111)- Ghimj(index,430)*K(index,istage,125)- Ghimj(index,431)*K(index,istage,126)- Ghimj(index,432)*K(index,istage,133)- Ghimj(index,433)*K(index,istage,137)) /(Ghimj(index,427));
K(index,istage,84) = (K(index,istage,84)- Ghimj(index,422)*K(index,istage,92)- Ghimj(index,423)*K(index,istage,124)- Ghimj(index,424)*K(index,istage,135)- Ghimj(index,425)*K(index,istage,137))/(Ghimj(index,421));
K(index,istage,83) = (K(index,istage,83)- Ghimj(index,417)*K(index,istage,128)- Ghimj(index,418)*K(index,istage,135)- Ghimj(index,419)*K(index,istage,136)- Ghimj(index,420)*K(index,istage,138))/(Ghimj(index,416));
K(index,istage,82) = (K(index,istage,82)- Ghimj(index,413)*K(index,istage,113)- Ghimj(index,414)*K(index,istage,126)- Ghimj(index,415)*K(index,istage,137))/(Ghimj(index,412));
K(index,istage,81) = (K(index,istage,81)- Ghimj(index,406)*K(index,istage,114)- Ghimj(index,407)*K(index,istage,124)- Ghimj(index,408)*K(index,istage,126)- Ghimj(index,409)*K(index,istage,127)- Ghimj(index,410)*K(index,istage,129)- Ghimj(index,411)*K(index,istage,136)) /(Ghimj(index,405));
K(index,istage,80) = (K(index,istage,80)- Ghimj(index,398)*K(index,istage,90)- Ghimj(index,399)*K(index,istage,112)- Ghimj(index,400)*K(index,istage,116)- Ghimj(index,401)*K(index,istage,127)- Ghimj(index,402)*K(index,istage,129)- Ghimj(index,403)*K(index,istage,134)- Ghimj(index,404) *K(index,istage,138))/(Ghimj(index,397));
K(index,istage,79) = (K(index,istage,79)- Ghimj(index,394)*K(index,istage,102)- Ghimj(index,395)*K(index,istage,126)- Ghimj(index,396)*K(index,istage,137))/(Ghimj(index,393));
K(index,istage,78) = (K(index,istage,78)- Ghimj(index,387)*K(index,istage,103)- Ghimj(index,388)*K(index,istage,106)- Ghimj(index,389)*K(index,istage,107)- Ghimj(index,390)*K(index,istage,110)- Ghimj(index,391)*K(index,istage,124)- Ghimj(index,392)*K(index,istage,126)) /(Ghimj(index,386));
K(index,istage,77) = (K(index,istage,77)- Ghimj(index,383)*K(index,istage,121)- Ghimj(index,384)*K(index,istage,126)- Ghimj(index,385)*K(index,istage,135))/(Ghimj(index,382));
K(index,istage,76) = (K(index,istage,76)- Ghimj(index,378)*K(index,istage,87)- Ghimj(index,379)*K(index,istage,126)- Ghimj(index,380)*K(index,istage,133)- Ghimj(index,381)*K(index,istage,135))/(Ghimj(index,377));
K(index,istage,75) = (K(index,istage,75)- Ghimj(index,375)*K(index,istage,120)- Ghimj(index,376)*K(index,istage,126))/(Ghimj(index,374));
K(index,istage,74) = (K(index,istage,74)- Ghimj(index,369)*K(index,istage,117)- Ghimj(index,370)*K(index,istage,121)- Ghimj(index,371)*K(index,istage,125)- Ghimj(index,372)*K(index,istage,126)- Ghimj(index,373)*K(index,istage,137))/(Ghimj(index,368));
K(index,istage,73) = (K(index,istage,73)- Ghimj(index,365)*K(index,istage,126)- Ghimj(index,366)*K(index,istage,135)- Ghimj(index,367)*K(index,istage,137))/(Ghimj(index,364));
K(index,istage,72) = (K(index,istage,72)- Ghimj(index,361)*K(index,istage,94)- Ghimj(index,362)*K(index,istage,126)- Ghimj(index,363)*K(index,istage,137))/(Ghimj(index,360));
K(index,istage,71) = (K(index,istage,71)- Ghimj(index,357)*K(index,istage,117)- Ghimj(index,358)*K(index,istage,126)- Ghimj(index,359)*K(index,istage,137))/(Ghimj(index,356));
K(index,istage,70) = (K(index,istage,70)- Ghimj(index,353)*K(index,istage,84)- Ghimj(index,354)*K(index,istage,87)- Ghimj(index,355)*K(index,istage,126))/(Ghimj(index,352));
K(index,istage,69) = (K(index,istage,69)- Ghimj(index,348)*K(index,istage,93)- Ghimj(index,349)*K(index,istage,126)- Ghimj(index,350)*K(index,istage,137))/(Ghimj(index,347));
K(index,istage,68) = (K(index,istage,68)- Ghimj(index,344)*K(index,istage,99)- Ghimj(index,345)*K(index,istage,126)- Ghimj(index,346)*K(index,istage,137))/(Ghimj(index,343));
K(index,istage,67) = (K(index,istage,67)- Ghimj(index,340)*K(index,istage,115)- Ghimj(index,341)*K(index,istage,126)- Ghimj(index,342)*K(index,istage,137))/(Ghimj(index,339));
K(index,istage,66) = (K(index,istage,66)- Ghimj(index,336)*K(index,istage,109)- Ghimj(index,337)*K(index,istage,126)- Ghimj(index,338)*K(index,istage,137))/(Ghimj(index,335));
K(index,istage,65) = (K(index,istage,65)- Ghimj(index,332)*K(index,istage,114)- Ghimj(index,333)*K(index,istage,126)- Ghimj(index,334)*K(index,istage,132))/(Ghimj(index,331));
K(index,istage,64) = (K(index,istage,64)- Ghimj(index,328)*K(index,istage,113)- Ghimj(index,329)*K(index,istage,126)- Ghimj(index,330)*K(index,istage,135))/(Ghimj(index,327));
K(index,istage,63) = (K(index,istage,63)- Ghimj(index,324)*K(index,istage,121)- Ghimj(index,325)*K(index,istage,126)- Ghimj(index,326)*K(index,istage,137))/(Ghimj(index,323));
K(index,istage,62) = (K(index,istage,62)- Ghimj(index,320)*K(index,istage,93)- Ghimj(index,321)*K(index,istage,126)- Ghimj(index,322)*K(index,istage,133))/(Ghimj(index,319));
K(index,istage,61) = (K(index,istage,61)- Ghimj(index,316)*K(index,istage,70)- Ghimj(index,317)*K(index,istage,87)- Ghimj(index,318)*K(index,istage,126))/(Ghimj(index,315));
K(index,istage,60) = (K(index,istage,60)- Ghimj(index,311)*K(index,istage,92)- Ghimj(index,312)*K(index,istage,120)- Ghimj(index,313)*K(index,istage,133)- Ghimj(index,314)*K(index,istage,135))/(Ghimj(index,310));
K(index,istage,59) = (K(index,istage,59)- Ghimj(index,307)*K(index,istage,133)- Ghimj(index,308)*K(index,istage,135))/(Ghimj(index,306));
K(index,istage,58) = (K(index,istage,58)- Ghimj(index,304)*K(index,istage,91)- Ghimj(index,305)*K(index,istage,126))/(Ghimj(index,303));
K(index,istage,57) = (K(index,istage,57)- Ghimj(index,301)*K(index,istage,120)- Ghimj(index,302)*K(index,istage,126))/(Ghimj(index,300));
K(index,istage,56) = (K(index,istage,56)- Ghimj(index,297)*K(index,istage,65)- Ghimj(index,298)*K(index,istage,81)- Ghimj(index,299)*K(index,istage,126))/(Ghimj(index,296));
K(index,istage,55) = (K(index,istage,55)- Ghimj(index,295)*K(index,istage,126))/(Ghimj(index,294));
K(index,istage,54) = (K(index,istage,54)- Ghimj(index,293)*K(index,istage,126))/(Ghimj(index,292));
K(index,istage,53) = (K(index,istage,53)- Ghimj(index,291)*K(index,istage,126))/(Ghimj(index,290));
K(index,istage,52) = (K(index,istage,52)- Ghimj(index,289)*K(index,istage,126))/(Ghimj(index,288));
K(index,istage,51) = (K(index,istage,51)- Ghimj(index,286)*K(index,istage,132)- Ghimj(index,287)*K(index,istage,134))/(Ghimj(index,285));
K(index,istage,50) = (K(index,istage,50)- Ghimj(index,283)*K(index,istage,83)- Ghimj(index,284)*K(index,istage,138))/(Ghimj(index,282));
K(index,istage,49) = (K(index,istage,49)- Ghimj(index,281)*K(index,istage,126))/(Ghimj(index,280));
K(index,istage,48) = (K(index,istage,48)- Ghimj(index,279)*K(index,istage,126))/(Ghimj(index,278));
K(index,istage,47) = (K(index,istage,47)- Ghimj(index,277)*K(index,istage,126))/(Ghimj(index,276));
K(index,istage,46) = (K(index,istage,46)- Ghimj(index,273)*K(index,istage,81)- Ghimj(index,274)*K(index,istage,124)- Ghimj(index,275)*K(index,istage,137))/(Ghimj(index,272));
K(index,istage,45) = (K(index,istage,45)- Ghimj(index,271)*K(index,istage,126))/(Ghimj(index,270));
K(index,istage,44) = (K(index,istage,44)- Ghimj(index,269)*K(index,istage,126))/(Ghimj(index,268));
K(index,istage,43) = (K(index,istage,43)- Ghimj(index,267)*K(index,istage,120))/(Ghimj(index,266));
K(index,istage,42) = (K(index,istage,42)- Ghimj(index,265)*K(index,istage,120))/(Ghimj(index,264));
K(index,istage,41) = (K(index,istage,41)- Ghimj(index,263)*K(index,istage,120))/(Ghimj(index,262));
K(index,istage,40) = (K(index,istage,40)- Ghimj(index,261)*K(index,istage,126))/(Ghimj(index,260));
K(index,istage,39) = (K(index,istage,39)- Ghimj(index,259)*K(index,istage,134))/(Ghimj(index,258));
K(index,istage,38) = (K(index,istage,38)- Ghimj(index,256)*K(index,istage,68)- Ghimj(index,257)*K(index,istage,126))/(Ghimj(index,255));
K(index,istage,37) = (K(index,istage,37)- Ghimj(index,252)*K(index,istage,52)- Ghimj(index,253)*K(index,istage,54)- Ghimj(index,254)*K(index,istage,55))/(Ghimj(index,251));
K(index,istage,36) = (K(index,istage,36)- Ghimj(index,245)*K(index,istage,44)- Ghimj(index,246)*K(index,istage,45)- Ghimj(index,247)*K(index,istage,52)- Ghimj(index,248)*K(index,istage,54)- Ghimj(index,249)*K(index,istage,55)- Ghimj(index,250)*K(index,istage,126))/(Ghimj(index,244));
K(index,istage,35) = (K(index,istage,35)- Ghimj(index,234)*K(index,istage,93)- Ghimj(index,235)*K(index,istage,94)- Ghimj(index,236)*K(index,istage,99)- Ghimj(index,237)*K(index,istage,102)- Ghimj(index,238)*K(index,istage,109)- Ghimj(index,239)*K(index,istage,113)- Ghimj(index,240) *K(index,istage,115)- Ghimj(index,241)*K(index,istage,117)- Ghimj(index,242)*K(index,istage,121)- Ghimj(index,243)*K(index,istage,133))/(Ghimj(index,233));
K(index,istage,34) = (K(index,istage,34)- Ghimj(index,207)*K(index,istage,50)- Ghimj(index,208)*K(index,istage,51)- Ghimj(index,209)*K(index,istage,59)- Ghimj(index,210)*K(index,istage,60)- Ghimj(index,211)*K(index,istage,65)- Ghimj(index,212)*K(index,istage,73)- Ghimj(index,213) *K(index,istage,76)- Ghimj(index,214)*K(index,istage,93)- Ghimj(index,215)*K(index,istage,94)- Ghimj(index,216)*K(index,istage,99)- Ghimj(index,217)*K(index,istage,100)- Ghimj(index,218)*K(index,istage,101)- Ghimj(index,219)*K(index,istage,102)- Ghimj(index,220) *K(index,istage,109)- Ghimj(index,221)*K(index,istage,113)- Ghimj(index,222)*K(index,istage,114)- Ghimj(index,223)*K(index,istage,115)- Ghimj(index,224)*K(index,istage,117)- Ghimj(index,225)*K(index,istage,121)- Ghimj(index,226)*K(index,istage,122) - Ghimj(index,227)*K(index,istage,125)- Ghimj(index,228)*K(index,istage,126)- Ghimj(index,229)*K(index,istage,127)- Ghimj(index,230)*K(index,istage,129)- Ghimj(index,231)*K(index,istage,133)- Ghimj(index,232)*K(index,istage,137))/(Ghimj(index,206));
K(index,istage,33) = (K(index,istage,33)- Ghimj(index,203)*K(index,istage,125)- Ghimj(index,204)*K(index,istage,133))/(Ghimj(index,202));
K(index,istage,32) = (K(index,istage,32)- Ghimj(index,195)*K(index,istage,41)- Ghimj(index,196)*K(index,istage,42)- Ghimj(index,197)*K(index,istage,43)- Ghimj(index,198)*K(index,istage,57)- Ghimj(index,199)*K(index,istage,75)- Ghimj(index,200)*K(index,istage,120)- Ghimj(index,201) *K(index,istage,126))/(Ghimj(index,194));
K(index,istage,31) = (K(index,istage,31)- Ghimj(index,191)*K(index,istage,53)- Ghimj(index,192)*K(index,istage,126))/(Ghimj(index,190));
K(index,istage,30) = (K(index,istage,30)- Ghimj(index,186)*K(index,istage,133)- Ghimj(index,187)*K(index,istage,137))/(Ghimj(index,185));
K(index,istage,29) = (K(index,istage,29)- Ghimj(index,183)*K(index,istage,124)- Ghimj(index,184)*K(index,istage,126))/(Ghimj(index,182));
K(index,istage,28) = (K(index,istage,28)- Ghimj(index,171)*K(index,istage,103)- Ghimj(index,172)*K(index,istage,106)- Ghimj(index,173)*K(index,istage,107)- Ghimj(index,174)*K(index,istage,110)- Ghimj(index,175)*K(index,istage,117)- Ghimj(index,176)*K(index,istage,119) - Ghimj(index,177)*K(index,istage,121)- Ghimj(index,178)*K(index,istage,124)- Ghimj(index,179)*K(index,istage,125)- Ghimj(index,180)*K(index,istage,130)- Ghimj(index,181)*K(index,istage,136))/(Ghimj(index,170));
K(index,istage,27) = (K(index,istage,27)- Ghimj(index,164)*K(index,istage,60)- Ghimj(index,165)*K(index,istage,98)- Ghimj(index,166)*K(index,istage,120)- Ghimj(index,167)*K(index,istage,124)- Ghimj(index,168)*K(index,istage,128)- Ghimj(index,169)*K(index,istage,131)) /(Ghimj(index,163));
K(index,istage,26) = (K(index,istage,26)- Ghimj(index,149)*K(index,istage,83)- Ghimj(index,150)*K(index,istage,84)- Ghimj(index,151)*K(index,istage,87)- Ghimj(index,152)*K(index,istage,92)- Ghimj(index,153)*K(index,istage,105)- Ghimj(index,154)*K(index,istage,116)- Ghimj(index,155) *K(index,istage,123)- Ghimj(index,156)*K(index,istage,124)- Ghimj(index,157)*K(index,istage,128)- Ghimj(index,158)*K(index,istage,131)- Ghimj(index,159)*K(index,istage,135)- Ghimj(index,160)*K(index,istage,136)- Ghimj(index,161)*K(index,istage,137) - Ghimj(index,162)*K(index,istage,138))/(Ghimj(index,148));
K(index,istage,25) = (K(index,istage,25)- Ghimj(index,141)*K(index,istage,97)- Ghimj(index,142)*K(index,istage,120)- Ghimj(index,143)*K(index,istage,122)- Ghimj(index,144)*K(index,istage,124)- Ghimj(index,145)*K(index,istage,126)- Ghimj(index,146)*K(index,istage,131)- Ghimj(index,147) *K(index,istage,137))/(Ghimj(index,140));
K(index,istage,24) = (K(index,istage,24)- Ghimj(index,124)*K(index,istage,39)- Ghimj(index,125)*K(index,istage,57)- Ghimj(index,126)*K(index,istage,75)- Ghimj(index,127)*K(index,istage,83)- Ghimj(index,128)*K(index,istage,105)- Ghimj(index,129)*K(index,istage,112)- Ghimj(index,130) *K(index,istage,116)- Ghimj(index,131)*K(index,istage,118)- Ghimj(index,132)*K(index,istage,120)- Ghimj(index,133)*K(index,istage,123)- Ghimj(index,134)*K(index,istage,125)- Ghimj(index,135)*K(index,istage,126)- Ghimj(index,136)*K(index,istage,131) - Ghimj(index,137)*K(index,istage,132)- Ghimj(index,138)*K(index,istage,134)- Ghimj(index,139)*K(index,istage,138))/(Ghimj(index,123));
K(index,istage,23) = (K(index,istage,23)- Ghimj(index,113)*K(index,istage,105)- Ghimj(index,114)*K(index,istage,112)- Ghimj(index,115)*K(index,istage,116)- Ghimj(index,116)*K(index,istage,118)- Ghimj(index,117)*K(index,istage,123)- Ghimj(index,118)*K(index,istage,125) - Ghimj(index,119)*K(index,istage,131)- Ghimj(index,120)*K(index,istage,132)- Ghimj(index,121)*K(index,istage,134)- Ghimj(index,122)*K(index,istage,138))/(Ghimj(index,112));
K(index,istage,22) = (K(index,istage,22)- Ghimj(index,76)*K(index,istage,39)- Ghimj(index,77)*K(index,istage,57)- Ghimj(index,78)*K(index,istage,60)- Ghimj(index,79)*K(index,istage,75)- Ghimj(index,80)*K(index,istage,83)- Ghimj(index,81)*K(index,istage,84)- Ghimj(index,82)*K(index,istage,87) - Ghimj(index,83)*K(index,istage,92)- Ghimj(index,84)*K(index,istage,97)- Ghimj(index,85)*K(index,istage,98)- Ghimj(index,86)*K(index,istage,103)- Ghimj(index,87)*K(index,istage,105)- Ghimj(index,88)*K(index,istage,106)- Ghimj(index,89)*K(index,istage,107)- Ghimj(index,90) *K(index,istage,110)- Ghimj(index,91)*K(index,istage,112)- Ghimj(index,92)*K(index,istage,116)- Ghimj(index,93)*K(index,istage,117)- Ghimj(index,94)*K(index,istage,118)- Ghimj(index,95)*K(index,istage,119)- Ghimj(index,96)*K(index,istage,120)- Ghimj(index,97) *K(index,istage,121)- Ghimj(index,98)*K(index,istage,122)- Ghimj(index,99)*K(index,istage,123)- Ghimj(index,100)*K(index,istage,124)- Ghimj(index,101)*K(index,istage,125)- Ghimj(index,102)*K(index,istage,126)- Ghimj(index,103)*K(index,istage,128)- Ghimj(index,104) *K(index,istage,130)- Ghimj(index,105)*K(index,istage,131)- Ghimj(index,106)*K(index,istage,132)- Ghimj(index,107)*K(index,istage,134)- Ghimj(index,108)*K(index,istage,135)- Ghimj(index,109)*K(index,istage,136)- Ghimj(index,110)*K(index,istage,137) - Ghimj(index,111)*K(index,istage,138))/(Ghimj(index,75));
K(index,istage,21) = (K(index,istage,21)- Ghimj(index,73)*K(index,istage,120)- Ghimj(index,74)*K(index,istage,128))/(Ghimj(index,72));
K(index,istage,20) = (K(index,istage,20)- Ghimj(index,70)*K(index,istage,124)- Ghimj(index,71)*K(index,istage,137))/(Ghimj(index,69));
K(index,istage,19) = K(index,istage,19)/ Ghimj(index,68);
K(index,istage,18) = (K(index,istage,18)- Ghimj(index,65)*K(index,istage,120)- Ghimj(index,66)*K(index,istage,126))/(Ghimj(index,64));
K(index,istage,17) = (K(index,istage,17)- Ghimj(index,63)*K(index,istage,120))/(Ghimj(index,62));
K(index,istage,16) = (K(index,istage,16)- Ghimj(index,61)*K(index,istage,120))/(Ghimj(index,60));
K(index,istage,15) = (K(index,istage,15)- Ghimj(index,59)*K(index,istage,120))/(Ghimj(index,58));
K(index,istage,14) = (K(index,istage,14)- Ghimj(index,53)*K(index,istage,15)- Ghimj(index,54)*K(index,istage,16)- Ghimj(index,55)*K(index,istage,17)- Ghimj(index,56)*K(index,istage,18)- Ghimj(index,57)*K(index,istage,120))/(Ghimj(index,52));
K(index,istage,13) = (K(index,istage,13)- Ghimj(index,49)*K(index,istage,83))/(Ghimj(index,48));
K(index,istage,12) = (K(index,istage,12)- Ghimj(index,47)*K(index,istage,83))/(Ghimj(index,46));
K(index,istage,11) = (K(index,istage,11)- Ghimj(index,44)*K(index,istage,56)- Ghimj(index,45)*K(index,istage,126))/(Ghimj(index,43));
K(index,istage,10) = (K(index,istage,10)- Ghimj(index,39)*K(index,istage,46)- Ghimj(index,40)*K(index,istage,65)- Ghimj(index,41)*K(index,istage,126)- Ghimj(index,42)*K(index,istage,137))/(Ghimj(index,38));
K(index,istage,9) = (K(index,istage,9)- Ghimj(index,30)*K(index,istage,42)- Ghimj(index,31)*K(index,istage,43)- Ghimj(index,32)*K(index,istage,52)- Ghimj(index,33)*K(index,istage,54)- Ghimj(index,34)*K(index,istage,55)- Ghimj(index,35)*K(index,istage,75)- Ghimj(index,36)*K(index,istage,120) - Ghimj(index,37)*K(index,istage,126))/(Ghimj(index,29));
K(index,istage,8) = (K(index,istage,8)- Ghimj(index,26)*K(index,istage,42)- Ghimj(index,27)*K(index,istage,43)- Ghimj(index,28)*K(index,istage,120))/(Ghimj(index,25));
K(index,istage,7) = (K(index,istage,7)- Ghimj(index,10)*K(index,istage,41)- Ghimj(index,11)*K(index,istage,42)- Ghimj(index,12)*K(index,istage,43)- Ghimj(index,13)*K(index,istage,44)- Ghimj(index,14)*K(index,istage,45)- Ghimj(index,15)*K(index,istage,52)- Ghimj(index,16)*K(index,istage,53)- Ghimj(index,17) *K(index,istage,54)- Ghimj(index,18)*K(index,istage,55)- Ghimj(index,19)*K(index,istage,57)- Ghimj(index,20)*K(index,istage,75)- Ghimj(index,21)*K(index,istage,120)- Ghimj(index,22)*K(index,istage,126))/(Ghimj(index,9));
K(index,istage,6) = K(index,istage,6)/ Ghimj(index,6);
K(index,istage,5) = K(index,istage,5)/ Ghimj(index,5);
K(index,istage,4) = K(index,istage,4)/ Ghimj(index,4);
K(index,istage,3) = K(index,istage,3)/ Ghimj(index,3);
K(index,istage,2) = K(index,istage,2)/ Ghimj(index,2);
K(index,istage,1) = K(index,istage,1)/ Ghimj(index,1);
K(index,istage,0) = K(index,istage,0)/ Ghimj(index,0);
}
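/* ros_Solve: one sparse triangular solve of the Rosenbrock stage system for
   this thread's grid cell. It sweeps the nonzero pattern of the factored
   matrix Ghimj issuing prefetches (prefetch_ll1 is assumed to be a
   software-prefetch helper defined elsewhere in this file, and LU_NONZERO the
   size of the sparse pattern), then calls kppSolve for stage istage and
   counts the solve in Nsol. */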
__device__ void ros_Solve(double * __restrict__ Ghimj, double * __restrict__ K, int &Nsol, const int istage, const int ros_S)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
#pragma unroll 4
for (int i=0;i<LU_NONZERO-16;i+=16){
prefetch_ll1(&Ghimj(index,i));
}
kppSolve(Ghimj, K, istage, ros_S);
Nsol++;
}
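/* kppDecomp: in-place sparse LU decomposition of Ghimj for one grid cell
   (one thread). The row-update blocks below are generated by KPP for this
   mechanism's fixed sparsity pattern, so the indices are hard-coded. */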
__device__ void kppDecomp(double *Ghimj, int VL_GLO)
{
double a=0.0;
double dummy, W_0, W_1, W_2, W_3, W_4, W_5, W_6, W_7, W_8, W_9, W_10, W_11, W_12, W_13, W_14, W_15, W_16, W_17, W_18, W_19, W_20, W_21, W_22, W_23, W_24, W_25, W_26, W_27, W_28, W_29, W_30, W_31, W_32, W_33, W_34, W_35, W_36, W_37, W_38, W_39, W_40, W_41, W_42, W_43, W_44, W_45, W_46, W_47, W_48, W_49, W_50, W_51, W_52, W_53, W_54, W_55, W_56, W_57, W_58, W_59, W_60, W_61, W_62, W_63, W_64, W_65, W_66, W_67, W_68, W_69, W_70, W_71, W_72, W_73, W_74, W_75, W_76, W_77, W_78, W_79, W_80, W_81, W_82, W_83, W_84, W_85, W_86, W_87, W_88, W_89, W_90, W_91, W_92, W_93, W_94, W_95, W_96, W_97, W_98, W_99, W_100, W_101, W_102, W_103, W_104, W_105, W_106, W_107, W_108, W_109, W_110, W_111, W_112, W_113, W_114, W_115, W_116, W_117, W_118, W_119, W_120, W_121, W_122, W_123, W_124, W_125, W_126, W_127, W_128, W_129, W_130, W_131, W_132, W_133, W_134, W_135, W_136, W_137, W_138, W_139, W_140, W_141;
int index = blockIdx.x*blockDim.x+threadIdx.x;
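 /* Each generated block below factors one row: the row's nonzeros are loaded
    into W_* registers, each sub-diagonal entry is replaced by its multiplier
    (via a = -W_j/pivot; W_j = -a), the multiplier times the already-factored
    pivot row is subtracted from the remaining entries (W_k = W_k + a*Ghimj(...)),
    and the updated row is written back to Ghimj. */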
W_1 = Ghimj(index,7);
W_2 = Ghimj(index,8);
W_7 = Ghimj(index,9);
W_41 = Ghimj(index,10);
W_42 = Ghimj(index,11);
W_43 = Ghimj(index,12);
W_44 = Ghimj(index,13);
W_45 = Ghimj(index,14);
W_52 = Ghimj(index,15);
W_53 = Ghimj(index,16);
W_54 = Ghimj(index,17);
W_55 = Ghimj(index,18);
W_57 = Ghimj(index,19);
W_75 = Ghimj(index,20);
W_120 = Ghimj(index,21);
W_126 = Ghimj(index,22);
a = - W_1/ Ghimj(index,1);
W_1 = -a;
a = - W_2/ Ghimj(index,2);
W_2 = -a;
Ghimj(index,7) = W_1;
Ghimj(index,8) = W_2;
Ghimj(index,9) = W_7;
Ghimj(index,10) = W_41;
Ghimj(index,11) = W_42;
Ghimj(index,12) = W_43;
Ghimj(index,13) = W_44;
Ghimj(index,14) = W_45;
Ghimj(index,15) = W_52;
Ghimj(index,16) = W_53;
Ghimj(index,17) = W_54;
Ghimj(index,18) = W_55;
Ghimj(index,19) = W_57;
Ghimj(index,20) = W_75;
Ghimj(index,21) = W_120;
Ghimj(index,22) = W_126;
W_1 = Ghimj(index,23);
W_2 = Ghimj(index,24);
W_8 = Ghimj(index,25);
W_42 = Ghimj(index,26);
W_43 = Ghimj(index,27);
W_120 = Ghimj(index,28);
a = - W_1/ Ghimj(index,1);
W_1 = -a;
a = - W_2/ Ghimj(index,2);
W_2 = -a;
Ghimj(index,23) = W_1;
Ghimj(index,24) = W_2;
Ghimj(index,25) = W_8;
Ghimj(index,26) = W_42;
Ghimj(index,27) = W_43;
Ghimj(index,28) = W_120;
W_5 = Ghimj(index,50);
W_6 = Ghimj(index,51);
W_14 = Ghimj(index,52);
W_15 = Ghimj(index,53);
W_16 = Ghimj(index,54);
W_17 = Ghimj(index,55);
W_18 = Ghimj(index,56);
W_120 = Ghimj(index,57);
a = - W_5/ Ghimj(index,5);
W_5 = -a;
a = - W_6/ Ghimj(index,6);
W_6 = -a;
Ghimj(index,50) = W_5;
Ghimj(index,51) = W_6;
Ghimj(index,52) = W_14;
Ghimj(index,53) = W_15;
Ghimj(index,54) = W_16;
Ghimj(index,55) = W_17;
Ghimj(index,56) = W_18;
Ghimj(index,57) = W_120;
W_4 = Ghimj(index,67);
W_19 = Ghimj(index,68);
a = - W_4/ Ghimj(index,4);
W_4 = -a;
Ghimj(index,67) = W_4;
Ghimj(index,68) = W_19;
W_1 = Ghimj(index,188);
W_2 = Ghimj(index,189);
W_31 = Ghimj(index,190);
W_53 = Ghimj(index,191);
W_126 = Ghimj(index,192);
a = - W_1/ Ghimj(index,1);
W_1 = -a;
a = - W_2/ Ghimj(index,2);
W_2 = -a;
Ghimj(index,188) = W_1;
Ghimj(index,189) = W_2;
Ghimj(index,190) = W_31;
Ghimj(index,191) = W_53;
Ghimj(index,192) = W_126;
W_1 = Ghimj(index,193);
W_32 = Ghimj(index,194);
W_41 = Ghimj(index,195);
W_42 = Ghimj(index,196);
W_43 = Ghimj(index,197);
W_57 = Ghimj(index,198);
W_75 = Ghimj(index,199);
W_120 = Ghimj(index,200);
W_126 = Ghimj(index,201);
a = - W_1/ Ghimj(index,1);
W_1 = -a;
Ghimj(index,193) = W_1;
Ghimj(index,194) = W_32;
Ghimj(index,195) = W_41;
Ghimj(index,196) = W_42;
Ghimj(index,197) = W_43;
Ghimj(index,198) = W_57;
Ghimj(index,199) = W_75;
Ghimj(index,200) = W_120;
Ghimj(index,201) = W_126;
W_0 = Ghimj(index,205);
W_34 = Ghimj(index,206);
W_50 = Ghimj(index,207);
W_51 = Ghimj(index,208);
W_59 = Ghimj(index,209);
W_60 = Ghimj(index,210);
W_65 = Ghimj(index,211);
W_73 = Ghimj(index,212);
W_76 = Ghimj(index,213);
W_93 = Ghimj(index,214);
W_94 = Ghimj(index,215);
W_99 = Ghimj(index,216);
W_100 = Ghimj(index,217);
W_101 = Ghimj(index,218);
W_102 = Ghimj(index,219);
W_109 = Ghimj(index,220);
W_113 = Ghimj(index,221);
W_114 = Ghimj(index,222);
W_115 = Ghimj(index,223);
W_117 = Ghimj(index,224);
W_121 = Ghimj(index,225);
W_122 = Ghimj(index,226);
W_125 = Ghimj(index,227);
W_126 = Ghimj(index,228);
W_127 = Ghimj(index,229);
W_129 = Ghimj(index,230);
W_133 = Ghimj(index,231);
W_137 = Ghimj(index,232);
a = - W_0/ Ghimj(index,0);
W_0 = -a;
Ghimj(index,205) = W_0;
Ghimj(index,206) = W_34;
Ghimj(index,207) = W_50;
Ghimj(index,208) = W_51;
Ghimj(index,209) = W_59;
Ghimj(index,210) = W_60;
Ghimj(index,211) = W_65;
Ghimj(index,212) = W_73;
Ghimj(index,213) = W_76;
Ghimj(index,214) = W_93;
Ghimj(index,215) = W_94;
Ghimj(index,216) = W_99;
Ghimj(index,217) = W_100;
Ghimj(index,218) = W_101;
Ghimj(index,219) = W_102;
Ghimj(index,220) = W_109;
Ghimj(index,221) = W_113;
Ghimj(index,222) = W_114;
Ghimj(index,223) = W_115;
Ghimj(index,224) = W_117;
Ghimj(index,225) = W_121;
Ghimj(index,226) = W_122;
Ghimj(index,227) = W_125;
Ghimj(index,228) = W_126;
Ghimj(index,229) = W_127;
Ghimj(index,230) = W_129;
Ghimj(index,231) = W_133;
Ghimj(index,232) = W_137;
W_59 = Ghimj(index,309);
W_60 = Ghimj(index,310);
W_92 = Ghimj(index,311);
W_120 = Ghimj(index,312);
W_133 = Ghimj(index,313);
W_135 = Ghimj(index,314);
a = - W_59/ Ghimj(index,306);
W_59 = -a;
W_133 = W_133+ a *Ghimj(index,307);
W_135 = W_135+ a *Ghimj(index,308);
Ghimj(index,309) = W_59;
Ghimj(index,310) = W_60;
Ghimj(index,311) = W_92;
Ghimj(index,312) = W_120;
Ghimj(index,313) = W_133;
Ghimj(index,314) = W_135;
W_61 = Ghimj(index,351);
W_70 = Ghimj(index,352);
W_84 = Ghimj(index,353);
W_87 = Ghimj(index,354);
W_126 = Ghimj(index,355);
a = - W_61/ Ghimj(index,315);
W_61 = -a;
W_70 = W_70+ a *Ghimj(index,316);
W_87 = W_87+ a *Ghimj(index,317);
W_126 = W_126+ a *Ghimj(index,318);
Ghimj(index,351) = W_61;
Ghimj(index,352) = W_70;
Ghimj(index,353) = W_84;
Ghimj(index,354) = W_87;
Ghimj(index,355) = W_126;
W_79 = Ghimj(index,426);
W_85 = Ghimj(index,427);
W_102 = Ghimj(index,428);
W_111 = Ghimj(index,429);
W_125 = Ghimj(index,430);
W_126 = Ghimj(index,431);
W_133 = Ghimj(index,432);
W_137 = Ghimj(index,433);
a = - W_79/ Ghimj(index,393);
W_79 = -a;
W_102 = W_102+ a *Ghimj(index,394);
W_126 = W_126+ a *Ghimj(index,395);
W_137 = W_137+ a *Ghimj(index,396);
Ghimj(index,426) = W_79;
Ghimj(index,427) = W_85;
Ghimj(index,428) = W_102;
Ghimj(index,429) = W_111;
Ghimj(index,430) = W_125;
Ghimj(index,431) = W_126;
Ghimj(index,432) = W_133;
Ghimj(index,433) = W_137;
W_62 = Ghimj(index,434);
W_69 = Ghimj(index,435);
W_86 = Ghimj(index,436);
W_93 = Ghimj(index,437);
W_125 = Ghimj(index,438);
W_126 = Ghimj(index,439);
W_133 = Ghimj(index,440);
W_137 = Ghimj(index,441);
a = - W_62/ Ghimj(index,319);
W_62 = -a;
W_93 = W_93+ a *Ghimj(index,320);
W_126 = W_126+ a *Ghimj(index,321);
W_133 = W_133+ a *Ghimj(index,322);
a = - W_69/ Ghimj(index,347);
W_69 = -a;
W_93 = W_93+ a *Ghimj(index,348);
W_126 = W_126+ a *Ghimj(index,349);
W_137 = W_137+ a *Ghimj(index,350);
Ghimj(index,434) = W_62;
Ghimj(index,435) = W_69;
Ghimj(index,436) = W_86;
Ghimj(index,437) = W_93;
Ghimj(index,438) = W_125;
Ghimj(index,439) = W_126;
Ghimj(index,440) = W_133;
Ghimj(index,441) = W_137;
W_70 = Ghimj(index,442);
W_84 = Ghimj(index,443);
W_87 = Ghimj(index,444);
W_92 = Ghimj(index,445);
W_124 = Ghimj(index,446);
W_126 = Ghimj(index,447);
W_135 = Ghimj(index,448);
W_137 = Ghimj(index,449);
a = - W_70/ Ghimj(index,352);
W_70 = -a;
W_84 = W_84+ a *Ghimj(index,353);
W_87 = W_87+ a *Ghimj(index,354);
W_126 = W_126+ a *Ghimj(index,355);
a = - W_84/ Ghimj(index,421);
W_84 = -a;
W_92 = W_92+ a *Ghimj(index,422);
W_124 = W_124+ a *Ghimj(index,423);
W_135 = W_135+ a *Ghimj(index,424);
W_137 = W_137+ a *Ghimj(index,425);
Ghimj(index,442) = W_70;
Ghimj(index,443) = W_84;
Ghimj(index,444) = W_87;
Ghimj(index,445) = W_92;
Ghimj(index,446) = W_124;
Ghimj(index,447) = W_126;
Ghimj(index,448) = W_135;
Ghimj(index,449) = W_137;
W_80 = Ghimj(index,468);
W_90 = Ghimj(index,469);
W_100 = Ghimj(index,470);
W_105 = Ghimj(index,471);
W_112 = Ghimj(index,472);
W_116 = Ghimj(index,473);
W_118 = Ghimj(index,474);
W_123 = Ghimj(index,475);
W_127 = Ghimj(index,476);
W_129 = Ghimj(index,477);
W_132 = Ghimj(index,478);
W_134 = Ghimj(index,479);
W_138 = Ghimj(index,480);
a = - W_80/ Ghimj(index,397);
W_80 = -a;
W_90 = W_90+ a *Ghimj(index,398);
W_112 = W_112+ a *Ghimj(index,399);
W_116 = W_116+ a *Ghimj(index,400);
W_127 = W_127+ a *Ghimj(index,401);
W_129 = W_129+ a *Ghimj(index,402);
W_134 = W_134+ a *Ghimj(index,403);
W_138 = W_138+ a *Ghimj(index,404);
Ghimj(index,468) = W_80;
Ghimj(index,469) = W_90;
Ghimj(index,470) = W_100;
Ghimj(index,471) = W_105;
Ghimj(index,472) = W_112;
Ghimj(index,473) = W_116;
Ghimj(index,474) = W_118;
Ghimj(index,475) = W_123;
Ghimj(index,476) = W_127;
Ghimj(index,477) = W_129;
Ghimj(index,478) = W_132;
Ghimj(index,479) = W_134;
Ghimj(index,480) = W_138;
W_47 = Ghimj(index,487);
W_84 = Ghimj(index,488);
W_92 = Ghimj(index,489);
W_124 = Ghimj(index,490);
W_126 = Ghimj(index,491);
W_133 = Ghimj(index,492);
W_135 = Ghimj(index,493);
W_137 = Ghimj(index,494);
a = - W_47/ Ghimj(index,276);
W_47 = -a;
W_126 = W_126+ a *Ghimj(index,277);
a = - W_84/ Ghimj(index,421);
W_84 = -a;
W_92 = W_92+ a *Ghimj(index,422);
W_124 = W_124+ a *Ghimj(index,423);
W_135 = W_135+ a *Ghimj(index,424);
W_137 = W_137+ a *Ghimj(index,425);
Ghimj(index,487) = W_47;
Ghimj(index,488) = W_84;
Ghimj(index,489) = W_92;
Ghimj(index,490) = W_124;
Ghimj(index,491) = W_126;
Ghimj(index,492) = W_133;
Ghimj(index,493) = W_135;
Ghimj(index,494) = W_137;
W_49 = Ghimj(index,495);
W_69 = Ghimj(index,496);
W_93 = Ghimj(index,497);
W_125 = Ghimj(index,498);
W_126 = Ghimj(index,499);
W_133 = Ghimj(index,500);
W_137 = Ghimj(index,501);
a = - W_49/ Ghimj(index,280);
W_49 = -a;
W_126 = W_126+ a *Ghimj(index,281);
a = - W_69/ Ghimj(index,347);
W_69 = -a;
W_93 = W_93+ a *Ghimj(index,348);
W_126 = W_126+ a *Ghimj(index,349);
W_137 = W_137+ a *Ghimj(index,350);
Ghimj(index,495) = W_49;
Ghimj(index,496) = W_69;
Ghimj(index,497) = W_93;
Ghimj(index,498) = W_125;
Ghimj(index,499) = W_126;
Ghimj(index,500) = W_133;
Ghimj(index,501) = W_137;
W_72 = Ghimj(index,502);
W_86 = Ghimj(index,503);
W_93 = Ghimj(index,504);
W_94 = Ghimj(index,505);
W_125 = Ghimj(index,506);
W_126 = Ghimj(index,507);
W_133 = Ghimj(index,508);
W_137 = Ghimj(index,509);
a = - W_72/ Ghimj(index,360);
W_72 = -a;
W_94 = W_94+ a *Ghimj(index,361);
W_126 = W_126+ a *Ghimj(index,362);
W_137 = W_137+ a *Ghimj(index,363);
a = - W_86/ Ghimj(index,436);
W_86 = -a;
W_93 = W_93+ a *Ghimj(index,437);
W_125 = W_125+ a *Ghimj(index,438);
W_126 = W_126+ a *Ghimj(index,439);
W_133 = W_133+ a *Ghimj(index,440);
W_137 = W_137+ a *Ghimj(index,441);
a = - W_93/ Ghimj(index,497);
W_93 = -a;
W_125 = W_125+ a *Ghimj(index,498);
W_126 = W_126+ a *Ghimj(index,499);
W_133 = W_133+ a *Ghimj(index,500);
W_137 = W_137+ a *Ghimj(index,501);
Ghimj(index,502) = W_72;
Ghimj(index,503) = W_86;
Ghimj(index,504) = W_93;
Ghimj(index,505) = W_94;
Ghimj(index,506) = W_125;
Ghimj(index,507) = W_126;
Ghimj(index,508) = W_133;
Ghimj(index,509) = W_137;
W_58 = Ghimj(index,510);
W_77 = Ghimj(index,511);
W_82 = Ghimj(index,512);
W_91 = Ghimj(index,513);
W_95 = Ghimj(index,514);
W_96 = Ghimj(index,515);
W_98 = Ghimj(index,516);
W_103 = Ghimj(index,517);
W_106 = Ghimj(index,518);
W_107 = Ghimj(index,519);
W_109 = Ghimj(index,520);
W_110 = Ghimj(index,521);
W_113 = Ghimj(index,522);
W_119 = Ghimj(index,523);
W_121 = Ghimj(index,524);
W_124 = Ghimj(index,525);
W_125 = Ghimj(index,526);
W_126 = Ghimj(index,527);
W_127 = Ghimj(index,528);
W_129 = Ghimj(index,529);
W_130 = Ghimj(index,530);
W_133 = Ghimj(index,531);
W_135 = Ghimj(index,532);
W_136 = Ghimj(index,533);
W_137 = Ghimj(index,534);
a = - W_58/ Ghimj(index,303);
W_58 = -a;
W_91 = W_91+ a *Ghimj(index,304);
W_126 = W_126+ a *Ghimj(index,305);
a = - W_77/ Ghimj(index,382);
W_77 = -a;
W_121 = W_121+ a *Ghimj(index,383);
W_126 = W_126+ a *Ghimj(index,384);
W_135 = W_135+ a *Ghimj(index,385);
a = - W_82/ Ghimj(index,412);
W_82 = -a;
W_113 = W_113+ a *Ghimj(index,413);
W_126 = W_126+ a *Ghimj(index,414);
W_137 = W_137+ a *Ghimj(index,415);
a = - W_91/ Ghimj(index,481);
W_91 = -a;
W_106 = W_106+ a *Ghimj(index,482);
W_109 = W_109+ a *Ghimj(index,483);
W_126 = W_126+ a *Ghimj(index,484);
W_133 = W_133+ a *Ghimj(index,485);
W_136 = W_136+ a *Ghimj(index,486);
Ghimj(index,510) = W_58;
Ghimj(index,511) = W_77;
Ghimj(index,512) = W_82;
Ghimj(index,513) = W_91;
Ghimj(index,514) = W_95;
Ghimj(index,515) = W_96;
Ghimj(index,516) = W_98;
Ghimj(index,517) = W_103;
Ghimj(index,518) = W_106;
Ghimj(index,519) = W_107;
Ghimj(index,520) = W_109;
Ghimj(index,521) = W_110;
Ghimj(index,522) = W_113;
Ghimj(index,523) = W_119;
Ghimj(index,524) = W_121;
Ghimj(index,525) = W_124;
Ghimj(index,526) = W_125;
Ghimj(index,527) = W_126;
Ghimj(index,528) = W_127;
Ghimj(index,529) = W_129;
Ghimj(index,530) = W_130;
Ghimj(index,531) = W_133;
Ghimj(index,532) = W_135;
Ghimj(index,533) = W_136;
Ghimj(index,534) = W_137;
W_72 = Ghimj(index,535);
W_82 = Ghimj(index,536);
W_94 = Ghimj(index,537);
W_96 = Ghimj(index,538);
W_107 = Ghimj(index,539);
W_108 = Ghimj(index,540);
W_109 = Ghimj(index,541);
W_110 = Ghimj(index,542);
W_113 = Ghimj(index,543);
W_124 = Ghimj(index,544);
W_125 = Ghimj(index,545);
W_126 = Ghimj(index,546);
W_133 = Ghimj(index,547);
W_137 = Ghimj(index,548);
a = - W_72/ Ghimj(index,360);
W_72 = -a;
W_94 = W_94+ a *Ghimj(index,361);
W_126 = W_126+ a *Ghimj(index,362);
W_137 = W_137+ a *Ghimj(index,363);
a = - W_82/ Ghimj(index,412);
W_82 = -a;
W_113 = W_113+ a *Ghimj(index,413);
W_126 = W_126+ a *Ghimj(index,414);
W_137 = W_137+ a *Ghimj(index,415);
a = - W_94/ Ghimj(index,505);
W_94 = -a;
W_125 = W_125+ a *Ghimj(index,506);
W_126 = W_126+ a *Ghimj(index,507);
W_133 = W_133+ a *Ghimj(index,508);
W_137 = W_137+ a *Ghimj(index,509);
Ghimj(index,535) = W_72;
Ghimj(index,536) = W_82;
Ghimj(index,537) = W_94;
Ghimj(index,538) = W_96;
Ghimj(index,539) = W_107;
Ghimj(index,540) = W_108;
Ghimj(index,541) = W_109;
Ghimj(index,542) = W_110;
Ghimj(index,543) = W_113;
Ghimj(index,544) = W_124;
Ghimj(index,545) = W_125;
Ghimj(index,546) = W_126;
Ghimj(index,547) = W_133;
Ghimj(index,548) = W_137;
W_68 = Ghimj(index,563);
W_85 = Ghimj(index,564);
W_99 = Ghimj(index,565);
W_102 = Ghimj(index,566);
W_111 = Ghimj(index,567);
W_125 = Ghimj(index,568);
W_126 = Ghimj(index,569);
W_133 = Ghimj(index,570);
W_137 = Ghimj(index,571);
a = - W_68/ Ghimj(index,343);
W_68 = -a;
W_99 = W_99+ a *Ghimj(index,344);
W_126 = W_126+ a *Ghimj(index,345);
W_137 = W_137+ a *Ghimj(index,346);
a = - W_85/ Ghimj(index,427);
W_85 = -a;
W_102 = W_102+ a *Ghimj(index,428);
W_111 = W_111+ a *Ghimj(index,429);
W_125 = W_125+ a *Ghimj(index,430);
W_126 = W_126+ a *Ghimj(index,431);
W_133 = W_133+ a *Ghimj(index,432);
W_137 = W_137+ a *Ghimj(index,433);
Ghimj(index,563) = W_68;
Ghimj(index,564) = W_85;
Ghimj(index,565) = W_99;
Ghimj(index,566) = W_102;
Ghimj(index,567) = W_111;
Ghimj(index,568) = W_125;
Ghimj(index,569) = W_126;
Ghimj(index,570) = W_133;
Ghimj(index,571) = W_137;
W_90 = Ghimj(index,572);
W_100 = Ghimj(index,573);
W_105 = Ghimj(index,574);
W_112 = Ghimj(index,575);
W_116 = Ghimj(index,576);
W_118 = Ghimj(index,577);
W_123 = Ghimj(index,578);
W_126 = Ghimj(index,579);
W_127 = Ghimj(index,580);
W_129 = Ghimj(index,581);
W_132 = Ghimj(index,582);
W_134 = Ghimj(index,583);
W_138 = Ghimj(index,584);
a = - W_90/ Ghimj(index,469);
W_90 = -a;
W_100 = W_100+ a *Ghimj(index,470);
W_105 = W_105+ a *Ghimj(index,471);
W_112 = W_112+ a *Ghimj(index,472);
W_116 = W_116+ a *Ghimj(index,473);
W_118 = W_118+ a *Ghimj(index,474);
W_123 = W_123+ a *Ghimj(index,475);
W_127 = W_127+ a *Ghimj(index,476);
W_129 = W_129+ a *Ghimj(index,477);
W_132 = W_132+ a *Ghimj(index,478);
W_134 = W_134+ a *Ghimj(index,479);
W_138 = W_138+ a *Ghimj(index,480);
Ghimj(index,572) = W_90;
Ghimj(index,573) = W_100;
Ghimj(index,574) = W_105;
Ghimj(index,575) = W_112;
Ghimj(index,576) = W_116;
Ghimj(index,577) = W_118;
Ghimj(index,578) = W_123;
Ghimj(index,579) = W_126;
Ghimj(index,580) = W_127;
Ghimj(index,581) = W_129;
Ghimj(index,582) = W_132;
Ghimj(index,583) = W_134;
Ghimj(index,584) = W_138;
W_83 = Ghimj(index,585);
W_101 = Ghimj(index,586);
W_105 = Ghimj(index,587);
W_114 = Ghimj(index,588);
W_116 = Ghimj(index,589);
W_119 = Ghimj(index,590);
W_123 = Ghimj(index,591);
W_126 = Ghimj(index,592);
W_128 = Ghimj(index,593);
W_130 = Ghimj(index,594);
W_135 = Ghimj(index,595);
W_136 = Ghimj(index,596);
W_138 = Ghimj(index,597);
a = - W_83/ Ghimj(index,416);
W_83 = -a;
W_128 = W_128+ a *Ghimj(index,417);
W_135 = W_135+ a *Ghimj(index,418);
W_136 = W_136+ a *Ghimj(index,419);
W_138 = W_138+ a *Ghimj(index,420);
Ghimj(index,585) = W_83;
Ghimj(index,586) = W_101;
Ghimj(index,587) = W_105;
Ghimj(index,588) = W_114;
Ghimj(index,589) = W_116;
Ghimj(index,590) = W_119;
Ghimj(index,591) = W_123;
Ghimj(index,592) = W_126;
Ghimj(index,593) = W_128;
Ghimj(index,594) = W_130;
Ghimj(index,595) = W_135;
Ghimj(index,596) = W_136;
Ghimj(index,597) = W_138;
W_40 = Ghimj(index,598);
W_79 = Ghimj(index,599);
W_102 = Ghimj(index,600);
W_125 = Ghimj(index,601);
W_126 = Ghimj(index,602);
W_133 = Ghimj(index,603);
W_137 = Ghimj(index,604);
a = - W_40/ Ghimj(index,260);
W_40 = -a;
W_126 = W_126+ a *Ghimj(index,261);
a = - W_79/ Ghimj(index,393);
W_79 = -a;
W_102 = W_102+ a *Ghimj(index,394);
W_126 = W_126+ a *Ghimj(index,395);
W_137 = W_137+ a *Ghimj(index,396);
Ghimj(index,598) = W_40;
Ghimj(index,599) = W_79;
Ghimj(index,600) = W_102;
Ghimj(index,601) = W_125;
Ghimj(index,602) = W_126;
Ghimj(index,603) = W_133;
Ghimj(index,604) = W_137;
W_64 = Ghimj(index,630);
W_67 = Ghimj(index,631);
W_82 = Ghimj(index,632);
W_91 = Ghimj(index,633);
W_94 = Ghimj(index,634);
W_106 = Ghimj(index,635);
W_108 = Ghimj(index,636);
W_109 = Ghimj(index,637);
W_113 = Ghimj(index,638);
W_115 = Ghimj(index,639);
W_124 = Ghimj(index,640);
W_125 = Ghimj(index,641);
W_126 = Ghimj(index,642);
W_133 = Ghimj(index,643);
W_135 = Ghimj(index,644);
W_136 = Ghimj(index,645);
W_137 = Ghimj(index,646);
a = - W_64/ Ghimj(index,327);
W_64 = -a;
W_113 = W_113+ a *Ghimj(index,328);
W_126 = W_126+ a *Ghimj(index,329);
W_135 = W_135+ a *Ghimj(index,330);
a = - W_67/ Ghimj(index,339);
W_67 = -a;
W_115 = W_115+ a *Ghimj(index,340);
W_126 = W_126+ a *Ghimj(index,341);
W_137 = W_137+ a *Ghimj(index,342);
a = - W_82/ Ghimj(index,412);
W_82 = -a;
W_113 = W_113+ a *Ghimj(index,413);
W_126 = W_126+ a *Ghimj(index,414);
W_137 = W_137+ a *Ghimj(index,415);
a = - W_91/ Ghimj(index,481);
W_91 = -a;
W_106 = W_106+ a *Ghimj(index,482);
W_109 = W_109+ a *Ghimj(index,483);
W_126 = W_126+ a *Ghimj(index,484);
W_133 = W_133+ a *Ghimj(index,485);
W_136 = W_136+ a *Ghimj(index,486);
a = - W_94/ Ghimj(index,505);
W_94 = -a;
W_125 = W_125+ a *Ghimj(index,506);
W_126 = W_126+ a *Ghimj(index,507);
W_133 = W_133+ a *Ghimj(index,508);
W_137 = W_137+ a *Ghimj(index,509);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
Ghimj(index,630) = W_64;
Ghimj(index,631) = W_67;
Ghimj(index,632) = W_82;
Ghimj(index,633) = W_91;
Ghimj(index,634) = W_94;
Ghimj(index,635) = W_106;
Ghimj(index,636) = W_108;
Ghimj(index,637) = W_109;
Ghimj(index,638) = W_113;
Ghimj(index,639) = W_115;
Ghimj(index,640) = W_124;
Ghimj(index,641) = W_125;
Ghimj(index,642) = W_126;
Ghimj(index,643) = W_133;
Ghimj(index,644) = W_135;
Ghimj(index,645) = W_136;
Ghimj(index,646) = W_137;
W_106 = Ghimj(index,647);
W_109 = Ghimj(index,648);
W_124 = Ghimj(index,649);
W_125 = Ghimj(index,650);
W_126 = Ghimj(index,651);
W_133 = Ghimj(index,652);
W_136 = Ghimj(index,653);
W_137 = Ghimj(index,654);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
Ghimj(index,647) = W_106;
Ghimj(index,648) = W_109;
Ghimj(index,649) = W_124;
Ghimj(index,650) = W_125;
Ghimj(index,651) = W_126;
Ghimj(index,652) = W_133;
Ghimj(index,653) = W_136;
Ghimj(index,654) = W_137;
W_66 = Ghimj(index,655);
W_91 = Ghimj(index,656);
W_106 = Ghimj(index,657);
W_109 = Ghimj(index,658);
W_110 = Ghimj(index,659);
W_124 = Ghimj(index,660);
W_125 = Ghimj(index,661);
W_126 = Ghimj(index,662);
W_133 = Ghimj(index,663);
W_136 = Ghimj(index,664);
W_137 = Ghimj(index,665);
a = - W_66/ Ghimj(index,335);
W_66 = -a;
W_109 = W_109+ a *Ghimj(index,336);
W_126 = W_126+ a *Ghimj(index,337);
W_137 = W_137+ a *Ghimj(index,338);
a = - W_91/ Ghimj(index,481);
W_91 = -a;
W_106 = W_106+ a *Ghimj(index,482);
W_109 = W_109+ a *Ghimj(index,483);
W_126 = W_126+ a *Ghimj(index,484);
W_133 = W_133+ a *Ghimj(index,485);
W_136 = W_136+ a *Ghimj(index,486);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
a = - W_109/ Ghimj(index,648);
W_109 = -a;
W_124 = W_124+ a *Ghimj(index,649);
W_125 = W_125+ a *Ghimj(index,650);
W_126 = W_126+ a *Ghimj(index,651);
W_133 = W_133+ a *Ghimj(index,652);
W_136 = W_136+ a *Ghimj(index,653);
W_137 = W_137+ a *Ghimj(index,654);
Ghimj(index,655) = W_66;
Ghimj(index,656) = W_91;
Ghimj(index,657) = W_106;
Ghimj(index,658) = W_109;
Ghimj(index,659) = W_110;
Ghimj(index,660) = W_124;
Ghimj(index,661) = W_125;
Ghimj(index,662) = W_126;
Ghimj(index,663) = W_133;
Ghimj(index,664) = W_136;
Ghimj(index,665) = W_137;
W_99 = Ghimj(index,666);
W_102 = Ghimj(index,667);
W_107 = Ghimj(index,668);
W_111 = Ghimj(index,669);
W_115 = Ghimj(index,670);
W_124 = Ghimj(index,671);
W_125 = Ghimj(index,672);
W_126 = Ghimj(index,673);
W_133 = Ghimj(index,674);
W_136 = Ghimj(index,675);
W_137 = Ghimj(index,676);
a = - W_99/ Ghimj(index,565);
W_99 = -a;
W_102 = W_102+ a *Ghimj(index,566);
W_111 = W_111+ a *Ghimj(index,567);
W_125 = W_125+ a *Ghimj(index,568);
W_126 = W_126+ a *Ghimj(index,569);
W_133 = W_133+ a *Ghimj(index,570);
W_137 = W_137+ a *Ghimj(index,571);
a = - W_102/ Ghimj(index,600);
W_102 = -a;
W_125 = W_125+ a *Ghimj(index,601);
W_126 = W_126+ a *Ghimj(index,602);
W_133 = W_133+ a *Ghimj(index,603);
W_137 = W_137+ a *Ghimj(index,604);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
Ghimj(index,666) = W_99;
Ghimj(index,667) = W_102;
Ghimj(index,668) = W_107;
Ghimj(index,669) = W_111;
Ghimj(index,670) = W_115;
Ghimj(index,671) = W_124;
Ghimj(index,672) = W_125;
Ghimj(index,673) = W_126;
Ghimj(index,674) = W_133;
Ghimj(index,675) = W_136;
Ghimj(index,676) = W_137;
W_64 = Ghimj(index,685);
W_82 = Ghimj(index,686);
W_106 = Ghimj(index,687);
W_110 = Ghimj(index,688);
W_113 = Ghimj(index,689);
W_124 = Ghimj(index,690);
W_125 = Ghimj(index,691);
W_126 = Ghimj(index,692);
W_133 = Ghimj(index,693);
W_135 = Ghimj(index,694);
W_136 = Ghimj(index,695);
W_137 = Ghimj(index,696);
a = - W_64/ Ghimj(index,327);
W_64 = -a;
W_113 = W_113+ a *Ghimj(index,328);
W_126 = W_126+ a *Ghimj(index,329);
W_135 = W_135+ a *Ghimj(index,330);
a = - W_82/ Ghimj(index,412);
W_82 = -a;
W_113 = W_113+ a *Ghimj(index,413);
W_126 = W_126+ a *Ghimj(index,414);
W_137 = W_137+ a *Ghimj(index,415);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
a = - W_110/ Ghimj(index,659);
W_110 = -a;
W_124 = W_124+ a *Ghimj(index,660);
W_125 = W_125+ a *Ghimj(index,661);
W_126 = W_126+ a *Ghimj(index,662);
W_133 = W_133+ a *Ghimj(index,663);
W_136 = W_136+ a *Ghimj(index,664);
W_137 = W_137+ a *Ghimj(index,665);
Ghimj(index,685) = W_64;
Ghimj(index,686) = W_82;
Ghimj(index,687) = W_106;
Ghimj(index,688) = W_110;
Ghimj(index,689) = W_113;
Ghimj(index,690) = W_124;
Ghimj(index,691) = W_125;
Ghimj(index,692) = W_126;
Ghimj(index,693) = W_133;
Ghimj(index,694) = W_135;
Ghimj(index,695) = W_136;
Ghimj(index,696) = W_137;
W_67 = Ghimj(index,703);
W_103 = Ghimj(index,704);
W_107 = Ghimj(index,705);
W_115 = Ghimj(index,706);
W_124 = Ghimj(index,707);
W_126 = Ghimj(index,708);
W_127 = Ghimj(index,709);
W_129 = Ghimj(index,710);
W_133 = Ghimj(index,711);
W_136 = Ghimj(index,712);
W_137 = Ghimj(index,713);
a = - W_67/ Ghimj(index,339);
W_67 = -a;
W_115 = W_115+ a *Ghimj(index,340);
W_126 = W_126+ a *Ghimj(index,341);
W_137 = W_137+ a *Ghimj(index,342);
a = - W_103/ Ghimj(index,605);
W_103 = -a;
W_124 = W_124+ a *Ghimj(index,606);
W_126 = W_126+ a *Ghimj(index,607);
W_127 = W_127+ a *Ghimj(index,608);
W_129 = W_129+ a *Ghimj(index,609);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
Ghimj(index,703) = W_67;
Ghimj(index,704) = W_103;
Ghimj(index,705) = W_107;
Ghimj(index,706) = W_115;
Ghimj(index,707) = W_124;
Ghimj(index,708) = W_126;
Ghimj(index,709) = W_127;
Ghimj(index,710) = W_129;
Ghimj(index,711) = W_133;
Ghimj(index,712) = W_136;
Ghimj(index,713) = W_137;
W_48 = Ghimj(index,722);
W_49 = Ghimj(index,723);
W_71 = Ghimj(index,724);
W_79 = Ghimj(index,725);
W_85 = Ghimj(index,726);
W_102 = Ghimj(index,727);
W_107 = Ghimj(index,728);
W_111 = Ghimj(index,729);
W_115 = Ghimj(index,730);
W_117 = Ghimj(index,731);
W_121 = Ghimj(index,732);
W_124 = Ghimj(index,733);
W_125 = Ghimj(index,734);
W_126 = Ghimj(index,735);
W_127 = Ghimj(index,736);
W_129 = Ghimj(index,737);
W_133 = Ghimj(index,738);
W_136 = Ghimj(index,739);
W_137 = Ghimj(index,740);
a = - W_48/ Ghimj(index,278);
W_48 = -a;
W_126 = W_126+ a *Ghimj(index,279);
a = - W_49/ Ghimj(index,280);
W_49 = -a;
W_126 = W_126+ a *Ghimj(index,281);
a = - W_71/ Ghimj(index,356);
W_71 = -a;
W_117 = W_117+ a *Ghimj(index,357);
W_126 = W_126+ a *Ghimj(index,358);
W_137 = W_137+ a *Ghimj(index,359);
a = - W_79/ Ghimj(index,393);
W_79 = -a;
W_102 = W_102+ a *Ghimj(index,394);
W_126 = W_126+ a *Ghimj(index,395);
W_137 = W_137+ a *Ghimj(index,396);
a = - W_85/ Ghimj(index,427);
W_85 = -a;
W_102 = W_102+ a *Ghimj(index,428);
W_111 = W_111+ a *Ghimj(index,429);
W_125 = W_125+ a *Ghimj(index,430);
W_126 = W_126+ a *Ghimj(index,431);
W_133 = W_133+ a *Ghimj(index,432);
W_137 = W_137+ a *Ghimj(index,433);
a = - W_102/ Ghimj(index,600);
W_102 = -a;
W_125 = W_125+ a *Ghimj(index,601);
W_126 = W_126+ a *Ghimj(index,602);
W_133 = W_133+ a *Ghimj(index,603);
W_137 = W_137+ a *Ghimj(index,604);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
a = - W_111/ Ghimj(index,669);
W_111 = -a;
W_115 = W_115+ a *Ghimj(index,670);
W_124 = W_124+ a *Ghimj(index,671);
W_125 = W_125+ a *Ghimj(index,672);
W_126 = W_126+ a *Ghimj(index,673);
W_133 = W_133+ a *Ghimj(index,674);
W_136 = W_136+ a *Ghimj(index,675);
W_137 = W_137+ a *Ghimj(index,676);
a = - W_115/ Ghimj(index,706);
W_115 = -a;
W_124 = W_124+ a *Ghimj(index,707);
W_126 = W_126+ a *Ghimj(index,708);
W_127 = W_127+ a *Ghimj(index,709);
W_129 = W_129+ a *Ghimj(index,710);
W_133 = W_133+ a *Ghimj(index,711);
W_136 = W_136+ a *Ghimj(index,712);
W_137 = W_137+ a *Ghimj(index,713);
Ghimj(index,722) = W_48;
Ghimj(index,723) = W_49;
Ghimj(index,724) = W_71;
Ghimj(index,725) = W_79;
Ghimj(index,726) = W_85;
Ghimj(index,727) = W_102;
Ghimj(index,728) = W_107;
Ghimj(index,729) = W_111;
Ghimj(index,730) = W_115;
Ghimj(index,731) = W_117;
Ghimj(index,732) = W_121;
Ghimj(index,733) = W_124;
Ghimj(index,734) = W_125;
Ghimj(index,735) = W_126;
Ghimj(index,736) = W_127;
Ghimj(index,737) = W_129;
Ghimj(index,738) = W_133;
Ghimj(index,739) = W_136;
Ghimj(index,740) = W_137;
W_100 = Ghimj(index,741);
W_105 = Ghimj(index,742);
W_112 = Ghimj(index,743);
W_116 = Ghimj(index,744);
W_118 = Ghimj(index,745);
W_123 = Ghimj(index,746);
W_125 = Ghimj(index,747);
W_126 = Ghimj(index,748);
W_127 = Ghimj(index,749);
W_128 = Ghimj(index,750);
W_129 = Ghimj(index,751);
W_131 = Ghimj(index,752);
W_132 = Ghimj(index,753);
W_134 = Ghimj(index,754);
W_135 = Ghimj(index,755);
W_137 = Ghimj(index,756);
W_138 = Ghimj(index,757);
a = - W_100/ Ghimj(index,573);
W_100 = -a;
W_105 = W_105+ a *Ghimj(index,574);
W_112 = W_112+ a *Ghimj(index,575);
W_116 = W_116+ a *Ghimj(index,576);
W_118 = W_118+ a *Ghimj(index,577);
W_123 = W_123+ a *Ghimj(index,578);
W_126 = W_126+ a *Ghimj(index,579);
W_127 = W_127+ a *Ghimj(index,580);
W_129 = W_129+ a *Ghimj(index,581);
W_132 = W_132+ a *Ghimj(index,582);
W_134 = W_134+ a *Ghimj(index,583);
W_138 = W_138+ a *Ghimj(index,584);
a = - W_105/ Ghimj(index,616);
W_105 = -a;
W_128 = W_128+ a *Ghimj(index,617);
W_129 = W_129+ a *Ghimj(index,618);
W_132 = W_132+ a *Ghimj(index,619);
W_135 = W_135+ a *Ghimj(index,620);
W_138 = W_138+ a *Ghimj(index,621);
a = - W_112/ Ghimj(index,677);
W_112 = -a;
W_116 = W_116+ a *Ghimj(index,678);
W_123 = W_123+ a *Ghimj(index,679);
W_126 = W_126+ a *Ghimj(index,680);
W_128 = W_128+ a *Ghimj(index,681);
W_134 = W_134+ a *Ghimj(index,682);
W_137 = W_137+ a *Ghimj(index,683);
W_138 = W_138+ a *Ghimj(index,684);
a = - W_116/ Ghimj(index,714);
W_116 = -a;
W_123 = W_123+ a *Ghimj(index,715);
W_127 = W_127+ a *Ghimj(index,716);
W_128 = W_128+ a *Ghimj(index,717);
W_131 = W_131+ a *Ghimj(index,718);
W_134 = W_134+ a *Ghimj(index,719);
W_135 = W_135+ a *Ghimj(index,720);
W_138 = W_138+ a *Ghimj(index,721);
Ghimj(index,741) = W_100;
Ghimj(index,742) = W_105;
Ghimj(index,743) = W_112;
Ghimj(index,744) = W_116;
Ghimj(index,745) = W_118;
Ghimj(index,746) = W_123;
Ghimj(index,747) = W_125;
Ghimj(index,748) = W_126;
Ghimj(index,749) = W_127;
Ghimj(index,750) = W_128;
Ghimj(index,751) = W_129;
Ghimj(index,752) = W_131;
Ghimj(index,753) = W_132;
Ghimj(index,754) = W_134;
Ghimj(index,755) = W_135;
Ghimj(index,756) = W_137;
Ghimj(index,757) = W_138;
W_68 = Ghimj(index,758);
W_71 = Ghimj(index,759);
W_79 = Ghimj(index,760);
W_99 = Ghimj(index,761);
W_102 = Ghimj(index,762);
W_107 = Ghimj(index,763);
W_111 = Ghimj(index,764);
W_115 = Ghimj(index,765);
W_117 = Ghimj(index,766);
W_119 = Ghimj(index,767);
W_121 = Ghimj(index,768);
W_124 = Ghimj(index,769);
W_125 = Ghimj(index,770);
W_126 = Ghimj(index,771);
W_127 = Ghimj(index,772);
W_129 = Ghimj(index,773);
W_133 = Ghimj(index,774);
W_136 = Ghimj(index,775);
W_137 = Ghimj(index,776);
a = - W_68/ Ghimj(index,343);
W_68 = -a;
W_99 = W_99+ a *Ghimj(index,344);
W_126 = W_126+ a *Ghimj(index,345);
W_137 = W_137+ a *Ghimj(index,346);
a = - W_71/ Ghimj(index,356);
W_71 = -a;
W_117 = W_117+ a *Ghimj(index,357);
W_126 = W_126+ a *Ghimj(index,358);
W_137 = W_137+ a *Ghimj(index,359);
a = - W_79/ Ghimj(index,393);
W_79 = -a;
W_102 = W_102+ a *Ghimj(index,394);
W_126 = W_126+ a *Ghimj(index,395);
W_137 = W_137+ a *Ghimj(index,396);
a = - W_99/ Ghimj(index,565);
W_99 = -a;
W_102 = W_102+ a *Ghimj(index,566);
W_111 = W_111+ a *Ghimj(index,567);
W_125 = W_125+ a *Ghimj(index,568);
W_126 = W_126+ a *Ghimj(index,569);
W_133 = W_133+ a *Ghimj(index,570);
W_137 = W_137+ a *Ghimj(index,571);
a = - W_102/ Ghimj(index,600);
W_102 = -a;
W_125 = W_125+ a *Ghimj(index,601);
W_126 = W_126+ a *Ghimj(index,602);
W_133 = W_133+ a *Ghimj(index,603);
W_137 = W_137+ a *Ghimj(index,604);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
a = - W_111/ Ghimj(index,669);
W_111 = -a;
W_115 = W_115+ a *Ghimj(index,670);
W_124 = W_124+ a *Ghimj(index,671);
W_125 = W_125+ a *Ghimj(index,672);
W_126 = W_126+ a *Ghimj(index,673);
W_133 = W_133+ a *Ghimj(index,674);
W_136 = W_136+ a *Ghimj(index,675);
W_137 = W_137+ a *Ghimj(index,676);
a = - W_115/ Ghimj(index,706);
W_115 = -a;
W_124 = W_124+ a *Ghimj(index,707);
W_126 = W_126+ a *Ghimj(index,708);
W_127 = W_127+ a *Ghimj(index,709);
W_129 = W_129+ a *Ghimj(index,710);
W_133 = W_133+ a *Ghimj(index,711);
W_136 = W_136+ a *Ghimj(index,712);
W_137 = W_137+ a *Ghimj(index,713);
a = - W_117/ Ghimj(index,731);
W_117 = -a;
W_121 = W_121+ a *Ghimj(index,732);
W_124 = W_124+ a *Ghimj(index,733);
W_125 = W_125+ a *Ghimj(index,734);
W_126 = W_126+ a *Ghimj(index,735);
W_127 = W_127+ a *Ghimj(index,736);
W_129 = W_129+ a *Ghimj(index,737);
W_133 = W_133+ a *Ghimj(index,738);
W_136 = W_136+ a *Ghimj(index,739);
W_137 = W_137+ a *Ghimj(index,740);
Ghimj(index,758) = W_68;
Ghimj(index,759) = W_71;
Ghimj(index,760) = W_79;
Ghimj(index,761) = W_99;
Ghimj(index,762) = W_102;
Ghimj(index,763) = W_107;
Ghimj(index,764) = W_111;
Ghimj(index,765) = W_115;
Ghimj(index,766) = W_117;
Ghimj(index,767) = W_119;
Ghimj(index,768) = W_121;
Ghimj(index,769) = W_124;
Ghimj(index,770) = W_125;
Ghimj(index,771) = W_126;
Ghimj(index,772) = W_127;
Ghimj(index,773) = W_129;
Ghimj(index,774) = W_133;
Ghimj(index,775) = W_136;
Ghimj(index,776) = W_137;
W_41 = Ghimj(index,777);
W_42 = Ghimj(index,778);
W_43 = Ghimj(index,779);
W_57 = Ghimj(index,780);
W_60 = Ghimj(index,781);
W_75 = Ghimj(index,782);
W_92 = Ghimj(index,783);
W_97 = Ghimj(index,784);
W_98 = Ghimj(index,785);
W_107 = Ghimj(index,786);
W_120 = Ghimj(index,787);
W_122 = Ghimj(index,788);
W_124 = Ghimj(index,789);
W_126 = Ghimj(index,790);
W_127 = Ghimj(index,791);
W_128 = Ghimj(index,792);
W_130 = Ghimj(index,793);
W_133 = Ghimj(index,794);
W_135 = Ghimj(index,795);
W_136 = Ghimj(index,796);
W_137 = Ghimj(index,797);
a = - W_41/ Ghimj(index,262);
W_41 = -a;
W_120 = W_120+ a *Ghimj(index,263);
a = - W_42/ Ghimj(index,264);
W_42 = -a;
W_120 = W_120+ a *Ghimj(index,265);
a = - W_43/ Ghimj(index,266);
W_43 = -a;
W_120 = W_120+ a *Ghimj(index,267);
a = - W_57/ Ghimj(index,300);
W_57 = -a;
W_120 = W_120+ a *Ghimj(index,301);
W_126 = W_126+ a *Ghimj(index,302);
a = - W_60/ Ghimj(index,310);
W_60 = -a;
W_92 = W_92+ a *Ghimj(index,311);
W_120 = W_120+ a *Ghimj(index,312);
W_133 = W_133+ a *Ghimj(index,313);
W_135 = W_135+ a *Ghimj(index,314);
a = - W_75/ Ghimj(index,374);
W_75 = -a;
W_120 = W_120+ a *Ghimj(index,375);
W_126 = W_126+ a *Ghimj(index,376);
a = - W_92/ Ghimj(index,489);
W_92 = -a;
W_124 = W_124+ a *Ghimj(index,490);
W_126 = W_126+ a *Ghimj(index,491);
W_133 = W_133+ a *Ghimj(index,492);
W_135 = W_135+ a *Ghimj(index,493);
W_137 = W_137+ a *Ghimj(index,494);
a = - W_97/ Ghimj(index,549);
W_97 = -a;
W_98 = W_98+ a *Ghimj(index,550);
W_120 = W_120+ a *Ghimj(index,551);
W_122 = W_122+ a *Ghimj(index,552);
W_126 = W_126+ a *Ghimj(index,553);
W_127 = W_127+ a *Ghimj(index,554);
W_130 = W_130+ a *Ghimj(index,555);
W_137 = W_137+ a *Ghimj(index,556);
a = - W_98/ Ghimj(index,557);
W_98 = -a;
W_107 = W_107+ a *Ghimj(index,558);
W_120 = W_120+ a *Ghimj(index,559);
W_124 = W_124+ a *Ghimj(index,560);
W_126 = W_126+ a *Ghimj(index,561);
W_127 = W_127+ a *Ghimj(index,562);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
Ghimj(index,777) = W_41;
Ghimj(index,778) = W_42;
Ghimj(index,779) = W_43;
Ghimj(index,780) = W_57;
Ghimj(index,781) = W_60;
Ghimj(index,782) = W_75;
Ghimj(index,783) = W_92;
Ghimj(index,784) = W_97;
Ghimj(index,785) = W_98;
Ghimj(index,786) = W_107;
Ghimj(index,787) = W_120;
Ghimj(index,788) = W_122;
Ghimj(index,789) = W_124;
Ghimj(index,790) = W_126;
Ghimj(index,791) = W_127;
Ghimj(index,792) = W_128;
Ghimj(index,793) = W_130;
Ghimj(index,794) = W_133;
Ghimj(index,795) = W_135;
Ghimj(index,796) = W_136;
Ghimj(index,797) = W_137;
W_38 = Ghimj(index,798);
W_63 = Ghimj(index,799);
W_68 = Ghimj(index,800);
W_72 = Ghimj(index,801);
W_77 = Ghimj(index,802);
W_82 = Ghimj(index,803);
W_85 = Ghimj(index,804);
W_86 = Ghimj(index,805);
W_93 = Ghimj(index,806);
W_94 = Ghimj(index,807);
W_96 = Ghimj(index,808);
W_99 = Ghimj(index,809);
W_102 = Ghimj(index,810);
W_106 = Ghimj(index,811);
W_107 = Ghimj(index,812);
W_108 = Ghimj(index,813);
W_109 = Ghimj(index,814);
W_110 = Ghimj(index,815);
W_111 = Ghimj(index,816);
W_113 = Ghimj(index,817);
W_115 = Ghimj(index,818);
W_117 = Ghimj(index,819);
W_119 = Ghimj(index,820);
W_121 = Ghimj(index,821);
W_124 = Ghimj(index,822);
W_125 = Ghimj(index,823);
W_126 = Ghimj(index,824);
W_127 = Ghimj(index,825);
W_129 = Ghimj(index,826);
W_133 = Ghimj(index,827);
W_135 = Ghimj(index,828);
W_136 = Ghimj(index,829);
W_137 = Ghimj(index,830);
a = - W_38/ Ghimj(index,255);
W_38 = -a;
W_68 = W_68+ a *Ghimj(index,256);
W_126 = W_126+ a *Ghimj(index,257);
a = - W_63/ Ghimj(index,323);
W_63 = -a;
W_121 = W_121+ a *Ghimj(index,324);
W_126 = W_126+ a *Ghimj(index,325);
W_137 = W_137+ a *Ghimj(index,326);
a = - W_68/ Ghimj(index,343);
W_68 = -a;
W_99 = W_99+ a *Ghimj(index,344);
W_126 = W_126+ a *Ghimj(index,345);
W_137 = W_137+ a *Ghimj(index,346);
a = - W_72/ Ghimj(index,360);
W_72 = -a;
W_94 = W_94+ a *Ghimj(index,361);
W_126 = W_126+ a *Ghimj(index,362);
W_137 = W_137+ a *Ghimj(index,363);
a = - W_77/ Ghimj(index,382);
W_77 = -a;
W_121 = W_121+ a *Ghimj(index,383);
W_126 = W_126+ a *Ghimj(index,384);
W_135 = W_135+ a *Ghimj(index,385);
a = - W_82/ Ghimj(index,412);
W_82 = -a;
W_113 = W_113+ a *Ghimj(index,413);
W_126 = W_126+ a *Ghimj(index,414);
W_137 = W_137+ a *Ghimj(index,415);
a = - W_85/ Ghimj(index,427);
W_85 = -a;
W_102 = W_102+ a *Ghimj(index,428);
W_111 = W_111+ a *Ghimj(index,429);
W_125 = W_125+ a *Ghimj(index,430);
W_126 = W_126+ a *Ghimj(index,431);
W_133 = W_133+ a *Ghimj(index,432);
W_137 = W_137+ a *Ghimj(index,433);
a = - W_86/ Ghimj(index,436);
W_86 = -a;
W_93 = W_93+ a *Ghimj(index,437);
W_125 = W_125+ a *Ghimj(index,438);
W_126 = W_126+ a *Ghimj(index,439);
W_133 = W_133+ a *Ghimj(index,440);
W_137 = W_137+ a *Ghimj(index,441);
a = - W_93/ Ghimj(index,497);
W_93 = -a;
W_125 = W_125+ a *Ghimj(index,498);
W_126 = W_126+ a *Ghimj(index,499);
W_133 = W_133+ a *Ghimj(index,500);
W_137 = W_137+ a *Ghimj(index,501);
a = - W_94/ Ghimj(index,505);
W_94 = -a;
W_125 = W_125+ a *Ghimj(index,506);
W_126 = W_126+ a *Ghimj(index,507);
W_133 = W_133+ a *Ghimj(index,508);
W_137 = W_137+ a *Ghimj(index,509);
a = - W_96/ Ghimj(index,538);
W_96 = -a;
W_107 = W_107+ a *Ghimj(index,539);
W_108 = W_108+ a *Ghimj(index,540);
W_109 = W_109+ a *Ghimj(index,541);
W_110 = W_110+ a *Ghimj(index,542);
W_113 = W_113+ a *Ghimj(index,543);
W_124 = W_124+ a *Ghimj(index,544);
W_125 = W_125+ a *Ghimj(index,545);
W_126 = W_126+ a *Ghimj(index,546);
W_133 = W_133+ a *Ghimj(index,547);
W_137 = W_137+ a *Ghimj(index,548);
a = - W_99/ Ghimj(index,565);
W_99 = -a;
W_102 = W_102+ a *Ghimj(index,566);
W_111 = W_111+ a *Ghimj(index,567);
W_125 = W_125+ a *Ghimj(index,568);
W_126 = W_126+ a *Ghimj(index,569);
W_133 = W_133+ a *Ghimj(index,570);
W_137 = W_137+ a *Ghimj(index,571);
a = - W_102/ Ghimj(index,600);
W_102 = -a;
W_125 = W_125+ a *Ghimj(index,601);
W_126 = W_126+ a *Ghimj(index,602);
W_133 = W_133+ a *Ghimj(index,603);
W_137 = W_137+ a *Ghimj(index,604);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
a = - W_108/ Ghimj(index,636);
W_108 = -a;
W_109 = W_109+ a *Ghimj(index,637);
W_113 = W_113+ a *Ghimj(index,638);
W_115 = W_115+ a *Ghimj(index,639);
W_124 = W_124+ a *Ghimj(index,640);
W_125 = W_125+ a *Ghimj(index,641);
W_126 = W_126+ a *Ghimj(index,642);
W_133 = W_133+ a *Ghimj(index,643);
W_135 = W_135+ a *Ghimj(index,644);
W_136 = W_136+ a *Ghimj(index,645);
W_137 = W_137+ a *Ghimj(index,646);
a = - W_109/ Ghimj(index,648);
W_109 = -a;
W_124 = W_124+ a *Ghimj(index,649);
W_125 = W_125+ a *Ghimj(index,650);
W_126 = W_126+ a *Ghimj(index,651);
W_133 = W_133+ a *Ghimj(index,652);
W_136 = W_136+ a *Ghimj(index,653);
W_137 = W_137+ a *Ghimj(index,654);
a = - W_110/ Ghimj(index,659);
W_110 = -a;
W_124 = W_124+ a *Ghimj(index,660);
W_125 = W_125+ a *Ghimj(index,661);
W_126 = W_126+ a *Ghimj(index,662);
W_133 = W_133+ a *Ghimj(index,663);
W_136 = W_136+ a *Ghimj(index,664);
W_137 = W_137+ a *Ghimj(index,665);
a = - W_111/ Ghimj(index,669);
W_111 = -a;
W_115 = W_115+ a *Ghimj(index,670);
W_124 = W_124+ a *Ghimj(index,671);
W_125 = W_125+ a *Ghimj(index,672);
W_126 = W_126+ a *Ghimj(index,673);
W_133 = W_133+ a *Ghimj(index,674);
W_136 = W_136+ a *Ghimj(index,675);
W_137 = W_137+ a *Ghimj(index,676);
a = - W_113/ Ghimj(index,689);
W_113 = -a;
W_124 = W_124+ a *Ghimj(index,690);
W_125 = W_125+ a *Ghimj(index,691);
W_126 = W_126+ a *Ghimj(index,692);
W_133 = W_133+ a *Ghimj(index,693);
W_135 = W_135+ a *Ghimj(index,694);
W_136 = W_136+ a *Ghimj(index,695);
W_137 = W_137+ a *Ghimj(index,696);
a = - W_115/ Ghimj(index,706);
W_115 = -a;
W_124 = W_124+ a *Ghimj(index,707);
W_126 = W_126+ a *Ghimj(index,708);
W_127 = W_127+ a *Ghimj(index,709);
W_129 = W_129+ a *Ghimj(index,710);
W_133 = W_133+ a *Ghimj(index,711);
W_136 = W_136+ a *Ghimj(index,712);
W_137 = W_137+ a *Ghimj(index,713);
a = - W_117/ Ghimj(index,731);
W_117 = -a;
W_121 = W_121+ a *Ghimj(index,732);
W_124 = W_124+ a *Ghimj(index,733);
W_125 = W_125+ a *Ghimj(index,734);
W_126 = W_126+ a *Ghimj(index,735);
W_127 = W_127+ a *Ghimj(index,736);
W_129 = W_129+ a *Ghimj(index,737);
W_133 = W_133+ a *Ghimj(index,738);
W_136 = W_136+ a *Ghimj(index,739);
W_137 = W_137+ a *Ghimj(index,740);
a = - W_119/ Ghimj(index,767);
W_119 = -a;
W_121 = W_121+ a *Ghimj(index,768);
W_124 = W_124+ a *Ghimj(index,769);
W_125 = W_125+ a *Ghimj(index,770);
W_126 = W_126+ a *Ghimj(index,771);
W_127 = W_127+ a *Ghimj(index,772);
W_129 = W_129+ a *Ghimj(index,773);
W_133 = W_133+ a *Ghimj(index,774);
W_136 = W_136+ a *Ghimj(index,775);
W_137 = W_137+ a *Ghimj(index,776);
Ghimj(index,798) = W_38;
Ghimj(index,799) = W_63;
Ghimj(index,800) = W_68;
Ghimj(index,801) = W_72;
Ghimj(index,802) = W_77;
Ghimj(index,803) = W_82;
Ghimj(index,804) = W_85;
Ghimj(index,805) = W_86;
Ghimj(index,806) = W_93;
Ghimj(index,807) = W_94;
Ghimj(index,808) = W_96;
Ghimj(index,809) = W_99;
Ghimj(index,810) = W_102;
Ghimj(index,811) = W_106;
Ghimj(index,812) = W_107;
Ghimj(index,813) = W_108;
Ghimj(index,814) = W_109;
Ghimj(index,815) = W_110;
Ghimj(index,816) = W_111;
Ghimj(index,817) = W_113;
Ghimj(index,818) = W_115;
Ghimj(index,819) = W_117;
Ghimj(index,820) = W_119;
Ghimj(index,821) = W_121;
Ghimj(index,822) = W_124;
Ghimj(index,823) = W_125;
Ghimj(index,824) = W_126;
Ghimj(index,825) = W_127;
Ghimj(index,826) = W_129;
Ghimj(index,827) = W_133;
Ghimj(index,828) = W_135;
Ghimj(index,829) = W_136;
Ghimj(index,830) = W_137;
W_75 = Ghimj(index,831);
W_95 = Ghimj(index,832);
W_96 = Ghimj(index,833);
W_97 = Ghimj(index,834);
W_98 = Ghimj(index,835);
W_103 = Ghimj(index,836);
W_106 = Ghimj(index,837);
W_107 = Ghimj(index,838);
W_108 = Ghimj(index,839);
W_109 = Ghimj(index,840);
W_110 = Ghimj(index,841);
W_113 = Ghimj(index,842);
W_115 = Ghimj(index,843);
W_119 = Ghimj(index,844);
W_120 = Ghimj(index,845);
W_121 = Ghimj(index,846);
W_122 = Ghimj(index,847);
W_124 = Ghimj(index,848);
W_125 = Ghimj(index,849);
W_126 = Ghimj(index,850);
W_127 = Ghimj(index,851);
W_128 = Ghimj(index,852);
W_129 = Ghimj(index,853);
W_130 = Ghimj(index,854);
W_131 = Ghimj(index,855);
W_133 = Ghimj(index,856);
W_135 = Ghimj(index,857);
W_136 = Ghimj(index,858);
W_137 = Ghimj(index,859);
W_138 = Ghimj(index,860);
a = - W_75/ Ghimj(index,374);
W_75 = -a;
W_120 = W_120+ a *Ghimj(index,375);
W_126 = W_126+ a *Ghimj(index,376);
a = - W_95/ Ghimj(index,514);
W_95 = -a;
W_96 = W_96+ a *Ghimj(index,515);
W_98 = W_98+ a *Ghimj(index,516);
W_103 = W_103+ a *Ghimj(index,517);
W_106 = W_106+ a *Ghimj(index,518);
W_107 = W_107+ a *Ghimj(index,519);
W_109 = W_109+ a *Ghimj(index,520);
W_110 = W_110+ a *Ghimj(index,521);
W_113 = W_113+ a *Ghimj(index,522);
W_119 = W_119+ a *Ghimj(index,523);
W_121 = W_121+ a *Ghimj(index,524);
W_124 = W_124+ a *Ghimj(index,525);
W_125 = W_125+ a *Ghimj(index,526);
W_126 = W_126+ a *Ghimj(index,527);
W_127 = W_127+ a *Ghimj(index,528);
W_129 = W_129+ a *Ghimj(index,529);
W_130 = W_130+ a *Ghimj(index,530);
W_133 = W_133+ a *Ghimj(index,531);
W_135 = W_135+ a *Ghimj(index,532);
W_136 = W_136+ a *Ghimj(index,533);
W_137 = W_137+ a *Ghimj(index,534);
a = - W_96/ Ghimj(index,538);
W_96 = -a;
W_107 = W_107+ a *Ghimj(index,539);
W_108 = W_108+ a *Ghimj(index,540);
W_109 = W_109+ a *Ghimj(index,541);
W_110 = W_110+ a *Ghimj(index,542);
W_113 = W_113+ a *Ghimj(index,543);
W_124 = W_124+ a *Ghimj(index,544);
W_125 = W_125+ a *Ghimj(index,545);
W_126 = W_126+ a *Ghimj(index,546);
W_133 = W_133+ a *Ghimj(index,547);
W_137 = W_137+ a *Ghimj(index,548);
a = - W_97/ Ghimj(index,549);
W_97 = -a;
W_98 = W_98+ a *Ghimj(index,550);
W_120 = W_120+ a *Ghimj(index,551);
W_122 = W_122+ a *Ghimj(index,552);
W_126 = W_126+ a *Ghimj(index,553);
W_127 = W_127+ a *Ghimj(index,554);
W_130 = W_130+ a *Ghimj(index,555);
W_137 = W_137+ a *Ghimj(index,556);
a = - W_98/ Ghimj(index,557);
W_98 = -a;
W_107 = W_107+ a *Ghimj(index,558);
W_120 = W_120+ a *Ghimj(index,559);
W_124 = W_124+ a *Ghimj(index,560);
W_126 = W_126+ a *Ghimj(index,561);
W_127 = W_127+ a *Ghimj(index,562);
a = - W_103/ Ghimj(index,605);
W_103 = -a;
W_124 = W_124+ a *Ghimj(index,606);
W_126 = W_126+ a *Ghimj(index,607);
W_127 = W_127+ a *Ghimj(index,608);
W_129 = W_129+ a *Ghimj(index,609);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
a = - W_108/ Ghimj(index,636);
W_108 = -a;
W_109 = W_109+ a *Ghimj(index,637);
W_113 = W_113+ a *Ghimj(index,638);
W_115 = W_115+ a *Ghimj(index,639);
W_124 = W_124+ a *Ghimj(index,640);
W_125 = W_125+ a *Ghimj(index,641);
W_126 = W_126+ a *Ghimj(index,642);
W_133 = W_133+ a *Ghimj(index,643);
W_135 = W_135+ a *Ghimj(index,644);
W_136 = W_136+ a *Ghimj(index,645);
W_137 = W_137+ a *Ghimj(index,646);
a = - W_109/ Ghimj(index,648);
W_109 = -a;
W_124 = W_124+ a *Ghimj(index,649);
W_125 = W_125+ a *Ghimj(index,650);
W_126 = W_126+ a *Ghimj(index,651);
W_133 = W_133+ a *Ghimj(index,652);
W_136 = W_136+ a *Ghimj(index,653);
W_137 = W_137+ a *Ghimj(index,654);
a = - W_110/ Ghimj(index,659);
W_110 = -a;
W_124 = W_124+ a *Ghimj(index,660);
W_125 = W_125+ a *Ghimj(index,661);
W_126 = W_126+ a *Ghimj(index,662);
W_133 = W_133+ a *Ghimj(index,663);
W_136 = W_136+ a *Ghimj(index,664);
W_137 = W_137+ a *Ghimj(index,665);
a = - W_113/ Ghimj(index,689);
W_113 = -a;
W_124 = W_124+ a *Ghimj(index,690);
W_125 = W_125+ a *Ghimj(index,691);
W_126 = W_126+ a *Ghimj(index,692);
W_133 = W_133+ a *Ghimj(index,693);
W_135 = W_135+ a *Ghimj(index,694);
W_136 = W_136+ a *Ghimj(index,695);
W_137 = W_137+ a *Ghimj(index,696);
a = - W_115/ Ghimj(index,706);
W_115 = -a;
W_124 = W_124+ a *Ghimj(index,707);
W_126 = W_126+ a *Ghimj(index,708);
W_127 = W_127+ a *Ghimj(index,709);
W_129 = W_129+ a *Ghimj(index,710);
W_133 = W_133+ a *Ghimj(index,711);
W_136 = W_136+ a *Ghimj(index,712);
W_137 = W_137+ a *Ghimj(index,713);
a = - W_119/ Ghimj(index,767);
W_119 = -a;
W_121 = W_121+ a *Ghimj(index,768);
W_124 = W_124+ a *Ghimj(index,769);
W_125 = W_125+ a *Ghimj(index,770);
W_126 = W_126+ a *Ghimj(index,771);
W_127 = W_127+ a *Ghimj(index,772);
W_129 = W_129+ a *Ghimj(index,773);
W_133 = W_133+ a *Ghimj(index,774);
W_136 = W_136+ a *Ghimj(index,775);
W_137 = W_137+ a *Ghimj(index,776);
a = - W_120/ Ghimj(index,787);
W_120 = -a;
W_122 = W_122+ a *Ghimj(index,788);
W_124 = W_124+ a *Ghimj(index,789);
W_126 = W_126+ a *Ghimj(index,790);
W_127 = W_127+ a *Ghimj(index,791);
W_128 = W_128+ a *Ghimj(index,792);
W_130 = W_130+ a *Ghimj(index,793);
W_133 = W_133+ a *Ghimj(index,794);
W_135 = W_135+ a *Ghimj(index,795);
W_136 = W_136+ a *Ghimj(index,796);
W_137 = W_137+ a *Ghimj(index,797);
a = - W_121/ Ghimj(index,821);
W_121 = -a;
W_124 = W_124+ a *Ghimj(index,822);
W_125 = W_125+ a *Ghimj(index,823);
W_126 = W_126+ a *Ghimj(index,824);
W_127 = W_127+ a *Ghimj(index,825);
W_129 = W_129+ a *Ghimj(index,826);
W_133 = W_133+ a *Ghimj(index,827);
W_135 = W_135+ a *Ghimj(index,828);
W_136 = W_136+ a *Ghimj(index,829);
W_137 = W_137+ a *Ghimj(index,830);
Ghimj(index,831) = W_75;
Ghimj(index,832) = W_95;
Ghimj(index,833) = W_96;
Ghimj(index,834) = W_97;
Ghimj(index,835) = W_98;
Ghimj(index,836) = W_103;
Ghimj(index,837) = W_106;
Ghimj(index,838) = W_107;
Ghimj(index,839) = W_108;
Ghimj(index,840) = W_109;
Ghimj(index,841) = W_110;
Ghimj(index,842) = W_113;
Ghimj(index,843) = W_115;
Ghimj(index,844) = W_119;
Ghimj(index,845) = W_120;
Ghimj(index,846) = W_121;
Ghimj(index,847) = W_122;
Ghimj(index,848) = W_124;
Ghimj(index,849) = W_125;
Ghimj(index,850) = W_126;
Ghimj(index,851) = W_127;
Ghimj(index,852) = W_128;
Ghimj(index,853) = W_129;
Ghimj(index,854) = W_130;
Ghimj(index,855) = W_131;
Ghimj(index,856) = W_133;
Ghimj(index,857) = W_135;
Ghimj(index,858) = W_136;
Ghimj(index,859) = W_137;
Ghimj(index,860) = W_138;
W_103 = Ghimj(index,861);
W_104 = Ghimj(index,862);
W_112 = Ghimj(index,863);
W_114 = Ghimj(index,864);
W_116 = Ghimj(index,865);
W_118 = Ghimj(index,866);
W_119 = Ghimj(index,867);
W_121 = Ghimj(index,868);
W_123 = Ghimj(index,869);
W_124 = Ghimj(index,870);
W_125 = Ghimj(index,871);
W_126 = Ghimj(index,872);
W_127 = Ghimj(index,873);
W_128 = Ghimj(index,874);
W_129 = Ghimj(index,875);
W_130 = Ghimj(index,876);
W_131 = Ghimj(index,877);
W_132 = Ghimj(index,878);
W_133 = Ghimj(index,879);
W_134 = Ghimj(index,880);
W_135 = Ghimj(index,881);
W_136 = Ghimj(index,882);
W_137 = Ghimj(index,883);
W_138 = Ghimj(index,884);
a = - W_103/ Ghimj(index,605);
W_103 = -a;
W_124 = W_124+ a *Ghimj(index,606);
W_126 = W_126+ a *Ghimj(index,607);
W_127 = W_127+ a *Ghimj(index,608);
W_129 = W_129+ a *Ghimj(index,609);
a = - W_104/ Ghimj(index,610);
W_104 = -a;
W_125 = W_125+ a *Ghimj(index,611);
W_126 = W_126+ a *Ghimj(index,612);
W_127 = W_127+ a *Ghimj(index,613);
W_129 = W_129+ a *Ghimj(index,614);
W_137 = W_137+ a *Ghimj(index,615);
a = - W_112/ Ghimj(index,677);
W_112 = -a;
W_116 = W_116+ a *Ghimj(index,678);
W_123 = W_123+ a *Ghimj(index,679);
W_126 = W_126+ a *Ghimj(index,680);
W_128 = W_128+ a *Ghimj(index,681);
W_134 = W_134+ a *Ghimj(index,682);
W_137 = W_137+ a *Ghimj(index,683);
W_138 = W_138+ a *Ghimj(index,684);
a = - W_114/ Ghimj(index,697);
W_114 = -a;
W_126 = W_126+ a *Ghimj(index,698);
W_127 = W_127+ a *Ghimj(index,699);
W_129 = W_129+ a *Ghimj(index,700);
W_132 = W_132+ a *Ghimj(index,701);
W_136 = W_136+ a *Ghimj(index,702);
a = - W_116/ Ghimj(index,714);
W_116 = -a;
W_123 = W_123+ a *Ghimj(index,715);
W_127 = W_127+ a *Ghimj(index,716);
W_128 = W_128+ a *Ghimj(index,717);
W_131 = W_131+ a *Ghimj(index,718);
W_134 = W_134+ a *Ghimj(index,719);
W_135 = W_135+ a *Ghimj(index,720);
W_138 = W_138+ a *Ghimj(index,721);
a = - W_118/ Ghimj(index,745);
W_118 = -a;
W_123 = W_123+ a *Ghimj(index,746);
W_125 = W_125+ a *Ghimj(index,747);
W_126 = W_126+ a *Ghimj(index,748);
W_127 = W_127+ a *Ghimj(index,749);
W_128 = W_128+ a *Ghimj(index,750);
W_129 = W_129+ a *Ghimj(index,751);
W_131 = W_131+ a *Ghimj(index,752);
W_132 = W_132+ a *Ghimj(index,753);
W_134 = W_134+ a *Ghimj(index,754);
W_135 = W_135+ a *Ghimj(index,755);
W_137 = W_137+ a *Ghimj(index,756);
W_138 = W_138+ a *Ghimj(index,757);
a = - W_119/ Ghimj(index,767);
W_119 = -a;
W_121 = W_121+ a *Ghimj(index,768);
W_124 = W_124+ a *Ghimj(index,769);
W_125 = W_125+ a *Ghimj(index,770);
W_126 = W_126+ a *Ghimj(index,771);
W_127 = W_127+ a *Ghimj(index,772);
W_129 = W_129+ a *Ghimj(index,773);
W_133 = W_133+ a *Ghimj(index,774);
W_136 = W_136+ a *Ghimj(index,775);
W_137 = W_137+ a *Ghimj(index,776);
a = - W_121/ Ghimj(index,821);
W_121 = -a;
W_124 = W_124+ a *Ghimj(index,822);
W_125 = W_125+ a *Ghimj(index,823);
W_126 = W_126+ a *Ghimj(index,824);
W_127 = W_127+ a *Ghimj(index,825);
W_129 = W_129+ a *Ghimj(index,826);
W_133 = W_133+ a *Ghimj(index,827);
W_135 = W_135+ a *Ghimj(index,828);
W_136 = W_136+ a *Ghimj(index,829);
W_137 = W_137+ a *Ghimj(index,830);
Ghimj(index,861) = W_103;
Ghimj(index,862) = W_104;
Ghimj(index,863) = W_112;
Ghimj(index,864) = W_114;
Ghimj(index,865) = W_116;
Ghimj(index,866) = W_118;
Ghimj(index,867) = W_119;
Ghimj(index,868) = W_121;
Ghimj(index,869) = W_123;
Ghimj(index,870) = W_124;
Ghimj(index,871) = W_125;
Ghimj(index,872) = W_126;
Ghimj(index,873) = W_127;
Ghimj(index,874) = W_128;
Ghimj(index,875) = W_129;
Ghimj(index,876) = W_130;
Ghimj(index,877) = W_131;
Ghimj(index,878) = W_132;
Ghimj(index,879) = W_133;
Ghimj(index,880) = W_134;
Ghimj(index,881) = W_135;
Ghimj(index,882) = W_136;
Ghimj(index,883) = W_137;
Ghimj(index,884) = W_138;
W_81 = Ghimj(index,885);
W_84 = Ghimj(index,886);
W_92 = Ghimj(index,887);
W_103 = Ghimj(index,888);
W_106 = Ghimj(index,889);
W_107 = Ghimj(index,890);
W_110 = Ghimj(index,891);
W_114 = Ghimj(index,892);
W_120 = Ghimj(index,893);
W_121 = Ghimj(index,894);
W_122 = Ghimj(index,895);
W_124 = Ghimj(index,896);
W_125 = Ghimj(index,897);
W_126 = Ghimj(index,898);
W_127 = Ghimj(index,899);
W_128 = Ghimj(index,900);
W_129 = Ghimj(index,901);
W_130 = Ghimj(index,902);
W_131 = Ghimj(index,903);
W_132 = Ghimj(index,904);
W_133 = Ghimj(index,905);
W_135 = Ghimj(index,906);
W_136 = Ghimj(index,907);
W_137 = Ghimj(index,908);
W_138 = Ghimj(index,909);
a = - W_81/ Ghimj(index,405);
W_81 = -a;
W_114 = W_114+ a *Ghimj(index,406);
W_124 = W_124+ a *Ghimj(index,407);
W_126 = W_126+ a *Ghimj(index,408);
W_127 = W_127+ a *Ghimj(index,409);
W_129 = W_129+ a *Ghimj(index,410);
W_136 = W_136+ a *Ghimj(index,411);
a = - W_84/ Ghimj(index,421);
W_84 = -a;
W_92 = W_92+ a *Ghimj(index,422);
W_124 = W_124+ a *Ghimj(index,423);
W_135 = W_135+ a *Ghimj(index,424);
W_137 = W_137+ a *Ghimj(index,425);
a = - W_92/ Ghimj(index,489);
W_92 = -a;
W_124 = W_124+ a *Ghimj(index,490);
W_126 = W_126+ a *Ghimj(index,491);
W_133 = W_133+ a *Ghimj(index,492);
W_135 = W_135+ a *Ghimj(index,493);
W_137 = W_137+ a *Ghimj(index,494);
a = - W_103/ Ghimj(index,605);
W_103 = -a;
W_124 = W_124+ a *Ghimj(index,606);
W_126 = W_126+ a *Ghimj(index,607);
W_127 = W_127+ a *Ghimj(index,608);
W_129 = W_129+ a *Ghimj(index,609);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
a = - W_110/ Ghimj(index,659);
W_110 = -a;
W_124 = W_124+ a *Ghimj(index,660);
W_125 = W_125+ a *Ghimj(index,661);
W_126 = W_126+ a *Ghimj(index,662);
W_133 = W_133+ a *Ghimj(index,663);
W_136 = W_136+ a *Ghimj(index,664);
W_137 = W_137+ a *Ghimj(index,665);
a = - W_114/ Ghimj(index,697);
W_114 = -a;
W_126 = W_126+ a *Ghimj(index,698);
W_127 = W_127+ a *Ghimj(index,699);
W_129 = W_129+ a *Ghimj(index,700);
W_132 = W_132+ a *Ghimj(index,701);
W_136 = W_136+ a *Ghimj(index,702);
a = - W_120/ Ghimj(index,787);
W_120 = -a;
W_122 = W_122+ a *Ghimj(index,788);
W_124 = W_124+ a *Ghimj(index,789);
W_126 = W_126+ a *Ghimj(index,790);
W_127 = W_127+ a *Ghimj(index,791);
W_128 = W_128+ a *Ghimj(index,792);
W_130 = W_130+ a *Ghimj(index,793);
W_133 = W_133+ a *Ghimj(index,794);
W_135 = W_135+ a *Ghimj(index,795);
W_136 = W_136+ a *Ghimj(index,796);
W_137 = W_137+ a *Ghimj(index,797);
a = - W_121/ Ghimj(index,821);
W_121 = -a;
W_124 = W_124+ a *Ghimj(index,822);
W_125 = W_125+ a *Ghimj(index,823);
W_126 = W_126+ a *Ghimj(index,824);
W_127 = W_127+ a *Ghimj(index,825);
W_129 = W_129+ a *Ghimj(index,826);
W_133 = W_133+ a *Ghimj(index,827);
W_135 = W_135+ a *Ghimj(index,828);
W_136 = W_136+ a *Ghimj(index,829);
W_137 = W_137+ a *Ghimj(index,830);
a = - W_122/ Ghimj(index,847);
W_122 = -a;
W_124 = W_124+ a *Ghimj(index,848);
W_125 = W_125+ a *Ghimj(index,849);
W_126 = W_126+ a *Ghimj(index,850);
W_127 = W_127+ a *Ghimj(index,851);
W_128 = W_128+ a *Ghimj(index,852);
W_129 = W_129+ a *Ghimj(index,853);
W_130 = W_130+ a *Ghimj(index,854);
W_131 = W_131+ a *Ghimj(index,855);
W_133 = W_133+ a *Ghimj(index,856);
W_135 = W_135+ a *Ghimj(index,857);
W_136 = W_136+ a *Ghimj(index,858);
W_137 = W_137+ a *Ghimj(index,859);
W_138 = W_138+ a *Ghimj(index,860);
Ghimj(index,885) = W_81;
Ghimj(index,886) = W_84;
Ghimj(index,887) = W_92;
Ghimj(index,888) = W_103;
Ghimj(index,889) = W_106;
Ghimj(index,890) = W_107;
Ghimj(index,891) = W_110;
Ghimj(index,892) = W_114;
Ghimj(index,893) = W_120;
Ghimj(index,894) = W_121;
Ghimj(index,895) = W_122;
Ghimj(index,896) = W_124;
Ghimj(index,897) = W_125;
Ghimj(index,898) = W_126;
Ghimj(index,899) = W_127;
Ghimj(index,900) = W_128;
Ghimj(index,901) = W_129;
Ghimj(index,902) = W_130;
Ghimj(index,903) = W_131;
Ghimj(index,904) = W_132;
Ghimj(index,905) = W_133;
Ghimj(index,906) = W_135;
Ghimj(index,907) = W_136;
Ghimj(index,908) = W_137;
Ghimj(index,909) = W_138;
W_3 = Ghimj(index,910);
W_53 = Ghimj(index,911);
W_63 = Ghimj(index,912);
W_65 = Ghimj(index,913);
W_74 = Ghimj(index,914);
W_75 = Ghimj(index,915);
W_81 = Ghimj(index,916);
W_86 = Ghimj(index,917);
W_93 = Ghimj(index,918);
W_94 = Ghimj(index,919);
W_98 = Ghimj(index,920);
W_102 = Ghimj(index,921);
W_104 = Ghimj(index,922);
W_106 = Ghimj(index,923);
W_107 = Ghimj(index,924);
W_109 = Ghimj(index,925);
W_113 = Ghimj(index,926);
W_114 = Ghimj(index,927);
W_117 = Ghimj(index,928);
W_119 = Ghimj(index,929);
W_120 = Ghimj(index,930);
W_121 = Ghimj(index,931);
W_122 = Ghimj(index,932);
W_124 = Ghimj(index,933);
W_125 = Ghimj(index,934);
W_126 = Ghimj(index,935);
W_127 = Ghimj(index,936);
W_128 = Ghimj(index,937);
W_129 = Ghimj(index,938);
W_130 = Ghimj(index,939);
W_131 = Ghimj(index,940);
W_132 = Ghimj(index,941);
W_133 = Ghimj(index,942);
W_134 = Ghimj(index,943);
W_135 = Ghimj(index,944);
W_136 = Ghimj(index,945);
W_137 = Ghimj(index,946);
W_138 = Ghimj(index,947);
a = - W_3/ Ghimj(index,3);
W_3 = -a;
a = - W_53/ Ghimj(index,290);
W_53 = -a;
W_126 = W_126+ a *Ghimj(index,291);
a = - W_63/ Ghimj(index,323);
W_63 = -a;
W_121 = W_121+ a *Ghimj(index,324);
W_126 = W_126+ a *Ghimj(index,325);
W_137 = W_137+ a *Ghimj(index,326);
a = - W_65/ Ghimj(index,331);
W_65 = -a;
W_114 = W_114+ a *Ghimj(index,332);
W_126 = W_126+ a *Ghimj(index,333);
W_132 = W_132+ a *Ghimj(index,334);
a = - W_74/ Ghimj(index,368);
W_74 = -a;
W_117 = W_117+ a *Ghimj(index,369);
W_121 = W_121+ a *Ghimj(index,370);
W_125 = W_125+ a *Ghimj(index,371);
W_126 = W_126+ a *Ghimj(index,372);
W_137 = W_137+ a *Ghimj(index,373);
a = - W_75/ Ghimj(index,374);
W_75 = -a;
W_120 = W_120+ a *Ghimj(index,375);
W_126 = W_126+ a *Ghimj(index,376);
a = - W_81/ Ghimj(index,405);
W_81 = -a;
W_114 = W_114+ a *Ghimj(index,406);
W_124 = W_124+ a *Ghimj(index,407);
W_126 = W_126+ a *Ghimj(index,408);
W_127 = W_127+ a *Ghimj(index,409);
W_129 = W_129+ a *Ghimj(index,410);
W_136 = W_136+ a *Ghimj(index,411);
a = - W_86/ Ghimj(index,436);
W_86 = -a;
W_93 = W_93+ a *Ghimj(index,437);
W_125 = W_125+ a *Ghimj(index,438);
W_126 = W_126+ a *Ghimj(index,439);
W_133 = W_133+ a *Ghimj(index,440);
W_137 = W_137+ a *Ghimj(index,441);
a = - W_93/ Ghimj(index,497);
W_93 = -a;
W_125 = W_125+ a *Ghimj(index,498);
W_126 = W_126+ a *Ghimj(index,499);
W_133 = W_133+ a *Ghimj(index,500);
W_137 = W_137+ a *Ghimj(index,501);
a = - W_94/ Ghimj(index,505);
W_94 = -a;
W_125 = W_125+ a *Ghimj(index,506);
W_126 = W_126+ a *Ghimj(index,507);
W_133 = W_133+ a *Ghimj(index,508);
W_137 = W_137+ a *Ghimj(index,509);
a = - W_98/ Ghimj(index,557);
W_98 = -a;
W_107 = W_107+ a *Ghimj(index,558);
W_120 = W_120+ a *Ghimj(index,559);
W_124 = W_124+ a *Ghimj(index,560);
W_126 = W_126+ a *Ghimj(index,561);
W_127 = W_127+ a *Ghimj(index,562);
a = - W_102/ Ghimj(index,600);
W_102 = -a;
W_125 = W_125+ a *Ghimj(index,601);
W_126 = W_126+ a *Ghimj(index,602);
W_133 = W_133+ a *Ghimj(index,603);
W_137 = W_137+ a *Ghimj(index,604);
a = - W_104/ Ghimj(index,610);
W_104 = -a;
W_125 = W_125+ a *Ghimj(index,611);
W_126 = W_126+ a *Ghimj(index,612);
W_127 = W_127+ a *Ghimj(index,613);
W_129 = W_129+ a *Ghimj(index,614);
W_137 = W_137+ a *Ghimj(index,615);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
a = - W_109/ Ghimj(index,648);
W_109 = -a;
W_124 = W_124+ a *Ghimj(index,649);
W_125 = W_125+ a *Ghimj(index,650);
W_126 = W_126+ a *Ghimj(index,651);
W_133 = W_133+ a *Ghimj(index,652);
W_136 = W_136+ a *Ghimj(index,653);
W_137 = W_137+ a *Ghimj(index,654);
a = - W_113/ Ghimj(index,689);
W_113 = -a;
W_124 = W_124+ a *Ghimj(index,690);
W_125 = W_125+ a *Ghimj(index,691);
W_126 = W_126+ a *Ghimj(index,692);
W_133 = W_133+ a *Ghimj(index,693);
W_135 = W_135+ a *Ghimj(index,694);
W_136 = W_136+ a *Ghimj(index,695);
W_137 = W_137+ a *Ghimj(index,696);
a = - W_114/ Ghimj(index,697);
W_114 = -a;
W_126 = W_126+ a *Ghimj(index,698);
W_127 = W_127+ a *Ghimj(index,699);
W_129 = W_129+ a *Ghimj(index,700);
W_132 = W_132+ a *Ghimj(index,701);
W_136 = W_136+ a *Ghimj(index,702);
a = - W_117/ Ghimj(index,731);
W_117 = -a;
W_121 = W_121+ a *Ghimj(index,732);
W_124 = W_124+ a *Ghimj(index,733);
W_125 = W_125+ a *Ghimj(index,734);
W_126 = W_126+ a *Ghimj(index,735);
W_127 = W_127+ a *Ghimj(index,736);
W_129 = W_129+ a *Ghimj(index,737);
W_133 = W_133+ a *Ghimj(index,738);
W_136 = W_136+ a *Ghimj(index,739);
W_137 = W_137+ a *Ghimj(index,740);
a = - W_119/ Ghimj(index,767);
W_119 = -a;
W_121 = W_121+ a *Ghimj(index,768);
W_124 = W_124+ a *Ghimj(index,769);
W_125 = W_125+ a *Ghimj(index,770);
W_126 = W_126+ a *Ghimj(index,771);
W_127 = W_127+ a *Ghimj(index,772);
W_129 = W_129+ a *Ghimj(index,773);
W_133 = W_133+ a *Ghimj(index,774);
W_136 = W_136+ a *Ghimj(index,775);
W_137 = W_137+ a *Ghimj(index,776);
a = - W_120/ Ghimj(index,787);
W_120 = -a;
W_122 = W_122+ a *Ghimj(index,788);
W_124 = W_124+ a *Ghimj(index,789);
W_126 = W_126+ a *Ghimj(index,790);
W_127 = W_127+ a *Ghimj(index,791);
W_128 = W_128+ a *Ghimj(index,792);
W_130 = W_130+ a *Ghimj(index,793);
W_133 = W_133+ a *Ghimj(index,794);
W_135 = W_135+ a *Ghimj(index,795);
W_136 = W_136+ a *Ghimj(index,796);
W_137 = W_137+ a *Ghimj(index,797);
a = - W_121/ Ghimj(index,821);
W_121 = -a;
W_124 = W_124+ a *Ghimj(index,822);
W_125 = W_125+ a *Ghimj(index,823);
W_126 = W_126+ a *Ghimj(index,824);
W_127 = W_127+ a *Ghimj(index,825);
W_129 = W_129+ a *Ghimj(index,826);
W_133 = W_133+ a *Ghimj(index,827);
W_135 = W_135+ a *Ghimj(index,828);
W_136 = W_136+ a *Ghimj(index,829);
W_137 = W_137+ a *Ghimj(index,830);
a = - W_122/ Ghimj(index,847);
W_122 = -a;
W_124 = W_124+ a *Ghimj(index,848);
W_125 = W_125+ a *Ghimj(index,849);
W_126 = W_126+ a *Ghimj(index,850);
W_127 = W_127+ a *Ghimj(index,851);
W_128 = W_128+ a *Ghimj(index,852);
W_129 = W_129+ a *Ghimj(index,853);
W_130 = W_130+ a *Ghimj(index,854);
W_131 = W_131+ a *Ghimj(index,855);
W_133 = W_133+ a *Ghimj(index,856);
W_135 = W_135+ a *Ghimj(index,857);
W_136 = W_136+ a *Ghimj(index,858);
W_137 = W_137+ a *Ghimj(index,859);
W_138 = W_138+ a *Ghimj(index,860);
a = - W_124/ Ghimj(index,896);
W_124 = -a;
W_125 = W_125+ a *Ghimj(index,897);
W_126 = W_126+ a *Ghimj(index,898);
W_127 = W_127+ a *Ghimj(index,899);
W_128 = W_128+ a *Ghimj(index,900);
W_129 = W_129+ a *Ghimj(index,901);
W_130 = W_130+ a *Ghimj(index,902);
W_131 = W_131+ a *Ghimj(index,903);
W_132 = W_132+ a *Ghimj(index,904);
W_133 = W_133+ a *Ghimj(index,905);
W_135 = W_135+ a *Ghimj(index,906);
W_136 = W_136+ a *Ghimj(index,907);
W_137 = W_137+ a *Ghimj(index,908);
W_138 = W_138+ a *Ghimj(index,909);
Ghimj(index,910) = W_3;
Ghimj(index,911) = W_53;
Ghimj(index,912) = W_63;
Ghimj(index,913) = W_65;
Ghimj(index,914) = W_74;
Ghimj(index,915) = W_75;
Ghimj(index,916) = W_81;
Ghimj(index,917) = W_86;
Ghimj(index,918) = W_93;
Ghimj(index,919) = W_94;
Ghimj(index,920) = W_98;
Ghimj(index,921) = W_102;
Ghimj(index,922) = W_104;
Ghimj(index,923) = W_106;
Ghimj(index,924) = W_107;
Ghimj(index,925) = W_109;
Ghimj(index,926) = W_113;
Ghimj(index,927) = W_114;
Ghimj(index,928) = W_117;
Ghimj(index,929) = W_119;
Ghimj(index,930) = W_120;
Ghimj(index,931) = W_121;
Ghimj(index,932) = W_122;
Ghimj(index,933) = W_124;
Ghimj(index,934) = W_125;
Ghimj(index,935) = W_126;
Ghimj(index,936) = W_127;
Ghimj(index,937) = W_128;
Ghimj(index,938) = W_129;
Ghimj(index,939) = W_130;
Ghimj(index,940) = W_131;
Ghimj(index,941) = W_132;
Ghimj(index,942) = W_133;
Ghimj(index,943) = W_134;
Ghimj(index,944) = W_135;
Ghimj(index,945) = W_136;
Ghimj(index,946) = W_137;
Ghimj(index,947) = W_138;
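/* The row factored below couples to most of the previously factored columns,
   so nearly every W_* work register is gathered from Ghimj and updated against
   each earlier pivot in turn before the row is written back. */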
W_40 = Ghimj(index,948);
W_44 = Ghimj(index,949);
W_45 = Ghimj(index,950);
W_47 = Ghimj(index,951);
W_48 = Ghimj(index,952);
W_49 = Ghimj(index,953);
W_52 = Ghimj(index,954);
W_53 = Ghimj(index,955);
W_54 = Ghimj(index,956);
W_55 = Ghimj(index,957);
W_56 = Ghimj(index,958);
W_57 = Ghimj(index,959);
W_58 = Ghimj(index,960);
W_61 = Ghimj(index,961);
W_62 = Ghimj(index,962);
W_63 = Ghimj(index,963);
W_64 = Ghimj(index,964);
W_65 = Ghimj(index,965);
W_66 = Ghimj(index,966);
W_67 = Ghimj(index,967);
W_68 = Ghimj(index,968);
W_69 = Ghimj(index,969);
W_70 = Ghimj(index,970);
W_71 = Ghimj(index,971);
W_72 = Ghimj(index,972);
W_73 = Ghimj(index,973);
W_74 = Ghimj(index,974);
W_75 = Ghimj(index,975);
W_76 = Ghimj(index,976);
W_77 = Ghimj(index,977);
W_78 = Ghimj(index,978);
W_79 = Ghimj(index,979);
W_81 = Ghimj(index,980);
W_82 = Ghimj(index,981);
W_84 = Ghimj(index,982);
W_85 = Ghimj(index,983);
W_86 = Ghimj(index,984);
W_87 = Ghimj(index,985);
W_88 = Ghimj(index,986);
W_89 = Ghimj(index,987);
W_91 = Ghimj(index,988);
W_92 = Ghimj(index,989);
W_93 = Ghimj(index,990);
W_94 = Ghimj(index,991);
W_95 = Ghimj(index,992);
W_96 = Ghimj(index,993);
W_97 = Ghimj(index,994);
W_98 = Ghimj(index,995);
W_99 = Ghimj(index,996);
W_100 = Ghimj(index,997);
W_101 = Ghimj(index,998);
W_102 = Ghimj(index,999);
W_103 = Ghimj(index,1000);
W_104 = Ghimj(index,1001);
W_105 = Ghimj(index,1002);
W_106 = Ghimj(index,1003);
W_107 = Ghimj(index,1004);
W_108 = Ghimj(index,1005);
W_109 = Ghimj(index,1006);
W_110 = Ghimj(index,1007);
W_111 = Ghimj(index,1008);
W_112 = Ghimj(index,1009);
W_113 = Ghimj(index,1010);
W_114 = Ghimj(index,1011);
W_115 = Ghimj(index,1012);
W_116 = Ghimj(index,1013);
W_117 = Ghimj(index,1014);
W_118 = Ghimj(index,1015);
W_119 = Ghimj(index,1016);
W_120 = Ghimj(index,1017);
W_121 = Ghimj(index,1018);
W_122 = Ghimj(index,1019);
W_123 = Ghimj(index,1020);
W_124 = Ghimj(index,1021);
W_125 = Ghimj(index,1022);
W_126 = Ghimj(index,1023);
W_127 = Ghimj(index,1024);
W_128 = Ghimj(index,1025);
W_129 = Ghimj(index,1026);
W_130 = Ghimj(index,1027);
W_131 = Ghimj(index,1028);
W_132 = Ghimj(index,1029);
W_133 = Ghimj(index,1030);
W_134 = Ghimj(index,1031);
W_135 = Ghimj(index,1032);
W_136 = Ghimj(index,1033);
W_137 = Ghimj(index,1034);
W_138 = Ghimj(index,1035);
a = - W_40/ Ghimj(index,260);
W_40 = -a;
W_126 = W_126+ a *Ghimj(index,261);
a = - W_44/ Ghimj(index,268);
W_44 = -a;
W_126 = W_126+ a *Ghimj(index,269);
a = - W_45/ Ghimj(index,270);
W_45 = -a;
W_126 = W_126+ a *Ghimj(index,271);
a = - W_47/ Ghimj(index,276);
W_47 = -a;
W_126 = W_126+ a *Ghimj(index,277);
a = - W_48/ Ghimj(index,278);
W_48 = -a;
W_126 = W_126+ a *Ghimj(index,279);
a = - W_49/ Ghimj(index,280);
W_49 = -a;
W_126 = W_126+ a *Ghimj(index,281);
a = - W_52/ Ghimj(index,288);
W_52 = -a;
W_126 = W_126+ a *Ghimj(index,289);
a = - W_53/ Ghimj(index,290);
W_53 = -a;
W_126 = W_126+ a *Ghimj(index,291);
a = - W_54/ Ghimj(index,292);
W_54 = -a;
W_126 = W_126+ a *Ghimj(index,293);
a = - W_55/ Ghimj(index,294);
W_55 = -a;
W_126 = W_126+ a *Ghimj(index,295);
a = - W_56/ Ghimj(index,296);
W_56 = -a;
W_65 = W_65+ a *Ghimj(index,297);
W_81 = W_81+ a *Ghimj(index,298);
W_126 = W_126+ a *Ghimj(index,299);
a = - W_57/ Ghimj(index,300);
W_57 = -a;
W_120 = W_120+ a *Ghimj(index,301);
W_126 = W_126+ a *Ghimj(index,302);
a = - W_58/ Ghimj(index,303);
W_58 = -a;
W_91 = W_91+ a *Ghimj(index,304);
W_126 = W_126+ a *Ghimj(index,305);
a = - W_61/ Ghimj(index,315);
W_61 = -a;
W_70 = W_70+ a *Ghimj(index,316);
W_87 = W_87+ a *Ghimj(index,317);
W_126 = W_126+ a *Ghimj(index,318);
a = - W_62/ Ghimj(index,319);
W_62 = -a;
W_93 = W_93+ a *Ghimj(index,320);
W_126 = W_126+ a *Ghimj(index,321);
W_133 = W_133+ a *Ghimj(index,322);
a = - W_63/ Ghimj(index,323);
W_63 = -a;
W_121 = W_121+ a *Ghimj(index,324);
W_126 = W_126+ a *Ghimj(index,325);
W_137 = W_137+ a *Ghimj(index,326);
a = - W_64/ Ghimj(index,327);
W_64 = -a;
W_113 = W_113+ a *Ghimj(index,328);
W_126 = W_126+ a *Ghimj(index,329);
W_135 = W_135+ a *Ghimj(index,330);
a = - W_65/ Ghimj(index,331);
W_65 = -a;
W_114 = W_114+ a *Ghimj(index,332);
W_126 = W_126+ a *Ghimj(index,333);
W_132 = W_132+ a *Ghimj(index,334);
a = - W_66/ Ghimj(index,335);
W_66 = -a;
W_109 = W_109+ a *Ghimj(index,336);
W_126 = W_126+ a *Ghimj(index,337);
W_137 = W_137+ a *Ghimj(index,338);
a = - W_67/ Ghimj(index,339);
W_67 = -a;
W_115 = W_115+ a *Ghimj(index,340);
W_126 = W_126+ a *Ghimj(index,341);
W_137 = W_137+ a *Ghimj(index,342);
a = - W_68/ Ghimj(index,343);
W_68 = -a;
W_99 = W_99+ a *Ghimj(index,344);
W_126 = W_126+ a *Ghimj(index,345);
W_137 = W_137+ a *Ghimj(index,346);
a = - W_69/ Ghimj(index,347);
W_69 = -a;
W_93 = W_93+ a *Ghimj(index,348);
W_126 = W_126+ a *Ghimj(index,349);
W_137 = W_137+ a *Ghimj(index,350);
a = - W_70/ Ghimj(index,352);
W_70 = -a;
W_84 = W_84+ a *Ghimj(index,353);
W_87 = W_87+ a *Ghimj(index,354);
W_126 = W_126+ a *Ghimj(index,355);
a = - W_71/ Ghimj(index,356);
W_71 = -a;
W_117 = W_117+ a *Ghimj(index,357);
W_126 = W_126+ a *Ghimj(index,358);
W_137 = W_137+ a *Ghimj(index,359);
a = - W_72/ Ghimj(index,360);
W_72 = -a;
W_94 = W_94+ a *Ghimj(index,361);
W_126 = W_126+ a *Ghimj(index,362);
W_137 = W_137+ a *Ghimj(index,363);
a = - W_73/ Ghimj(index,364);
W_73 = -a;
W_126 = W_126+ a *Ghimj(index,365);
W_135 = W_135+ a *Ghimj(index,366);
W_137 = W_137+ a *Ghimj(index,367);
a = - W_74/ Ghimj(index,368);
W_74 = -a;
W_117 = W_117+ a *Ghimj(index,369);
W_121 = W_121+ a *Ghimj(index,370);
W_125 = W_125+ a *Ghimj(index,371);
W_126 = W_126+ a *Ghimj(index,372);
W_137 = W_137+ a *Ghimj(index,373);
a = - W_75/ Ghimj(index,374);
W_75 = -a;
W_120 = W_120+ a *Ghimj(index,375);
W_126 = W_126+ a *Ghimj(index,376);
a = - W_76/ Ghimj(index,377);
W_76 = -a;
W_87 = W_87+ a *Ghimj(index,378);
W_126 = W_126+ a *Ghimj(index,379);
W_133 = W_133+ a *Ghimj(index,380);
W_135 = W_135+ a *Ghimj(index,381);
a = - W_77/ Ghimj(index,382);
W_77 = -a;
W_121 = W_121+ a *Ghimj(index,383);
W_126 = W_126+ a *Ghimj(index,384);
W_135 = W_135+ a *Ghimj(index,385);
a = - W_78/ Ghimj(index,386);
W_78 = -a;
W_103 = W_103+ a *Ghimj(index,387);
W_106 = W_106+ a *Ghimj(index,388);
W_107 = W_107+ a *Ghimj(index,389);
W_110 = W_110+ a *Ghimj(index,390);
W_124 = W_124+ a *Ghimj(index,391);
W_126 = W_126+ a *Ghimj(index,392);
a = - W_79/ Ghimj(index,393);
W_79 = -a;
W_102 = W_102+ a *Ghimj(index,394);
W_126 = W_126+ a *Ghimj(index,395);
W_137 = W_137+ a *Ghimj(index,396);
a = - W_81/ Ghimj(index,405);
W_81 = -a;
W_114 = W_114+ a *Ghimj(index,406);
W_124 = W_124+ a *Ghimj(index,407);
W_126 = W_126+ a *Ghimj(index,408);
W_127 = W_127+ a *Ghimj(index,409);
W_129 = W_129+ a *Ghimj(index,410);
W_136 = W_136+ a *Ghimj(index,411);
a = - W_82/ Ghimj(index,412);
W_82 = -a;
W_113 = W_113+ a *Ghimj(index,413);
W_126 = W_126+ a *Ghimj(index,414);
W_137 = W_137+ a *Ghimj(index,415);
a = - W_84/ Ghimj(index,421);
W_84 = -a;
W_92 = W_92+ a *Ghimj(index,422);
W_124 = W_124+ a *Ghimj(index,423);
W_135 = W_135+ a *Ghimj(index,424);
W_137 = W_137+ a *Ghimj(index,425);
a = - W_85/ Ghimj(index,427);
W_85 = -a;
W_102 = W_102+ a *Ghimj(index,428);
W_111 = W_111+ a *Ghimj(index,429);
W_125 = W_125+ a *Ghimj(index,430);
W_126 = W_126+ a *Ghimj(index,431);
W_133 = W_133+ a *Ghimj(index,432);
W_137 = W_137+ a *Ghimj(index,433);
a = - W_86/ Ghimj(index,436);
W_86 = -a;
W_93 = W_93+ a *Ghimj(index,437);
W_125 = W_125+ a *Ghimj(index,438);
W_126 = W_126+ a *Ghimj(index,439);
W_133 = W_133+ a *Ghimj(index,440);
W_137 = W_137+ a *Ghimj(index,441);
a = - W_87/ Ghimj(index,444);
W_87 = -a;
W_92 = W_92+ a *Ghimj(index,445);
W_124 = W_124+ a *Ghimj(index,446);
W_126 = W_126+ a *Ghimj(index,447);
W_135 = W_135+ a *Ghimj(index,448);
W_137 = W_137+ a *Ghimj(index,449);
a = - W_88/ Ghimj(index,450);
W_88 = -a;
W_103 = W_103+ a *Ghimj(index,451);
W_106 = W_106+ a *Ghimj(index,452);
W_124 = W_124+ a *Ghimj(index,453);
W_126 = W_126+ a *Ghimj(index,454);
W_127 = W_127+ a *Ghimj(index,455);
W_137 = W_137+ a *Ghimj(index,456);
a = - W_89/ Ghimj(index,457);
W_89 = -a;
W_93 = W_93+ a *Ghimj(index,458);
W_94 = W_94+ a *Ghimj(index,459);
W_102 = W_102+ a *Ghimj(index,460);
W_107 = W_107+ a *Ghimj(index,461);
W_109 = W_109+ a *Ghimj(index,462);
W_113 = W_113+ a *Ghimj(index,463);
W_117 = W_117+ a *Ghimj(index,464);
W_124 = W_124+ a *Ghimj(index,465);
W_125 = W_125+ a *Ghimj(index,466);
W_126 = W_126+ a *Ghimj(index,467);
a = - W_91/ Ghimj(index,481);
W_91 = -a;
W_106 = W_106+ a *Ghimj(index,482);
W_109 = W_109+ a *Ghimj(index,483);
W_126 = W_126+ a *Ghimj(index,484);
W_133 = W_133+ a *Ghimj(index,485);
W_136 = W_136+ a *Ghimj(index,486);
a = - W_92/ Ghimj(index,489);
W_92 = -a;
W_124 = W_124+ a *Ghimj(index,490);
W_126 = W_126+ a *Ghimj(index,491);
W_133 = W_133+ a *Ghimj(index,492);
W_135 = W_135+ a *Ghimj(index,493);
W_137 = W_137+ a *Ghimj(index,494);
a = - W_93/ Ghimj(index,497);
W_93 = -a;
W_125 = W_125+ a *Ghimj(index,498);
W_126 = W_126+ a *Ghimj(index,499);
W_133 = W_133+ a *Ghimj(index,500);
W_137 = W_137+ a *Ghimj(index,501);
a = - W_94/ Ghimj(index,505);
W_94 = -a;
W_125 = W_125+ a *Ghimj(index,506);
W_126 = W_126+ a *Ghimj(index,507);
W_133 = W_133+ a *Ghimj(index,508);
W_137 = W_137+ a *Ghimj(index,509);
a = - W_95/ Ghimj(index,514);
W_95 = -a;
W_96 = W_96+ a *Ghimj(index,515);
W_98 = W_98+ a *Ghimj(index,516);
W_103 = W_103+ a *Ghimj(index,517);
W_106 = W_106+ a *Ghimj(index,518);
W_107 = W_107+ a *Ghimj(index,519);
W_109 = W_109+ a *Ghimj(index,520);
W_110 = W_110+ a *Ghimj(index,521);
W_113 = W_113+ a *Ghimj(index,522);
W_119 = W_119+ a *Ghimj(index,523);
W_121 = W_121+ a *Ghimj(index,524);
W_124 = W_124+ a *Ghimj(index,525);
W_125 = W_125+ a *Ghimj(index,526);
W_126 = W_126+ a *Ghimj(index,527);
W_127 = W_127+ a *Ghimj(index,528);
W_129 = W_129+ a *Ghimj(index,529);
W_130 = W_130+ a *Ghimj(index,530);
W_133 = W_133+ a *Ghimj(index,531);
W_135 = W_135+ a *Ghimj(index,532);
W_136 = W_136+ a *Ghimj(index,533);
W_137 = W_137+ a *Ghimj(index,534);
a = - W_96/ Ghimj(index,538);
W_96 = -a;
W_107 = W_107+ a *Ghimj(index,539);
W_108 = W_108+ a *Ghimj(index,540);
W_109 = W_109+ a *Ghimj(index,541);
W_110 = W_110+ a *Ghimj(index,542);
W_113 = W_113+ a *Ghimj(index,543);
W_124 = W_124+ a *Ghimj(index,544);
W_125 = W_125+ a *Ghimj(index,545);
W_126 = W_126+ a *Ghimj(index,546);
W_133 = W_133+ a *Ghimj(index,547);
W_137 = W_137+ a *Ghimj(index,548);
a = - W_97/ Ghimj(index,549);
W_97 = -a;
W_98 = W_98+ a *Ghimj(index,550);
W_120 = W_120+ a *Ghimj(index,551);
W_122 = W_122+ a *Ghimj(index,552);
W_126 = W_126+ a *Ghimj(index,553);
W_127 = W_127+ a *Ghimj(index,554);
W_130 = W_130+ a *Ghimj(index,555);
W_137 = W_137+ a *Ghimj(index,556);
a = - W_98/ Ghimj(index,557);
W_98 = -a;
W_107 = W_107+ a *Ghimj(index,558);
W_120 = W_120+ a *Ghimj(index,559);
W_124 = W_124+ a *Ghimj(index,560);
W_126 = W_126+ a *Ghimj(index,561);
W_127 = W_127+ a *Ghimj(index,562);
a = - W_99/ Ghimj(index,565);
W_99 = -a;
W_102 = W_102+ a *Ghimj(index,566);
W_111 = W_111+ a *Ghimj(index,567);
W_125 = W_125+ a *Ghimj(index,568);
W_126 = W_126+ a *Ghimj(index,569);
W_133 = W_133+ a *Ghimj(index,570);
W_137 = W_137+ a *Ghimj(index,571);
a = - W_100/ Ghimj(index,573);
W_100 = -a;
W_105 = W_105+ a *Ghimj(index,574);
W_112 = W_112+ a *Ghimj(index,575);
W_116 = W_116+ a *Ghimj(index,576);
W_118 = W_118+ a *Ghimj(index,577);
W_123 = W_123+ a *Ghimj(index,578);
W_126 = W_126+ a *Ghimj(index,579);
W_127 = W_127+ a *Ghimj(index,580);
W_129 = W_129+ a *Ghimj(index,581);
W_132 = W_132+ a *Ghimj(index,582);
W_134 = W_134+ a *Ghimj(index,583);
W_138 = W_138+ a *Ghimj(index,584);
a = - W_101/ Ghimj(index,586);
W_101 = -a;
W_105 = W_105+ a *Ghimj(index,587);
W_114 = W_114+ a *Ghimj(index,588);
W_116 = W_116+ a *Ghimj(index,589);
W_119 = W_119+ a *Ghimj(index,590);
W_123 = W_123+ a *Ghimj(index,591);
W_126 = W_126+ a *Ghimj(index,592);
W_128 = W_128+ a *Ghimj(index,593);
W_130 = W_130+ a *Ghimj(index,594);
W_135 = W_135+ a *Ghimj(index,595);
W_136 = W_136+ a *Ghimj(index,596);
W_138 = W_138+ a *Ghimj(index,597);
a = - W_102/ Ghimj(index,600);
W_102 = -a;
W_125 = W_125+ a *Ghimj(index,601);
W_126 = W_126+ a *Ghimj(index,602);
W_133 = W_133+ a *Ghimj(index,603);
W_137 = W_137+ a *Ghimj(index,604);
a = - W_103/ Ghimj(index,605);
W_103 = -a;
W_124 = W_124+ a *Ghimj(index,606);
W_126 = W_126+ a *Ghimj(index,607);
W_127 = W_127+ a *Ghimj(index,608);
W_129 = W_129+ a *Ghimj(index,609);
a = - W_104/ Ghimj(index,610);
W_104 = -a;
W_125 = W_125+ a *Ghimj(index,611);
W_126 = W_126+ a *Ghimj(index,612);
W_127 = W_127+ a *Ghimj(index,613);
W_129 = W_129+ a *Ghimj(index,614);
W_137 = W_137+ a *Ghimj(index,615);
a = - W_105/ Ghimj(index,616);
W_105 = -a;
W_128 = W_128+ a *Ghimj(index,617);
W_129 = W_129+ a *Ghimj(index,618);
W_132 = W_132+ a *Ghimj(index,619);
W_135 = W_135+ a *Ghimj(index,620);
W_138 = W_138+ a *Ghimj(index,621);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
a = - W_108/ Ghimj(index,636);
W_108 = -a;
W_109 = W_109+ a *Ghimj(index,637);
W_113 = W_113+ a *Ghimj(index,638);
W_115 = W_115+ a *Ghimj(index,639);
W_124 = W_124+ a *Ghimj(index,640);
W_125 = W_125+ a *Ghimj(index,641);
W_126 = W_126+ a *Ghimj(index,642);
W_133 = W_133+ a *Ghimj(index,643);
W_135 = W_135+ a *Ghimj(index,644);
W_136 = W_136+ a *Ghimj(index,645);
W_137 = W_137+ a *Ghimj(index,646);
a = - W_109/ Ghimj(index,648);
W_109 = -a;
W_124 = W_124+ a *Ghimj(index,649);
W_125 = W_125+ a *Ghimj(index,650);
W_126 = W_126+ a *Ghimj(index,651);
W_133 = W_133+ a *Ghimj(index,652);
W_136 = W_136+ a *Ghimj(index,653);
W_137 = W_137+ a *Ghimj(index,654);
a = - W_110/ Ghimj(index,659);
W_110 = -a;
W_124 = W_124+ a *Ghimj(index,660);
W_125 = W_125+ a *Ghimj(index,661);
W_126 = W_126+ a *Ghimj(index,662);
W_133 = W_133+ a *Ghimj(index,663);
W_136 = W_136+ a *Ghimj(index,664);
W_137 = W_137+ a *Ghimj(index,665);
a = - W_111/ Ghimj(index,669);
W_111 = -a;
W_115 = W_115+ a *Ghimj(index,670);
W_124 = W_124+ a *Ghimj(index,671);
W_125 = W_125+ a *Ghimj(index,672);
W_126 = W_126+ a *Ghimj(index,673);
W_133 = W_133+ a *Ghimj(index,674);
W_136 = W_136+ a *Ghimj(index,675);
W_137 = W_137+ a *Ghimj(index,676);
a = - W_112/ Ghimj(index,677);
W_112 = -a;
W_116 = W_116+ a *Ghimj(index,678);
W_123 = W_123+ a *Ghimj(index,679);
W_126 = W_126+ a *Ghimj(index,680);
W_128 = W_128+ a *Ghimj(index,681);
W_134 = W_134+ a *Ghimj(index,682);
W_137 = W_137+ a *Ghimj(index,683);
W_138 = W_138+ a *Ghimj(index,684);
a = - W_113/ Ghimj(index,689);
W_113 = -a;
W_124 = W_124+ a *Ghimj(index,690);
W_125 = W_125+ a *Ghimj(index,691);
W_126 = W_126+ a *Ghimj(index,692);
W_133 = W_133+ a *Ghimj(index,693);
W_135 = W_135+ a *Ghimj(index,694);
W_136 = W_136+ a *Ghimj(index,695);
W_137 = W_137+ a *Ghimj(index,696);
a = - W_114/ Ghimj(index,697);
W_114 = -a;
W_126 = W_126+ a *Ghimj(index,698);
W_127 = W_127+ a *Ghimj(index,699);
W_129 = W_129+ a *Ghimj(index,700);
W_132 = W_132+ a *Ghimj(index,701);
W_136 = W_136+ a *Ghimj(index,702);
a = - W_115/ Ghimj(index,706);
W_115 = -a;
W_124 = W_124+ a *Ghimj(index,707);
W_126 = W_126+ a *Ghimj(index,708);
W_127 = W_127+ a *Ghimj(index,709);
W_129 = W_129+ a *Ghimj(index,710);
W_133 = W_133+ a *Ghimj(index,711);
W_136 = W_136+ a *Ghimj(index,712);
W_137 = W_137+ a *Ghimj(index,713);
a = - W_116/ Ghimj(index,714);
W_116 = -a;
W_123 = W_123+ a *Ghimj(index,715);
W_127 = W_127+ a *Ghimj(index,716);
W_128 = W_128+ a *Ghimj(index,717);
W_131 = W_131+ a *Ghimj(index,718);
W_134 = W_134+ a *Ghimj(index,719);
W_135 = W_135+ a *Ghimj(index,720);
W_138 = W_138+ a *Ghimj(index,721);
a = - W_117/ Ghimj(index,731);
W_117 = -a;
W_121 = W_121+ a *Ghimj(index,732);
W_124 = W_124+ a *Ghimj(index,733);
W_125 = W_125+ a *Ghimj(index,734);
W_126 = W_126+ a *Ghimj(index,735);
W_127 = W_127+ a *Ghimj(index,736);
W_129 = W_129+ a *Ghimj(index,737);
W_133 = W_133+ a *Ghimj(index,738);
W_136 = W_136+ a *Ghimj(index,739);
W_137 = W_137+ a *Ghimj(index,740);
a = - W_118/ Ghimj(index,745);
W_118 = -a;
W_123 = W_123+ a *Ghimj(index,746);
W_125 = W_125+ a *Ghimj(index,747);
W_126 = W_126+ a *Ghimj(index,748);
W_127 = W_127+ a *Ghimj(index,749);
W_128 = W_128+ a *Ghimj(index,750);
W_129 = W_129+ a *Ghimj(index,751);
W_131 = W_131+ a *Ghimj(index,752);
W_132 = W_132+ a *Ghimj(index,753);
W_134 = W_134+ a *Ghimj(index,754);
W_135 = W_135+ a *Ghimj(index,755);
W_137 = W_137+ a *Ghimj(index,756);
W_138 = W_138+ a *Ghimj(index,757);
a = - W_119/ Ghimj(index,767);
W_119 = -a;
W_121 = W_121+ a *Ghimj(index,768);
W_124 = W_124+ a *Ghimj(index,769);
W_125 = W_125+ a *Ghimj(index,770);
W_126 = W_126+ a *Ghimj(index,771);
W_127 = W_127+ a *Ghimj(index,772);
W_129 = W_129+ a *Ghimj(index,773);
W_133 = W_133+ a *Ghimj(index,774);
W_136 = W_136+ a *Ghimj(index,775);
W_137 = W_137+ a *Ghimj(index,776);
a = - W_120/ Ghimj(index,787);
W_120 = -a;
W_122 = W_122+ a *Ghimj(index,788);
W_124 = W_124+ a *Ghimj(index,789);
W_126 = W_126+ a *Ghimj(index,790);
W_127 = W_127+ a *Ghimj(index,791);
W_128 = W_128+ a *Ghimj(index,792);
W_130 = W_130+ a *Ghimj(index,793);
W_133 = W_133+ a *Ghimj(index,794);
W_135 = W_135+ a *Ghimj(index,795);
W_136 = W_136+ a *Ghimj(index,796);
W_137 = W_137+ a *Ghimj(index,797);
a = - W_121/ Ghimj(index,821);
W_121 = -a;
W_124 = W_124+ a *Ghimj(index,822);
W_125 = W_125+ a *Ghimj(index,823);
W_126 = W_126+ a *Ghimj(index,824);
W_127 = W_127+ a *Ghimj(index,825);
W_129 = W_129+ a *Ghimj(index,826);
W_133 = W_133+ a *Ghimj(index,827);
W_135 = W_135+ a *Ghimj(index,828);
W_136 = W_136+ a *Ghimj(index,829);
W_137 = W_137+ a *Ghimj(index,830);
a = - W_122/ Ghimj(index,847);
W_122 = -a;
W_124 = W_124+ a *Ghimj(index,848);
W_125 = W_125+ a *Ghimj(index,849);
W_126 = W_126+ a *Ghimj(index,850);
W_127 = W_127+ a *Ghimj(index,851);
W_128 = W_128+ a *Ghimj(index,852);
W_129 = W_129+ a *Ghimj(index,853);
W_130 = W_130+ a *Ghimj(index,854);
W_131 = W_131+ a *Ghimj(index,855);
W_133 = W_133+ a *Ghimj(index,856);
W_135 = W_135+ a *Ghimj(index,857);
W_136 = W_136+ a *Ghimj(index,858);
W_137 = W_137+ a *Ghimj(index,859);
W_138 = W_138+ a *Ghimj(index,860);
a = - W_123/ Ghimj(index,869);
W_123 = -a;
W_124 = W_124+ a *Ghimj(index,870);
W_125 = W_125+ a *Ghimj(index,871);
W_126 = W_126+ a *Ghimj(index,872);
W_127 = W_127+ a *Ghimj(index,873);
W_128 = W_128+ a *Ghimj(index,874);
W_129 = W_129+ a *Ghimj(index,875);
W_130 = W_130+ a *Ghimj(index,876);
W_131 = W_131+ a *Ghimj(index,877);
W_132 = W_132+ a *Ghimj(index,878);
W_133 = W_133+ a *Ghimj(index,879);
W_134 = W_134+ a *Ghimj(index,880);
W_135 = W_135+ a *Ghimj(index,881);
W_136 = W_136+ a *Ghimj(index,882);
W_137 = W_137+ a *Ghimj(index,883);
W_138 = W_138+ a *Ghimj(index,884);
a = - W_124/ Ghimj(index,896);
W_124 = -a;
W_125 = W_125+ a *Ghimj(index,897);
W_126 = W_126+ a *Ghimj(index,898);
W_127 = W_127+ a *Ghimj(index,899);
W_128 = W_128+ a *Ghimj(index,900);
W_129 = W_129+ a *Ghimj(index,901);
W_130 = W_130+ a *Ghimj(index,902);
W_131 = W_131+ a *Ghimj(index,903);
W_132 = W_132+ a *Ghimj(index,904);
W_133 = W_133+ a *Ghimj(index,905);
W_135 = W_135+ a *Ghimj(index,906);
W_136 = W_136+ a *Ghimj(index,907);
W_137 = W_137+ a *Ghimj(index,908);
W_138 = W_138+ a *Ghimj(index,909);
a = - W_125/ Ghimj(index,934);
W_125 = -a;
W_126 = W_126+ a *Ghimj(index,935);
W_127 = W_127+ a *Ghimj(index,936);
W_128 = W_128+ a *Ghimj(index,937);
W_129 = W_129+ a *Ghimj(index,938);
W_130 = W_130+ a *Ghimj(index,939);
W_131 = W_131+ a *Ghimj(index,940);
W_132 = W_132+ a *Ghimj(index,941);
W_133 = W_133+ a *Ghimj(index,942);
W_134 = W_134+ a *Ghimj(index,943);
W_135 = W_135+ a *Ghimj(index,944);
W_136 = W_136+ a *Ghimj(index,945);
W_137 = W_137+ a *Ghimj(index,946);
W_138 = W_138+ a *Ghimj(index,947);
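/* The update pattern above appears to be machine-generated sparse LU (Doolittle)
   elimination: for each previously factored row k, a = -W_k / Ghimj(diag_k) is the
   multiplier (stored back as W_k = -a), and the remaining workspace entries W_j
   accumulate a * Ghimj(k, j). The assignments below scatter the eliminated row from
   the W_* workspace back into the compressed Ghimj storage (entries 948..1035). */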
Ghimj(index,948) = W_40;
Ghimj(index,949) = W_44;
Ghimj(index,950) = W_45;
Ghimj(index,951) = W_47;
Ghimj(index,952) = W_48;
Ghimj(index,953) = W_49;
Ghimj(index,954) = W_52;
Ghimj(index,955) = W_53;
Ghimj(index,956) = W_54;
Ghimj(index,957) = W_55;
Ghimj(index,958) = W_56;
Ghimj(index,959) = W_57;
Ghimj(index,960) = W_58;
Ghimj(index,961) = W_61;
Ghimj(index,962) = W_62;
Ghimj(index,963) = W_63;
Ghimj(index,964) = W_64;
Ghimj(index,965) = W_65;
Ghimj(index,966) = W_66;
Ghimj(index,967) = W_67;
Ghimj(index,968) = W_68;
Ghimj(index,969) = W_69;
Ghimj(index,970) = W_70;
Ghimj(index,971) = W_71;
Ghimj(index,972) = W_72;
Ghimj(index,973) = W_73;
Ghimj(index,974) = W_74;
Ghimj(index,975) = W_75;
Ghimj(index,976) = W_76;
Ghimj(index,977) = W_77;
Ghimj(index,978) = W_78;
Ghimj(index,979) = W_79;
Ghimj(index,980) = W_81;
Ghimj(index,981) = W_82;
Ghimj(index,982) = W_84;
Ghimj(index,983) = W_85;
Ghimj(index,984) = W_86;
Ghimj(index,985) = W_87;
Ghimj(index,986) = W_88;
Ghimj(index,987) = W_89;
Ghimj(index,988) = W_91;
Ghimj(index,989) = W_92;
Ghimj(index,990) = W_93;
Ghimj(index,991) = W_94;
Ghimj(index,992) = W_95;
Ghimj(index,993) = W_96;
Ghimj(index,994) = W_97;
Ghimj(index,995) = W_98;
Ghimj(index,996) = W_99;
Ghimj(index,997) = W_100;
Ghimj(index,998) = W_101;
Ghimj(index,999) = W_102;
Ghimj(index,1000) = W_103;
Ghimj(index,1001) = W_104;
Ghimj(index,1002) = W_105;
Ghimj(index,1003) = W_106;
Ghimj(index,1004) = W_107;
Ghimj(index,1005) = W_108;
Ghimj(index,1006) = W_109;
Ghimj(index,1007) = W_110;
Ghimj(index,1008) = W_111;
Ghimj(index,1009) = W_112;
Ghimj(index,1010) = W_113;
Ghimj(index,1011) = W_114;
Ghimj(index,1012) = W_115;
Ghimj(index,1013) = W_116;
Ghimj(index,1014) = W_117;
Ghimj(index,1015) = W_118;
Ghimj(index,1016) = W_119;
Ghimj(index,1017) = W_120;
Ghimj(index,1018) = W_121;
Ghimj(index,1019) = W_122;
Ghimj(index,1020) = W_123;
Ghimj(index,1021) = W_124;
Ghimj(index,1022) = W_125;
Ghimj(index,1023) = W_126;
Ghimj(index,1024) = W_127;
Ghimj(index,1025) = W_128;
Ghimj(index,1026) = W_129;
Ghimj(index,1027) = W_130;
Ghimj(index,1028) = W_131;
Ghimj(index,1029) = W_132;
Ghimj(index,1030) = W_133;
Ghimj(index,1031) = W_134;
Ghimj(index,1032) = W_135;
Ghimj(index,1033) = W_136;
Ghimj(index,1034) = W_137;
Ghimj(index,1035) = W_138;
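/* Gather the next row (Ghimj entries 1036..1082) into the W_* workspace. */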
W_1 = Ghimj(index,1036);
W_39 = Ghimj(index,1037);
W_41 = Ghimj(index,1038);
W_42 = Ghimj(index,1039);
W_43 = Ghimj(index,1040);
W_50 = Ghimj(index,1041);
W_52 = Ghimj(index,1042);
W_54 = Ghimj(index,1043);
W_55 = Ghimj(index,1044);
W_57 = Ghimj(index,1045);
W_75 = Ghimj(index,1046);
W_80 = Ghimj(index,1047);
W_83 = Ghimj(index,1048);
W_88 = Ghimj(index,1049);
W_90 = Ghimj(index,1050);
W_97 = Ghimj(index,1051);
W_98 = Ghimj(index,1052);
W_100 = Ghimj(index,1053);
W_103 = Ghimj(index,1054);
W_104 = Ghimj(index,1055);
W_105 = Ghimj(index,1056);
W_106 = Ghimj(index,1057);
W_107 = Ghimj(index,1058);
W_112 = Ghimj(index,1059);
W_114 = Ghimj(index,1060);
W_116 = Ghimj(index,1061);
W_118 = Ghimj(index,1062);
W_119 = Ghimj(index,1063);
W_120 = Ghimj(index,1064);
W_121 = Ghimj(index,1065);
W_122 = Ghimj(index,1066);
W_123 = Ghimj(index,1067);
W_124 = Ghimj(index,1068);
W_125 = Ghimj(index,1069);
W_126 = Ghimj(index,1070);
W_127 = Ghimj(index,1071);
W_128 = Ghimj(index,1072);
W_129 = Ghimj(index,1073);
W_130 = Ghimj(index,1074);
W_131 = Ghimj(index,1075);
W_132 = Ghimj(index,1076);
W_133 = Ghimj(index,1077);
W_134 = Ghimj(index,1078);
W_135 = Ghimj(index,1079);
W_136 = Ghimj(index,1080);
W_137 = Ghimj(index,1081);
W_138 = Ghimj(index,1082);
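/* Eliminate this row against the rows already factored above (same multiplier pattern as before). */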
a = - W_1/ Ghimj(index,1);
W_1 = -a;
a = - W_39/ Ghimj(index,258);
W_39 = -a;
W_134 = W_134+ a *Ghimj(index,259);
a = - W_41/ Ghimj(index,262);
W_41 = -a;
W_120 = W_120+ a *Ghimj(index,263);
a = - W_42/ Ghimj(index,264);
W_42 = -a;
W_120 = W_120+ a *Ghimj(index,265);
a = - W_43/ Ghimj(index,266);
W_43 = -a;
W_120 = W_120+ a *Ghimj(index,267);
a = - W_50/ Ghimj(index,282);
W_50 = -a;
W_83 = W_83+ a *Ghimj(index,283);
W_138 = W_138+ a *Ghimj(index,284);
a = - W_52/ Ghimj(index,288);
W_52 = -a;
W_126 = W_126+ a *Ghimj(index,289);
a = - W_54/ Ghimj(index,292);
W_54 = -a;
W_126 = W_126+ a *Ghimj(index,293);
a = - W_55/ Ghimj(index,294);
W_55 = -a;
W_126 = W_126+ a *Ghimj(index,295);
a = - W_57/ Ghimj(index,300);
W_57 = -a;
W_120 = W_120+ a *Ghimj(index,301);
W_126 = W_126+ a *Ghimj(index,302);
a = - W_75/ Ghimj(index,374);
W_75 = -a;
W_120 = W_120+ a *Ghimj(index,375);
W_126 = W_126+ a *Ghimj(index,376);
a = - W_80/ Ghimj(index,397);
W_80 = -a;
W_90 = W_90+ a *Ghimj(index,398);
W_112 = W_112+ a *Ghimj(index,399);
W_116 = W_116+ a *Ghimj(index,400);
W_127 = W_127+ a *Ghimj(index,401);
W_129 = W_129+ a *Ghimj(index,402);
W_134 = W_134+ a *Ghimj(index,403);
W_138 = W_138+ a *Ghimj(index,404);
a = - W_83/ Ghimj(index,416);
W_83 = -a;
W_128 = W_128+ a *Ghimj(index,417);
W_135 = W_135+ a *Ghimj(index,418);
W_136 = W_136+ a *Ghimj(index,419);
W_138 = W_138+ a *Ghimj(index,420);
a = - W_88/ Ghimj(index,450);
W_88 = -a;
W_103 = W_103+ a *Ghimj(index,451);
W_106 = W_106+ a *Ghimj(index,452);
W_124 = W_124+ a *Ghimj(index,453);
W_126 = W_126+ a *Ghimj(index,454);
W_127 = W_127+ a *Ghimj(index,455);
W_137 = W_137+ a *Ghimj(index,456);
a = - W_90/ Ghimj(index,469);
W_90 = -a;
W_100 = W_100+ a *Ghimj(index,470);
W_105 = W_105+ a *Ghimj(index,471);
W_112 = W_112+ a *Ghimj(index,472);
W_116 = W_116+ a *Ghimj(index,473);
W_118 = W_118+ a *Ghimj(index,474);
W_123 = W_123+ a *Ghimj(index,475);
W_127 = W_127+ a *Ghimj(index,476);
W_129 = W_129+ a *Ghimj(index,477);
W_132 = W_132+ a *Ghimj(index,478);
W_134 = W_134+ a *Ghimj(index,479);
W_138 = W_138+ a *Ghimj(index,480);
a = - W_97/ Ghimj(index,549);
W_97 = -a;
W_98 = W_98+ a *Ghimj(index,550);
W_120 = W_120+ a *Ghimj(index,551);
W_122 = W_122+ a *Ghimj(index,552);
W_126 = W_126+ a *Ghimj(index,553);
W_127 = W_127+ a *Ghimj(index,554);
W_130 = W_130+ a *Ghimj(index,555);
W_137 = W_137+ a *Ghimj(index,556);
a = - W_98/ Ghimj(index,557);
W_98 = -a;
W_107 = W_107+ a *Ghimj(index,558);
W_120 = W_120+ a *Ghimj(index,559);
W_124 = W_124+ a *Ghimj(index,560);
W_126 = W_126+ a *Ghimj(index,561);
W_127 = W_127+ a *Ghimj(index,562);
a = - W_100/ Ghimj(index,573);
W_100 = -a;
W_105 = W_105+ a *Ghimj(index,574);
W_112 = W_112+ a *Ghimj(index,575);
W_116 = W_116+ a *Ghimj(index,576);
W_118 = W_118+ a *Ghimj(index,577);
W_123 = W_123+ a *Ghimj(index,578);
W_126 = W_126+ a *Ghimj(index,579);
W_127 = W_127+ a *Ghimj(index,580);
W_129 = W_129+ a *Ghimj(index,581);
W_132 = W_132+ a *Ghimj(index,582);
W_134 = W_134+ a *Ghimj(index,583);
W_138 = W_138+ a *Ghimj(index,584);
a = - W_103/ Ghimj(index,605);
W_103 = -a;
W_124 = W_124+ a *Ghimj(index,606);
W_126 = W_126+ a *Ghimj(index,607);
W_127 = W_127+ a *Ghimj(index,608);
W_129 = W_129+ a *Ghimj(index,609);
a = - W_104/ Ghimj(index,610);
W_104 = -a;
W_125 = W_125+ a *Ghimj(index,611);
W_126 = W_126+ a *Ghimj(index,612);
W_127 = W_127+ a *Ghimj(index,613);
W_129 = W_129+ a *Ghimj(index,614);
W_137 = W_137+ a *Ghimj(index,615);
a = - W_105/ Ghimj(index,616);
W_105 = -a;
W_128 = W_128+ a *Ghimj(index,617);
W_129 = W_129+ a *Ghimj(index,618);
W_132 = W_132+ a *Ghimj(index,619);
W_135 = W_135+ a *Ghimj(index,620);
W_138 = W_138+ a *Ghimj(index,621);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
a = - W_112/ Ghimj(index,677);
W_112 = -a;
W_116 = W_116+ a *Ghimj(index,678);
W_123 = W_123+ a *Ghimj(index,679);
W_126 = W_126+ a *Ghimj(index,680);
W_128 = W_128+ a *Ghimj(index,681);
W_134 = W_134+ a *Ghimj(index,682);
W_137 = W_137+ a *Ghimj(index,683);
W_138 = W_138+ a *Ghimj(index,684);
a = - W_114/ Ghimj(index,697);
W_114 = -a;
W_126 = W_126+ a *Ghimj(index,698);
W_127 = W_127+ a *Ghimj(index,699);
W_129 = W_129+ a *Ghimj(index,700);
W_132 = W_132+ a *Ghimj(index,701);
W_136 = W_136+ a *Ghimj(index,702);
a = - W_116/ Ghimj(index,714);
W_116 = -a;
W_123 = W_123+ a *Ghimj(index,715);
W_127 = W_127+ a *Ghimj(index,716);
W_128 = W_128+ a *Ghimj(index,717);
W_131 = W_131+ a *Ghimj(index,718);
W_134 = W_134+ a *Ghimj(index,719);
W_135 = W_135+ a *Ghimj(index,720);
W_138 = W_138+ a *Ghimj(index,721);
a = - W_118/ Ghimj(index,745);
W_118 = -a;
W_123 = W_123+ a *Ghimj(index,746);
W_125 = W_125+ a *Ghimj(index,747);
W_126 = W_126+ a *Ghimj(index,748);
W_127 = W_127+ a *Ghimj(index,749);
W_128 = W_128+ a *Ghimj(index,750);
W_129 = W_129+ a *Ghimj(index,751);
W_131 = W_131+ a *Ghimj(index,752);
W_132 = W_132+ a *Ghimj(index,753);
W_134 = W_134+ a *Ghimj(index,754);
W_135 = W_135+ a *Ghimj(index,755);
W_137 = W_137+ a *Ghimj(index,756);
W_138 = W_138+ a *Ghimj(index,757);
a = - W_119/ Ghimj(index,767);
W_119 = -a;
W_121 = W_121+ a *Ghimj(index,768);
W_124 = W_124+ a *Ghimj(index,769);
W_125 = W_125+ a *Ghimj(index,770);
W_126 = W_126+ a *Ghimj(index,771);
W_127 = W_127+ a *Ghimj(index,772);
W_129 = W_129+ a *Ghimj(index,773);
W_133 = W_133+ a *Ghimj(index,774);
W_136 = W_136+ a *Ghimj(index,775);
W_137 = W_137+ a *Ghimj(index,776);
a = - W_120/ Ghimj(index,787);
W_120 = -a;
W_122 = W_122+ a *Ghimj(index,788);
W_124 = W_124+ a *Ghimj(index,789);
W_126 = W_126+ a *Ghimj(index,790);
W_127 = W_127+ a *Ghimj(index,791);
W_128 = W_128+ a *Ghimj(index,792);
W_130 = W_130+ a *Ghimj(index,793);
W_133 = W_133+ a *Ghimj(index,794);
W_135 = W_135+ a *Ghimj(index,795);
W_136 = W_136+ a *Ghimj(index,796);
W_137 = W_137+ a *Ghimj(index,797);
a = - W_121/ Ghimj(index,821);
W_121 = -a;
W_124 = W_124+ a *Ghimj(index,822);
W_125 = W_125+ a *Ghimj(index,823);
W_126 = W_126+ a *Ghimj(index,824);
W_127 = W_127+ a *Ghimj(index,825);
W_129 = W_129+ a *Ghimj(index,826);
W_133 = W_133+ a *Ghimj(index,827);
W_135 = W_135+ a *Ghimj(index,828);
W_136 = W_136+ a *Ghimj(index,829);
W_137 = W_137+ a *Ghimj(index,830);
a = - W_122/ Ghimj(index,847);
W_122 = -a;
W_124 = W_124+ a *Ghimj(index,848);
W_125 = W_125+ a *Ghimj(index,849);
W_126 = W_126+ a *Ghimj(index,850);
W_127 = W_127+ a *Ghimj(index,851);
W_128 = W_128+ a *Ghimj(index,852);
W_129 = W_129+ a *Ghimj(index,853);
W_130 = W_130+ a *Ghimj(index,854);
W_131 = W_131+ a *Ghimj(index,855);
W_133 = W_133+ a *Ghimj(index,856);
W_135 = W_135+ a *Ghimj(index,857);
W_136 = W_136+ a *Ghimj(index,858);
W_137 = W_137+ a *Ghimj(index,859);
W_138 = W_138+ a *Ghimj(index,860);
a = - W_123/ Ghimj(index,869);
W_123 = -a;
W_124 = W_124+ a *Ghimj(index,870);
W_125 = W_125+ a *Ghimj(index,871);
W_126 = W_126+ a *Ghimj(index,872);
W_127 = W_127+ a *Ghimj(index,873);
W_128 = W_128+ a *Ghimj(index,874);
W_129 = W_129+ a *Ghimj(index,875);
W_130 = W_130+ a *Ghimj(index,876);
W_131 = W_131+ a *Ghimj(index,877);
W_132 = W_132+ a *Ghimj(index,878);
W_133 = W_133+ a *Ghimj(index,879);
W_134 = W_134+ a *Ghimj(index,880);
W_135 = W_135+ a *Ghimj(index,881);
W_136 = W_136+ a *Ghimj(index,882);
W_137 = W_137+ a *Ghimj(index,883);
W_138 = W_138+ a *Ghimj(index,884);
a = - W_124/ Ghimj(index,896);
W_124 = -a;
W_125 = W_125+ a *Ghimj(index,897);
W_126 = W_126+ a *Ghimj(index,898);
W_127 = W_127+ a *Ghimj(index,899);
W_128 = W_128+ a *Ghimj(index,900);
W_129 = W_129+ a *Ghimj(index,901);
W_130 = W_130+ a *Ghimj(index,902);
W_131 = W_131+ a *Ghimj(index,903);
W_132 = W_132+ a *Ghimj(index,904);
W_133 = W_133+ a *Ghimj(index,905);
W_135 = W_135+ a *Ghimj(index,906);
W_136 = W_136+ a *Ghimj(index,907);
W_137 = W_137+ a *Ghimj(index,908);
W_138 = W_138+ a *Ghimj(index,909);
a = - W_125/ Ghimj(index,934);
W_125 = -a;
W_126 = W_126+ a *Ghimj(index,935);
W_127 = W_127+ a *Ghimj(index,936);
W_128 = W_128+ a *Ghimj(index,937);
W_129 = W_129+ a *Ghimj(index,938);
W_130 = W_130+ a *Ghimj(index,939);
W_131 = W_131+ a *Ghimj(index,940);
W_132 = W_132+ a *Ghimj(index,941);
W_133 = W_133+ a *Ghimj(index,942);
W_134 = W_134+ a *Ghimj(index,943);
W_135 = W_135+ a *Ghimj(index,944);
W_136 = W_136+ a *Ghimj(index,945);
W_137 = W_137+ a *Ghimj(index,946);
W_138 = W_138+ a *Ghimj(index,947);
a = - W_126/ Ghimj(index,1023);
W_126 = -a;
W_127 = W_127+ a *Ghimj(index,1024);
W_128 = W_128+ a *Ghimj(index,1025);
W_129 = W_129+ a *Ghimj(index,1026);
W_130 = W_130+ a *Ghimj(index,1027);
W_131 = W_131+ a *Ghimj(index,1028);
W_132 = W_132+ a *Ghimj(index,1029);
W_133 = W_133+ a *Ghimj(index,1030);
W_134 = W_134+ a *Ghimj(index,1031);
W_135 = W_135+ a *Ghimj(index,1032);
W_136 = W_136+ a *Ghimj(index,1033);
W_137 = W_137+ a *Ghimj(index,1034);
W_138 = W_138+ a *Ghimj(index,1035);
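/* Row eliminated; write the workspace back into Ghimj entries 1036..1082. */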
Ghimj(index,1036) = W_1;
Ghimj(index,1037) = W_39;
Ghimj(index,1038) = W_41;
Ghimj(index,1039) = W_42;
Ghimj(index,1040) = W_43;
Ghimj(index,1041) = W_50;
Ghimj(index,1042) = W_52;
Ghimj(index,1043) = W_54;
Ghimj(index,1044) = W_55;
Ghimj(index,1045) = W_57;
Ghimj(index,1046) = W_75;
Ghimj(index,1047) = W_80;
Ghimj(index,1048) = W_83;
Ghimj(index,1049) = W_88;
Ghimj(index,1050) = W_90;
Ghimj(index,1051) = W_97;
Ghimj(index,1052) = W_98;
Ghimj(index,1053) = W_100;
Ghimj(index,1054) = W_103;
Ghimj(index,1055) = W_104;
Ghimj(index,1056) = W_105;
Ghimj(index,1057) = W_106;
Ghimj(index,1058) = W_107;
Ghimj(index,1059) = W_112;
Ghimj(index,1060) = W_114;
Ghimj(index,1061) = W_116;
Ghimj(index,1062) = W_118;
Ghimj(index,1063) = W_119;
Ghimj(index,1064) = W_120;
Ghimj(index,1065) = W_121;
Ghimj(index,1066) = W_122;
Ghimj(index,1067) = W_123;
Ghimj(index,1068) = W_124;
Ghimj(index,1069) = W_125;
Ghimj(index,1070) = W_126;
Ghimj(index,1071) = W_127;
Ghimj(index,1072) = W_128;
Ghimj(index,1073) = W_129;
Ghimj(index,1074) = W_130;
Ghimj(index,1075) = W_131;
Ghimj(index,1076) = W_132;
Ghimj(index,1077) = W_133;
Ghimj(index,1078) = W_134;
Ghimj(index,1079) = W_135;
Ghimj(index,1080) = W_136;
Ghimj(index,1081) = W_137;
Ghimj(index,1082) = W_138;
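/* Gather the next row (Ghimj entries 1083..1148) into the workspace. */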
W_40 = Ghimj(index,1083);
W_44 = Ghimj(index,1084);
W_45 = Ghimj(index,1085);
W_47 = Ghimj(index,1086);
W_48 = Ghimj(index,1087);
W_49 = Ghimj(index,1088);
W_52 = Ghimj(index,1089);
W_53 = Ghimj(index,1090);
W_54 = Ghimj(index,1091);
W_55 = Ghimj(index,1092);
W_57 = Ghimj(index,1093);
W_61 = Ghimj(index,1094);
W_63 = Ghimj(index,1095);
W_67 = Ghimj(index,1096);
W_70 = Ghimj(index,1097);
W_73 = Ghimj(index,1098);
W_74 = Ghimj(index,1099);
W_75 = Ghimj(index,1100);
W_76 = Ghimj(index,1101);
W_77 = Ghimj(index,1102);
W_78 = Ghimj(index,1103);
W_79 = Ghimj(index,1104);
W_83 = Ghimj(index,1105);
W_84 = Ghimj(index,1106);
W_86 = Ghimj(index,1107);
W_87 = Ghimj(index,1108);
W_88 = Ghimj(index,1109);
W_92 = Ghimj(index,1110);
W_93 = Ghimj(index,1111);
W_97 = Ghimj(index,1112);
W_98 = Ghimj(index,1113);
W_101 = Ghimj(index,1114);
W_102 = Ghimj(index,1115);
W_103 = Ghimj(index,1116);
W_104 = Ghimj(index,1117);
W_105 = Ghimj(index,1118);
W_106 = Ghimj(index,1119);
W_107 = Ghimj(index,1120);
W_110 = Ghimj(index,1121);
W_111 = Ghimj(index,1122);
W_112 = Ghimj(index,1123);
W_114 = Ghimj(index,1124);
W_115 = Ghimj(index,1125);
W_116 = Ghimj(index,1126);
W_117 = Ghimj(index,1127);
W_118 = Ghimj(index,1128);
W_119 = Ghimj(index,1129);
W_120 = Ghimj(index,1130);
W_121 = Ghimj(index,1131);
W_122 = Ghimj(index,1132);
W_123 = Ghimj(index,1133);
W_124 = Ghimj(index,1134);
W_125 = Ghimj(index,1135);
W_126 = Ghimj(index,1136);
W_127 = Ghimj(index,1137);
W_128 = Ghimj(index,1138);
W_129 = Ghimj(index,1139);
W_130 = Ghimj(index,1140);
W_131 = Ghimj(index,1141);
W_132 = Ghimj(index,1142);
W_133 = Ghimj(index,1143);
W_134 = Ghimj(index,1144);
W_135 = Ghimj(index,1145);
W_136 = Ghimj(index,1146);
W_137 = Ghimj(index,1147);
W_138 = Ghimj(index,1148);
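/* Elimination pass for this row. */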
a = - W_40/ Ghimj(index,260);
W_40 = -a;
W_126 = W_126+ a *Ghimj(index,261);
a = - W_44/ Ghimj(index,268);
W_44 = -a;
W_126 = W_126+ a *Ghimj(index,269);
a = - W_45/ Ghimj(index,270);
W_45 = -a;
W_126 = W_126+ a *Ghimj(index,271);
a = - W_47/ Ghimj(index,276);
W_47 = -a;
W_126 = W_126+ a *Ghimj(index,277);
a = - W_48/ Ghimj(index,278);
W_48 = -a;
W_126 = W_126+ a *Ghimj(index,279);
a = - W_49/ Ghimj(index,280);
W_49 = -a;
W_126 = W_126+ a *Ghimj(index,281);
a = - W_52/ Ghimj(index,288);
W_52 = -a;
W_126 = W_126+ a *Ghimj(index,289);
a = - W_53/ Ghimj(index,290);
W_53 = -a;
W_126 = W_126+ a *Ghimj(index,291);
a = - W_54/ Ghimj(index,292);
W_54 = -a;
W_126 = W_126+ a *Ghimj(index,293);
a = - W_55/ Ghimj(index,294);
W_55 = -a;
W_126 = W_126+ a *Ghimj(index,295);
a = - W_57/ Ghimj(index,300);
W_57 = -a;
W_120 = W_120+ a *Ghimj(index,301);
W_126 = W_126+ a *Ghimj(index,302);
a = - W_61/ Ghimj(index,315);
W_61 = -a;
W_70 = W_70+ a *Ghimj(index,316);
W_87 = W_87+ a *Ghimj(index,317);
W_126 = W_126+ a *Ghimj(index,318);
a = - W_63/ Ghimj(index,323);
W_63 = -a;
W_121 = W_121+ a *Ghimj(index,324);
W_126 = W_126+ a *Ghimj(index,325);
W_137 = W_137+ a *Ghimj(index,326);
a = - W_67/ Ghimj(index,339);
W_67 = -a;
W_115 = W_115+ a *Ghimj(index,340);
W_126 = W_126+ a *Ghimj(index,341);
W_137 = W_137+ a *Ghimj(index,342);
a = - W_70/ Ghimj(index,352);
W_70 = -a;
W_84 = W_84+ a *Ghimj(index,353);
W_87 = W_87+ a *Ghimj(index,354);
W_126 = W_126+ a *Ghimj(index,355);
a = - W_73/ Ghimj(index,364);
W_73 = -a;
W_126 = W_126+ a *Ghimj(index,365);
W_135 = W_135+ a *Ghimj(index,366);
W_137 = W_137+ a *Ghimj(index,367);
a = - W_74/ Ghimj(index,368);
W_74 = -a;
W_117 = W_117+ a *Ghimj(index,369);
W_121 = W_121+ a *Ghimj(index,370);
W_125 = W_125+ a *Ghimj(index,371);
W_126 = W_126+ a *Ghimj(index,372);
W_137 = W_137+ a *Ghimj(index,373);
a = - W_75/ Ghimj(index,374);
W_75 = -a;
W_120 = W_120+ a *Ghimj(index,375);
W_126 = W_126+ a *Ghimj(index,376);
a = - W_76/ Ghimj(index,377);
W_76 = -a;
W_87 = W_87+ a *Ghimj(index,378);
W_126 = W_126+ a *Ghimj(index,379);
W_133 = W_133+ a *Ghimj(index,380);
W_135 = W_135+ a *Ghimj(index,381);
a = - W_77/ Ghimj(index,382);
W_77 = -a;
W_121 = W_121+ a *Ghimj(index,383);
W_126 = W_126+ a *Ghimj(index,384);
W_135 = W_135+ a *Ghimj(index,385);
a = - W_78/ Ghimj(index,386);
W_78 = -a;
W_103 = W_103+ a *Ghimj(index,387);
W_106 = W_106+ a *Ghimj(index,388);
W_107 = W_107+ a *Ghimj(index,389);
W_110 = W_110+ a *Ghimj(index,390);
W_124 = W_124+ a *Ghimj(index,391);
W_126 = W_126+ a *Ghimj(index,392);
a = - W_79/ Ghimj(index,393);
W_79 = -a;
W_102 = W_102+ a *Ghimj(index,394);
W_126 = W_126+ a *Ghimj(index,395);
W_137 = W_137+ a *Ghimj(index,396);
a = - W_83/ Ghimj(index,416);
W_83 = -a;
W_128 = W_128+ a *Ghimj(index,417);
W_135 = W_135+ a *Ghimj(index,418);
W_136 = W_136+ a *Ghimj(index,419);
W_138 = W_138+ a *Ghimj(index,420);
a = - W_84/ Ghimj(index,421);
W_84 = -a;
W_92 = W_92+ a *Ghimj(index,422);
W_124 = W_124+ a *Ghimj(index,423);
W_135 = W_135+ a *Ghimj(index,424);
W_137 = W_137+ a *Ghimj(index,425);
a = - W_86/ Ghimj(index,436);
W_86 = -a;
W_93 = W_93+ a *Ghimj(index,437);
W_125 = W_125+ a *Ghimj(index,438);
W_126 = W_126+ a *Ghimj(index,439);
W_133 = W_133+ a *Ghimj(index,440);
W_137 = W_137+ a *Ghimj(index,441);
a = - W_87/ Ghimj(index,444);
W_87 = -a;
W_92 = W_92+ a *Ghimj(index,445);
W_124 = W_124+ a *Ghimj(index,446);
W_126 = W_126+ a *Ghimj(index,447);
W_135 = W_135+ a *Ghimj(index,448);
W_137 = W_137+ a *Ghimj(index,449);
a = - W_88/ Ghimj(index,450);
W_88 = -a;
W_103 = W_103+ a *Ghimj(index,451);
W_106 = W_106+ a *Ghimj(index,452);
W_124 = W_124+ a *Ghimj(index,453);
W_126 = W_126+ a *Ghimj(index,454);
W_127 = W_127+ a *Ghimj(index,455);
W_137 = W_137+ a *Ghimj(index,456);
a = - W_92/ Ghimj(index,489);
W_92 = -a;
W_124 = W_124+ a *Ghimj(index,490);
W_126 = W_126+ a *Ghimj(index,491);
W_133 = W_133+ a *Ghimj(index,492);
W_135 = W_135+ a *Ghimj(index,493);
W_137 = W_137+ a *Ghimj(index,494);
a = - W_93/ Ghimj(index,497);
W_93 = -a;
W_125 = W_125+ a *Ghimj(index,498);
W_126 = W_126+ a *Ghimj(index,499);
W_133 = W_133+ a *Ghimj(index,500);
W_137 = W_137+ a *Ghimj(index,501);
a = - W_97/ Ghimj(index,549);
W_97 = -a;
W_98 = W_98+ a *Ghimj(index,550);
W_120 = W_120+ a *Ghimj(index,551);
W_122 = W_122+ a *Ghimj(index,552);
W_126 = W_126+ a *Ghimj(index,553);
W_127 = W_127+ a *Ghimj(index,554);
W_130 = W_130+ a *Ghimj(index,555);
W_137 = W_137+ a *Ghimj(index,556);
a = - W_98/ Ghimj(index,557);
W_98 = -a;
W_107 = W_107+ a *Ghimj(index,558);
W_120 = W_120+ a *Ghimj(index,559);
W_124 = W_124+ a *Ghimj(index,560);
W_126 = W_126+ a *Ghimj(index,561);
W_127 = W_127+ a *Ghimj(index,562);
a = - W_101/ Ghimj(index,586);
W_101 = -a;
W_105 = W_105+ a *Ghimj(index,587);
W_114 = W_114+ a *Ghimj(index,588);
W_116 = W_116+ a *Ghimj(index,589);
W_119 = W_119+ a *Ghimj(index,590);
W_123 = W_123+ a *Ghimj(index,591);
W_126 = W_126+ a *Ghimj(index,592);
W_128 = W_128+ a *Ghimj(index,593);
W_130 = W_130+ a *Ghimj(index,594);
W_135 = W_135+ a *Ghimj(index,595);
W_136 = W_136+ a *Ghimj(index,596);
W_138 = W_138+ a *Ghimj(index,597);
a = - W_102/ Ghimj(index,600);
W_102 = -a;
W_125 = W_125+ a *Ghimj(index,601);
W_126 = W_126+ a *Ghimj(index,602);
W_133 = W_133+ a *Ghimj(index,603);
W_137 = W_137+ a *Ghimj(index,604);
a = - W_103/ Ghimj(index,605);
W_103 = -a;
W_124 = W_124+ a *Ghimj(index,606);
W_126 = W_126+ a *Ghimj(index,607);
W_127 = W_127+ a *Ghimj(index,608);
W_129 = W_129+ a *Ghimj(index,609);
a = - W_104/ Ghimj(index,610);
W_104 = -a;
W_125 = W_125+ a *Ghimj(index,611);
W_126 = W_126+ a *Ghimj(index,612);
W_127 = W_127+ a *Ghimj(index,613);
W_129 = W_129+ a *Ghimj(index,614);
W_137 = W_137+ a *Ghimj(index,615);
a = - W_105/ Ghimj(index,616);
W_105 = -a;
W_128 = W_128+ a *Ghimj(index,617);
W_129 = W_129+ a *Ghimj(index,618);
W_132 = W_132+ a *Ghimj(index,619);
W_135 = W_135+ a *Ghimj(index,620);
W_138 = W_138+ a *Ghimj(index,621);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
a = - W_110/ Ghimj(index,659);
W_110 = -a;
W_124 = W_124+ a *Ghimj(index,660);
W_125 = W_125+ a *Ghimj(index,661);
W_126 = W_126+ a *Ghimj(index,662);
W_133 = W_133+ a *Ghimj(index,663);
W_136 = W_136+ a *Ghimj(index,664);
W_137 = W_137+ a *Ghimj(index,665);
a = - W_111/ Ghimj(index,669);
W_111 = -a;
W_115 = W_115+ a *Ghimj(index,670);
W_124 = W_124+ a *Ghimj(index,671);
W_125 = W_125+ a *Ghimj(index,672);
W_126 = W_126+ a *Ghimj(index,673);
W_133 = W_133+ a *Ghimj(index,674);
W_136 = W_136+ a *Ghimj(index,675);
W_137 = W_137+ a *Ghimj(index,676);
a = - W_112/ Ghimj(index,677);
W_112 = -a;
W_116 = W_116+ a *Ghimj(index,678);
W_123 = W_123+ a *Ghimj(index,679);
W_126 = W_126+ a *Ghimj(index,680);
W_128 = W_128+ a *Ghimj(index,681);
W_134 = W_134+ a *Ghimj(index,682);
W_137 = W_137+ a *Ghimj(index,683);
W_138 = W_138+ a *Ghimj(index,684);
a = - W_114/ Ghimj(index,697);
W_114 = -a;
W_126 = W_126+ a *Ghimj(index,698);
W_127 = W_127+ a *Ghimj(index,699);
W_129 = W_129+ a *Ghimj(index,700);
W_132 = W_132+ a *Ghimj(index,701);
W_136 = W_136+ a *Ghimj(index,702);
a = - W_115/ Ghimj(index,706);
W_115 = -a;
W_124 = W_124+ a *Ghimj(index,707);
W_126 = W_126+ a *Ghimj(index,708);
W_127 = W_127+ a *Ghimj(index,709);
W_129 = W_129+ a *Ghimj(index,710);
W_133 = W_133+ a *Ghimj(index,711);
W_136 = W_136+ a *Ghimj(index,712);
W_137 = W_137+ a *Ghimj(index,713);
a = - W_116/ Ghimj(index,714);
W_116 = -a;
W_123 = W_123+ a *Ghimj(index,715);
W_127 = W_127+ a *Ghimj(index,716);
W_128 = W_128+ a *Ghimj(index,717);
W_131 = W_131+ a *Ghimj(index,718);
W_134 = W_134+ a *Ghimj(index,719);
W_135 = W_135+ a *Ghimj(index,720);
W_138 = W_138+ a *Ghimj(index,721);
a = - W_117/ Ghimj(index,731);
W_117 = -a;
W_121 = W_121+ a *Ghimj(index,732);
W_124 = W_124+ a *Ghimj(index,733);
W_125 = W_125+ a *Ghimj(index,734);
W_126 = W_126+ a *Ghimj(index,735);
W_127 = W_127+ a *Ghimj(index,736);
W_129 = W_129+ a *Ghimj(index,737);
W_133 = W_133+ a *Ghimj(index,738);
W_136 = W_136+ a *Ghimj(index,739);
W_137 = W_137+ a *Ghimj(index,740);
a = - W_118/ Ghimj(index,745);
W_118 = -a;
W_123 = W_123+ a *Ghimj(index,746);
W_125 = W_125+ a *Ghimj(index,747);
W_126 = W_126+ a *Ghimj(index,748);
W_127 = W_127+ a *Ghimj(index,749);
W_128 = W_128+ a *Ghimj(index,750);
W_129 = W_129+ a *Ghimj(index,751);
W_131 = W_131+ a *Ghimj(index,752);
W_132 = W_132+ a *Ghimj(index,753);
W_134 = W_134+ a *Ghimj(index,754);
W_135 = W_135+ a *Ghimj(index,755);
W_137 = W_137+ a *Ghimj(index,756);
W_138 = W_138+ a *Ghimj(index,757);
a = - W_119/ Ghimj(index,767);
W_119 = -a;
W_121 = W_121+ a *Ghimj(index,768);
W_124 = W_124+ a *Ghimj(index,769);
W_125 = W_125+ a *Ghimj(index,770);
W_126 = W_126+ a *Ghimj(index,771);
W_127 = W_127+ a *Ghimj(index,772);
W_129 = W_129+ a *Ghimj(index,773);
W_133 = W_133+ a *Ghimj(index,774);
W_136 = W_136+ a *Ghimj(index,775);
W_137 = W_137+ a *Ghimj(index,776);
a = - W_120/ Ghimj(index,787);
W_120 = -a;
W_122 = W_122+ a *Ghimj(index,788);
W_124 = W_124+ a *Ghimj(index,789);
W_126 = W_126+ a *Ghimj(index,790);
W_127 = W_127+ a *Ghimj(index,791);
W_128 = W_128+ a *Ghimj(index,792);
W_130 = W_130+ a *Ghimj(index,793);
W_133 = W_133+ a *Ghimj(index,794);
W_135 = W_135+ a *Ghimj(index,795);
W_136 = W_136+ a *Ghimj(index,796);
W_137 = W_137+ a *Ghimj(index,797);
a = - W_121/ Ghimj(index,821);
W_121 = -a;
W_124 = W_124+ a *Ghimj(index,822);
W_125 = W_125+ a *Ghimj(index,823);
W_126 = W_126+ a *Ghimj(index,824);
W_127 = W_127+ a *Ghimj(index,825);
W_129 = W_129+ a *Ghimj(index,826);
W_133 = W_133+ a *Ghimj(index,827);
W_135 = W_135+ a *Ghimj(index,828);
W_136 = W_136+ a *Ghimj(index,829);
W_137 = W_137+ a *Ghimj(index,830);
a = - W_122/ Ghimj(index,847);
W_122 = -a;
W_124 = W_124+ a *Ghimj(index,848);
W_125 = W_125+ a *Ghimj(index,849);
W_126 = W_126+ a *Ghimj(index,850);
W_127 = W_127+ a *Ghimj(index,851);
W_128 = W_128+ a *Ghimj(index,852);
W_129 = W_129+ a *Ghimj(index,853);
W_130 = W_130+ a *Ghimj(index,854);
W_131 = W_131+ a *Ghimj(index,855);
W_133 = W_133+ a *Ghimj(index,856);
W_135 = W_135+ a *Ghimj(index,857);
W_136 = W_136+ a *Ghimj(index,858);
W_137 = W_137+ a *Ghimj(index,859);
W_138 = W_138+ a *Ghimj(index,860);
a = - W_123/ Ghimj(index,869);
W_123 = -a;
W_124 = W_124+ a *Ghimj(index,870);
W_125 = W_125+ a *Ghimj(index,871);
W_126 = W_126+ a *Ghimj(index,872);
W_127 = W_127+ a *Ghimj(index,873);
W_128 = W_128+ a *Ghimj(index,874);
W_129 = W_129+ a *Ghimj(index,875);
W_130 = W_130+ a *Ghimj(index,876);
W_131 = W_131+ a *Ghimj(index,877);
W_132 = W_132+ a *Ghimj(index,878);
W_133 = W_133+ a *Ghimj(index,879);
W_134 = W_134+ a *Ghimj(index,880);
W_135 = W_135+ a *Ghimj(index,881);
W_136 = W_136+ a *Ghimj(index,882);
W_137 = W_137+ a *Ghimj(index,883);
W_138 = W_138+ a *Ghimj(index,884);
a = - W_124/ Ghimj(index,896);
W_124 = -a;
W_125 = W_125+ a *Ghimj(index,897);
W_126 = W_126+ a *Ghimj(index,898);
W_127 = W_127+ a *Ghimj(index,899);
W_128 = W_128+ a *Ghimj(index,900);
W_129 = W_129+ a *Ghimj(index,901);
W_130 = W_130+ a *Ghimj(index,902);
W_131 = W_131+ a *Ghimj(index,903);
W_132 = W_132+ a *Ghimj(index,904);
W_133 = W_133+ a *Ghimj(index,905);
W_135 = W_135+ a *Ghimj(index,906);
W_136 = W_136+ a *Ghimj(index,907);
W_137 = W_137+ a *Ghimj(index,908);
W_138 = W_138+ a *Ghimj(index,909);
a = - W_125/ Ghimj(index,934);
W_125 = -a;
W_126 = W_126+ a *Ghimj(index,935);
W_127 = W_127+ a *Ghimj(index,936);
W_128 = W_128+ a *Ghimj(index,937);
W_129 = W_129+ a *Ghimj(index,938);
W_130 = W_130+ a *Ghimj(index,939);
W_131 = W_131+ a *Ghimj(index,940);
W_132 = W_132+ a *Ghimj(index,941);
W_133 = W_133+ a *Ghimj(index,942);
W_134 = W_134+ a *Ghimj(index,943);
W_135 = W_135+ a *Ghimj(index,944);
W_136 = W_136+ a *Ghimj(index,945);
W_137 = W_137+ a *Ghimj(index,946);
W_138 = W_138+ a *Ghimj(index,947);
a = - W_126/ Ghimj(index,1023);
W_126 = -a;
W_127 = W_127+ a *Ghimj(index,1024);
W_128 = W_128+ a *Ghimj(index,1025);
W_129 = W_129+ a *Ghimj(index,1026);
W_130 = W_130+ a *Ghimj(index,1027);
W_131 = W_131+ a *Ghimj(index,1028);
W_132 = W_132+ a *Ghimj(index,1029);
W_133 = W_133+ a *Ghimj(index,1030);
W_134 = W_134+ a *Ghimj(index,1031);
W_135 = W_135+ a *Ghimj(index,1032);
W_136 = W_136+ a *Ghimj(index,1033);
W_137 = W_137+ a *Ghimj(index,1034);
W_138 = W_138+ a *Ghimj(index,1035);
a = - W_127/ Ghimj(index,1071);
W_127 = -a;
W_128 = W_128+ a *Ghimj(index,1072);
W_129 = W_129+ a *Ghimj(index,1073);
W_130 = W_130+ a *Ghimj(index,1074);
W_131 = W_131+ a *Ghimj(index,1075);
W_132 = W_132+ a *Ghimj(index,1076);
W_133 = W_133+ a *Ghimj(index,1077);
W_134 = W_134+ a *Ghimj(index,1078);
W_135 = W_135+ a *Ghimj(index,1079);
W_136 = W_136+ a *Ghimj(index,1080);
W_137 = W_137+ a *Ghimj(index,1081);
W_138 = W_138+ a *Ghimj(index,1082);
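/* Write the eliminated row back into Ghimj entries 1083..1148. */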
Ghimj(index,1083) = W_40;
Ghimj(index,1084) = W_44;
Ghimj(index,1085) = W_45;
Ghimj(index,1086) = W_47;
Ghimj(index,1087) = W_48;
Ghimj(index,1088) = W_49;
Ghimj(index,1089) = W_52;
Ghimj(index,1090) = W_53;
Ghimj(index,1091) = W_54;
Ghimj(index,1092) = W_55;
Ghimj(index,1093) = W_57;
Ghimj(index,1094) = W_61;
Ghimj(index,1095) = W_63;
Ghimj(index,1096) = W_67;
Ghimj(index,1097) = W_70;
Ghimj(index,1098) = W_73;
Ghimj(index,1099) = W_74;
Ghimj(index,1100) = W_75;
Ghimj(index,1101) = W_76;
Ghimj(index,1102) = W_77;
Ghimj(index,1103) = W_78;
Ghimj(index,1104) = W_79;
Ghimj(index,1105) = W_83;
Ghimj(index,1106) = W_84;
Ghimj(index,1107) = W_86;
Ghimj(index,1108) = W_87;
Ghimj(index,1109) = W_88;
Ghimj(index,1110) = W_92;
Ghimj(index,1111) = W_93;
Ghimj(index,1112) = W_97;
Ghimj(index,1113) = W_98;
Ghimj(index,1114) = W_101;
Ghimj(index,1115) = W_102;
Ghimj(index,1116) = W_103;
Ghimj(index,1117) = W_104;
Ghimj(index,1118) = W_105;
Ghimj(index,1119) = W_106;
Ghimj(index,1120) = W_107;
Ghimj(index,1121) = W_110;
Ghimj(index,1122) = W_111;
Ghimj(index,1123) = W_112;
Ghimj(index,1124) = W_114;
Ghimj(index,1125) = W_115;
Ghimj(index,1126) = W_116;
Ghimj(index,1127) = W_117;
Ghimj(index,1128) = W_118;
Ghimj(index,1129) = W_119;
Ghimj(index,1130) = W_120;
Ghimj(index,1131) = W_121;
Ghimj(index,1132) = W_122;
Ghimj(index,1133) = W_123;
Ghimj(index,1134) = W_124;
Ghimj(index,1135) = W_125;
Ghimj(index,1136) = W_126;
Ghimj(index,1137) = W_127;
Ghimj(index,1138) = W_128;
Ghimj(index,1139) = W_129;
Ghimj(index,1140) = W_130;
Ghimj(index,1141) = W_131;
Ghimj(index,1142) = W_132;
Ghimj(index,1143) = W_133;
Ghimj(index,1144) = W_134;
Ghimj(index,1145) = W_135;
Ghimj(index,1146) = W_136;
Ghimj(index,1147) = W_137;
Ghimj(index,1148) = W_138;
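/* Gather the next row (Ghimj entries 1149..1185) into the workspace. */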
W_0 = Ghimj(index,1149);
W_1 = Ghimj(index,1150);
W_2 = Ghimj(index,1151);
W_44 = Ghimj(index,1152);
W_45 = Ghimj(index,1153);
W_52 = Ghimj(index,1154);
W_53 = Ghimj(index,1155);
W_54 = Ghimj(index,1156);
W_55 = Ghimj(index,1157);
W_80 = Ghimj(index,1158);
W_90 = Ghimj(index,1159);
W_100 = Ghimj(index,1160);
W_103 = Ghimj(index,1161);
W_104 = Ghimj(index,1162);
W_105 = Ghimj(index,1163);
W_112 = Ghimj(index,1164);
W_114 = Ghimj(index,1165);
W_116 = Ghimj(index,1166);
W_118 = Ghimj(index,1167);
W_119 = Ghimj(index,1168);
W_121 = Ghimj(index,1169);
W_123 = Ghimj(index,1170);
W_124 = Ghimj(index,1171);
W_125 = Ghimj(index,1172);
W_126 = Ghimj(index,1173);
W_127 = Ghimj(index,1174);
W_128 = Ghimj(index,1175);
W_129 = Ghimj(index,1176);
W_130 = Ghimj(index,1177);
W_131 = Ghimj(index,1178);
W_132 = Ghimj(index,1179);
W_133 = Ghimj(index,1180);
W_134 = Ghimj(index,1181);
W_135 = Ghimj(index,1182);
W_136 = Ghimj(index,1183);
W_137 = Ghimj(index,1184);
W_138 = Ghimj(index,1185);
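/* Eliminate against the previously factored rows. */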
a = - W_0/ Ghimj(index,0);
W_0 = -a;
a = - W_1/ Ghimj(index,1);
W_1 = -a;
a = - W_2/ Ghimj(index,2);
W_2 = -a;
a = - W_44/ Ghimj(index,268);
W_44 = -a;
W_126 = W_126+ a *Ghimj(index,269);
a = - W_45/ Ghimj(index,270);
W_45 = -a;
W_126 = W_126+ a *Ghimj(index,271);
a = - W_52/ Ghimj(index,288);
W_52 = -a;
W_126 = W_126+ a *Ghimj(index,289);
a = - W_53/ Ghimj(index,290);
W_53 = -a;
W_126 = W_126+ a *Ghimj(index,291);
a = - W_54/ Ghimj(index,292);
W_54 = -a;
W_126 = W_126+ a *Ghimj(index,293);
a = - W_55/ Ghimj(index,294);
W_55 = -a;
W_126 = W_126+ a *Ghimj(index,295);
a = - W_80/ Ghimj(index,397);
W_80 = -a;
W_90 = W_90+ a *Ghimj(index,398);
W_112 = W_112+ a *Ghimj(index,399);
W_116 = W_116+ a *Ghimj(index,400);
W_127 = W_127+ a *Ghimj(index,401);
W_129 = W_129+ a *Ghimj(index,402);
W_134 = W_134+ a *Ghimj(index,403);
W_138 = W_138+ a *Ghimj(index,404);
a = - W_90/ Ghimj(index,469);
W_90 = -a;
W_100 = W_100+ a *Ghimj(index,470);
W_105 = W_105+ a *Ghimj(index,471);
W_112 = W_112+ a *Ghimj(index,472);
W_116 = W_116+ a *Ghimj(index,473);
W_118 = W_118+ a *Ghimj(index,474);
W_123 = W_123+ a *Ghimj(index,475);
W_127 = W_127+ a *Ghimj(index,476);
W_129 = W_129+ a *Ghimj(index,477);
W_132 = W_132+ a *Ghimj(index,478);
W_134 = W_134+ a *Ghimj(index,479);
W_138 = W_138+ a *Ghimj(index,480);
a = - W_100/ Ghimj(index,573);
W_100 = -a;
W_105 = W_105+ a *Ghimj(index,574);
W_112 = W_112+ a *Ghimj(index,575);
W_116 = W_116+ a *Ghimj(index,576);
W_118 = W_118+ a *Ghimj(index,577);
W_123 = W_123+ a *Ghimj(index,578);
W_126 = W_126+ a *Ghimj(index,579);
W_127 = W_127+ a *Ghimj(index,580);
W_129 = W_129+ a *Ghimj(index,581);
W_132 = W_132+ a *Ghimj(index,582);
W_134 = W_134+ a *Ghimj(index,583);
W_138 = W_138+ a *Ghimj(index,584);
a = - W_103/ Ghimj(index,605);
W_103 = -a;
W_124 = W_124+ a *Ghimj(index,606);
W_126 = W_126+ a *Ghimj(index,607);
W_127 = W_127+ a *Ghimj(index,608);
W_129 = W_129+ a *Ghimj(index,609);
a = - W_104/ Ghimj(index,610);
W_104 = -a;
W_125 = W_125+ a *Ghimj(index,611);
W_126 = W_126+ a *Ghimj(index,612);
W_127 = W_127+ a *Ghimj(index,613);
W_129 = W_129+ a *Ghimj(index,614);
W_137 = W_137+ a *Ghimj(index,615);
a = - W_105/ Ghimj(index,616);
W_105 = -a;
W_128 = W_128+ a *Ghimj(index,617);
W_129 = W_129+ a *Ghimj(index,618);
W_132 = W_132+ a *Ghimj(index,619);
W_135 = W_135+ a *Ghimj(index,620);
W_138 = W_138+ a *Ghimj(index,621);
a = - W_112/ Ghimj(index,677);
W_112 = -a;
W_116 = W_116+ a *Ghimj(index,678);
W_123 = W_123+ a *Ghimj(index,679);
W_126 = W_126+ a *Ghimj(index,680);
W_128 = W_128+ a *Ghimj(index,681);
W_134 = W_134+ a *Ghimj(index,682);
W_137 = W_137+ a *Ghimj(index,683);
W_138 = W_138+ a *Ghimj(index,684);
a = - W_114/ Ghimj(index,697);
W_114 = -a;
W_126 = W_126+ a *Ghimj(index,698);
W_127 = W_127+ a *Ghimj(index,699);
W_129 = W_129+ a *Ghimj(index,700);
W_132 = W_132+ a *Ghimj(index,701);
W_136 = W_136+ a *Ghimj(index,702);
a = - W_116/ Ghimj(index,714);
W_116 = -a;
W_123 = W_123+ a *Ghimj(index,715);
W_127 = W_127+ a *Ghimj(index,716);
W_128 = W_128+ a *Ghimj(index,717);
W_131 = W_131+ a *Ghimj(index,718);
W_134 = W_134+ a *Ghimj(index,719);
W_135 = W_135+ a *Ghimj(index,720);
W_138 = W_138+ a *Ghimj(index,721);
a = - W_118/ Ghimj(index,745);
W_118 = -a;
W_123 = W_123+ a *Ghimj(index,746);
W_125 = W_125+ a *Ghimj(index,747);
W_126 = W_126+ a *Ghimj(index,748);
W_127 = W_127+ a *Ghimj(index,749);
W_128 = W_128+ a *Ghimj(index,750);
W_129 = W_129+ a *Ghimj(index,751);
W_131 = W_131+ a *Ghimj(index,752);
W_132 = W_132+ a *Ghimj(index,753);
W_134 = W_134+ a *Ghimj(index,754);
W_135 = W_135+ a *Ghimj(index,755);
W_137 = W_137+ a *Ghimj(index,756);
W_138 = W_138+ a *Ghimj(index,757);
a = - W_119/ Ghimj(index,767);
W_119 = -a;
W_121 = W_121+ a *Ghimj(index,768);
W_124 = W_124+ a *Ghimj(index,769);
W_125 = W_125+ a *Ghimj(index,770);
W_126 = W_126+ a *Ghimj(index,771);
W_127 = W_127+ a *Ghimj(index,772);
W_129 = W_129+ a *Ghimj(index,773);
W_133 = W_133+ a *Ghimj(index,774);
W_136 = W_136+ a *Ghimj(index,775);
W_137 = W_137+ a *Ghimj(index,776);
a = - W_121/ Ghimj(index,821);
W_121 = -a;
W_124 = W_124+ a *Ghimj(index,822);
W_125 = W_125+ a *Ghimj(index,823);
W_126 = W_126+ a *Ghimj(index,824);
W_127 = W_127+ a *Ghimj(index,825);
W_129 = W_129+ a *Ghimj(index,826);
W_133 = W_133+ a *Ghimj(index,827);
W_135 = W_135+ a *Ghimj(index,828);
W_136 = W_136+ a *Ghimj(index,829);
W_137 = W_137+ a *Ghimj(index,830);
a = - W_123/ Ghimj(index,869);
W_123 = -a;
W_124 = W_124+ a *Ghimj(index,870);
W_125 = W_125+ a *Ghimj(index,871);
W_126 = W_126+ a *Ghimj(index,872);
W_127 = W_127+ a *Ghimj(index,873);
W_128 = W_128+ a *Ghimj(index,874);
W_129 = W_129+ a *Ghimj(index,875);
W_130 = W_130+ a *Ghimj(index,876);
W_131 = W_131+ a *Ghimj(index,877);
W_132 = W_132+ a *Ghimj(index,878);
W_133 = W_133+ a *Ghimj(index,879);
W_134 = W_134+ a *Ghimj(index,880);
W_135 = W_135+ a *Ghimj(index,881);
W_136 = W_136+ a *Ghimj(index,882);
W_137 = W_137+ a *Ghimj(index,883);
W_138 = W_138+ a *Ghimj(index,884);
a = - W_124/ Ghimj(index,896);
W_124 = -a;
W_125 = W_125+ a *Ghimj(index,897);
W_126 = W_126+ a *Ghimj(index,898);
W_127 = W_127+ a *Ghimj(index,899);
W_128 = W_128+ a *Ghimj(index,900);
W_129 = W_129+ a *Ghimj(index,901);
W_130 = W_130+ a *Ghimj(index,902);
W_131 = W_131+ a *Ghimj(index,903);
W_132 = W_132+ a *Ghimj(index,904);
W_133 = W_133+ a *Ghimj(index,905);
W_135 = W_135+ a *Ghimj(index,906);
W_136 = W_136+ a *Ghimj(index,907);
W_137 = W_137+ a *Ghimj(index,908);
W_138 = W_138+ a *Ghimj(index,909);
a = - W_125/ Ghimj(index,934);
W_125 = -a;
W_126 = W_126+ a *Ghimj(index,935);
W_127 = W_127+ a *Ghimj(index,936);
W_128 = W_128+ a *Ghimj(index,937);
W_129 = W_129+ a *Ghimj(index,938);
W_130 = W_130+ a *Ghimj(index,939);
W_131 = W_131+ a *Ghimj(index,940);
W_132 = W_132+ a *Ghimj(index,941);
W_133 = W_133+ a *Ghimj(index,942);
W_134 = W_134+ a *Ghimj(index,943);
W_135 = W_135+ a *Ghimj(index,944);
W_136 = W_136+ a *Ghimj(index,945);
W_137 = W_137+ a *Ghimj(index,946);
W_138 = W_138+ a *Ghimj(index,947);
a = - W_126/ Ghimj(index,1023);
W_126 = -a;
W_127 = W_127+ a *Ghimj(index,1024);
W_128 = W_128+ a *Ghimj(index,1025);
W_129 = W_129+ a *Ghimj(index,1026);
W_130 = W_130+ a *Ghimj(index,1027);
W_131 = W_131+ a *Ghimj(index,1028);
W_132 = W_132+ a *Ghimj(index,1029);
W_133 = W_133+ a *Ghimj(index,1030);
W_134 = W_134+ a *Ghimj(index,1031);
W_135 = W_135+ a *Ghimj(index,1032);
W_136 = W_136+ a *Ghimj(index,1033);
W_137 = W_137+ a *Ghimj(index,1034);
W_138 = W_138+ a *Ghimj(index,1035);
a = - W_127/ Ghimj(index,1071);
W_127 = -a;
W_128 = W_128+ a *Ghimj(index,1072);
W_129 = W_129+ a *Ghimj(index,1073);
W_130 = W_130+ a *Ghimj(index,1074);
W_131 = W_131+ a *Ghimj(index,1075);
W_132 = W_132+ a *Ghimj(index,1076);
W_133 = W_133+ a *Ghimj(index,1077);
W_134 = W_134+ a *Ghimj(index,1078);
W_135 = W_135+ a *Ghimj(index,1079);
W_136 = W_136+ a *Ghimj(index,1080);
W_137 = W_137+ a *Ghimj(index,1081);
W_138 = W_138+ a *Ghimj(index,1082);
a = - W_128/ Ghimj(index,1138);
W_128 = -a;
W_129 = W_129+ a *Ghimj(index,1139);
W_130 = W_130+ a *Ghimj(index,1140);
W_131 = W_131+ a *Ghimj(index,1141);
W_132 = W_132+ a *Ghimj(index,1142);
W_133 = W_133+ a *Ghimj(index,1143);
W_134 = W_134+ a *Ghimj(index,1144);
W_135 = W_135+ a *Ghimj(index,1145);
W_136 = W_136+ a *Ghimj(index,1146);
W_137 = W_137+ a *Ghimj(index,1147);
W_138 = W_138+ a *Ghimj(index,1148);
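/* Write the workspace back into Ghimj entries 1149..1185. */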
Ghimj(index,1149) = W_0;
Ghimj(index,1150) = W_1;
Ghimj(index,1151) = W_2;
Ghimj(index,1152) = W_44;
Ghimj(index,1153) = W_45;
Ghimj(index,1154) = W_52;
Ghimj(index,1155) = W_53;
Ghimj(index,1156) = W_54;
Ghimj(index,1157) = W_55;
Ghimj(index,1158) = W_80;
Ghimj(index,1159) = W_90;
Ghimj(index,1160) = W_100;
Ghimj(index,1161) = W_103;
Ghimj(index,1162) = W_104;
Ghimj(index,1163) = W_105;
Ghimj(index,1164) = W_112;
Ghimj(index,1165) = W_114;
Ghimj(index,1166) = W_116;
Ghimj(index,1167) = W_118;
Ghimj(index,1168) = W_119;
Ghimj(index,1169) = W_121;
Ghimj(index,1170) = W_123;
Ghimj(index,1171) = W_124;
Ghimj(index,1172) = W_125;
Ghimj(index,1173) = W_126;
Ghimj(index,1174) = W_127;
Ghimj(index,1175) = W_128;
Ghimj(index,1176) = W_129;
Ghimj(index,1177) = W_130;
Ghimj(index,1178) = W_131;
Ghimj(index,1179) = W_132;
Ghimj(index,1180) = W_133;
Ghimj(index,1181) = W_134;
Ghimj(index,1182) = W_135;
Ghimj(index,1183) = W_136;
Ghimj(index,1184) = W_137;
Ghimj(index,1185) = W_138;
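/* Gather the next row (Ghimj entries 1186..1226) into the workspace. */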
W_58 = Ghimj(index,1186);
W_65 = Ghimj(index,1187);
W_66 = Ghimj(index,1188);
W_72 = Ghimj(index,1189);
W_77 = Ghimj(index,1190);
W_82 = Ghimj(index,1191);
W_89 = Ghimj(index,1192);
W_91 = Ghimj(index,1193);
W_93 = Ghimj(index,1194);
W_94 = Ghimj(index,1195);
W_98 = Ghimj(index,1196);
W_102 = Ghimj(index,1197);
W_103 = Ghimj(index,1198);
W_104 = Ghimj(index,1199);
W_106 = Ghimj(index,1200);
W_107 = Ghimj(index,1201);
W_108 = Ghimj(index,1202);
W_109 = Ghimj(index,1203);
W_110 = Ghimj(index,1204);
W_113 = Ghimj(index,1205);
W_114 = Ghimj(index,1206);
W_115 = Ghimj(index,1207);
W_117 = Ghimj(index,1208);
W_120 = Ghimj(index,1209);
W_121 = Ghimj(index,1210);
W_122 = Ghimj(index,1211);
W_124 = Ghimj(index,1212);
W_125 = Ghimj(index,1213);
W_126 = Ghimj(index,1214);
W_127 = Ghimj(index,1215);
W_128 = Ghimj(index,1216);
W_129 = Ghimj(index,1217);
W_130 = Ghimj(index,1218);
W_131 = Ghimj(index,1219);
W_132 = Ghimj(index,1220);
W_133 = Ghimj(index,1221);
W_134 = Ghimj(index,1222);
W_135 = Ghimj(index,1223);
W_136 = Ghimj(index,1224);
W_137 = Ghimj(index,1225);
W_138 = Ghimj(index,1226);
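/* Eliminate this row using the rows factored earlier. */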
a = - W_58/ Ghimj(index,303);
W_58 = -a;
W_91 = W_91+ a *Ghimj(index,304);
W_126 = W_126+ a *Ghimj(index,305);
a = - W_65/ Ghimj(index,331);
W_65 = -a;
W_114 = W_114+ a *Ghimj(index,332);
W_126 = W_126+ a *Ghimj(index,333);
W_132 = W_132+ a *Ghimj(index,334);
a = - W_66/ Ghimj(index,335);
W_66 = -a;
W_109 = W_109+ a *Ghimj(index,336);
W_126 = W_126+ a *Ghimj(index,337);
W_137 = W_137+ a *Ghimj(index,338);
a = - W_72/ Ghimj(index,360);
W_72 = -a;
W_94 = W_94+ a *Ghimj(index,361);
W_126 = W_126+ a *Ghimj(index,362);
W_137 = W_137+ a *Ghimj(index,363);
a = - W_77/ Ghimj(index,382);
W_77 = -a;
W_121 = W_121+ a *Ghimj(index,383);
W_126 = W_126+ a *Ghimj(index,384);
W_135 = W_135+ a *Ghimj(index,385);
a = - W_82/ Ghimj(index,412);
W_82 = -a;
W_113 = W_113+ a *Ghimj(index,413);
W_126 = W_126+ a *Ghimj(index,414);
W_137 = W_137+ a *Ghimj(index,415);
a = - W_89/ Ghimj(index,457);
W_89 = -a;
W_93 = W_93+ a *Ghimj(index,458);
W_94 = W_94+ a *Ghimj(index,459);
W_102 = W_102+ a *Ghimj(index,460);
W_107 = W_107+ a *Ghimj(index,461);
W_109 = W_109+ a *Ghimj(index,462);
W_113 = W_113+ a *Ghimj(index,463);
W_117 = W_117+ a *Ghimj(index,464);
W_124 = W_124+ a *Ghimj(index,465);
W_125 = W_125+ a *Ghimj(index,466);
W_126 = W_126+ a *Ghimj(index,467);
a = - W_91/ Ghimj(index,481);
W_91 = -a;
W_106 = W_106+ a *Ghimj(index,482);
W_109 = W_109+ a *Ghimj(index,483);
W_126 = W_126+ a *Ghimj(index,484);
W_133 = W_133+ a *Ghimj(index,485);
W_136 = W_136+ a *Ghimj(index,486);
a = - W_93/ Ghimj(index,497);
W_93 = -a;
W_125 = W_125+ a *Ghimj(index,498);
W_126 = W_126+ a *Ghimj(index,499);
W_133 = W_133+ a *Ghimj(index,500);
W_137 = W_137+ a *Ghimj(index,501);
a = - W_94/ Ghimj(index,505);
W_94 = -a;
W_125 = W_125+ a *Ghimj(index,506);
W_126 = W_126+ a *Ghimj(index,507);
W_133 = W_133+ a *Ghimj(index,508);
W_137 = W_137+ a *Ghimj(index,509);
a = - W_98/ Ghimj(index,557);
W_98 = -a;
W_107 = W_107+ a *Ghimj(index,558);
W_120 = W_120+ a *Ghimj(index,559);
W_124 = W_124+ a *Ghimj(index,560);
W_126 = W_126+ a *Ghimj(index,561);
W_127 = W_127+ a *Ghimj(index,562);
a = - W_102/ Ghimj(index,600);
W_102 = -a;
W_125 = W_125+ a *Ghimj(index,601);
W_126 = W_126+ a *Ghimj(index,602);
W_133 = W_133+ a *Ghimj(index,603);
W_137 = W_137+ a *Ghimj(index,604);
a = - W_103/ Ghimj(index,605);
W_103 = -a;
W_124 = W_124+ a *Ghimj(index,606);
W_126 = W_126+ a *Ghimj(index,607);
W_127 = W_127+ a *Ghimj(index,608);
W_129 = W_129+ a *Ghimj(index,609);
a = - W_104/ Ghimj(index,610);
W_104 = -a;
W_125 = W_125+ a *Ghimj(index,611);
W_126 = W_126+ a *Ghimj(index,612);
W_127 = W_127+ a *Ghimj(index,613);
W_129 = W_129+ a *Ghimj(index,614);
W_137 = W_137+ a *Ghimj(index,615);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
a = - W_108/ Ghimj(index,636);
W_108 = -a;
W_109 = W_109+ a *Ghimj(index,637);
W_113 = W_113+ a *Ghimj(index,638);
W_115 = W_115+ a *Ghimj(index,639);
W_124 = W_124+ a *Ghimj(index,640);
W_125 = W_125+ a *Ghimj(index,641);
W_126 = W_126+ a *Ghimj(index,642);
W_133 = W_133+ a *Ghimj(index,643);
W_135 = W_135+ a *Ghimj(index,644);
W_136 = W_136+ a *Ghimj(index,645);
W_137 = W_137+ a *Ghimj(index,646);
a = - W_109/ Ghimj(index,648);
W_109 = -a;
W_124 = W_124+ a *Ghimj(index,649);
W_125 = W_125+ a *Ghimj(index,650);
W_126 = W_126+ a *Ghimj(index,651);
W_133 = W_133+ a *Ghimj(index,652);
W_136 = W_136+ a *Ghimj(index,653);
W_137 = W_137+ a *Ghimj(index,654);
a = - W_110/ Ghimj(index,659);
W_110 = -a;
W_124 = W_124+ a *Ghimj(index,660);
W_125 = W_125+ a *Ghimj(index,661);
W_126 = W_126+ a *Ghimj(index,662);
W_133 = W_133+ a *Ghimj(index,663);
W_136 = W_136+ a *Ghimj(index,664);
W_137 = W_137+ a *Ghimj(index,665);
a = - W_113/ Ghimj(index,689);
W_113 = -a;
W_124 = W_124+ a *Ghimj(index,690);
W_125 = W_125+ a *Ghimj(index,691);
W_126 = W_126+ a *Ghimj(index,692);
W_133 = W_133+ a *Ghimj(index,693);
W_135 = W_135+ a *Ghimj(index,694);
W_136 = W_136+ a *Ghimj(index,695);
W_137 = W_137+ a *Ghimj(index,696);
a = - W_114/ Ghimj(index,697);
W_114 = -a;
W_126 = W_126+ a *Ghimj(index,698);
W_127 = W_127+ a *Ghimj(index,699);
W_129 = W_129+ a *Ghimj(index,700);
W_132 = W_132+ a *Ghimj(index,701);
W_136 = W_136+ a *Ghimj(index,702);
a = - W_115/ Ghimj(index,706);
W_115 = -a;
W_124 = W_124+ a *Ghimj(index,707);
W_126 = W_126+ a *Ghimj(index,708);
W_127 = W_127+ a *Ghimj(index,709);
W_129 = W_129+ a *Ghimj(index,710);
W_133 = W_133+ a *Ghimj(index,711);
W_136 = W_136+ a *Ghimj(index,712);
W_137 = W_137+ a *Ghimj(index,713);
a = - W_117/ Ghimj(index,731);
W_117 = -a;
W_121 = W_121+ a *Ghimj(index,732);
W_124 = W_124+ a *Ghimj(index,733);
W_125 = W_125+ a *Ghimj(index,734);
W_126 = W_126+ a *Ghimj(index,735);
W_127 = W_127+ a *Ghimj(index,736);
W_129 = W_129+ a *Ghimj(index,737);
W_133 = W_133+ a *Ghimj(index,738);
W_136 = W_136+ a *Ghimj(index,739);
W_137 = W_137+ a *Ghimj(index,740);
a = - W_120/ Ghimj(index,787);
W_120 = -a;
W_122 = W_122+ a *Ghimj(index,788);
W_124 = W_124+ a *Ghimj(index,789);
W_126 = W_126+ a *Ghimj(index,790);
W_127 = W_127+ a *Ghimj(index,791);
W_128 = W_128+ a *Ghimj(index,792);
W_130 = W_130+ a *Ghimj(index,793);
W_133 = W_133+ a *Ghimj(index,794);
W_135 = W_135+ a *Ghimj(index,795);
W_136 = W_136+ a *Ghimj(index,796);
W_137 = W_137+ a *Ghimj(index,797);
a = - W_121/ Ghimj(index,821);
W_121 = -a;
W_124 = W_124+ a *Ghimj(index,822);
W_125 = W_125+ a *Ghimj(index,823);
W_126 = W_126+ a *Ghimj(index,824);
W_127 = W_127+ a *Ghimj(index,825);
W_129 = W_129+ a *Ghimj(index,826);
W_133 = W_133+ a *Ghimj(index,827);
W_135 = W_135+ a *Ghimj(index,828);
W_136 = W_136+ a *Ghimj(index,829);
W_137 = W_137+ a *Ghimj(index,830);
a = - W_122/ Ghimj(index,847);
W_122 = -a;
W_124 = W_124+ a *Ghimj(index,848);
W_125 = W_125+ a *Ghimj(index,849);
W_126 = W_126+ a *Ghimj(index,850);
W_127 = W_127+ a *Ghimj(index,851);
W_128 = W_128+ a *Ghimj(index,852);
W_129 = W_129+ a *Ghimj(index,853);
W_130 = W_130+ a *Ghimj(index,854);
W_131 = W_131+ a *Ghimj(index,855);
W_133 = W_133+ a *Ghimj(index,856);
W_135 = W_135+ a *Ghimj(index,857);
W_136 = W_136+ a *Ghimj(index,858);
W_137 = W_137+ a *Ghimj(index,859);
W_138 = W_138+ a *Ghimj(index,860);
a = - W_124/ Ghimj(index,896);
W_124 = -a;
W_125 = W_125+ a *Ghimj(index,897);
W_126 = W_126+ a *Ghimj(index,898);
W_127 = W_127+ a *Ghimj(index,899);
W_128 = W_128+ a *Ghimj(index,900);
W_129 = W_129+ a *Ghimj(index,901);
W_130 = W_130+ a *Ghimj(index,902);
W_131 = W_131+ a *Ghimj(index,903);
W_132 = W_132+ a *Ghimj(index,904);
W_133 = W_133+ a *Ghimj(index,905);
W_135 = W_135+ a *Ghimj(index,906);
W_136 = W_136+ a *Ghimj(index,907);
W_137 = W_137+ a *Ghimj(index,908);
W_138 = W_138+ a *Ghimj(index,909);
a = - W_125/ Ghimj(index,934);
W_125 = -a;
W_126 = W_126+ a *Ghimj(index,935);
W_127 = W_127+ a *Ghimj(index,936);
W_128 = W_128+ a *Ghimj(index,937);
W_129 = W_129+ a *Ghimj(index,938);
W_130 = W_130+ a *Ghimj(index,939);
W_131 = W_131+ a *Ghimj(index,940);
W_132 = W_132+ a *Ghimj(index,941);
W_133 = W_133+ a *Ghimj(index,942);
W_134 = W_134+ a *Ghimj(index,943);
W_135 = W_135+ a *Ghimj(index,944);
W_136 = W_136+ a *Ghimj(index,945);
W_137 = W_137+ a *Ghimj(index,946);
W_138 = W_138+ a *Ghimj(index,947);
a = - W_126/ Ghimj(index,1023);
W_126 = -a;
W_127 = W_127+ a *Ghimj(index,1024);
W_128 = W_128+ a *Ghimj(index,1025);
W_129 = W_129+ a *Ghimj(index,1026);
W_130 = W_130+ a *Ghimj(index,1027);
W_131 = W_131+ a *Ghimj(index,1028);
W_132 = W_132+ a *Ghimj(index,1029);
W_133 = W_133+ a *Ghimj(index,1030);
W_134 = W_134+ a *Ghimj(index,1031);
W_135 = W_135+ a *Ghimj(index,1032);
W_136 = W_136+ a *Ghimj(index,1033);
W_137 = W_137+ a *Ghimj(index,1034);
W_138 = W_138+ a *Ghimj(index,1035);
a = - W_127/ Ghimj(index,1071);
W_127 = -a;
W_128 = W_128+ a *Ghimj(index,1072);
W_129 = W_129+ a *Ghimj(index,1073);
W_130 = W_130+ a *Ghimj(index,1074);
W_131 = W_131+ a *Ghimj(index,1075);
W_132 = W_132+ a *Ghimj(index,1076);
W_133 = W_133+ a *Ghimj(index,1077);
W_134 = W_134+ a *Ghimj(index,1078);
W_135 = W_135+ a *Ghimj(index,1079);
W_136 = W_136+ a *Ghimj(index,1080);
W_137 = W_137+ a *Ghimj(index,1081);
W_138 = W_138+ a *Ghimj(index,1082);
a = - W_128/ Ghimj(index,1138);
W_128 = -a;
W_129 = W_129+ a *Ghimj(index,1139);
W_130 = W_130+ a *Ghimj(index,1140);
W_131 = W_131+ a *Ghimj(index,1141);
W_132 = W_132+ a *Ghimj(index,1142);
W_133 = W_133+ a *Ghimj(index,1143);
W_134 = W_134+ a *Ghimj(index,1144);
W_135 = W_135+ a *Ghimj(index,1145);
W_136 = W_136+ a *Ghimj(index,1146);
W_137 = W_137+ a *Ghimj(index,1147);
W_138 = W_138+ a *Ghimj(index,1148);
a = - W_129/ Ghimj(index,1176);
W_129 = -a;
W_130 = W_130+ a *Ghimj(index,1177);
W_131 = W_131+ a *Ghimj(index,1178);
W_132 = W_132+ a *Ghimj(index,1179);
W_133 = W_133+ a *Ghimj(index,1180);
W_134 = W_134+ a *Ghimj(index,1181);
W_135 = W_135+ a *Ghimj(index,1182);
W_136 = W_136+ a *Ghimj(index,1183);
W_137 = W_137+ a *Ghimj(index,1184);
W_138 = W_138+ a *Ghimj(index,1185);
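/* Store the updated workspace back into the factored row held at Ghimj(index,1186..1226).
   Each block in this section repeats the same unrolled load/eliminate/store pattern of
   what appears to be a machine-generated sparse LU (Gaussian elimination) of the Jacobian:
   load a row's nonzeros into the W_* scalars, subtract multiples of the already-factored
   pivot rows, then write the result back. */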
Ghimj(index,1186) = W_58;
Ghimj(index,1187) = W_65;
Ghimj(index,1188) = W_66;
Ghimj(index,1189) = W_72;
Ghimj(index,1190) = W_77;
Ghimj(index,1191) = W_82;
Ghimj(index,1192) = W_89;
Ghimj(index,1193) = W_91;
Ghimj(index,1194) = W_93;
Ghimj(index,1195) = W_94;
Ghimj(index,1196) = W_98;
Ghimj(index,1197) = W_102;
Ghimj(index,1198) = W_103;
Ghimj(index,1199) = W_104;
Ghimj(index,1200) = W_106;
Ghimj(index,1201) = W_107;
Ghimj(index,1202) = W_108;
Ghimj(index,1203) = W_109;
Ghimj(index,1204) = W_110;
Ghimj(index,1205) = W_113;
Ghimj(index,1206) = W_114;
Ghimj(index,1207) = W_115;
Ghimj(index,1208) = W_117;
Ghimj(index,1209) = W_120;
Ghimj(index,1210) = W_121;
Ghimj(index,1211) = W_122;
Ghimj(index,1212) = W_124;
Ghimj(index,1213) = W_125;
Ghimj(index,1214) = W_126;
Ghimj(index,1215) = W_127;
Ghimj(index,1216) = W_128;
Ghimj(index,1217) = W_129;
Ghimj(index,1218) = W_130;
Ghimj(index,1219) = W_131;
Ghimj(index,1220) = W_132;
Ghimj(index,1221) = W_133;
Ghimj(index,1222) = W_134;
Ghimj(index,1223) = W_135;
Ghimj(index,1224) = W_136;
Ghimj(index,1225) = W_137;
Ghimj(index,1226) = W_138;
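/* Next row, stored at Ghimj(index,1227..1249): load its sparse entries into the W_*
   workspace before eliminating against the earlier pivot rows. */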
W_51 = Ghimj(index,1227);
W_59 = Ghimj(index,1228);
W_75 = Ghimj(index,1229);
W_116 = Ghimj(index,1230);
W_118 = Ghimj(index,1231);
W_120 = Ghimj(index,1232);
W_122 = Ghimj(index,1233);
W_123 = Ghimj(index,1234);
W_124 = Ghimj(index,1235);
W_125 = Ghimj(index,1236);
W_126 = Ghimj(index,1237);
W_127 = Ghimj(index,1238);
W_128 = Ghimj(index,1239);
W_129 = Ghimj(index,1240);
W_130 = Ghimj(index,1241);
W_131 = Ghimj(index,1242);
W_132 = Ghimj(index,1243);
W_133 = Ghimj(index,1244);
W_134 = Ghimj(index,1245);
W_135 = Ghimj(index,1246);
W_136 = Ghimj(index,1247);
W_137 = Ghimj(index,1248);
W_138 = Ghimj(index,1249);
a = - W_51/ Ghimj(index,285);
W_51 = -a;
W_132 = W_132+ a *Ghimj(index,286);
W_134 = W_134+ a *Ghimj(index,287);
a = - W_59/ Ghimj(index,306);
W_59 = -a;
W_133 = W_133+ a *Ghimj(index,307);
W_135 = W_135+ a *Ghimj(index,308);
a = - W_75/ Ghimj(index,374);
W_75 = -a;
W_120 = W_120+ a *Ghimj(index,375);
W_126 = W_126+ a *Ghimj(index,376);
a = - W_116/ Ghimj(index,714);
W_116 = -a;
W_123 = W_123+ a *Ghimj(index,715);
W_127 = W_127+ a *Ghimj(index,716);
W_128 = W_128+ a *Ghimj(index,717);
W_131 = W_131+ a *Ghimj(index,718);
W_134 = W_134+ a *Ghimj(index,719);
W_135 = W_135+ a *Ghimj(index,720);
W_138 = W_138+ a *Ghimj(index,721);
a = - W_118/ Ghimj(index,745);
W_118 = -a;
W_123 = W_123+ a *Ghimj(index,746);
W_125 = W_125+ a *Ghimj(index,747);
W_126 = W_126+ a *Ghimj(index,748);
W_127 = W_127+ a *Ghimj(index,749);
W_128 = W_128+ a *Ghimj(index,750);
W_129 = W_129+ a *Ghimj(index,751);
W_131 = W_131+ a *Ghimj(index,752);
W_132 = W_132+ a *Ghimj(index,753);
W_134 = W_134+ a *Ghimj(index,754);
W_135 = W_135+ a *Ghimj(index,755);
W_137 = W_137+ a *Ghimj(index,756);
W_138 = W_138+ a *Ghimj(index,757);
a = - W_120/ Ghimj(index,787);
W_120 = -a;
W_122 = W_122+ a *Ghimj(index,788);
W_124 = W_124+ a *Ghimj(index,789);
W_126 = W_126+ a *Ghimj(index,790);
W_127 = W_127+ a *Ghimj(index,791);
W_128 = W_128+ a *Ghimj(index,792);
W_130 = W_130+ a *Ghimj(index,793);
W_133 = W_133+ a *Ghimj(index,794);
W_135 = W_135+ a *Ghimj(index,795);
W_136 = W_136+ a *Ghimj(index,796);
W_137 = W_137+ a *Ghimj(index,797);
a = - W_122/ Ghimj(index,847);
W_122 = -a;
W_124 = W_124+ a *Ghimj(index,848);
W_125 = W_125+ a *Ghimj(index,849);
W_126 = W_126+ a *Ghimj(index,850);
W_127 = W_127+ a *Ghimj(index,851);
W_128 = W_128+ a *Ghimj(index,852);
W_129 = W_129+ a *Ghimj(index,853);
W_130 = W_130+ a *Ghimj(index,854);
W_131 = W_131+ a *Ghimj(index,855);
W_133 = W_133+ a *Ghimj(index,856);
W_135 = W_135+ a *Ghimj(index,857);
W_136 = W_136+ a *Ghimj(index,858);
W_137 = W_137+ a *Ghimj(index,859);
W_138 = W_138+ a *Ghimj(index,860);
a = - W_123/ Ghimj(index,869);
W_123 = -a;
W_124 = W_124+ a *Ghimj(index,870);
W_125 = W_125+ a *Ghimj(index,871);
W_126 = W_126+ a *Ghimj(index,872);
W_127 = W_127+ a *Ghimj(index,873);
W_128 = W_128+ a *Ghimj(index,874);
W_129 = W_129+ a *Ghimj(index,875);
W_130 = W_130+ a *Ghimj(index,876);
W_131 = W_131+ a *Ghimj(index,877);
W_132 = W_132+ a *Ghimj(index,878);
W_133 = W_133+ a *Ghimj(index,879);
W_134 = W_134+ a *Ghimj(index,880);
W_135 = W_135+ a *Ghimj(index,881);
W_136 = W_136+ a *Ghimj(index,882);
W_137 = W_137+ a *Ghimj(index,883);
W_138 = W_138+ a *Ghimj(index,884);
a = - W_124/ Ghimj(index,896);
W_124 = -a;
W_125 = W_125+ a *Ghimj(index,897);
W_126 = W_126+ a *Ghimj(index,898);
W_127 = W_127+ a *Ghimj(index,899);
W_128 = W_128+ a *Ghimj(index,900);
W_129 = W_129+ a *Ghimj(index,901);
W_130 = W_130+ a *Ghimj(index,902);
W_131 = W_131+ a *Ghimj(index,903);
W_132 = W_132+ a *Ghimj(index,904);
W_133 = W_133+ a *Ghimj(index,905);
W_135 = W_135+ a *Ghimj(index,906);
W_136 = W_136+ a *Ghimj(index,907);
W_137 = W_137+ a *Ghimj(index,908);
W_138 = W_138+ a *Ghimj(index,909);
a = - W_125/ Ghimj(index,934);
W_125 = -a;
W_126 = W_126+ a *Ghimj(index,935);
W_127 = W_127+ a *Ghimj(index,936);
W_128 = W_128+ a *Ghimj(index,937);
W_129 = W_129+ a *Ghimj(index,938);
W_130 = W_130+ a *Ghimj(index,939);
W_131 = W_131+ a *Ghimj(index,940);
W_132 = W_132+ a *Ghimj(index,941);
W_133 = W_133+ a *Ghimj(index,942);
W_134 = W_134+ a *Ghimj(index,943);
W_135 = W_135+ a *Ghimj(index,944);
W_136 = W_136+ a *Ghimj(index,945);
W_137 = W_137+ a *Ghimj(index,946);
W_138 = W_138+ a *Ghimj(index,947);
a = - W_126/ Ghimj(index,1023);
W_126 = -a;
W_127 = W_127+ a *Ghimj(index,1024);
W_128 = W_128+ a *Ghimj(index,1025);
W_129 = W_129+ a *Ghimj(index,1026);
W_130 = W_130+ a *Ghimj(index,1027);
W_131 = W_131+ a *Ghimj(index,1028);
W_132 = W_132+ a *Ghimj(index,1029);
W_133 = W_133+ a *Ghimj(index,1030);
W_134 = W_134+ a *Ghimj(index,1031);
W_135 = W_135+ a *Ghimj(index,1032);
W_136 = W_136+ a *Ghimj(index,1033);
W_137 = W_137+ a *Ghimj(index,1034);
W_138 = W_138+ a *Ghimj(index,1035);
a = - W_127/ Ghimj(index,1071);
W_127 = -a;
W_128 = W_128+ a *Ghimj(index,1072);
W_129 = W_129+ a *Ghimj(index,1073);
W_130 = W_130+ a *Ghimj(index,1074);
W_131 = W_131+ a *Ghimj(index,1075);
W_132 = W_132+ a *Ghimj(index,1076);
W_133 = W_133+ a *Ghimj(index,1077);
W_134 = W_134+ a *Ghimj(index,1078);
W_135 = W_135+ a *Ghimj(index,1079);
W_136 = W_136+ a *Ghimj(index,1080);
W_137 = W_137+ a *Ghimj(index,1081);
W_138 = W_138+ a *Ghimj(index,1082);
a = - W_128/ Ghimj(index,1138);
W_128 = -a;
W_129 = W_129+ a *Ghimj(index,1139);
W_130 = W_130+ a *Ghimj(index,1140);
W_131 = W_131+ a *Ghimj(index,1141);
W_132 = W_132+ a *Ghimj(index,1142);
W_133 = W_133+ a *Ghimj(index,1143);
W_134 = W_134+ a *Ghimj(index,1144);
W_135 = W_135+ a *Ghimj(index,1145);
W_136 = W_136+ a *Ghimj(index,1146);
W_137 = W_137+ a *Ghimj(index,1147);
W_138 = W_138+ a *Ghimj(index,1148);
a = - W_129/ Ghimj(index,1176);
W_129 = -a;
W_130 = W_130+ a *Ghimj(index,1177);
W_131 = W_131+ a *Ghimj(index,1178);
W_132 = W_132+ a *Ghimj(index,1179);
W_133 = W_133+ a *Ghimj(index,1180);
W_134 = W_134+ a *Ghimj(index,1181);
W_135 = W_135+ a *Ghimj(index,1182);
W_136 = W_136+ a *Ghimj(index,1183);
W_137 = W_137+ a *Ghimj(index,1184);
W_138 = W_138+ a *Ghimj(index,1185);
a = - W_130/ Ghimj(index,1218);
W_130 = -a;
W_131 = W_131+ a *Ghimj(index,1219);
W_132 = W_132+ a *Ghimj(index,1220);
W_133 = W_133+ a *Ghimj(index,1221);
W_134 = W_134+ a *Ghimj(index,1222);
W_135 = W_135+ a *Ghimj(index,1223);
W_136 = W_136+ a *Ghimj(index,1224);
W_137 = W_137+ a *Ghimj(index,1225);
W_138 = W_138+ a *Ghimj(index,1226);
Ghimj(index,1227) = W_51;
Ghimj(index,1228) = W_59;
Ghimj(index,1229) = W_75;
Ghimj(index,1230) = W_116;
Ghimj(index,1231) = W_118;
Ghimj(index,1232) = W_120;
Ghimj(index,1233) = W_122;
Ghimj(index,1234) = W_123;
Ghimj(index,1235) = W_124;
Ghimj(index,1236) = W_125;
Ghimj(index,1237) = W_126;
Ghimj(index,1238) = W_127;
Ghimj(index,1239) = W_128;
Ghimj(index,1240) = W_129;
Ghimj(index,1241) = W_130;
Ghimj(index,1242) = W_131;
Ghimj(index,1243) = W_132;
Ghimj(index,1244) = W_133;
Ghimj(index,1245) = W_134;
Ghimj(index,1246) = W_135;
Ghimj(index,1247) = W_136;
Ghimj(index,1248) = W_137;
Ghimj(index,1249) = W_138;
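/* Same pattern for the row held at Ghimj(index,1250..1268). */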
W_105 = Ghimj(index,1250);
W_114 = Ghimj(index,1251);
W_118 = Ghimj(index,1252);
W_123 = Ghimj(index,1253);
W_124 = Ghimj(index,1254);
W_125 = Ghimj(index,1255);
W_126 = Ghimj(index,1256);
W_127 = Ghimj(index,1257);
W_128 = Ghimj(index,1258);
W_129 = Ghimj(index,1259);
W_130 = Ghimj(index,1260);
W_131 = Ghimj(index,1261);
W_132 = Ghimj(index,1262);
W_133 = Ghimj(index,1263);
W_134 = Ghimj(index,1264);
W_135 = Ghimj(index,1265);
W_136 = Ghimj(index,1266);
W_137 = Ghimj(index,1267);
W_138 = Ghimj(index,1268);
a = - W_105/ Ghimj(index,616);
W_105 = -a;
W_128 = W_128+ a *Ghimj(index,617);
W_129 = W_129+ a *Ghimj(index,618);
W_132 = W_132+ a *Ghimj(index,619);
W_135 = W_135+ a *Ghimj(index,620);
W_138 = W_138+ a *Ghimj(index,621);
a = - W_114/ Ghimj(index,697);
W_114 = -a;
W_126 = W_126+ a *Ghimj(index,698);
W_127 = W_127+ a *Ghimj(index,699);
W_129 = W_129+ a *Ghimj(index,700);
W_132 = W_132+ a *Ghimj(index,701);
W_136 = W_136+ a *Ghimj(index,702);
a = - W_118/ Ghimj(index,745);
W_118 = -a;
W_123 = W_123+ a *Ghimj(index,746);
W_125 = W_125+ a *Ghimj(index,747);
W_126 = W_126+ a *Ghimj(index,748);
W_127 = W_127+ a *Ghimj(index,749);
W_128 = W_128+ a *Ghimj(index,750);
W_129 = W_129+ a *Ghimj(index,751);
W_131 = W_131+ a *Ghimj(index,752);
W_132 = W_132+ a *Ghimj(index,753);
W_134 = W_134+ a *Ghimj(index,754);
W_135 = W_135+ a *Ghimj(index,755);
W_137 = W_137+ a *Ghimj(index,756);
W_138 = W_138+ a *Ghimj(index,757);
a = - W_123/ Ghimj(index,869);
W_123 = -a;
W_124 = W_124+ a *Ghimj(index,870);
W_125 = W_125+ a *Ghimj(index,871);
W_126 = W_126+ a *Ghimj(index,872);
W_127 = W_127+ a *Ghimj(index,873);
W_128 = W_128+ a *Ghimj(index,874);
W_129 = W_129+ a *Ghimj(index,875);
W_130 = W_130+ a *Ghimj(index,876);
W_131 = W_131+ a *Ghimj(index,877);
W_132 = W_132+ a *Ghimj(index,878);
W_133 = W_133+ a *Ghimj(index,879);
W_134 = W_134+ a *Ghimj(index,880);
W_135 = W_135+ a *Ghimj(index,881);
W_136 = W_136+ a *Ghimj(index,882);
W_137 = W_137+ a *Ghimj(index,883);
W_138 = W_138+ a *Ghimj(index,884);
a = - W_124/ Ghimj(index,896);
W_124 = -a;
W_125 = W_125+ a *Ghimj(index,897);
W_126 = W_126+ a *Ghimj(index,898);
W_127 = W_127+ a *Ghimj(index,899);
W_128 = W_128+ a *Ghimj(index,900);
W_129 = W_129+ a *Ghimj(index,901);
W_130 = W_130+ a *Ghimj(index,902);
W_131 = W_131+ a *Ghimj(index,903);
W_132 = W_132+ a *Ghimj(index,904);
W_133 = W_133+ a *Ghimj(index,905);
W_135 = W_135+ a *Ghimj(index,906);
W_136 = W_136+ a *Ghimj(index,907);
W_137 = W_137+ a *Ghimj(index,908);
W_138 = W_138+ a *Ghimj(index,909);
a = - W_125/ Ghimj(index,934);
W_125 = -a;
W_126 = W_126+ a *Ghimj(index,935);
W_127 = W_127+ a *Ghimj(index,936);
W_128 = W_128+ a *Ghimj(index,937);
W_129 = W_129+ a *Ghimj(index,938);
W_130 = W_130+ a *Ghimj(index,939);
W_131 = W_131+ a *Ghimj(index,940);
W_132 = W_132+ a *Ghimj(index,941);
W_133 = W_133+ a *Ghimj(index,942);
W_134 = W_134+ a *Ghimj(index,943);
W_135 = W_135+ a *Ghimj(index,944);
W_136 = W_136+ a *Ghimj(index,945);
W_137 = W_137+ a *Ghimj(index,946);
W_138 = W_138+ a *Ghimj(index,947);
a = - W_126/ Ghimj(index,1023);
W_126 = -a;
W_127 = W_127+ a *Ghimj(index,1024);
W_128 = W_128+ a *Ghimj(index,1025);
W_129 = W_129+ a *Ghimj(index,1026);
W_130 = W_130+ a *Ghimj(index,1027);
W_131 = W_131+ a *Ghimj(index,1028);
W_132 = W_132+ a *Ghimj(index,1029);
W_133 = W_133+ a *Ghimj(index,1030);
W_134 = W_134+ a *Ghimj(index,1031);
W_135 = W_135+ a *Ghimj(index,1032);
W_136 = W_136+ a *Ghimj(index,1033);
W_137 = W_137+ a *Ghimj(index,1034);
W_138 = W_138+ a *Ghimj(index,1035);
a = - W_127/ Ghimj(index,1071);
W_127 = -a;
W_128 = W_128+ a *Ghimj(index,1072);
W_129 = W_129+ a *Ghimj(index,1073);
W_130 = W_130+ a *Ghimj(index,1074);
W_131 = W_131+ a *Ghimj(index,1075);
W_132 = W_132+ a *Ghimj(index,1076);
W_133 = W_133+ a *Ghimj(index,1077);
W_134 = W_134+ a *Ghimj(index,1078);
W_135 = W_135+ a *Ghimj(index,1079);
W_136 = W_136+ a *Ghimj(index,1080);
W_137 = W_137+ a *Ghimj(index,1081);
W_138 = W_138+ a *Ghimj(index,1082);
a = - W_128/ Ghimj(index,1138);
W_128 = -a;
W_129 = W_129+ a *Ghimj(index,1139);
W_130 = W_130+ a *Ghimj(index,1140);
W_131 = W_131+ a *Ghimj(index,1141);
W_132 = W_132+ a *Ghimj(index,1142);
W_133 = W_133+ a *Ghimj(index,1143);
W_134 = W_134+ a *Ghimj(index,1144);
W_135 = W_135+ a *Ghimj(index,1145);
W_136 = W_136+ a *Ghimj(index,1146);
W_137 = W_137+ a *Ghimj(index,1147);
W_138 = W_138+ a *Ghimj(index,1148);
a = - W_129/ Ghimj(index,1176);
W_129 = -a;
W_130 = W_130+ a *Ghimj(index,1177);
W_131 = W_131+ a *Ghimj(index,1178);
W_132 = W_132+ a *Ghimj(index,1179);
W_133 = W_133+ a *Ghimj(index,1180);
W_134 = W_134+ a *Ghimj(index,1181);
W_135 = W_135+ a *Ghimj(index,1182);
W_136 = W_136+ a *Ghimj(index,1183);
W_137 = W_137+ a *Ghimj(index,1184);
W_138 = W_138+ a *Ghimj(index,1185);
a = - W_130/ Ghimj(index,1218);
W_130 = -a;
W_131 = W_131+ a *Ghimj(index,1219);
W_132 = W_132+ a *Ghimj(index,1220);
W_133 = W_133+ a *Ghimj(index,1221);
W_134 = W_134+ a *Ghimj(index,1222);
W_135 = W_135+ a *Ghimj(index,1223);
W_136 = W_136+ a *Ghimj(index,1224);
W_137 = W_137+ a *Ghimj(index,1225);
W_138 = W_138+ a *Ghimj(index,1226);
a = - W_131/ Ghimj(index,1242);
W_131 = -a;
W_132 = W_132+ a *Ghimj(index,1243);
W_133 = W_133+ a *Ghimj(index,1244);
W_134 = W_134+ a *Ghimj(index,1245);
W_135 = W_135+ a *Ghimj(index,1246);
W_136 = W_136+ a *Ghimj(index,1247);
W_137 = W_137+ a *Ghimj(index,1248);
W_138 = W_138+ a *Ghimj(index,1249);
Ghimj(index,1250) = W_105;
Ghimj(index,1251) = W_114;
Ghimj(index,1252) = W_118;
Ghimj(index,1253) = W_123;
Ghimj(index,1254) = W_124;
Ghimj(index,1255) = W_125;
Ghimj(index,1256) = W_126;
Ghimj(index,1257) = W_127;
Ghimj(index,1258) = W_128;
Ghimj(index,1259) = W_129;
Ghimj(index,1260) = W_130;
Ghimj(index,1261) = W_131;
Ghimj(index,1262) = W_132;
Ghimj(index,1263) = W_133;
Ghimj(index,1264) = W_134;
Ghimj(index,1265) = W_135;
Ghimj(index,1266) = W_136;
Ghimj(index,1267) = W_137;
Ghimj(index,1268) = W_138;
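/* Row held at Ghimj(index,1269..1302). */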
W_59 = Ghimj(index,1269);
W_60 = Ghimj(index,1270);
W_70 = Ghimj(index,1271);
W_76 = Ghimj(index,1272);
W_84 = Ghimj(index,1273);
W_87 = Ghimj(index,1274);
W_92 = Ghimj(index,1275);
W_93 = Ghimj(index,1276);
W_94 = Ghimj(index,1277);
W_99 = Ghimj(index,1278);
W_102 = Ghimj(index,1279);
W_109 = Ghimj(index,1280);
W_111 = Ghimj(index,1281);
W_113 = Ghimj(index,1282);
W_115 = Ghimj(index,1283);
W_117 = Ghimj(index,1284);
W_120 = Ghimj(index,1285);
W_121 = Ghimj(index,1286);
W_122 = Ghimj(index,1287);
W_124 = Ghimj(index,1288);
W_125 = Ghimj(index,1289);
W_126 = Ghimj(index,1290);
W_127 = Ghimj(index,1291);
W_128 = Ghimj(index,1292);
W_129 = Ghimj(index,1293);
W_130 = Ghimj(index,1294);
W_131 = Ghimj(index,1295);
W_132 = Ghimj(index,1296);
W_133 = Ghimj(index,1297);
W_134 = Ghimj(index,1298);
W_135 = Ghimj(index,1299);
W_136 = Ghimj(index,1300);
W_137 = Ghimj(index,1301);
W_138 = Ghimj(index,1302);
a = - W_59/ Ghimj(index,306);
W_59 = -a;
W_133 = W_133+ a *Ghimj(index,307);
W_135 = W_135+ a *Ghimj(index,308);
a = - W_60/ Ghimj(index,310);
W_60 = -a;
W_92 = W_92+ a *Ghimj(index,311);
W_120 = W_120+ a *Ghimj(index,312);
W_133 = W_133+ a *Ghimj(index,313);
W_135 = W_135+ a *Ghimj(index,314);
a = - W_70/ Ghimj(index,352);
W_70 = -a;
W_84 = W_84+ a *Ghimj(index,353);
W_87 = W_87+ a *Ghimj(index,354);
W_126 = W_126+ a *Ghimj(index,355);
a = - W_76/ Ghimj(index,377);
W_76 = -a;
W_87 = W_87+ a *Ghimj(index,378);
W_126 = W_126+ a *Ghimj(index,379);
W_133 = W_133+ a *Ghimj(index,380);
W_135 = W_135+ a *Ghimj(index,381);
a = - W_84/ Ghimj(index,421);
W_84 = -a;
W_92 = W_92+ a *Ghimj(index,422);
W_124 = W_124+ a *Ghimj(index,423);
W_135 = W_135+ a *Ghimj(index,424);
W_137 = W_137+ a *Ghimj(index,425);
a = - W_87/ Ghimj(index,444);
W_87 = -a;
W_92 = W_92+ a *Ghimj(index,445);
W_124 = W_124+ a *Ghimj(index,446);
W_126 = W_126+ a *Ghimj(index,447);
W_135 = W_135+ a *Ghimj(index,448);
W_137 = W_137+ a *Ghimj(index,449);
a = - W_92/ Ghimj(index,489);
W_92 = -a;
W_124 = W_124+ a *Ghimj(index,490);
W_126 = W_126+ a *Ghimj(index,491);
W_133 = W_133+ a *Ghimj(index,492);
W_135 = W_135+ a *Ghimj(index,493);
W_137 = W_137+ a *Ghimj(index,494);
a = - W_93/ Ghimj(index,497);
W_93 = -a;
W_125 = W_125+ a *Ghimj(index,498);
W_126 = W_126+ a *Ghimj(index,499);
W_133 = W_133+ a *Ghimj(index,500);
W_137 = W_137+ a *Ghimj(index,501);
a = - W_94/ Ghimj(index,505);
W_94 = -a;
W_125 = W_125+ a *Ghimj(index,506);
W_126 = W_126+ a *Ghimj(index,507);
W_133 = W_133+ a *Ghimj(index,508);
W_137 = W_137+ a *Ghimj(index,509);
a = - W_99/ Ghimj(index,565);
W_99 = -a;
W_102 = W_102+ a *Ghimj(index,566);
W_111 = W_111+ a *Ghimj(index,567);
W_125 = W_125+ a *Ghimj(index,568);
W_126 = W_126+ a *Ghimj(index,569);
W_133 = W_133+ a *Ghimj(index,570);
W_137 = W_137+ a *Ghimj(index,571);
a = - W_102/ Ghimj(index,600);
W_102 = -a;
W_125 = W_125+ a *Ghimj(index,601);
W_126 = W_126+ a *Ghimj(index,602);
W_133 = W_133+ a *Ghimj(index,603);
W_137 = W_137+ a *Ghimj(index,604);
a = - W_109/ Ghimj(index,648);
W_109 = -a;
W_124 = W_124+ a *Ghimj(index,649);
W_125 = W_125+ a *Ghimj(index,650);
W_126 = W_126+ a *Ghimj(index,651);
W_133 = W_133+ a *Ghimj(index,652);
W_136 = W_136+ a *Ghimj(index,653);
W_137 = W_137+ a *Ghimj(index,654);
a = - W_111/ Ghimj(index,669);
W_111 = -a;
W_115 = W_115+ a *Ghimj(index,670);
W_124 = W_124+ a *Ghimj(index,671);
W_125 = W_125+ a *Ghimj(index,672);
W_126 = W_126+ a *Ghimj(index,673);
W_133 = W_133+ a *Ghimj(index,674);
W_136 = W_136+ a *Ghimj(index,675);
W_137 = W_137+ a *Ghimj(index,676);
a = - W_113/ Ghimj(index,689);
W_113 = -a;
W_124 = W_124+ a *Ghimj(index,690);
W_125 = W_125+ a *Ghimj(index,691);
W_126 = W_126+ a *Ghimj(index,692);
W_133 = W_133+ a *Ghimj(index,693);
W_135 = W_135+ a *Ghimj(index,694);
W_136 = W_136+ a *Ghimj(index,695);
W_137 = W_137+ a *Ghimj(index,696);
a = - W_115/ Ghimj(index,706);
W_115 = -a;
W_124 = W_124+ a *Ghimj(index,707);
W_126 = W_126+ a *Ghimj(index,708);
W_127 = W_127+ a *Ghimj(index,709);
W_129 = W_129+ a *Ghimj(index,710);
W_133 = W_133+ a *Ghimj(index,711);
W_136 = W_136+ a *Ghimj(index,712);
W_137 = W_137+ a *Ghimj(index,713);
a = - W_117/ Ghimj(index,731);
W_117 = -a;
W_121 = W_121+ a *Ghimj(index,732);
W_124 = W_124+ a *Ghimj(index,733);
W_125 = W_125+ a *Ghimj(index,734);
W_126 = W_126+ a *Ghimj(index,735);
W_127 = W_127+ a *Ghimj(index,736);
W_129 = W_129+ a *Ghimj(index,737);
W_133 = W_133+ a *Ghimj(index,738);
W_136 = W_136+ a *Ghimj(index,739);
W_137 = W_137+ a *Ghimj(index,740);
a = - W_120/ Ghimj(index,787);
W_120 = -a;
W_122 = W_122+ a *Ghimj(index,788);
W_124 = W_124+ a *Ghimj(index,789);
W_126 = W_126+ a *Ghimj(index,790);
W_127 = W_127+ a *Ghimj(index,791);
W_128 = W_128+ a *Ghimj(index,792);
W_130 = W_130+ a *Ghimj(index,793);
W_133 = W_133+ a *Ghimj(index,794);
W_135 = W_135+ a *Ghimj(index,795);
W_136 = W_136+ a *Ghimj(index,796);
W_137 = W_137+ a *Ghimj(index,797);
a = - W_121/ Ghimj(index,821);
W_121 = -a;
W_124 = W_124+ a *Ghimj(index,822);
W_125 = W_125+ a *Ghimj(index,823);
W_126 = W_126+ a *Ghimj(index,824);
W_127 = W_127+ a *Ghimj(index,825);
W_129 = W_129+ a *Ghimj(index,826);
W_133 = W_133+ a *Ghimj(index,827);
W_135 = W_135+ a *Ghimj(index,828);
W_136 = W_136+ a *Ghimj(index,829);
W_137 = W_137+ a *Ghimj(index,830);
a = - W_122/ Ghimj(index,847);
W_122 = -a;
W_124 = W_124+ a *Ghimj(index,848);
W_125 = W_125+ a *Ghimj(index,849);
W_126 = W_126+ a *Ghimj(index,850);
W_127 = W_127+ a *Ghimj(index,851);
W_128 = W_128+ a *Ghimj(index,852);
W_129 = W_129+ a *Ghimj(index,853);
W_130 = W_130+ a *Ghimj(index,854);
W_131 = W_131+ a *Ghimj(index,855);
W_133 = W_133+ a *Ghimj(index,856);
W_135 = W_135+ a *Ghimj(index,857);
W_136 = W_136+ a *Ghimj(index,858);
W_137 = W_137+ a *Ghimj(index,859);
W_138 = W_138+ a *Ghimj(index,860);
a = - W_124/ Ghimj(index,896);
W_124 = -a;
W_125 = W_125+ a *Ghimj(index,897);
W_126 = W_126+ a *Ghimj(index,898);
W_127 = W_127+ a *Ghimj(index,899);
W_128 = W_128+ a *Ghimj(index,900);
W_129 = W_129+ a *Ghimj(index,901);
W_130 = W_130+ a *Ghimj(index,902);
W_131 = W_131+ a *Ghimj(index,903);
W_132 = W_132+ a *Ghimj(index,904);
W_133 = W_133+ a *Ghimj(index,905);
W_135 = W_135+ a *Ghimj(index,906);
W_136 = W_136+ a *Ghimj(index,907);
W_137 = W_137+ a *Ghimj(index,908);
W_138 = W_138+ a *Ghimj(index,909);
a = - W_125/ Ghimj(index,934);
W_125 = -a;
W_126 = W_126+ a *Ghimj(index,935);
W_127 = W_127+ a *Ghimj(index,936);
W_128 = W_128+ a *Ghimj(index,937);
W_129 = W_129+ a *Ghimj(index,938);
W_130 = W_130+ a *Ghimj(index,939);
W_131 = W_131+ a *Ghimj(index,940);
W_132 = W_132+ a *Ghimj(index,941);
W_133 = W_133+ a *Ghimj(index,942);
W_134 = W_134+ a *Ghimj(index,943);
W_135 = W_135+ a *Ghimj(index,944);
W_136 = W_136+ a *Ghimj(index,945);
W_137 = W_137+ a *Ghimj(index,946);
W_138 = W_138+ a *Ghimj(index,947);
a = - W_126/ Ghimj(index,1023);
W_126 = -a;
W_127 = W_127+ a *Ghimj(index,1024);
W_128 = W_128+ a *Ghimj(index,1025);
W_129 = W_129+ a *Ghimj(index,1026);
W_130 = W_130+ a *Ghimj(index,1027);
W_131 = W_131+ a *Ghimj(index,1028);
W_132 = W_132+ a *Ghimj(index,1029);
W_133 = W_133+ a *Ghimj(index,1030);
W_134 = W_134+ a *Ghimj(index,1031);
W_135 = W_135+ a *Ghimj(index,1032);
W_136 = W_136+ a *Ghimj(index,1033);
W_137 = W_137+ a *Ghimj(index,1034);
W_138 = W_138+ a *Ghimj(index,1035);
a = - W_127/ Ghimj(index,1071);
W_127 = -a;
W_128 = W_128+ a *Ghimj(index,1072);
W_129 = W_129+ a *Ghimj(index,1073);
W_130 = W_130+ a *Ghimj(index,1074);
W_131 = W_131+ a *Ghimj(index,1075);
W_132 = W_132+ a *Ghimj(index,1076);
W_133 = W_133+ a *Ghimj(index,1077);
W_134 = W_134+ a *Ghimj(index,1078);
W_135 = W_135+ a *Ghimj(index,1079);
W_136 = W_136+ a *Ghimj(index,1080);
W_137 = W_137+ a *Ghimj(index,1081);
W_138 = W_138+ a *Ghimj(index,1082);
a = - W_128/ Ghimj(index,1138);
W_128 = -a;
W_129 = W_129+ a *Ghimj(index,1139);
W_130 = W_130+ a *Ghimj(index,1140);
W_131 = W_131+ a *Ghimj(index,1141);
W_132 = W_132+ a *Ghimj(index,1142);
W_133 = W_133+ a *Ghimj(index,1143);
W_134 = W_134+ a *Ghimj(index,1144);
W_135 = W_135+ a *Ghimj(index,1145);
W_136 = W_136+ a *Ghimj(index,1146);
W_137 = W_137+ a *Ghimj(index,1147);
W_138 = W_138+ a *Ghimj(index,1148);
a = - W_129/ Ghimj(index,1176);
W_129 = -a;
W_130 = W_130+ a *Ghimj(index,1177);
W_131 = W_131+ a *Ghimj(index,1178);
W_132 = W_132+ a *Ghimj(index,1179);
W_133 = W_133+ a *Ghimj(index,1180);
W_134 = W_134+ a *Ghimj(index,1181);
W_135 = W_135+ a *Ghimj(index,1182);
W_136 = W_136+ a *Ghimj(index,1183);
W_137 = W_137+ a *Ghimj(index,1184);
W_138 = W_138+ a *Ghimj(index,1185);
a = - W_130/ Ghimj(index,1218);
W_130 = -a;
W_131 = W_131+ a *Ghimj(index,1219);
W_132 = W_132+ a *Ghimj(index,1220);
W_133 = W_133+ a *Ghimj(index,1221);
W_134 = W_134+ a *Ghimj(index,1222);
W_135 = W_135+ a *Ghimj(index,1223);
W_136 = W_136+ a *Ghimj(index,1224);
W_137 = W_137+ a *Ghimj(index,1225);
W_138 = W_138+ a *Ghimj(index,1226);
a = - W_131/ Ghimj(index,1242);
W_131 = -a;
W_132 = W_132+ a *Ghimj(index,1243);
W_133 = W_133+ a *Ghimj(index,1244);
W_134 = W_134+ a *Ghimj(index,1245);
W_135 = W_135+ a *Ghimj(index,1246);
W_136 = W_136+ a *Ghimj(index,1247);
W_137 = W_137+ a *Ghimj(index,1248);
W_138 = W_138+ a *Ghimj(index,1249);
a = - W_132/ Ghimj(index,1262);
W_132 = -a;
W_133 = W_133+ a *Ghimj(index,1263);
W_134 = W_134+ a *Ghimj(index,1264);
W_135 = W_135+ a *Ghimj(index,1265);
W_136 = W_136+ a *Ghimj(index,1266);
W_137 = W_137+ a *Ghimj(index,1267);
W_138 = W_138+ a *Ghimj(index,1268);
Ghimj(index,1269) = W_59;
Ghimj(index,1270) = W_60;
Ghimj(index,1271) = W_70;
Ghimj(index,1272) = W_76;
Ghimj(index,1273) = W_84;
Ghimj(index,1274) = W_87;
Ghimj(index,1275) = W_92;
Ghimj(index,1276) = W_93;
Ghimj(index,1277) = W_94;
Ghimj(index,1278) = W_99;
Ghimj(index,1279) = W_102;
Ghimj(index,1280) = W_109;
Ghimj(index,1281) = W_111;
Ghimj(index,1282) = W_113;
Ghimj(index,1283) = W_115;
Ghimj(index,1284) = W_117;
Ghimj(index,1285) = W_120;
Ghimj(index,1286) = W_121;
Ghimj(index,1287) = W_122;
Ghimj(index,1288) = W_124;
Ghimj(index,1289) = W_125;
Ghimj(index,1290) = W_126;
Ghimj(index,1291) = W_127;
Ghimj(index,1292) = W_128;
Ghimj(index,1293) = W_129;
Ghimj(index,1294) = W_130;
Ghimj(index,1295) = W_131;
Ghimj(index,1296) = W_132;
Ghimj(index,1297) = W_133;
Ghimj(index,1298) = W_134;
Ghimj(index,1299) = W_135;
Ghimj(index,1300) = W_136;
Ghimj(index,1301) = W_137;
Ghimj(index,1302) = W_138;
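/* Row held at Ghimj(index,1303..1328). */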
W_39 = Ghimj(index,1303);
W_41 = Ghimj(index,1304);
W_42 = Ghimj(index,1305);
W_43 = Ghimj(index,1306);
W_51 = Ghimj(index,1307);
W_75 = Ghimj(index,1308);
W_112 = Ghimj(index,1309);
W_116 = Ghimj(index,1310);
W_120 = Ghimj(index,1311);
W_122 = Ghimj(index,1312);
W_123 = Ghimj(index,1313);
W_124 = Ghimj(index,1314);
W_125 = Ghimj(index,1315);
W_126 = Ghimj(index,1316);
W_127 = Ghimj(index,1317);
W_128 = Ghimj(index,1318);
W_129 = Ghimj(index,1319);
W_130 = Ghimj(index,1320);
W_131 = Ghimj(index,1321);
W_132 = Ghimj(index,1322);
W_133 = Ghimj(index,1323);
W_134 = Ghimj(index,1324);
W_135 = Ghimj(index,1325);
W_136 = Ghimj(index,1326);
W_137 = Ghimj(index,1327);
W_138 = Ghimj(index,1328);
a = - W_39/ Ghimj(index,258);
W_39 = -a;
W_134 = W_134+ a *Ghimj(index,259);
a = - W_41/ Ghimj(index,262);
W_41 = -a;
W_120 = W_120+ a *Ghimj(index,263);
a = - W_42/ Ghimj(index,264);
W_42 = -a;
W_120 = W_120+ a *Ghimj(index,265);
a = - W_43/ Ghimj(index,266);
W_43 = -a;
W_120 = W_120+ a *Ghimj(index,267);
a = - W_51/ Ghimj(index,285);
W_51 = -a;
W_132 = W_132+ a *Ghimj(index,286);
W_134 = W_134+ a *Ghimj(index,287);
a = - W_75/ Ghimj(index,374);
W_75 = -a;
W_120 = W_120+ a *Ghimj(index,375);
W_126 = W_126+ a *Ghimj(index,376);
a = - W_112/ Ghimj(index,677);
W_112 = -a;
W_116 = W_116+ a *Ghimj(index,678);
W_123 = W_123+ a *Ghimj(index,679);
W_126 = W_126+ a *Ghimj(index,680);
W_128 = W_128+ a *Ghimj(index,681);
W_134 = W_134+ a *Ghimj(index,682);
W_137 = W_137+ a *Ghimj(index,683);
W_138 = W_138+ a *Ghimj(index,684);
a = - W_116/ Ghimj(index,714);
W_116 = -a;
W_123 = W_123+ a *Ghimj(index,715);
W_127 = W_127+ a *Ghimj(index,716);
W_128 = W_128+ a *Ghimj(index,717);
W_131 = W_131+ a *Ghimj(index,718);
W_134 = W_134+ a *Ghimj(index,719);
W_135 = W_135+ a *Ghimj(index,720);
W_138 = W_138+ a *Ghimj(index,721);
a = - W_120/ Ghimj(index,787);
W_120 = -a;
W_122 = W_122+ a *Ghimj(index,788);
W_124 = W_124+ a *Ghimj(index,789);
W_126 = W_126+ a *Ghimj(index,790);
W_127 = W_127+ a *Ghimj(index,791);
W_128 = W_128+ a *Ghimj(index,792);
W_130 = W_130+ a *Ghimj(index,793);
W_133 = W_133+ a *Ghimj(index,794);
W_135 = W_135+ a *Ghimj(index,795);
W_136 = W_136+ a *Ghimj(index,796);
W_137 = W_137+ a *Ghimj(index,797);
a = - W_122/ Ghimj(index,847);
W_122 = -a;
W_124 = W_124+ a *Ghimj(index,848);
W_125 = W_125+ a *Ghimj(index,849);
W_126 = W_126+ a *Ghimj(index,850);
W_127 = W_127+ a *Ghimj(index,851);
W_128 = W_128+ a *Ghimj(index,852);
W_129 = W_129+ a *Ghimj(index,853);
W_130 = W_130+ a *Ghimj(index,854);
W_131 = W_131+ a *Ghimj(index,855);
W_133 = W_133+ a *Ghimj(index,856);
W_135 = W_135+ a *Ghimj(index,857);
W_136 = W_136+ a *Ghimj(index,858);
W_137 = W_137+ a *Ghimj(index,859);
W_138 = W_138+ a *Ghimj(index,860);
a = - W_123/ Ghimj(index,869);
W_123 = -a;
W_124 = W_124+ a *Ghimj(index,870);
W_125 = W_125+ a *Ghimj(index,871);
W_126 = W_126+ a *Ghimj(index,872);
W_127 = W_127+ a *Ghimj(index,873);
W_128 = W_128+ a *Ghimj(index,874);
W_129 = W_129+ a *Ghimj(index,875);
W_130 = W_130+ a *Ghimj(index,876);
W_131 = W_131+ a *Ghimj(index,877);
W_132 = W_132+ a *Ghimj(index,878);
W_133 = W_133+ a *Ghimj(index,879);
W_134 = W_134+ a *Ghimj(index,880);
W_135 = W_135+ a *Ghimj(index,881);
W_136 = W_136+ a *Ghimj(index,882);
W_137 = W_137+ a *Ghimj(index,883);
W_138 = W_138+ a *Ghimj(index,884);
a = - W_124/ Ghimj(index,896);
W_124 = -a;
W_125 = W_125+ a *Ghimj(index,897);
W_126 = W_126+ a *Ghimj(index,898);
W_127 = W_127+ a *Ghimj(index,899);
W_128 = W_128+ a *Ghimj(index,900);
W_129 = W_129+ a *Ghimj(index,901);
W_130 = W_130+ a *Ghimj(index,902);
W_131 = W_131+ a *Ghimj(index,903);
W_132 = W_132+ a *Ghimj(index,904);
W_133 = W_133+ a *Ghimj(index,905);
W_135 = W_135+ a *Ghimj(index,906);
W_136 = W_136+ a *Ghimj(index,907);
W_137 = W_137+ a *Ghimj(index,908);
W_138 = W_138+ a *Ghimj(index,909);
a = - W_125/ Ghimj(index,934);
W_125 = -a;
W_126 = W_126+ a *Ghimj(index,935);
W_127 = W_127+ a *Ghimj(index,936);
W_128 = W_128+ a *Ghimj(index,937);
W_129 = W_129+ a *Ghimj(index,938);
W_130 = W_130+ a *Ghimj(index,939);
W_131 = W_131+ a *Ghimj(index,940);
W_132 = W_132+ a *Ghimj(index,941);
W_133 = W_133+ a *Ghimj(index,942);
W_134 = W_134+ a *Ghimj(index,943);
W_135 = W_135+ a *Ghimj(index,944);
W_136 = W_136+ a *Ghimj(index,945);
W_137 = W_137+ a *Ghimj(index,946);
W_138 = W_138+ a *Ghimj(index,947);
a = - W_126/ Ghimj(index,1023);
W_126 = -a;
W_127 = W_127+ a *Ghimj(index,1024);
W_128 = W_128+ a *Ghimj(index,1025);
W_129 = W_129+ a *Ghimj(index,1026);
W_130 = W_130+ a *Ghimj(index,1027);
W_131 = W_131+ a *Ghimj(index,1028);
W_132 = W_132+ a *Ghimj(index,1029);
W_133 = W_133+ a *Ghimj(index,1030);
W_134 = W_134+ a *Ghimj(index,1031);
W_135 = W_135+ a *Ghimj(index,1032);
W_136 = W_136+ a *Ghimj(index,1033);
W_137 = W_137+ a *Ghimj(index,1034);
W_138 = W_138+ a *Ghimj(index,1035);
a = - W_127/ Ghimj(index,1071);
W_127 = -a;
W_128 = W_128+ a *Ghimj(index,1072);
W_129 = W_129+ a *Ghimj(index,1073);
W_130 = W_130+ a *Ghimj(index,1074);
W_131 = W_131+ a *Ghimj(index,1075);
W_132 = W_132+ a *Ghimj(index,1076);
W_133 = W_133+ a *Ghimj(index,1077);
W_134 = W_134+ a *Ghimj(index,1078);
W_135 = W_135+ a *Ghimj(index,1079);
W_136 = W_136+ a *Ghimj(index,1080);
W_137 = W_137+ a *Ghimj(index,1081);
W_138 = W_138+ a *Ghimj(index,1082);
a = - W_128/ Ghimj(index,1138);
W_128 = -a;
W_129 = W_129+ a *Ghimj(index,1139);
W_130 = W_130+ a *Ghimj(index,1140);
W_131 = W_131+ a *Ghimj(index,1141);
W_132 = W_132+ a *Ghimj(index,1142);
W_133 = W_133+ a *Ghimj(index,1143);
W_134 = W_134+ a *Ghimj(index,1144);
W_135 = W_135+ a *Ghimj(index,1145);
W_136 = W_136+ a *Ghimj(index,1146);
W_137 = W_137+ a *Ghimj(index,1147);
W_138 = W_138+ a *Ghimj(index,1148);
a = - W_129/ Ghimj(index,1176);
W_129 = -a;
W_130 = W_130+ a *Ghimj(index,1177);
W_131 = W_131+ a *Ghimj(index,1178);
W_132 = W_132+ a *Ghimj(index,1179);
W_133 = W_133+ a *Ghimj(index,1180);
W_134 = W_134+ a *Ghimj(index,1181);
W_135 = W_135+ a *Ghimj(index,1182);
W_136 = W_136+ a *Ghimj(index,1183);
W_137 = W_137+ a *Ghimj(index,1184);
W_138 = W_138+ a *Ghimj(index,1185);
a = - W_130/ Ghimj(index,1218);
W_130 = -a;
W_131 = W_131+ a *Ghimj(index,1219);
W_132 = W_132+ a *Ghimj(index,1220);
W_133 = W_133+ a *Ghimj(index,1221);
W_134 = W_134+ a *Ghimj(index,1222);
W_135 = W_135+ a *Ghimj(index,1223);
W_136 = W_136+ a *Ghimj(index,1224);
W_137 = W_137+ a *Ghimj(index,1225);
W_138 = W_138+ a *Ghimj(index,1226);
a = - W_131/ Ghimj(index,1242);
W_131 = -a;
W_132 = W_132+ a *Ghimj(index,1243);
W_133 = W_133+ a *Ghimj(index,1244);
W_134 = W_134+ a *Ghimj(index,1245);
W_135 = W_135+ a *Ghimj(index,1246);
W_136 = W_136+ a *Ghimj(index,1247);
W_137 = W_137+ a *Ghimj(index,1248);
W_138 = W_138+ a *Ghimj(index,1249);
a = - W_132/ Ghimj(index,1262);
W_132 = -a;
W_133 = W_133+ a *Ghimj(index,1263);
W_134 = W_134+ a *Ghimj(index,1264);
W_135 = W_135+ a *Ghimj(index,1265);
W_136 = W_136+ a *Ghimj(index,1266);
W_137 = W_137+ a *Ghimj(index,1267);
W_138 = W_138+ a *Ghimj(index,1268);
a = - W_133/ Ghimj(index,1297);
W_133 = -a;
W_134 = W_134+ a *Ghimj(index,1298);
W_135 = W_135+ a *Ghimj(index,1299);
W_136 = W_136+ a *Ghimj(index,1300);
W_137 = W_137+ a *Ghimj(index,1301);
W_138 = W_138+ a *Ghimj(index,1302);
Ghimj(index,1303) = W_39;
Ghimj(index,1304) = W_41;
Ghimj(index,1305) = W_42;
Ghimj(index,1306) = W_43;
Ghimj(index,1307) = W_51;
Ghimj(index,1308) = W_75;
Ghimj(index,1309) = W_112;
Ghimj(index,1310) = W_116;
Ghimj(index,1311) = W_120;
Ghimj(index,1312) = W_122;
Ghimj(index,1313) = W_123;
Ghimj(index,1314) = W_124;
Ghimj(index,1315) = W_125;
Ghimj(index,1316) = W_126;
Ghimj(index,1317) = W_127;
Ghimj(index,1318) = W_128;
Ghimj(index,1319) = W_129;
Ghimj(index,1320) = W_130;
Ghimj(index,1321) = W_131;
Ghimj(index,1322) = W_132;
Ghimj(index,1323) = W_133;
Ghimj(index,1324) = W_134;
Ghimj(index,1325) = W_135;
Ghimj(index,1326) = W_136;
Ghimj(index,1327) = W_137;
Ghimj(index,1328) = W_138;
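/* Row held at Ghimj(index,1329..1373). */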
W_0 = Ghimj(index,1329);
W_50 = Ghimj(index,1330);
W_58 = Ghimj(index,1331);
W_59 = Ghimj(index,1332);
W_62 = Ghimj(index,1333);
W_64 = Ghimj(index,1334);
W_73 = Ghimj(index,1335);
W_76 = Ghimj(index,1336);
W_77 = Ghimj(index,1337);
W_83 = Ghimj(index,1338);
W_87 = Ghimj(index,1339);
W_91 = Ghimj(index,1340);
W_92 = Ghimj(index,1341);
W_93 = Ghimj(index,1342);
W_94 = Ghimj(index,1343);
W_99 = Ghimj(index,1344);
W_101 = Ghimj(index,1345);
W_102 = Ghimj(index,1346);
W_105 = Ghimj(index,1347);
W_106 = Ghimj(index,1348);
W_109 = Ghimj(index,1349);
W_111 = Ghimj(index,1350);
W_113 = Ghimj(index,1351);
W_114 = Ghimj(index,1352);
W_115 = Ghimj(index,1353);
W_116 = Ghimj(index,1354);
W_117 = Ghimj(index,1355);
W_119 = Ghimj(index,1356);
W_121 = Ghimj(index,1357);
W_123 = Ghimj(index,1358);
W_124 = Ghimj(index,1359);
W_125 = Ghimj(index,1360);
W_126 = Ghimj(index,1361);
W_127 = Ghimj(index,1362);
W_128 = Ghimj(index,1363);
W_129 = Ghimj(index,1364);
W_130 = Ghimj(index,1365);
W_131 = Ghimj(index,1366);
W_132 = Ghimj(index,1367);
W_133 = Ghimj(index,1368);
W_134 = Ghimj(index,1369);
W_135 = Ghimj(index,1370);
W_136 = Ghimj(index,1371);
W_137 = Ghimj(index,1372);
W_138 = Ghimj(index,1373);
a = - W_0/ Ghimj(index,0);
W_0 = -a;
a = - W_50/ Ghimj(index,282);
W_50 = -a;
W_83 = W_83+ a *Ghimj(index,283);
W_138 = W_138+ a *Ghimj(index,284);
a = - W_58/ Ghimj(index,303);
W_58 = -a;
W_91 = W_91+ a *Ghimj(index,304);
W_126 = W_126+ a *Ghimj(index,305);
a = - W_59/ Ghimj(index,306);
W_59 = -a;
W_133 = W_133+ a *Ghimj(index,307);
W_135 = W_135+ a *Ghimj(index,308);
a = - W_62/ Ghimj(index,319);
W_62 = -a;
W_93 = W_93+ a *Ghimj(index,320);
W_126 = W_126+ a *Ghimj(index,321);
W_133 = W_133+ a *Ghimj(index,322);
a = - W_64/ Ghimj(index,327);
W_64 = -a;
W_113 = W_113+ a *Ghimj(index,328);
W_126 = W_126+ a *Ghimj(index,329);
W_135 = W_135+ a *Ghimj(index,330);
a = - W_73/ Ghimj(index,364);
W_73 = -a;
W_126 = W_126+ a *Ghimj(index,365);
W_135 = W_135+ a *Ghimj(index,366);
W_137 = W_137+ a *Ghimj(index,367);
a = - W_76/ Ghimj(index,377);
W_76 = -a;
W_87 = W_87+ a *Ghimj(index,378);
W_126 = W_126+ a *Ghimj(index,379);
W_133 = W_133+ a *Ghimj(index,380);
W_135 = W_135+ a *Ghimj(index,381);
a = - W_77/ Ghimj(index,382);
W_77 = -a;
W_121 = W_121+ a *Ghimj(index,383);
W_126 = W_126+ a *Ghimj(index,384);
W_135 = W_135+ a *Ghimj(index,385);
a = - W_83/ Ghimj(index,416);
W_83 = -a;
W_128 = W_128+ a *Ghimj(index,417);
W_135 = W_135+ a *Ghimj(index,418);
W_136 = W_136+ a *Ghimj(index,419);
W_138 = W_138+ a *Ghimj(index,420);
a = - W_87/ Ghimj(index,444);
W_87 = -a;
W_92 = W_92+ a *Ghimj(index,445);
W_124 = W_124+ a *Ghimj(index,446);
W_126 = W_126+ a *Ghimj(index,447);
W_135 = W_135+ a *Ghimj(index,448);
W_137 = W_137+ a *Ghimj(index,449);
a = - W_91/ Ghimj(index,481);
W_91 = -a;
W_106 = W_106+ a *Ghimj(index,482);
W_109 = W_109+ a *Ghimj(index,483);
W_126 = W_126+ a *Ghimj(index,484);
W_133 = W_133+ a *Ghimj(index,485);
W_136 = W_136+ a *Ghimj(index,486);
a = - W_92/ Ghimj(index,489);
W_92 = -a;
W_124 = W_124+ a *Ghimj(index,490);
W_126 = W_126+ a *Ghimj(index,491);
W_133 = W_133+ a *Ghimj(index,492);
W_135 = W_135+ a *Ghimj(index,493);
W_137 = W_137+ a *Ghimj(index,494);
a = - W_93/ Ghimj(index,497);
W_93 = -a;
W_125 = W_125+ a *Ghimj(index,498);
W_126 = W_126+ a *Ghimj(index,499);
W_133 = W_133+ a *Ghimj(index,500);
W_137 = W_137+ a *Ghimj(index,501);
a = - W_94/ Ghimj(index,505);
W_94 = -a;
W_125 = W_125+ a *Ghimj(index,506);
W_126 = W_126+ a *Ghimj(index,507);
W_133 = W_133+ a *Ghimj(index,508);
W_137 = W_137+ a *Ghimj(index,509);
a = - W_99/ Ghimj(index,565);
W_99 = -a;
W_102 = W_102+ a *Ghimj(index,566);
W_111 = W_111+ a *Ghimj(index,567);
W_125 = W_125+ a *Ghimj(index,568);
W_126 = W_126+ a *Ghimj(index,569);
W_133 = W_133+ a *Ghimj(index,570);
W_137 = W_137+ a *Ghimj(index,571);
a = - W_101/ Ghimj(index,586);
W_101 = -a;
W_105 = W_105+ a *Ghimj(index,587);
W_114 = W_114+ a *Ghimj(index,588);
W_116 = W_116+ a *Ghimj(index,589);
W_119 = W_119+ a *Ghimj(index,590);
W_123 = W_123+ a *Ghimj(index,591);
W_126 = W_126+ a *Ghimj(index,592);
W_128 = W_128+ a *Ghimj(index,593);
W_130 = W_130+ a *Ghimj(index,594);
W_135 = W_135+ a *Ghimj(index,595);
W_136 = W_136+ a *Ghimj(index,596);
W_138 = W_138+ a *Ghimj(index,597);
a = - W_102/ Ghimj(index,600);
W_102 = -a;
W_125 = W_125+ a *Ghimj(index,601);
W_126 = W_126+ a *Ghimj(index,602);
W_133 = W_133+ a *Ghimj(index,603);
W_137 = W_137+ a *Ghimj(index,604);
a = - W_105/ Ghimj(index,616);
W_105 = -a;
W_128 = W_128+ a *Ghimj(index,617);
W_129 = W_129+ a *Ghimj(index,618);
W_132 = W_132+ a *Ghimj(index,619);
W_135 = W_135+ a *Ghimj(index,620);
W_138 = W_138+ a *Ghimj(index,621);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
a = - W_109/ Ghimj(index,648);
W_109 = -a;
W_124 = W_124+ a *Ghimj(index,649);
W_125 = W_125+ a *Ghimj(index,650);
W_126 = W_126+ a *Ghimj(index,651);
W_133 = W_133+ a *Ghimj(index,652);
W_136 = W_136+ a *Ghimj(index,653);
W_137 = W_137+ a *Ghimj(index,654);
a = - W_111/ Ghimj(index,669);
W_111 = -a;
W_115 = W_115+ a *Ghimj(index,670);
W_124 = W_124+ a *Ghimj(index,671);
W_125 = W_125+ a *Ghimj(index,672);
W_126 = W_126+ a *Ghimj(index,673);
W_133 = W_133+ a *Ghimj(index,674);
W_136 = W_136+ a *Ghimj(index,675);
W_137 = W_137+ a *Ghimj(index,676);
a = - W_113/ Ghimj(index,689);
W_113 = -a;
W_124 = W_124+ a *Ghimj(index,690);
W_125 = W_125+ a *Ghimj(index,691);
W_126 = W_126+ a *Ghimj(index,692);
W_133 = W_133+ a *Ghimj(index,693);
W_135 = W_135+ a *Ghimj(index,694);
W_136 = W_136+ a *Ghimj(index,695);
W_137 = W_137+ a *Ghimj(index,696);
a = - W_114/ Ghimj(index,697);
W_114 = -a;
W_126 = W_126+ a *Ghimj(index,698);
W_127 = W_127+ a *Ghimj(index,699);
W_129 = W_129+ a *Ghimj(index,700);
W_132 = W_132+ a *Ghimj(index,701);
W_136 = W_136+ a *Ghimj(index,702);
a = - W_115/ Ghimj(index,706);
W_115 = -a;
W_124 = W_124+ a *Ghimj(index,707);
W_126 = W_126+ a *Ghimj(index,708);
W_127 = W_127+ a *Ghimj(index,709);
W_129 = W_129+ a *Ghimj(index,710);
W_133 = W_133+ a *Ghimj(index,711);
W_136 = W_136+ a *Ghimj(index,712);
W_137 = W_137+ a *Ghimj(index,713);
a = - W_116/ Ghimj(index,714);
W_116 = -a;
W_123 = W_123+ a *Ghimj(index,715);
W_127 = W_127+ a *Ghimj(index,716);
W_128 = W_128+ a *Ghimj(index,717);
W_131 = W_131+ a *Ghimj(index,718);
W_134 = W_134+ a *Ghimj(index,719);
W_135 = W_135+ a *Ghimj(index,720);
W_138 = W_138+ a *Ghimj(index,721);
a = - W_117/ Ghimj(index,731);
W_117 = -a;
W_121 = W_121+ a *Ghimj(index,732);
W_124 = W_124+ a *Ghimj(index,733);
W_125 = W_125+ a *Ghimj(index,734);
W_126 = W_126+ a *Ghimj(index,735);
W_127 = W_127+ a *Ghimj(index,736);
W_129 = W_129+ a *Ghimj(index,737);
W_133 = W_133+ a *Ghimj(index,738);
W_136 = W_136+ a *Ghimj(index,739);
W_137 = W_137+ a *Ghimj(index,740);
a = - W_119/ Ghimj(index,767);
W_119 = -a;
W_121 = W_121+ a *Ghimj(index,768);
W_124 = W_124+ a *Ghimj(index,769);
W_125 = W_125+ a *Ghimj(index,770);
W_126 = W_126+ a *Ghimj(index,771);
W_127 = W_127+ a *Ghimj(index,772);
W_129 = W_129+ a *Ghimj(index,773);
W_133 = W_133+ a *Ghimj(index,774);
W_136 = W_136+ a *Ghimj(index,775);
W_137 = W_137+ a *Ghimj(index,776);
a = - W_121/ Ghimj(index,821);
W_121 = -a;
W_124 = W_124+ a *Ghimj(index,822);
W_125 = W_125+ a *Ghimj(index,823);
W_126 = W_126+ a *Ghimj(index,824);
W_127 = W_127+ a *Ghimj(index,825);
W_129 = W_129+ a *Ghimj(index,826);
W_133 = W_133+ a *Ghimj(index,827);
W_135 = W_135+ a *Ghimj(index,828);
W_136 = W_136+ a *Ghimj(index,829);
W_137 = W_137+ a *Ghimj(index,830);
a = - W_123/ Ghimj(index,869);
W_123 = -a;
W_124 = W_124+ a *Ghimj(index,870);
W_125 = W_125+ a *Ghimj(index,871);
W_126 = W_126+ a *Ghimj(index,872);
W_127 = W_127+ a *Ghimj(index,873);
W_128 = W_128+ a *Ghimj(index,874);
W_129 = W_129+ a *Ghimj(index,875);
W_130 = W_130+ a *Ghimj(index,876);
W_131 = W_131+ a *Ghimj(index,877);
W_132 = W_132+ a *Ghimj(index,878);
W_133 = W_133+ a *Ghimj(index,879);
W_134 = W_134+ a *Ghimj(index,880);
W_135 = W_135+ a *Ghimj(index,881);
W_136 = W_136+ a *Ghimj(index,882);
W_137 = W_137+ a *Ghimj(index,883);
W_138 = W_138+ a *Ghimj(index,884);
a = - W_124/ Ghimj(index,896);
W_124 = -a;
W_125 = W_125+ a *Ghimj(index,897);
W_126 = W_126+ a *Ghimj(index,898);
W_127 = W_127+ a *Ghimj(index,899);
W_128 = W_128+ a *Ghimj(index,900);
W_129 = W_129+ a *Ghimj(index,901);
W_130 = W_130+ a *Ghimj(index,902);
W_131 = W_131+ a *Ghimj(index,903);
W_132 = W_132+ a *Ghimj(index,904);
W_133 = W_133+ a *Ghimj(index,905);
W_135 = W_135+ a *Ghimj(index,906);
W_136 = W_136+ a *Ghimj(index,907);
W_137 = W_137+ a *Ghimj(index,908);
W_138 = W_138+ a *Ghimj(index,909);
a = - W_125/ Ghimj(index,934);
W_125 = -a;
W_126 = W_126+ a *Ghimj(index,935);
W_127 = W_127+ a *Ghimj(index,936);
W_128 = W_128+ a *Ghimj(index,937);
W_129 = W_129+ a *Ghimj(index,938);
W_130 = W_130+ a *Ghimj(index,939);
W_131 = W_131+ a *Ghimj(index,940);
W_132 = W_132+ a *Ghimj(index,941);
W_133 = W_133+ a *Ghimj(index,942);
W_134 = W_134+ a *Ghimj(index,943);
W_135 = W_135+ a *Ghimj(index,944);
W_136 = W_136+ a *Ghimj(index,945);
W_137 = W_137+ a *Ghimj(index,946);
W_138 = W_138+ a *Ghimj(index,947);
a = - W_126/ Ghimj(index,1023);
W_126 = -a;
W_127 = W_127+ a *Ghimj(index,1024);
W_128 = W_128+ a *Ghimj(index,1025);
W_129 = W_129+ a *Ghimj(index,1026);
W_130 = W_130+ a *Ghimj(index,1027);
W_131 = W_131+ a *Ghimj(index,1028);
W_132 = W_132+ a *Ghimj(index,1029);
W_133 = W_133+ a *Ghimj(index,1030);
W_134 = W_134+ a *Ghimj(index,1031);
W_135 = W_135+ a *Ghimj(index,1032);
W_136 = W_136+ a *Ghimj(index,1033);
W_137 = W_137+ a *Ghimj(index,1034);
W_138 = W_138+ a *Ghimj(index,1035);
a = - W_127/ Ghimj(index,1071);
W_127 = -a;
W_128 = W_128+ a *Ghimj(index,1072);
W_129 = W_129+ a *Ghimj(index,1073);
W_130 = W_130+ a *Ghimj(index,1074);
W_131 = W_131+ a *Ghimj(index,1075);
W_132 = W_132+ a *Ghimj(index,1076);
W_133 = W_133+ a *Ghimj(index,1077);
W_134 = W_134+ a *Ghimj(index,1078);
W_135 = W_135+ a *Ghimj(index,1079);
W_136 = W_136+ a *Ghimj(index,1080);
W_137 = W_137+ a *Ghimj(index,1081);
W_138 = W_138+ a *Ghimj(index,1082);
a = - W_128/ Ghimj(index,1138);
W_128 = -a;
W_129 = W_129+ a *Ghimj(index,1139);
W_130 = W_130+ a *Ghimj(index,1140);
W_131 = W_131+ a *Ghimj(index,1141);
W_132 = W_132+ a *Ghimj(index,1142);
W_133 = W_133+ a *Ghimj(index,1143);
W_134 = W_134+ a *Ghimj(index,1144);
W_135 = W_135+ a *Ghimj(index,1145);
W_136 = W_136+ a *Ghimj(index,1146);
W_137 = W_137+ a *Ghimj(index,1147);
W_138 = W_138+ a *Ghimj(index,1148);
a = - W_129/ Ghimj(index,1176);
W_129 = -a;
W_130 = W_130+ a *Ghimj(index,1177);
W_131 = W_131+ a *Ghimj(index,1178);
W_132 = W_132+ a *Ghimj(index,1179);
W_133 = W_133+ a *Ghimj(index,1180);
W_134 = W_134+ a *Ghimj(index,1181);
W_135 = W_135+ a *Ghimj(index,1182);
W_136 = W_136+ a *Ghimj(index,1183);
W_137 = W_137+ a *Ghimj(index,1184);
W_138 = W_138+ a *Ghimj(index,1185);
a = - W_130/ Ghimj(index,1218);
W_130 = -a;
W_131 = W_131+ a *Ghimj(index,1219);
W_132 = W_132+ a *Ghimj(index,1220);
W_133 = W_133+ a *Ghimj(index,1221);
W_134 = W_134+ a *Ghimj(index,1222);
W_135 = W_135+ a *Ghimj(index,1223);
W_136 = W_136+ a *Ghimj(index,1224);
W_137 = W_137+ a *Ghimj(index,1225);
W_138 = W_138+ a *Ghimj(index,1226);
a = - W_131/ Ghimj(index,1242);
W_131 = -a;
W_132 = W_132+ a *Ghimj(index,1243);
W_133 = W_133+ a *Ghimj(index,1244);
W_134 = W_134+ a *Ghimj(index,1245);
W_135 = W_135+ a *Ghimj(index,1246);
W_136 = W_136+ a *Ghimj(index,1247);
W_137 = W_137+ a *Ghimj(index,1248);
W_138 = W_138+ a *Ghimj(index,1249);
a = - W_132/ Ghimj(index,1262);
W_132 = -a;
W_133 = W_133+ a *Ghimj(index,1263);
W_134 = W_134+ a *Ghimj(index,1264);
W_135 = W_135+ a *Ghimj(index,1265);
W_136 = W_136+ a *Ghimj(index,1266);
W_137 = W_137+ a *Ghimj(index,1267);
W_138 = W_138+ a *Ghimj(index,1268);
a = - W_133/ Ghimj(index,1297);
W_133 = -a;
W_134 = W_134+ a *Ghimj(index,1298);
W_135 = W_135+ a *Ghimj(index,1299);
W_136 = W_136+ a *Ghimj(index,1300);
W_137 = W_137+ a *Ghimj(index,1301);
W_138 = W_138+ a *Ghimj(index,1302);
a = - W_134/ Ghimj(index,1324);
W_134 = -a;
W_135 = W_135+ a *Ghimj(index,1325);
W_136 = W_136+ a *Ghimj(index,1326);
W_137 = W_137+ a *Ghimj(index,1327);
W_138 = W_138+ a *Ghimj(index,1328);
Ghimj(index,1329) = W_0;
Ghimj(index,1330) = W_50;
Ghimj(index,1331) = W_58;
Ghimj(index,1332) = W_59;
Ghimj(index,1333) = W_62;
Ghimj(index,1334) = W_64;
Ghimj(index,1335) = W_73;
Ghimj(index,1336) = W_76;
Ghimj(index,1337) = W_77;
Ghimj(index,1338) = W_83;
Ghimj(index,1339) = W_87;
Ghimj(index,1340) = W_91;
Ghimj(index,1341) = W_92;
Ghimj(index,1342) = W_93;
Ghimj(index,1343) = W_94;
Ghimj(index,1344) = W_99;
Ghimj(index,1345) = W_101;
Ghimj(index,1346) = W_102;
Ghimj(index,1347) = W_105;
Ghimj(index,1348) = W_106;
Ghimj(index,1349) = W_109;
Ghimj(index,1350) = W_111;
Ghimj(index,1351) = W_113;
Ghimj(index,1352) = W_114;
Ghimj(index,1353) = W_115;
Ghimj(index,1354) = W_116;
Ghimj(index,1355) = W_117;
Ghimj(index,1356) = W_119;
Ghimj(index,1357) = W_121;
Ghimj(index,1358) = W_123;
Ghimj(index,1359) = W_124;
Ghimj(index,1360) = W_125;
Ghimj(index,1361) = W_126;
Ghimj(index,1362) = W_127;
Ghimj(index,1363) = W_128;
Ghimj(index,1364) = W_129;
Ghimj(index,1365) = W_130;
Ghimj(index,1366) = W_131;
Ghimj(index,1367) = W_132;
Ghimj(index,1368) = W_133;
Ghimj(index,1369) = W_134;
Ghimj(index,1370) = W_135;
Ghimj(index,1371) = W_136;
Ghimj(index,1372) = W_137;
Ghimj(index,1373) = W_138;
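/* Row held at Ghimj(index,1374..1400). */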
W_73 = Ghimj(index,1374);
W_83 = Ghimj(index,1375);
W_101 = Ghimj(index,1376);
W_105 = Ghimj(index,1377);
W_106 = Ghimj(index,1378);
W_107 = Ghimj(index,1379);
W_114 = Ghimj(index,1380);
W_116 = Ghimj(index,1381);
W_117 = Ghimj(index,1382);
W_119 = Ghimj(index,1383);
W_121 = Ghimj(index,1384);
W_123 = Ghimj(index,1385);
W_124 = Ghimj(index,1386);
W_125 = Ghimj(index,1387);
W_126 = Ghimj(index,1388);
W_127 = Ghimj(index,1389);
W_128 = Ghimj(index,1390);
W_129 = Ghimj(index,1391);
W_130 = Ghimj(index,1392);
W_131 = Ghimj(index,1393);
W_132 = Ghimj(index,1394);
W_133 = Ghimj(index,1395);
W_134 = Ghimj(index,1396);
W_135 = Ghimj(index,1397);
W_136 = Ghimj(index,1398);
W_137 = Ghimj(index,1399);
W_138 = Ghimj(index,1400);
a = - W_73/ Ghimj(index,364);
W_73 = -a;
W_126 = W_126+ a *Ghimj(index,365);
W_135 = W_135+ a *Ghimj(index,366);
W_137 = W_137+ a *Ghimj(index,367);
a = - W_83/ Ghimj(index,416);
W_83 = -a;
W_128 = W_128+ a *Ghimj(index,417);
W_135 = W_135+ a *Ghimj(index,418);
W_136 = W_136+ a *Ghimj(index,419);
W_138 = W_138+ a *Ghimj(index,420);
a = - W_101/ Ghimj(index,586);
W_101 = -a;
W_105 = W_105+ a *Ghimj(index,587);
W_114 = W_114+ a *Ghimj(index,588);
W_116 = W_116+ a *Ghimj(index,589);
W_119 = W_119+ a *Ghimj(index,590);
W_123 = W_123+ a *Ghimj(index,591);
W_126 = W_126+ a *Ghimj(index,592);
W_128 = W_128+ a *Ghimj(index,593);
W_130 = W_130+ a *Ghimj(index,594);
W_135 = W_135+ a *Ghimj(index,595);
W_136 = W_136+ a *Ghimj(index,596);
W_138 = W_138+ a *Ghimj(index,597);
a = - W_105/ Ghimj(index,616);
W_105 = -a;
W_128 = W_128+ a *Ghimj(index,617);
W_129 = W_129+ a *Ghimj(index,618);
W_132 = W_132+ a *Ghimj(index,619);
W_135 = W_135+ a *Ghimj(index,620);
W_138 = W_138+ a *Ghimj(index,621);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
a = - W_114/ Ghimj(index,697);
W_114 = -a;
W_126 = W_126+ a *Ghimj(index,698);
W_127 = W_127+ a *Ghimj(index,699);
W_129 = W_129+ a *Ghimj(index,700);
W_132 = W_132+ a *Ghimj(index,701);
W_136 = W_136+ a *Ghimj(index,702);
a = - W_116/ Ghimj(index,714);
W_116 = -a;
W_123 = W_123+ a *Ghimj(index,715);
W_127 = W_127+ a *Ghimj(index,716);
W_128 = W_128+ a *Ghimj(index,717);
W_131 = W_131+ a *Ghimj(index,718);
W_134 = W_134+ a *Ghimj(index,719);
W_135 = W_135+ a *Ghimj(index,720);
W_138 = W_138+ a *Ghimj(index,721);
a = - W_117/ Ghimj(index,731);
W_117 = -a;
W_121 = W_121+ a *Ghimj(index,732);
W_124 = W_124+ a *Ghimj(index,733);
W_125 = W_125+ a *Ghimj(index,734);
W_126 = W_126+ a *Ghimj(index,735);
W_127 = W_127+ a *Ghimj(index,736);
W_129 = W_129+ a *Ghimj(index,737);
W_133 = W_133+ a *Ghimj(index,738);
W_136 = W_136+ a *Ghimj(index,739);
W_137 = W_137+ a *Ghimj(index,740);
a = - W_119/ Ghimj(index,767);
W_119 = -a;
W_121 = W_121+ a *Ghimj(index,768);
W_124 = W_124+ a *Ghimj(index,769);
W_125 = W_125+ a *Ghimj(index,770);
W_126 = W_126+ a *Ghimj(index,771);
W_127 = W_127+ a *Ghimj(index,772);
W_129 = W_129+ a *Ghimj(index,773);
W_133 = W_133+ a *Ghimj(index,774);
W_136 = W_136+ a *Ghimj(index,775);
W_137 = W_137+ a *Ghimj(index,776);
a = - W_121/ Ghimj(index,821);
W_121 = -a;
W_124 = W_124+ a *Ghimj(index,822);
W_125 = W_125+ a *Ghimj(index,823);
W_126 = W_126+ a *Ghimj(index,824);
W_127 = W_127+ a *Ghimj(index,825);
W_129 = W_129+ a *Ghimj(index,826);
W_133 = W_133+ a *Ghimj(index,827);
W_135 = W_135+ a *Ghimj(index,828);
W_136 = W_136+ a *Ghimj(index,829);
W_137 = W_137+ a *Ghimj(index,830);
a = - W_123/ Ghimj(index,869);
W_123 = -a;
W_124 = W_124+ a *Ghimj(index,870);
W_125 = W_125+ a *Ghimj(index,871);
W_126 = W_126+ a *Ghimj(index,872);
W_127 = W_127+ a *Ghimj(index,873);
W_128 = W_128+ a *Ghimj(index,874);
W_129 = W_129+ a *Ghimj(index,875);
W_130 = W_130+ a *Ghimj(index,876);
W_131 = W_131+ a *Ghimj(index,877);
W_132 = W_132+ a *Ghimj(index,878);
W_133 = W_133+ a *Ghimj(index,879);
W_134 = W_134+ a *Ghimj(index,880);
W_135 = W_135+ a *Ghimj(index,881);
W_136 = W_136+ a *Ghimj(index,882);
W_137 = W_137+ a *Ghimj(index,883);
W_138 = W_138+ a *Ghimj(index,884);
a = - W_124/ Ghimj(index,896);
W_124 = -a;
W_125 = W_125+ a *Ghimj(index,897);
W_126 = W_126+ a *Ghimj(index,898);
W_127 = W_127+ a *Ghimj(index,899);
W_128 = W_128+ a *Ghimj(index,900);
W_129 = W_129+ a *Ghimj(index,901);
W_130 = W_130+ a *Ghimj(index,902);
W_131 = W_131+ a *Ghimj(index,903);
W_132 = W_132+ a *Ghimj(index,904);
W_133 = W_133+ a *Ghimj(index,905);
W_135 = W_135+ a *Ghimj(index,906);
W_136 = W_136+ a *Ghimj(index,907);
W_137 = W_137+ a *Ghimj(index,908);
W_138 = W_138+ a *Ghimj(index,909);
a = - W_125/ Ghimj(index,934);
W_125 = -a;
W_126 = W_126+ a *Ghimj(index,935);
W_127 = W_127+ a *Ghimj(index,936);
W_128 = W_128+ a *Ghimj(index,937);
W_129 = W_129+ a *Ghimj(index,938);
W_130 = W_130+ a *Ghimj(index,939);
W_131 = W_131+ a *Ghimj(index,940);
W_132 = W_132+ a *Ghimj(index,941);
W_133 = W_133+ a *Ghimj(index,942);
W_134 = W_134+ a *Ghimj(index,943);
W_135 = W_135+ a *Ghimj(index,944);
W_136 = W_136+ a *Ghimj(index,945);
W_137 = W_137+ a *Ghimj(index,946);
W_138 = W_138+ a *Ghimj(index,947);
a = - W_126/ Ghimj(index,1023);
W_126 = -a;
W_127 = W_127+ a *Ghimj(index,1024);
W_128 = W_128+ a *Ghimj(index,1025);
W_129 = W_129+ a *Ghimj(index,1026);
W_130 = W_130+ a *Ghimj(index,1027);
W_131 = W_131+ a *Ghimj(index,1028);
W_132 = W_132+ a *Ghimj(index,1029);
W_133 = W_133+ a *Ghimj(index,1030);
W_134 = W_134+ a *Ghimj(index,1031);
W_135 = W_135+ a *Ghimj(index,1032);
W_136 = W_136+ a *Ghimj(index,1033);
W_137 = W_137+ a *Ghimj(index,1034);
W_138 = W_138+ a *Ghimj(index,1035);
a = - W_127/ Ghimj(index,1071);
W_127 = -a;
W_128 = W_128+ a *Ghimj(index,1072);
W_129 = W_129+ a *Ghimj(index,1073);
W_130 = W_130+ a *Ghimj(index,1074);
W_131 = W_131+ a *Ghimj(index,1075);
W_132 = W_132+ a *Ghimj(index,1076);
W_133 = W_133+ a *Ghimj(index,1077);
W_134 = W_134+ a *Ghimj(index,1078);
W_135 = W_135+ a *Ghimj(index,1079);
W_136 = W_136+ a *Ghimj(index,1080);
W_137 = W_137+ a *Ghimj(index,1081);
W_138 = W_138+ a *Ghimj(index,1082);
a = - W_128/ Ghimj(index,1138);
W_128 = -a;
W_129 = W_129+ a *Ghimj(index,1139);
W_130 = W_130+ a *Ghimj(index,1140);
W_131 = W_131+ a *Ghimj(index,1141);
W_132 = W_132+ a *Ghimj(index,1142);
W_133 = W_133+ a *Ghimj(index,1143);
W_134 = W_134+ a *Ghimj(index,1144);
W_135 = W_135+ a *Ghimj(index,1145);
W_136 = W_136+ a *Ghimj(index,1146);
W_137 = W_137+ a *Ghimj(index,1147);
W_138 = W_138+ a *Ghimj(index,1148);
a = - W_129/ Ghimj(index,1176);
W_129 = -a;
W_130 = W_130+ a *Ghimj(index,1177);
W_131 = W_131+ a *Ghimj(index,1178);
W_132 = W_132+ a *Ghimj(index,1179);
W_133 = W_133+ a *Ghimj(index,1180);
W_134 = W_134+ a *Ghimj(index,1181);
W_135 = W_135+ a *Ghimj(index,1182);
W_136 = W_136+ a *Ghimj(index,1183);
W_137 = W_137+ a *Ghimj(index,1184);
W_138 = W_138+ a *Ghimj(index,1185);
a = - W_130/ Ghimj(index,1218);
W_130 = -a;
W_131 = W_131+ a *Ghimj(index,1219);
W_132 = W_132+ a *Ghimj(index,1220);
W_133 = W_133+ a *Ghimj(index,1221);
W_134 = W_134+ a *Ghimj(index,1222);
W_135 = W_135+ a *Ghimj(index,1223);
W_136 = W_136+ a *Ghimj(index,1224);
W_137 = W_137+ a *Ghimj(index,1225);
W_138 = W_138+ a *Ghimj(index,1226);
a = - W_131/ Ghimj(index,1242);
W_131 = -a;
W_132 = W_132+ a *Ghimj(index,1243);
W_133 = W_133+ a *Ghimj(index,1244);
W_134 = W_134+ a *Ghimj(index,1245);
W_135 = W_135+ a *Ghimj(index,1246);
W_136 = W_136+ a *Ghimj(index,1247);
W_137 = W_137+ a *Ghimj(index,1248);
W_138 = W_138+ a *Ghimj(index,1249);
a = - W_132/ Ghimj(index,1262);
W_132 = -a;
W_133 = W_133+ a *Ghimj(index,1263);
W_134 = W_134+ a *Ghimj(index,1264);
W_135 = W_135+ a *Ghimj(index,1265);
W_136 = W_136+ a *Ghimj(index,1266);
W_137 = W_137+ a *Ghimj(index,1267);
W_138 = W_138+ a *Ghimj(index,1268);
a = - W_133/ Ghimj(index,1297);
W_133 = -a;
W_134 = W_134+ a *Ghimj(index,1298);
W_135 = W_135+ a *Ghimj(index,1299);
W_136 = W_136+ a *Ghimj(index,1300);
W_137 = W_137+ a *Ghimj(index,1301);
W_138 = W_138+ a *Ghimj(index,1302);
a = - W_134/ Ghimj(index,1324);
W_134 = -a;
W_135 = W_135+ a *Ghimj(index,1325);
W_136 = W_136+ a *Ghimj(index,1326);
W_137 = W_137+ a *Ghimj(index,1327);
W_138 = W_138+ a *Ghimj(index,1328);
a = - W_135/ Ghimj(index,1370);
W_135 = -a;
W_136 = W_136+ a *Ghimj(index,1371);
W_137 = W_137+ a *Ghimj(index,1372);
W_138 = W_138+ a *Ghimj(index,1373);
Ghimj(index,1374) = W_73;
Ghimj(index,1375) = W_83;
Ghimj(index,1376) = W_101;
Ghimj(index,1377) = W_105;
Ghimj(index,1378) = W_106;
Ghimj(index,1379) = W_107;
Ghimj(index,1380) = W_114;
Ghimj(index,1381) = W_116;
Ghimj(index,1382) = W_117;
Ghimj(index,1383) = W_119;
Ghimj(index,1384) = W_121;
Ghimj(index,1385) = W_123;
Ghimj(index,1386) = W_124;
Ghimj(index,1387) = W_125;
Ghimj(index,1388) = W_126;
Ghimj(index,1389) = W_127;
Ghimj(index,1390) = W_128;
Ghimj(index,1391) = W_129;
Ghimj(index,1392) = W_130;
Ghimj(index,1393) = W_131;
Ghimj(index,1394) = W_132;
Ghimj(index,1395) = W_133;
Ghimj(index,1396) = W_134;
Ghimj(index,1397) = W_135;
Ghimj(index,1398) = W_136;
Ghimj(index,1399) = W_137;
Ghimj(index,1400) = W_138;
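 /* Next row of the sparse LU factorization: gather its nonzero entries
    (Ghimj slots 1401-1452) into the W_* workspace registers. */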
W_46 = Ghimj(index,1401);
W_56 = Ghimj(index,1402);
W_62 = Ghimj(index,1403);
W_65 = Ghimj(index,1404);
W_66 = Ghimj(index,1405);
W_69 = Ghimj(index,1406);
W_71 = Ghimj(index,1407);
W_73 = Ghimj(index,1408);
W_78 = Ghimj(index,1409);
W_79 = Ghimj(index,1410);
W_81 = Ghimj(index,1411);
W_82 = Ghimj(index,1412);
W_87 = Ghimj(index,1413);
W_88 = Ghimj(index,1414);
W_89 = Ghimj(index,1415);
W_91 = Ghimj(index,1416);
W_92 = Ghimj(index,1417);
W_93 = Ghimj(index,1418);
W_94 = Ghimj(index,1419);
W_96 = Ghimj(index,1420);
W_99 = Ghimj(index,1421);
W_102 = Ghimj(index,1422);
W_103 = Ghimj(index,1423);
W_104 = Ghimj(index,1424);
W_106 = Ghimj(index,1425);
W_107 = Ghimj(index,1426);
W_108 = Ghimj(index,1427);
W_109 = Ghimj(index,1428);
W_110 = Ghimj(index,1429);
W_111 = Ghimj(index,1430);
W_113 = Ghimj(index,1431);
W_114 = Ghimj(index,1432);
W_115 = Ghimj(index,1433);
W_117 = Ghimj(index,1434);
W_119 = Ghimj(index,1435);
W_121 = Ghimj(index,1436);
W_122 = Ghimj(index,1437);
W_124 = Ghimj(index,1438);
W_125 = Ghimj(index,1439);
W_126 = Ghimj(index,1440);
W_127 = Ghimj(index,1441);
W_128 = Ghimj(index,1442);
W_129 = Ghimj(index,1443);
W_130 = Ghimj(index,1444);
W_131 = Ghimj(index,1445);
W_132 = Ghimj(index,1446);
W_133 = Ghimj(index,1447);
W_134 = Ghimj(index,1448);
W_135 = Ghimj(index,1449);
W_136 = Ghimj(index,1450);
W_137 = Ghimj(index,1451);
W_138 = Ghimj(index,1452);
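 /* Eliminate the previously factored columns from this row: for each lower
    column j, a = -W_j / pivot_j, the multiplier is stored back in W_j, and the
    remaining workspace entries are updated with the stored row of column j. */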
a = - W_46/ Ghimj(index,272);
W_46 = -a;
W_81 = W_81+ a *Ghimj(index,273);
W_124 = W_124+ a *Ghimj(index,274);
W_137 = W_137+ a *Ghimj(index,275);
a = - W_56/ Ghimj(index,296);
W_56 = -a;
W_65 = W_65+ a *Ghimj(index,297);
W_81 = W_81+ a *Ghimj(index,298);
W_126 = W_126+ a *Ghimj(index,299);
a = - W_62/ Ghimj(index,319);
W_62 = -a;
W_93 = W_93+ a *Ghimj(index,320);
W_126 = W_126+ a *Ghimj(index,321);
W_133 = W_133+ a *Ghimj(index,322);
a = - W_65/ Ghimj(index,331);
W_65 = -a;
W_114 = W_114+ a *Ghimj(index,332);
W_126 = W_126+ a *Ghimj(index,333);
W_132 = W_132+ a *Ghimj(index,334);
a = - W_66/ Ghimj(index,335);
W_66 = -a;
W_109 = W_109+ a *Ghimj(index,336);
W_126 = W_126+ a *Ghimj(index,337);
W_137 = W_137+ a *Ghimj(index,338);
a = - W_69/ Ghimj(index,347);
W_69 = -a;
W_93 = W_93+ a *Ghimj(index,348);
W_126 = W_126+ a *Ghimj(index,349);
W_137 = W_137+ a *Ghimj(index,350);
a = - W_71/ Ghimj(index,356);
W_71 = -a;
W_117 = W_117+ a *Ghimj(index,357);
W_126 = W_126+ a *Ghimj(index,358);
W_137 = W_137+ a *Ghimj(index,359);
a = - W_73/ Ghimj(index,364);
W_73 = -a;
W_126 = W_126+ a *Ghimj(index,365);
W_135 = W_135+ a *Ghimj(index,366);
W_137 = W_137+ a *Ghimj(index,367);
a = - W_78/ Ghimj(index,386);
W_78 = -a;
W_103 = W_103+ a *Ghimj(index,387);
W_106 = W_106+ a *Ghimj(index,388);
W_107 = W_107+ a *Ghimj(index,389);
W_110 = W_110+ a *Ghimj(index,390);
W_124 = W_124+ a *Ghimj(index,391);
W_126 = W_126+ a *Ghimj(index,392);
a = - W_79/ Ghimj(index,393);
W_79 = -a;
W_102 = W_102+ a *Ghimj(index,394);
W_126 = W_126+ a *Ghimj(index,395);
W_137 = W_137+ a *Ghimj(index,396);
a = - W_81/ Ghimj(index,405);
W_81 = -a;
W_114 = W_114+ a *Ghimj(index,406);
W_124 = W_124+ a *Ghimj(index,407);
W_126 = W_126+ a *Ghimj(index,408);
W_127 = W_127+ a *Ghimj(index,409);
W_129 = W_129+ a *Ghimj(index,410);
W_136 = W_136+ a *Ghimj(index,411);
a = - W_82/ Ghimj(index,412);
W_82 = -a;
W_113 = W_113+ a *Ghimj(index,413);
W_126 = W_126+ a *Ghimj(index,414);
W_137 = W_137+ a *Ghimj(index,415);
a = - W_87/ Ghimj(index,444);
W_87 = -a;
W_92 = W_92+ a *Ghimj(index,445);
W_124 = W_124+ a *Ghimj(index,446);
W_126 = W_126+ a *Ghimj(index,447);
W_135 = W_135+ a *Ghimj(index,448);
W_137 = W_137+ a *Ghimj(index,449);
a = - W_88/ Ghimj(index,450);
W_88 = -a;
W_103 = W_103+ a *Ghimj(index,451);
W_106 = W_106+ a *Ghimj(index,452);
W_124 = W_124+ a *Ghimj(index,453);
W_126 = W_126+ a *Ghimj(index,454);
W_127 = W_127+ a *Ghimj(index,455);
W_137 = W_137+ a *Ghimj(index,456);
a = - W_89/ Ghimj(index,457);
W_89 = -a;
W_93 = W_93+ a *Ghimj(index,458);
W_94 = W_94+ a *Ghimj(index,459);
W_102 = W_102+ a *Ghimj(index,460);
W_107 = W_107+ a *Ghimj(index,461);
W_109 = W_109+ a *Ghimj(index,462);
W_113 = W_113+ a *Ghimj(index,463);
W_117 = W_117+ a *Ghimj(index,464);
W_124 = W_124+ a *Ghimj(index,465);
W_125 = W_125+ a *Ghimj(index,466);
W_126 = W_126+ a *Ghimj(index,467);
a = - W_91/ Ghimj(index,481);
W_91 = -a;
W_106 = W_106+ a *Ghimj(index,482);
W_109 = W_109+ a *Ghimj(index,483);
W_126 = W_126+ a *Ghimj(index,484);
W_133 = W_133+ a *Ghimj(index,485);
W_136 = W_136+ a *Ghimj(index,486);
a = - W_92/ Ghimj(index,489);
W_92 = -a;
W_124 = W_124+ a *Ghimj(index,490);
W_126 = W_126+ a *Ghimj(index,491);
W_133 = W_133+ a *Ghimj(index,492);
W_135 = W_135+ a *Ghimj(index,493);
W_137 = W_137+ a *Ghimj(index,494);
a = - W_93/ Ghimj(index,497);
W_93 = -a;
W_125 = W_125+ a *Ghimj(index,498);
W_126 = W_126+ a *Ghimj(index,499);
W_133 = W_133+ a *Ghimj(index,500);
W_137 = W_137+ a *Ghimj(index,501);
a = - W_94/ Ghimj(index,505);
W_94 = -a;
W_125 = W_125+ a *Ghimj(index,506);
W_126 = W_126+ a *Ghimj(index,507);
W_133 = W_133+ a *Ghimj(index,508);
W_137 = W_137+ a *Ghimj(index,509);
a = - W_96/ Ghimj(index,538);
W_96 = -a;
W_107 = W_107+ a *Ghimj(index,539);
W_108 = W_108+ a *Ghimj(index,540);
W_109 = W_109+ a *Ghimj(index,541);
W_110 = W_110+ a *Ghimj(index,542);
W_113 = W_113+ a *Ghimj(index,543);
W_124 = W_124+ a *Ghimj(index,544);
W_125 = W_125+ a *Ghimj(index,545);
W_126 = W_126+ a *Ghimj(index,546);
W_133 = W_133+ a *Ghimj(index,547);
W_137 = W_137+ a *Ghimj(index,548);
a = - W_99/ Ghimj(index,565);
W_99 = -a;
W_102 = W_102+ a *Ghimj(index,566);
W_111 = W_111+ a *Ghimj(index,567);
W_125 = W_125+ a *Ghimj(index,568);
W_126 = W_126+ a *Ghimj(index,569);
W_133 = W_133+ a *Ghimj(index,570);
W_137 = W_137+ a *Ghimj(index,571);
a = - W_102/ Ghimj(index,600);
W_102 = -a;
W_125 = W_125+ a *Ghimj(index,601);
W_126 = W_126+ a *Ghimj(index,602);
W_133 = W_133+ a *Ghimj(index,603);
W_137 = W_137+ a *Ghimj(index,604);
a = - W_103/ Ghimj(index,605);
W_103 = -a;
W_124 = W_124+ a *Ghimj(index,606);
W_126 = W_126+ a *Ghimj(index,607);
W_127 = W_127+ a *Ghimj(index,608);
W_129 = W_129+ a *Ghimj(index,609);
a = - W_104/ Ghimj(index,610);
W_104 = -a;
W_125 = W_125+ a *Ghimj(index,611);
W_126 = W_126+ a *Ghimj(index,612);
W_127 = W_127+ a *Ghimj(index,613);
W_129 = W_129+ a *Ghimj(index,614);
W_137 = W_137+ a *Ghimj(index,615);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
a = - W_108/ Ghimj(index,636);
W_108 = -a;
W_109 = W_109+ a *Ghimj(index,637);
W_113 = W_113+ a *Ghimj(index,638);
W_115 = W_115+ a *Ghimj(index,639);
W_124 = W_124+ a *Ghimj(index,640);
W_125 = W_125+ a *Ghimj(index,641);
W_126 = W_126+ a *Ghimj(index,642);
W_133 = W_133+ a *Ghimj(index,643);
W_135 = W_135+ a *Ghimj(index,644);
W_136 = W_136+ a *Ghimj(index,645);
W_137 = W_137+ a *Ghimj(index,646);
a = - W_109/ Ghimj(index,648);
W_109 = -a;
W_124 = W_124+ a *Ghimj(index,649);
W_125 = W_125+ a *Ghimj(index,650);
W_126 = W_126+ a *Ghimj(index,651);
W_133 = W_133+ a *Ghimj(index,652);
W_136 = W_136+ a *Ghimj(index,653);
W_137 = W_137+ a *Ghimj(index,654);
a = - W_110/ Ghimj(index,659);
W_110 = -a;
W_124 = W_124+ a *Ghimj(index,660);
W_125 = W_125+ a *Ghimj(index,661);
W_126 = W_126+ a *Ghimj(index,662);
W_133 = W_133+ a *Ghimj(index,663);
W_136 = W_136+ a *Ghimj(index,664);
W_137 = W_137+ a *Ghimj(index,665);
a = - W_111/ Ghimj(index,669);
W_111 = -a;
W_115 = W_115+ a *Ghimj(index,670);
W_124 = W_124+ a *Ghimj(index,671);
W_125 = W_125+ a *Ghimj(index,672);
W_126 = W_126+ a *Ghimj(index,673);
W_133 = W_133+ a *Ghimj(index,674);
W_136 = W_136+ a *Ghimj(index,675);
W_137 = W_137+ a *Ghimj(index,676);
a = - W_113/ Ghimj(index,689);
W_113 = -a;
W_124 = W_124+ a *Ghimj(index,690);
W_125 = W_125+ a *Ghimj(index,691);
W_126 = W_126+ a *Ghimj(index,692);
W_133 = W_133+ a *Ghimj(index,693);
W_135 = W_135+ a *Ghimj(index,694);
W_136 = W_136+ a *Ghimj(index,695);
W_137 = W_137+ a *Ghimj(index,696);
a = - W_114/ Ghimj(index,697);
W_114 = -a;
W_126 = W_126+ a *Ghimj(index,698);
W_127 = W_127+ a *Ghimj(index,699);
W_129 = W_129+ a *Ghimj(index,700);
W_132 = W_132+ a *Ghimj(index,701);
W_136 = W_136+ a *Ghimj(index,702);
a = - W_115/ Ghimj(index,706);
W_115 = -a;
W_124 = W_124+ a *Ghimj(index,707);
W_126 = W_126+ a *Ghimj(index,708);
W_127 = W_127+ a *Ghimj(index,709);
W_129 = W_129+ a *Ghimj(index,710);
W_133 = W_133+ a *Ghimj(index,711);
W_136 = W_136+ a *Ghimj(index,712);
W_137 = W_137+ a *Ghimj(index,713);
a = - W_117/ Ghimj(index,731);
W_117 = -a;
W_121 = W_121+ a *Ghimj(index,732);
W_124 = W_124+ a *Ghimj(index,733);
W_125 = W_125+ a *Ghimj(index,734);
W_126 = W_126+ a *Ghimj(index,735);
W_127 = W_127+ a *Ghimj(index,736);
W_129 = W_129+ a *Ghimj(index,737);
W_133 = W_133+ a *Ghimj(index,738);
W_136 = W_136+ a *Ghimj(index,739);
W_137 = W_137+ a *Ghimj(index,740);
a = - W_119/ Ghimj(index,767);
W_119 = -a;
W_121 = W_121+ a *Ghimj(index,768);
W_124 = W_124+ a *Ghimj(index,769);
W_125 = W_125+ a *Ghimj(index,770);
W_126 = W_126+ a *Ghimj(index,771);
W_127 = W_127+ a *Ghimj(index,772);
W_129 = W_129+ a *Ghimj(index,773);
W_133 = W_133+ a *Ghimj(index,774);
W_136 = W_136+ a *Ghimj(index,775);
W_137 = W_137+ a *Ghimj(index,776);
a = - W_121/ Ghimj(index,821);
W_121 = -a;
W_124 = W_124+ a *Ghimj(index,822);
W_125 = W_125+ a *Ghimj(index,823);
W_126 = W_126+ a *Ghimj(index,824);
W_127 = W_127+ a *Ghimj(index,825);
W_129 = W_129+ a *Ghimj(index,826);
W_133 = W_133+ a *Ghimj(index,827);
W_135 = W_135+ a *Ghimj(index,828);
W_136 = W_136+ a *Ghimj(index,829);
W_137 = W_137+ a *Ghimj(index,830);
a = - W_122/ Ghimj(index,847);
W_122 = -a;
W_124 = W_124+ a *Ghimj(index,848);
W_125 = W_125+ a *Ghimj(index,849);
W_126 = W_126+ a *Ghimj(index,850);
W_127 = W_127+ a *Ghimj(index,851);
W_128 = W_128+ a *Ghimj(index,852);
W_129 = W_129+ a *Ghimj(index,853);
W_130 = W_130+ a *Ghimj(index,854);
W_131 = W_131+ a *Ghimj(index,855);
W_133 = W_133+ a *Ghimj(index,856);
W_135 = W_135+ a *Ghimj(index,857);
W_136 = W_136+ a *Ghimj(index,858);
W_137 = W_137+ a *Ghimj(index,859);
W_138 = W_138+ a *Ghimj(index,860);
a = - W_124/ Ghimj(index,896);
W_124 = -a;
W_125 = W_125+ a *Ghimj(index,897);
W_126 = W_126+ a *Ghimj(index,898);
W_127 = W_127+ a *Ghimj(index,899);
W_128 = W_128+ a *Ghimj(index,900);
W_129 = W_129+ a *Ghimj(index,901);
W_130 = W_130+ a *Ghimj(index,902);
W_131 = W_131+ a *Ghimj(index,903);
W_132 = W_132+ a *Ghimj(index,904);
W_133 = W_133+ a *Ghimj(index,905);
W_135 = W_135+ a *Ghimj(index,906);
W_136 = W_136+ a *Ghimj(index,907);
W_137 = W_137+ a *Ghimj(index,908);
W_138 = W_138+ a *Ghimj(index,909);
a = - W_125/ Ghimj(index,934);
W_125 = -a;
W_126 = W_126+ a *Ghimj(index,935);
W_127 = W_127+ a *Ghimj(index,936);
W_128 = W_128+ a *Ghimj(index,937);
W_129 = W_129+ a *Ghimj(index,938);
W_130 = W_130+ a *Ghimj(index,939);
W_131 = W_131+ a *Ghimj(index,940);
W_132 = W_132+ a *Ghimj(index,941);
W_133 = W_133+ a *Ghimj(index,942);
W_134 = W_134+ a *Ghimj(index,943);
W_135 = W_135+ a *Ghimj(index,944);
W_136 = W_136+ a *Ghimj(index,945);
W_137 = W_137+ a *Ghimj(index,946);
W_138 = W_138+ a *Ghimj(index,947);
a = - W_126/ Ghimj(index,1023);
W_126 = -a;
W_127 = W_127+ a *Ghimj(index,1024);
W_128 = W_128+ a *Ghimj(index,1025);
W_129 = W_129+ a *Ghimj(index,1026);
W_130 = W_130+ a *Ghimj(index,1027);
W_131 = W_131+ a *Ghimj(index,1028);
W_132 = W_132+ a *Ghimj(index,1029);
W_133 = W_133+ a *Ghimj(index,1030);
W_134 = W_134+ a *Ghimj(index,1031);
W_135 = W_135+ a *Ghimj(index,1032);
W_136 = W_136+ a *Ghimj(index,1033);
W_137 = W_137+ a *Ghimj(index,1034);
W_138 = W_138+ a *Ghimj(index,1035);
a = - W_127/ Ghimj(index,1071);
W_127 = -a;
W_128 = W_128+ a *Ghimj(index,1072);
W_129 = W_129+ a *Ghimj(index,1073);
W_130 = W_130+ a *Ghimj(index,1074);
W_131 = W_131+ a *Ghimj(index,1075);
W_132 = W_132+ a *Ghimj(index,1076);
W_133 = W_133+ a *Ghimj(index,1077);
W_134 = W_134+ a *Ghimj(index,1078);
W_135 = W_135+ a *Ghimj(index,1079);
W_136 = W_136+ a *Ghimj(index,1080);
W_137 = W_137+ a *Ghimj(index,1081);
W_138 = W_138+ a *Ghimj(index,1082);
a = - W_128/ Ghimj(index,1138);
W_128 = -a;
W_129 = W_129+ a *Ghimj(index,1139);
W_130 = W_130+ a *Ghimj(index,1140);
W_131 = W_131+ a *Ghimj(index,1141);
W_132 = W_132+ a *Ghimj(index,1142);
W_133 = W_133+ a *Ghimj(index,1143);
W_134 = W_134+ a *Ghimj(index,1144);
W_135 = W_135+ a *Ghimj(index,1145);
W_136 = W_136+ a *Ghimj(index,1146);
W_137 = W_137+ a *Ghimj(index,1147);
W_138 = W_138+ a *Ghimj(index,1148);
a = - W_129/ Ghimj(index,1176);
W_129 = -a;
W_130 = W_130+ a *Ghimj(index,1177);
W_131 = W_131+ a *Ghimj(index,1178);
W_132 = W_132+ a *Ghimj(index,1179);
W_133 = W_133+ a *Ghimj(index,1180);
W_134 = W_134+ a *Ghimj(index,1181);
W_135 = W_135+ a *Ghimj(index,1182);
W_136 = W_136+ a *Ghimj(index,1183);
W_137 = W_137+ a *Ghimj(index,1184);
W_138 = W_138+ a *Ghimj(index,1185);
a = - W_130/ Ghimj(index,1218);
W_130 = -a;
W_131 = W_131+ a *Ghimj(index,1219);
W_132 = W_132+ a *Ghimj(index,1220);
W_133 = W_133+ a *Ghimj(index,1221);
W_134 = W_134+ a *Ghimj(index,1222);
W_135 = W_135+ a *Ghimj(index,1223);
W_136 = W_136+ a *Ghimj(index,1224);
W_137 = W_137+ a *Ghimj(index,1225);
W_138 = W_138+ a *Ghimj(index,1226);
a = - W_131/ Ghimj(index,1242);
W_131 = -a;
W_132 = W_132+ a *Ghimj(index,1243);
W_133 = W_133+ a *Ghimj(index,1244);
W_134 = W_134+ a *Ghimj(index,1245);
W_135 = W_135+ a *Ghimj(index,1246);
W_136 = W_136+ a *Ghimj(index,1247);
W_137 = W_137+ a *Ghimj(index,1248);
W_138 = W_138+ a *Ghimj(index,1249);
a = - W_132/ Ghimj(index,1262);
W_132 = -a;
W_133 = W_133+ a *Ghimj(index,1263);
W_134 = W_134+ a *Ghimj(index,1264);
W_135 = W_135+ a *Ghimj(index,1265);
W_136 = W_136+ a *Ghimj(index,1266);
W_137 = W_137+ a *Ghimj(index,1267);
W_138 = W_138+ a *Ghimj(index,1268);
a = - W_133/ Ghimj(index,1297);
W_133 = -a;
W_134 = W_134+ a *Ghimj(index,1298);
W_135 = W_135+ a *Ghimj(index,1299);
W_136 = W_136+ a *Ghimj(index,1300);
W_137 = W_137+ a *Ghimj(index,1301);
W_138 = W_138+ a *Ghimj(index,1302);
a = - W_134/ Ghimj(index,1324);
W_134 = -a;
W_135 = W_135+ a *Ghimj(index,1325);
W_136 = W_136+ a *Ghimj(index,1326);
W_137 = W_137+ a *Ghimj(index,1327);
W_138 = W_138+ a *Ghimj(index,1328);
a = - W_135/ Ghimj(index,1370);
W_135 = -a;
W_136 = W_136+ a *Ghimj(index,1371);
W_137 = W_137+ a *Ghimj(index,1372);
W_138 = W_138+ a *Ghimj(index,1373);
a = - W_136/ Ghimj(index,1398);
W_136 = -a;
W_137 = W_137+ a *Ghimj(index,1399);
W_138 = W_138+ a *Ghimj(index,1400);
Ghimj(index,1401) = W_46;
Ghimj(index,1402) = W_56;
Ghimj(index,1403) = W_62;
Ghimj(index,1404) = W_65;
Ghimj(index,1405) = W_66;
Ghimj(index,1406) = W_69;
Ghimj(index,1407) = W_71;
Ghimj(index,1408) = W_73;
Ghimj(index,1409) = W_78;
Ghimj(index,1410) = W_79;
Ghimj(index,1411) = W_81;
Ghimj(index,1412) = W_82;
Ghimj(index,1413) = W_87;
Ghimj(index,1414) = W_88;
Ghimj(index,1415) = W_89;
Ghimj(index,1416) = W_91;
Ghimj(index,1417) = W_92;
Ghimj(index,1418) = W_93;
Ghimj(index,1419) = W_94;
Ghimj(index,1420) = W_96;
Ghimj(index,1421) = W_99;
Ghimj(index,1422) = W_102;
Ghimj(index,1423) = W_103;
Ghimj(index,1424) = W_104;
Ghimj(index,1425) = W_106;
Ghimj(index,1426) = W_107;
Ghimj(index,1427) = W_108;
Ghimj(index,1428) = W_109;
Ghimj(index,1429) = W_110;
Ghimj(index,1430) = W_111;
Ghimj(index,1431) = W_113;
Ghimj(index,1432) = W_114;
Ghimj(index,1433) = W_115;
Ghimj(index,1434) = W_117;
Ghimj(index,1435) = W_119;
Ghimj(index,1436) = W_121;
Ghimj(index,1437) = W_122;
Ghimj(index,1438) = W_124;
Ghimj(index,1439) = W_125;
Ghimj(index,1440) = W_126;
Ghimj(index,1441) = W_127;
Ghimj(index,1442) = W_128;
Ghimj(index,1443) = W_129;
Ghimj(index,1444) = W_130;
Ghimj(index,1445) = W_131;
Ghimj(index,1446) = W_132;
Ghimj(index,1447) = W_133;
Ghimj(index,1448) = W_134;
Ghimj(index,1449) = W_135;
Ghimj(index,1450) = W_136;
Ghimj(index,1451) = W_137;
Ghimj(index,1452) = W_138;
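 /* Next row: gather its nonzero entries (Ghimj slots 1453-1485) into the
    workspace registers. */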
W_83 = Ghimj(index,1453);
W_88 = Ghimj(index,1454);
W_97 = Ghimj(index,1455);
W_98 = Ghimj(index,1456);
W_103 = Ghimj(index,1457);
W_104 = Ghimj(index,1458);
W_105 = Ghimj(index,1459);
W_106 = Ghimj(index,1460);
W_107 = Ghimj(index,1461);
W_112 = Ghimj(index,1462);
W_114 = Ghimj(index,1463);
W_116 = Ghimj(index,1464);
W_118 = Ghimj(index,1465);
W_119 = Ghimj(index,1466);
W_120 = Ghimj(index,1467);
W_121 = Ghimj(index,1468);
W_122 = Ghimj(index,1469);
W_123 = Ghimj(index,1470);
W_124 = Ghimj(index,1471);
W_125 = Ghimj(index,1472);
W_126 = Ghimj(index,1473);
W_127 = Ghimj(index,1474);
W_128 = Ghimj(index,1475);
W_129 = Ghimj(index,1476);
W_130 = Ghimj(index,1477);
W_131 = Ghimj(index,1478);
W_132 = Ghimj(index,1479);
W_133 = Ghimj(index,1480);
W_134 = Ghimj(index,1481);
W_135 = Ghimj(index,1482);
W_136 = Ghimj(index,1483);
W_137 = Ghimj(index,1484);
W_138 = Ghimj(index,1485);
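 /* Eliminate the previously factored columns from this row, as in the rows above. */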
a = - W_83/ Ghimj(index,416);
W_83 = -a;
W_128 = W_128+ a *Ghimj(index,417);
W_135 = W_135+ a *Ghimj(index,418);
W_136 = W_136+ a *Ghimj(index,419);
W_138 = W_138+ a *Ghimj(index,420);
a = - W_88/ Ghimj(index,450);
W_88 = -a;
W_103 = W_103+ a *Ghimj(index,451);
W_106 = W_106+ a *Ghimj(index,452);
W_124 = W_124+ a *Ghimj(index,453);
W_126 = W_126+ a *Ghimj(index,454);
W_127 = W_127+ a *Ghimj(index,455);
W_137 = W_137+ a *Ghimj(index,456);
a = - W_97/ Ghimj(index,549);
W_97 = -a;
W_98 = W_98+ a *Ghimj(index,550);
W_120 = W_120+ a *Ghimj(index,551);
W_122 = W_122+ a *Ghimj(index,552);
W_126 = W_126+ a *Ghimj(index,553);
W_127 = W_127+ a *Ghimj(index,554);
W_130 = W_130+ a *Ghimj(index,555);
W_137 = W_137+ a *Ghimj(index,556);
a = - W_98/ Ghimj(index,557);
W_98 = -a;
W_107 = W_107+ a *Ghimj(index,558);
W_120 = W_120+ a *Ghimj(index,559);
W_124 = W_124+ a *Ghimj(index,560);
W_126 = W_126+ a *Ghimj(index,561);
W_127 = W_127+ a *Ghimj(index,562);
a = - W_103/ Ghimj(index,605);
W_103 = -a;
W_124 = W_124+ a *Ghimj(index,606);
W_126 = W_126+ a *Ghimj(index,607);
W_127 = W_127+ a *Ghimj(index,608);
W_129 = W_129+ a *Ghimj(index,609);
a = - W_104/ Ghimj(index,610);
W_104 = -a;
W_125 = W_125+ a *Ghimj(index,611);
W_126 = W_126+ a *Ghimj(index,612);
W_127 = W_127+ a *Ghimj(index,613);
W_129 = W_129+ a *Ghimj(index,614);
W_137 = W_137+ a *Ghimj(index,615);
a = - W_105/ Ghimj(index,616);
W_105 = -a;
W_128 = W_128+ a *Ghimj(index,617);
W_129 = W_129+ a *Ghimj(index,618);
W_132 = W_132+ a *Ghimj(index,619);
W_135 = W_135+ a *Ghimj(index,620);
W_138 = W_138+ a *Ghimj(index,621);
a = - W_106/ Ghimj(index,622);
W_106 = -a;
W_124 = W_124+ a *Ghimj(index,623);
W_126 = W_126+ a *Ghimj(index,624);
W_136 = W_136+ a *Ghimj(index,625);
a = - W_107/ Ghimj(index,626);
W_107 = -a;
W_124 = W_124+ a *Ghimj(index,627);
W_126 = W_126+ a *Ghimj(index,628);
W_136 = W_136+ a *Ghimj(index,629);
a = - W_112/ Ghimj(index,677);
W_112 = -a;
W_116 = W_116+ a *Ghimj(index,678);
W_123 = W_123+ a *Ghimj(index,679);
W_126 = W_126+ a *Ghimj(index,680);
W_128 = W_128+ a *Ghimj(index,681);
W_134 = W_134+ a *Ghimj(index,682);
W_137 = W_137+ a *Ghimj(index,683);
W_138 = W_138+ a *Ghimj(index,684);
a = - W_114/ Ghimj(index,697);
W_114 = -a;
W_126 = W_126+ a *Ghimj(index,698);
W_127 = W_127+ a *Ghimj(index,699);
W_129 = W_129+ a *Ghimj(index,700);
W_132 = W_132+ a *Ghimj(index,701);
W_136 = W_136+ a *Ghimj(index,702);
a = - W_116/ Ghimj(index,714);
W_116 = -a;
W_123 = W_123+ a *Ghimj(index,715);
W_127 = W_127+ a *Ghimj(index,716);
W_128 = W_128+ a *Ghimj(index,717);
W_131 = W_131+ a *Ghimj(index,718);
W_134 = W_134+ a *Ghimj(index,719);
W_135 = W_135+ a *Ghimj(index,720);
W_138 = W_138+ a *Ghimj(index,721);
a = - W_118/ Ghimj(index,745);
W_118 = -a;
W_123 = W_123+ a *Ghimj(index,746);
W_125 = W_125+ a *Ghimj(index,747);
W_126 = W_126+ a *Ghimj(index,748);
W_127 = W_127+ a *Ghimj(index,749);
W_128 = W_128+ a *Ghimj(index,750);
W_129 = W_129+ a *Ghimj(index,751);
W_131 = W_131+ a *Ghimj(index,752);
W_132 = W_132+ a *Ghimj(index,753);
W_134 = W_134+ a *Ghimj(index,754);
W_135 = W_135+ a *Ghimj(index,755);
W_137 = W_137+ a *Ghimj(index,756);
W_138 = W_138+ a *Ghimj(index,757);
a = - W_119/ Ghimj(index,767);
W_119 = -a;
W_121 = W_121+ a *Ghimj(index,768);
W_124 = W_124+ a *Ghimj(index,769);
W_125 = W_125+ a *Ghimj(index,770);
W_126 = W_126+ a *Ghimj(index,771);
W_127 = W_127+ a *Ghimj(index,772);
W_129 = W_129+ a *Ghimj(index,773);
W_133 = W_133+ a *Ghimj(index,774);
W_136 = W_136+ a *Ghimj(index,775);
W_137 = W_137+ a *Ghimj(index,776);
a = - W_120/ Ghimj(index,787);
W_120 = -a;
W_122 = W_122+ a *Ghimj(index,788);
W_124 = W_124+ a *Ghimj(index,789);
W_126 = W_126+ a *Ghimj(index,790);
W_127 = W_127+ a *Ghimj(index,791);
W_128 = W_128+ a *Ghimj(index,792);
W_130 = W_130+ a *Ghimj(index,793);
W_133 = W_133+ a *Ghimj(index,794);
W_135 = W_135+ a *Ghimj(index,795);
W_136 = W_136+ a *Ghimj(index,796);
W_137 = W_137+ a *Ghimj(index,797);
a = - W_121/ Ghimj(index,821);
W_121 = -a;
W_124 = W_124+ a *Ghimj(index,822);
W_125 = W_125+ a *Ghimj(index,823);
W_126 = W_126+ a *Ghimj(index,824);
W_127 = W_127+ a *Ghimj(index,825);
W_129 = W_129+ a *Ghimj(index,826);
W_133 = W_133+ a *Ghimj(index,827);
W_135 = W_135+ a *Ghimj(index,828);
W_136 = W_136+ a *Ghimj(index,829);
W_137 = W_137+ a *Ghimj(index,830);
a = - W_122/ Ghimj(index,847);
W_122 = -a;
W_124 = W_124+ a *Ghimj(index,848);
W_125 = W_125+ a *Ghimj(index,849);
W_126 = W_126+ a *Ghimj(index,850);
W_127 = W_127+ a *Ghimj(index,851);
W_128 = W_128+ a *Ghimj(index,852);
W_129 = W_129+ a *Ghimj(index,853);
W_130 = W_130+ a *Ghimj(index,854);
W_131 = W_131+ a *Ghimj(index,855);
W_133 = W_133+ a *Ghimj(index,856);
W_135 = W_135+ a *Ghimj(index,857);
W_136 = W_136+ a *Ghimj(index,858);
W_137 = W_137+ a *Ghimj(index,859);
W_138 = W_138+ a *Ghimj(index,860);
a = - W_123/ Ghimj(index,869);
W_123 = -a;
W_124 = W_124+ a *Ghimj(index,870);
W_125 = W_125+ a *Ghimj(index,871);
W_126 = W_126+ a *Ghimj(index,872);
W_127 = W_127+ a *Ghimj(index,873);
W_128 = W_128+ a *Ghimj(index,874);
W_129 = W_129+ a *Ghimj(index,875);
W_130 = W_130+ a *Ghimj(index,876);
W_131 = W_131+ a *Ghimj(index,877);
W_132 = W_132+ a *Ghimj(index,878);
W_133 = W_133+ a *Ghimj(index,879);
W_134 = W_134+ a *Ghimj(index,880);
W_135 = W_135+ a *Ghimj(index,881);
W_136 = W_136+ a *Ghimj(index,882);
W_137 = W_137+ a *Ghimj(index,883);
W_138 = W_138+ a *Ghimj(index,884);
a = - W_124/ Ghimj(index,896);
W_124 = -a;
W_125 = W_125+ a *Ghimj(index,897);
W_126 = W_126+ a *Ghimj(index,898);
W_127 = W_127+ a *Ghimj(index,899);
W_128 = W_128+ a *Ghimj(index,900);
W_129 = W_129+ a *Ghimj(index,901);
W_130 = W_130+ a *Ghimj(index,902);
W_131 = W_131+ a *Ghimj(index,903);
W_132 = W_132+ a *Ghimj(index,904);
W_133 = W_133+ a *Ghimj(index,905);
W_135 = W_135+ a *Ghimj(index,906);
W_136 = W_136+ a *Ghimj(index,907);
W_137 = W_137+ a *Ghimj(index,908);
W_138 = W_138+ a *Ghimj(index,909);
a = - W_125/ Ghimj(index,934);
W_125 = -a;
W_126 = W_126+ a *Ghimj(index,935);
W_127 = W_127+ a *Ghimj(index,936);
W_128 = W_128+ a *Ghimj(index,937);
W_129 = W_129+ a *Ghimj(index,938);
W_130 = W_130+ a *Ghimj(index,939);
W_131 = W_131+ a *Ghimj(index,940);
W_132 = W_132+ a *Ghimj(index,941);
W_133 = W_133+ a *Ghimj(index,942);
W_134 = W_134+ a *Ghimj(index,943);
W_135 = W_135+ a *Ghimj(index,944);
W_136 = W_136+ a *Ghimj(index,945);
W_137 = W_137+ a *Ghimj(index,946);
W_138 = W_138+ a *Ghimj(index,947);
a = - W_126/ Ghimj(index,1023);
W_126 = -a;
W_127 = W_127+ a *Ghimj(index,1024);
W_128 = W_128+ a *Ghimj(index,1025);
W_129 = W_129+ a *Ghimj(index,1026);
W_130 = W_130+ a *Ghimj(index,1027);
W_131 = W_131+ a *Ghimj(index,1028);
W_132 = W_132+ a *Ghimj(index,1029);
W_133 = W_133+ a *Ghimj(index,1030);
W_134 = W_134+ a *Ghimj(index,1031);
W_135 = W_135+ a *Ghimj(index,1032);
W_136 = W_136+ a *Ghimj(index,1033);
W_137 = W_137+ a *Ghimj(index,1034);
W_138 = W_138+ a *Ghimj(index,1035);
a = - W_127/ Ghimj(index,1071);
W_127 = -a;
W_128 = W_128+ a *Ghimj(index,1072);
W_129 = W_129+ a *Ghimj(index,1073);
W_130 = W_130+ a *Ghimj(index,1074);
W_131 = W_131+ a *Ghimj(index,1075);
W_132 = W_132+ a *Ghimj(index,1076);
W_133 = W_133+ a *Ghimj(index,1077);
W_134 = W_134+ a *Ghimj(index,1078);
W_135 = W_135+ a *Ghimj(index,1079);
W_136 = W_136+ a *Ghimj(index,1080);
W_137 = W_137+ a *Ghimj(index,1081);
W_138 = W_138+ a *Ghimj(index,1082);
a = - W_128/ Ghimj(index,1138);
W_128 = -a;
W_129 = W_129+ a *Ghimj(index,1139);
W_130 = W_130+ a *Ghimj(index,1140);
W_131 = W_131+ a *Ghimj(index,1141);
W_132 = W_132+ a *Ghimj(index,1142);
W_133 = W_133+ a *Ghimj(index,1143);
W_134 = W_134+ a *Ghimj(index,1144);
W_135 = W_135+ a *Ghimj(index,1145);
W_136 = W_136+ a *Ghimj(index,1146);
W_137 = W_137+ a *Ghimj(index,1147);
W_138 = W_138+ a *Ghimj(index,1148);
a = - W_129/ Ghimj(index,1176);
W_129 = -a;
W_130 = W_130+ a *Ghimj(index,1177);
W_131 = W_131+ a *Ghimj(index,1178);
W_132 = W_132+ a *Ghimj(index,1179);
W_133 = W_133+ a *Ghimj(index,1180);
W_134 = W_134+ a *Ghimj(index,1181);
W_135 = W_135+ a *Ghimj(index,1182);
W_136 = W_136+ a *Ghimj(index,1183);
W_137 = W_137+ a *Ghimj(index,1184);
W_138 = W_138+ a *Ghimj(index,1185);
a = - W_130/ Ghimj(index,1218);
W_130 = -a;
W_131 = W_131+ a *Ghimj(index,1219);
W_132 = W_132+ a *Ghimj(index,1220);
W_133 = W_133+ a *Ghimj(index,1221);
W_134 = W_134+ a *Ghimj(index,1222);
W_135 = W_135+ a *Ghimj(index,1223);
W_136 = W_136+ a *Ghimj(index,1224);
W_137 = W_137+ a *Ghimj(index,1225);
W_138 = W_138+ a *Ghimj(index,1226);
a = - W_131/ Ghimj(index,1242);
W_131 = -a;
W_132 = W_132+ a *Ghimj(index,1243);
W_133 = W_133+ a *Ghimj(index,1244);
W_134 = W_134+ a *Ghimj(index,1245);
W_135 = W_135+ a *Ghimj(index,1246);
W_136 = W_136+ a *Ghimj(index,1247);
W_137 = W_137+ a *Ghimj(index,1248);
W_138 = W_138+ a *Ghimj(index,1249);
a = - W_132/ Ghimj(index,1262);
W_132 = -a;
W_133 = W_133+ a *Ghimj(index,1263);
W_134 = W_134+ a *Ghimj(index,1264);
W_135 = W_135+ a *Ghimj(index,1265);
W_136 = W_136+ a *Ghimj(index,1266);
W_137 = W_137+ a *Ghimj(index,1267);
W_138 = W_138+ a *Ghimj(index,1268);
a = - W_133/ Ghimj(index,1297);
W_133 = -a;
W_134 = W_134+ a *Ghimj(index,1298);
W_135 = W_135+ a *Ghimj(index,1299);
W_136 = W_136+ a *Ghimj(index,1300);
W_137 = W_137+ a *Ghimj(index,1301);
W_138 = W_138+ a *Ghimj(index,1302);
a = - W_134/ Ghimj(index,1324);
W_134 = -a;
W_135 = W_135+ a *Ghimj(index,1325);
W_136 = W_136+ a *Ghimj(index,1326);
W_137 = W_137+ a *Ghimj(index,1327);
W_138 = W_138+ a *Ghimj(index,1328);
a = - W_135/ Ghimj(index,1370);
W_135 = -a;
W_136 = W_136+ a *Ghimj(index,1371);
W_137 = W_137+ a *Ghimj(index,1372);
W_138 = W_138+ a *Ghimj(index,1373);
a = - W_136/ Ghimj(index,1398);
W_136 = -a;
W_137 = W_137+ a *Ghimj(index,1399);
W_138 = W_138+ a *Ghimj(index,1400);
a = - W_137/ Ghimj(index,1451);
W_137 = -a;
W_138 = W_138+ a *Ghimj(index,1452);
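 /* Scatter the eliminated row back into Ghimj slots 1453-1485. */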
Ghimj(index,1453) = W_83;
Ghimj(index,1454) = W_88;
Ghimj(index,1455) = W_97;
Ghimj(index,1456) = W_98;
Ghimj(index,1457) = W_103;
Ghimj(index,1458) = W_104;
Ghimj(index,1459) = W_105;
Ghimj(index,1460) = W_106;
Ghimj(index,1461) = W_107;
Ghimj(index,1462) = W_112;
Ghimj(index,1463) = W_114;
Ghimj(index,1464) = W_116;
Ghimj(index,1465) = W_118;
Ghimj(index,1466) = W_119;
Ghimj(index,1467) = W_120;
Ghimj(index,1468) = W_121;
Ghimj(index,1469) = W_122;
Ghimj(index,1470) = W_123;
Ghimj(index,1471) = W_124;
Ghimj(index,1472) = W_125;
Ghimj(index,1473) = W_126;
Ghimj(index,1474) = W_127;
Ghimj(index,1475) = W_128;
Ghimj(index,1476) = W_129;
Ghimj(index,1477) = W_130;
Ghimj(index,1478) = W_131;
Ghimj(index,1479) = W_132;
Ghimj(index,1480) = W_133;
Ghimj(index,1481) = W_134;
Ghimj(index,1482) = W_135;
Ghimj(index,1483) = W_136;
Ghimj(index,1484) = W_137;
Ghimj(index,1485) = W_138;
}
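/* ros_Decomp: wrapper used by the Rosenbrock integrator; it runs the sparse
   in-place LU decomposition of Ghimj (kppDecomp) and counts it in Ndec. */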
__device__ void ros_Decomp(double * __restrict__ Ghimj, int &Ndec, int VL_GLO)
{
kppDecomp(Ghimj, VL_GLO);
Ndec++;
}
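/* ros_PrepareMatrix: forms the Rosenbrock iteration matrix in sparse form,
   Ghimj = 1/(direction*H*gam)*I - Jac0, then factorizes it via ros_Decomp. */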
__device__ void ros_PrepareMatrix(double &H, int direction, double gam, double *jac0, double *Ghimj, int &Nsng, int &Ndec, int VL_GLO)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
 int ising, nConsecutive; /* declared by the code generator but not referenced in this routine */
double ghinv;
ghinv = ONE/(direction*H*gam);
for (int i=0; i<LU_NONZERO; i++)
Ghimj(index,i) = -jac0(index,i);
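 /* Add ghinv to the diagonal entries; the indices below are the positions of
    the diagonal elements inside the sparse storage. */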
Ghimj(index,0) += ghinv;
Ghimj(index,1) += ghinv;
Ghimj(index,2) += ghinv;
Ghimj(index,3) += ghinv;
Ghimj(index,4) += ghinv;
Ghimj(index,5) += ghinv;
Ghimj(index,6) += ghinv;
Ghimj(index,9) += ghinv;
Ghimj(index,25) += ghinv;
Ghimj(index,29) += ghinv;
Ghimj(index,38) += ghinv;
Ghimj(index,43) += ghinv;
Ghimj(index,46) += ghinv;
Ghimj(index,48) += ghinv;
Ghimj(index,52) += ghinv;
Ghimj(index,58) += ghinv;
Ghimj(index,60) += ghinv;
Ghimj(index,62) += ghinv;
Ghimj(index,64) += ghinv;
Ghimj(index,68) += ghinv;
Ghimj(index,69) += ghinv;
Ghimj(index,72) += ghinv;
Ghimj(index,75) += ghinv;
Ghimj(index,112) += ghinv;
Ghimj(index,123) += ghinv;
Ghimj(index,140) += ghinv;
Ghimj(index,148) += ghinv;
Ghimj(index,163) += ghinv;
Ghimj(index,170) += ghinv;
Ghimj(index,182) += ghinv;
Ghimj(index,185) += ghinv;
Ghimj(index,190) += ghinv;
Ghimj(index,194) += ghinv;
Ghimj(index,202) += ghinv;
Ghimj(index,206) += ghinv;
Ghimj(index,233) += ghinv;
Ghimj(index,244) += ghinv;
Ghimj(index,251) += ghinv;
Ghimj(index,255) += ghinv;
Ghimj(index,258) += ghinv;
Ghimj(index,260) += ghinv;
Ghimj(index,262) += ghinv;
Ghimj(index,264) += ghinv;
Ghimj(index,266) += ghinv;
Ghimj(index,268) += ghinv;
Ghimj(index,270) += ghinv;
Ghimj(index,272) += ghinv;
Ghimj(index,276) += ghinv;
Ghimj(index,278) += ghinv;
Ghimj(index,280) += ghinv;
Ghimj(index,282) += ghinv;
Ghimj(index,285) += ghinv;
Ghimj(index,288) += ghinv;
Ghimj(index,290) += ghinv;
Ghimj(index,292) += ghinv;
Ghimj(index,294) += ghinv;
Ghimj(index,296) += ghinv;
Ghimj(index,300) += ghinv;
Ghimj(index,303) += ghinv;
Ghimj(index,306) += ghinv;
Ghimj(index,310) += ghinv;
Ghimj(index,315) += ghinv;
Ghimj(index,319) += ghinv;
Ghimj(index,323) += ghinv;
Ghimj(index,327) += ghinv;
Ghimj(index,331) += ghinv;
Ghimj(index,335) += ghinv;
Ghimj(index,339) += ghinv;
Ghimj(index,343) += ghinv;
Ghimj(index,347) += ghinv;
Ghimj(index,352) += ghinv;
Ghimj(index,356) += ghinv;
Ghimj(index,360) += ghinv;
Ghimj(index,364) += ghinv;
Ghimj(index,368) += ghinv;
Ghimj(index,374) += ghinv;
Ghimj(index,377) += ghinv;
Ghimj(index,382) += ghinv;
Ghimj(index,386) += ghinv;
Ghimj(index,393) += ghinv;
Ghimj(index,397) += ghinv;
Ghimj(index,405) += ghinv;
Ghimj(index,412) += ghinv;
Ghimj(index,416) += ghinv;
Ghimj(index,421) += ghinv;
Ghimj(index,427) += ghinv;
Ghimj(index,436) += ghinv;
Ghimj(index,444) += ghinv;
Ghimj(index,450) += ghinv;
Ghimj(index,457) += ghinv;
Ghimj(index,469) += ghinv;
Ghimj(index,481) += ghinv;
Ghimj(index,489) += ghinv;
Ghimj(index,497) += ghinv;
Ghimj(index,505) += ghinv;
Ghimj(index,514) += ghinv;
Ghimj(index,538) += ghinv;
Ghimj(index,549) += ghinv;
Ghimj(index,557) += ghinv;
Ghimj(index,565) += ghinv;
Ghimj(index,573) += ghinv;
Ghimj(index,586) += ghinv;
Ghimj(index,600) += ghinv;
Ghimj(index,605) += ghinv;
Ghimj(index,610) += ghinv;
Ghimj(index,616) += ghinv;
Ghimj(index,622) += ghinv;
Ghimj(index,626) += ghinv;
Ghimj(index,636) += ghinv;
Ghimj(index,648) += ghinv;
Ghimj(index,659) += ghinv;
Ghimj(index,669) += ghinv;
Ghimj(index,677) += ghinv;
Ghimj(index,689) += ghinv;
Ghimj(index,697) += ghinv;
Ghimj(index,706) += ghinv;
Ghimj(index,714) += ghinv;
Ghimj(index,731) += ghinv;
Ghimj(index,745) += ghinv;
Ghimj(index,767) += ghinv;
Ghimj(index,787) += ghinv;
Ghimj(index,821) += ghinv;
Ghimj(index,847) += ghinv;
Ghimj(index,869) += ghinv;
Ghimj(index,896) += ghinv;
Ghimj(index,934) += ghinv;
Ghimj(index,1023) += ghinv;
Ghimj(index,1071) += ghinv;
Ghimj(index,1138) += ghinv;
Ghimj(index,1176) += ghinv;
Ghimj(index,1218) += ghinv;
Ghimj(index,1242) += ghinv;
Ghimj(index,1262) += ghinv;
Ghimj(index,1297) += ghinv;
Ghimj(index,1324) += ghinv;
Ghimj(index,1370) += ghinv;
Ghimj(index,1398) += ghinv;
Ghimj(index,1451) += ghinv;
Ghimj(index,1485) += ghinv;
Ghimj(index,1486) += ghinv;
ros_Decomp(Ghimj, Ndec, VL_GLO);
}
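/* Jac_sp: evaluates the sparse Jacobian of the chemical ODE system for each
   grid cell (thread). The B_* temporaries are per-reaction derivative terms;
   they are combined into the sparse Jacobian entries jcb at the end. */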
__device__ void Jac_sp(const double * __restrict__ var, const double * __restrict__ fix,
const double * __restrict__ rconst, double * __restrict__ jcb, int &Njac, const int VL_GLO)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
double dummy, B_0, B_1, B_2, B_3, B_4, B_5, B_6, B_7, B_8, B_9, B_10, B_11, B_12, B_13, B_14, B_15, B_16, B_17, B_18, B_19, B_20, B_21, B_22, B_23, B_24, B_25, B_26, B_27, B_28, B_29, B_30, B_31, B_32, B_33, B_34, B_35, B_36, B_37, B_38, B_39, B_40, B_41, B_42, B_43, B_44, B_45, B_46, B_47, B_48, B_49, B_50, B_51, B_52, B_53, B_54, B_55, B_56, B_57, B_58, B_59, B_60, B_61, B_62, B_63, B_64, B_65, B_66, B_67, B_68, B_69, B_70, B_71, B_72, B_73, B_74, B_75, B_76, B_77, B_78, B_79, B_80, B_81, B_82, B_83, B_84, B_85, B_86, B_87, B_88, B_89, B_90, B_91, B_92, B_93, B_94, B_95, B_96, B_97, B_98, B_99, B_100, B_101, B_102, B_103, B_104, B_105, B_106, B_107, B_108, B_109, B_110, B_111, B_112, B_113, B_114, B_115, B_116, B_117, B_118, B_119, B_120, B_121, B_122, B_123, B_124, B_125, B_126, B_127, B_128, B_129, B_130, B_131, B_132, B_133, B_134, B_135, B_136, B_137, B_138, B_139, B_140, B_141, B_142, B_143, B_144, B_145, B_146, B_147, B_148, B_149, B_150, B_151, B_152, B_153, B_154, B_155, B_156, B_157, B_158, B_159, B_160, B_161, B_162, B_163, B_164, B_165, B_166, B_167, B_168, B_169, B_170, B_171, B_172, B_173, B_174, B_175, B_176, B_177, B_178, B_179, B_180, B_181, B_182, B_183, B_184, B_185, B_186, B_187, B_188, B_189, B_190, B_191, B_192, B_193, B_194, B_195, B_196, B_197, B_198, B_199, B_200, B_201, B_202, B_203, B_204, B_205, B_206, B_207, B_208, B_209, B_210, B_211, B_212, B_213, B_214, B_215, B_216, B_217, B_218, B_219, B_220, B_221, B_222, B_223, B_224, B_225, B_226, B_227, B_228, B_229, B_230, B_231, B_232, B_233, B_234, B_235, B_236, B_237, B_238, B_239, B_240, B_241, B_242, B_243, B_244, B_245, B_246, B_247, B_248, B_249, B_250, B_251, B_252, B_253, B_254, B_255, B_256, B_257, B_258, B_259, B_260, B_261, B_262, B_263, B_264, B_265, B_266, B_267, B_268, B_269, B_270, B_271, B_272, B_273, B_274, B_275, B_276, B_277, B_278, B_279, B_280, B_281, B_282, B_283, B_284, B_285, B_286, B_287, B_288, B_289, B_290, B_291, B_292, B_293, B_294, B_295, B_296, B_297, B_298, B_299, B_300, B_301, B_302, B_303, B_304, B_305, B_306, B_307, B_308, B_309, B_310, B_311, B_312, B_313, B_314, B_315, B_316, B_317, B_318, B_319, B_320, B_321, B_322, B_323, B_324, B_325, B_326, B_327, B_328, B_329, B_330, B_331, B_332, B_333, B_334, B_335, B_336, B_337, B_338, B_339, B_340, B_341, B_342, B_343, B_344, B_345, B_346, B_347, B_348, B_349, B_350, B_351, B_352, B_353, B_354, B_355, B_356, B_357, B_358, B_359, B_360, B_361, B_362, B_363, B_364, B_365, B_366, B_367, B_368, B_369, B_370, B_371, B_372, B_373, B_374, B_375, B_376, B_377, B_378, B_379, B_380, B_381, B_382, B_383, B_384, B_385, B_386, B_387, B_388, B_389, B_390, B_391, B_392, B_393, B_394, B_395, B_396, B_397, B_398, B_399, B_400, B_401, B_402, B_403, B_404, B_405, B_406, B_407, B_408, B_409, B_410, B_411, B_412, B_413, B_414, B_415, B_416, B_417, B_418, B_419, B_420, B_421, B_422, B_423, B_424, B_425, B_426, B_427, B_428, B_429, B_430, B_431, B_432, B_433, B_434, B_435, B_436, B_437, B_438, B_439, B_440, B_441, B_442, B_443, B_444, B_445, B_446, B_447, B_448, B_449, B_450, B_451, B_452, B_453, B_454, B_455, B_456, B_457, B_458, B_459, B_460, B_461, B_462, B_463, B_464, B_465, B_466, B_467, B_468, B_469, B_470, B_471, B_472, B_473, B_474, B_475, B_476, B_477, B_478, B_479, B_480, B_481, B_482, B_483, B_484, B_485, B_486, B_487, B_488, B_489, B_490, B_491, B_492, B_493, B_494, B_495, B_496, B_497, B_498, B_499, B_500, B_501, B_502, B_503, B_504, B_505, B_506, B_507, B_508, B_509, B_510, B_511, B_512, B_513, B_514, B_515, B_516, B_517, B_518, B_519, B_520, 
B_521, B_522;
Njac++;
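 /* Each B_k holds the derivative of one reaction rate with respect to one of
    its reactants: the rate coefficient rconst(...) multiplied by the
    concentration of the remaining reactant (var(...) for variable species,
    fix(...) for fixed ones, or a literal constant). */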
B_0 = rconst(index,0)*fix(index,0);
B_2 = rconst(index,1)*fix(index,0);
B_4 = 1.2e-10*var(index,124);
B_5 = 1.2e-10*var(index,120);
B_6 = rconst(index,3)*var(index,131);
B_7 = rconst(index,3)*var(index,124);
B_8 = rconst(index,4)*fix(index,0);
B_10 = rconst(index,5)*var(index,124);
B_11 = rconst(index,5)*var(index,122);
B_12 = 1.2e-10*var(index,120);
B_13 = 1.2e-10*var(index,97);
B_14 = rconst(index,7)*var(index,131);
B_15 = rconst(index,7)*var(index,126);
B_16 = rconst(index,8)*var(index,126);
B_17 = rconst(index,8)*var(index,124);
B_18 = rconst(index,9)*var(index,126);
B_19 = rconst(index,9)*var(index,97);
B_20 = rconst(index,10)*var(index,137);
B_21 = rconst(index,10)*var(index,131);
B_22 = rconst(index,11)*var(index,137);
B_23 = rconst(index,11)*var(index,124);
B_24 = 7.2e-11*var(index,137);
B_25 = 7.2e-11*var(index,122);
B_26 = 6.9e-12*var(index,137);
B_27 = 6.9e-12*var(index,122);
B_28 = 1.6e-12*var(index,137);
B_29 = 1.6e-12*var(index,122);
B_30 = rconst(index,15)*var(index,137);
B_31 = rconst(index,15)*var(index,126);
B_32 = rconst(index,16)*2*var(index,137);
B_33 = rconst(index,17)*var(index,128);
B_34 = rconst(index,17)*var(index,120);
B_35 = 1.8e-12*var(index,126);
B_36 = 1.8e-12*var(index,88);
B_37 = rconst(index,19)*fix(index,0);
B_39 = rconst(index,20)*fix(index,1);
B_41 = rconst(index,21)*var(index,120);
B_42 = rconst(index,21)*var(index,60);
B_43 = rconst(index,22)*var(index,120);
B_44 = rconst(index,22)*var(index,60);
B_45 = rconst(index,23)*var(index,133);
B_46 = rconst(index,23)*var(index,124);
B_47 = rconst(index,24)*var(index,133);
B_48 = rconst(index,24)*var(index,59);
B_49 = rconst(index,25)*var(index,135);
B_50 = rconst(index,25)*var(index,131);
B_51 = rconst(index,26)*var(index,135);
B_52 = rconst(index,26)*var(index,124);
B_53 = rconst(index,27)*var(index,135);
B_54 = rconst(index,27)*var(index,59);
B_55 = rconst(index,28)*var(index,136);
B_56 = rconst(index,28)*var(index,133);
B_57 = rconst(index,29)*var(index,136);
B_58 = rconst(index,29)*var(index,135);
B_59 = rconst(index,30);
B_60 = rconst(index,31)*var(index,133);
B_61 = rconst(index,31)*var(index,126);
B_62 = rconst(index,32)*var(index,137);
B_63 = rconst(index,32)*var(index,133);
B_64 = rconst(index,33)*var(index,135);
B_65 = rconst(index,33)*var(index,126);
B_66 = rconst(index,34)*var(index,137);
B_67 = rconst(index,34)*var(index,135);
B_68 = 3.5e-12*var(index,137);
B_69 = 3.5e-12*var(index,136);
B_70 = rconst(index,36)*var(index,126);
B_71 = rconst(index,36)*var(index,76);
B_72 = rconst(index,37)*var(index,126);
B_73 = rconst(index,37)*var(index,101);
B_74 = rconst(index,38);
B_75 = rconst(index,39)*var(index,126);
B_76 = rconst(index,39)*var(index,73);
B_77 = rconst(index,40)*var(index,126);
B_78 = rconst(index,40)*var(index,47);
B_79 = rconst(index,41)*var(index,124);
B_80 = rconst(index,41)*var(index,92);
B_81 = rconst(index,42)*var(index,137);
B_82 = rconst(index,42)*var(index,92);
B_83 = rconst(index,43)*var(index,137);
B_84 = rconst(index,43)*var(index,92);
B_85 = rconst(index,44)*var(index,133);
B_86 = rconst(index,44)*var(index,92);
B_87 = rconst(index,45)*var(index,133);
B_88 = rconst(index,45)*var(index,92);
B_89 = rconst(index,46)*var(index,135);
B_90 = rconst(index,46)*var(index,92);
B_91 = rconst(index,47)*var(index,135);
B_92 = rconst(index,47)*var(index,92);
B_93 = 1.2e-14*var(index,124);
B_94 = 1.2e-14*var(index,84);
B_95 = 1300;
B_96 = rconst(index,50)*var(index,126);
B_97 = rconst(index,50)*var(index,87);
B_98 = rconst(index,51)*var(index,87);
B_99 = rconst(index,51)*var(index,70);
B_100 = rconst(index,52)*var(index,135);
B_101 = rconst(index,52)*var(index,87);
B_102 = 1.66e-12*var(index,126);
B_103 = 1.66e-12*var(index,70);
B_104 = rconst(index,54)*var(index,126);
B_105 = rconst(index,54)*var(index,61);
B_106 = rconst(index,55)*fix(index,0);
B_108 = 1.75e-10*var(index,120);
B_109 = 1.75e-10*var(index,98);
B_110 = rconst(index,57)*var(index,126);
B_111 = rconst(index,57)*var(index,98);
B_112 = rconst(index,58)*var(index,126);
B_113 = rconst(index,58)*var(index,89);
B_114 = rconst(index,59)*var(index,137);
B_115 = rconst(index,59)*var(index,125);
B_116 = rconst(index,60)*var(index,133);
B_117 = rconst(index,60)*var(index,125);
B_118 = 1.3e-12*var(index,136);
B_119 = 1.3e-12*var(index,125);
B_120 = rconst(index,62)*2*var(index,125);
B_121 = rconst(index,63)*2*var(index,125);
B_122 = rconst(index,64)*var(index,126);
B_123 = rconst(index,64)*var(index,104);
B_124 = rconst(index,65)*var(index,130);
B_125 = rconst(index,65)*var(index,126);
B_126 = rconst(index,66)*var(index,136);
B_127 = rconst(index,66)*var(index,130);
B_128 = rconst(index,67)*var(index,126);
B_129 = rconst(index,67)*var(index,95);
B_130 = 4e-13*var(index,126);
B_131 = 4e-13*var(index,78);
B_132 = rconst(index,69)*var(index,126);
B_133 = rconst(index,69)*var(index,48);
B_134 = rconst(index,70)*var(index,124);
B_135 = rconst(index,70)*var(index,103);
B_136 = rconst(index,71)*var(index,126);
B_137 = rconst(index,71)*var(index,103);
B_138 = rconst(index,72)*var(index,137);
B_139 = rconst(index,72)*var(index,117);
B_140 = rconst(index,73)*var(index,133);
B_141 = rconst(index,73)*var(index,117);
B_142 = 2.3e-12*var(index,136);
B_143 = 2.3e-12*var(index,117);
B_144 = rconst(index,75)*var(index,125);
B_145 = rconst(index,75)*var(index,117);
B_146 = rconst(index,76)*var(index,126);
B_147 = rconst(index,76)*var(index,71);
B_148 = rconst(index,77)*var(index,126);
B_149 = rconst(index,77)*var(index,119);
B_150 = rconst(index,78)*var(index,136);
B_151 = rconst(index,78)*var(index,119);
B_152 = rconst(index,79)*var(index,126);
B_153 = rconst(index,79)*var(index,74);
B_154 = rconst(index,80)*var(index,137);
B_155 = rconst(index,80)*var(index,121);
B_156 = rconst(index,81)*var(index,137);
B_157 = rconst(index,81)*var(index,121);
B_158 = rconst(index,82)*var(index,133);
B_159 = rconst(index,82)*var(index,121);
B_160 = rconst(index,83)*var(index,135);
B_161 = rconst(index,83)*var(index,121);
B_162 = 4e-12*var(index,136);
B_163 = 4e-12*var(index,121);
B_164 = rconst(index,85)*var(index,125);
B_165 = rconst(index,85)*var(index,121);
B_166 = rconst(index,86)*var(index,125);
B_167 = rconst(index,86)*var(index,121);
B_168 = rconst(index,87)*var(index,121);
B_169 = rconst(index,87)*var(index,117);
B_170 = rconst(index,88)*2*var(index,121);
B_171 = rconst(index,89)*var(index,126);
B_172 = rconst(index,89)*var(index,63);
B_173 = rconst(index,90)*var(index,126);
B_174 = rconst(index,90)*var(index,58);
B_175 = rconst(index,91)*var(index,126);
B_176 = rconst(index,91)*var(index,77);
B_177 = rconst(index,92);
B_178 = rconst(index,93)*var(index,126);
B_179 = rconst(index,93)*var(index,49);
B_180 = rconst(index,94)*var(index,124);
B_181 = rconst(index,94)*var(index,107);
B_182 = rconst(index,95)*var(index,126);
B_183 = rconst(index,95)*var(index,107);
B_184 = rconst(index,96)*var(index,136);
B_185 = rconst(index,96)*var(index,107);
B_186 = rconst(index,97)*var(index,137);
B_187 = rconst(index,97)*var(index,93);
B_188 = rconst(index,98)*var(index,133);
B_189 = rconst(index,98)*var(index,93);
B_190 = rconst(index,99)*var(index,125);
B_191 = rconst(index,99)*var(index,93);
B_192 = rconst(index,100)*var(index,126);
B_193 = rconst(index,100)*var(index,69);
B_194 = rconst(index,101)*var(index,137);
B_195 = rconst(index,101)*var(index,115);
B_196 = rconst(index,102)*var(index,133);
B_197 = rconst(index,102)*var(index,115);
B_198 = rconst(index,103)*var(index,126);
B_199 = rconst(index,103)*var(index,67);
B_200 = rconst(index,104)*var(index,126);
B_201 = rconst(index,104)*var(index,86);
B_202 = rconst(index,105)*var(index,137);
B_203 = rconst(index,105)*var(index,94);
B_204 = rconst(index,106)*var(index,133);
B_205 = rconst(index,106)*var(index,94);
B_206 = rconst(index,107)*var(index,125);
B_207 = rconst(index,107)*var(index,94);
B_208 = rconst(index,108)*var(index,126);
B_209 = rconst(index,108)*var(index,72);
B_210 = rconst(index,109)*var(index,126);
B_211 = rconst(index,109)*var(index,108);
B_212 = rconst(index,110)*var(index,126);
B_213 = rconst(index,110)*var(index,96);
B_214 = rconst(index,111)*var(index,126);
B_215 = rconst(index,111)*var(index,62);
B_216 = rconst(index,112)*var(index,126);
B_217 = rconst(index,112)*var(index,40);
B_218 = rconst(index,113)*var(index,125);
B_219 = rconst(index,113)*var(index,102);
B_220 = rconst(index,114)*var(index,137);
B_221 = rconst(index,114)*var(index,102);
B_222 = rconst(index,115)*var(index,133);
B_223 = rconst(index,115)*var(index,102);
B_224 = rconst(index,116)*var(index,126);
B_225 = rconst(index,116)*var(index,79);
B_226 = rconst(index,117)*var(index,124);
B_227 = rconst(index,117)*var(index,110);
B_228 = rconst(index,118)*var(index,126);
B_229 = rconst(index,118)*var(index,110);
B_230 = rconst(index,119)*var(index,137);
B_231 = rconst(index,119)*var(index,113);
B_232 = rconst(index,120)*var(index,133);
B_233 = rconst(index,120)*var(index,113);
B_234 = rconst(index,121)*var(index,135);
B_235 = rconst(index,121)*var(index,113);
B_236 = 2e-12*var(index,125);
B_237 = 2e-12*var(index,113);
B_238 = 2e-12*2*var(index,113);
B_239 = 3e-11*var(index,126);
B_240 = 3e-11*var(index,82);
B_241 = rconst(index,125)*var(index,126);
B_242 = rconst(index,125)*var(index,85);
B_243 = rconst(index,126)*var(index,137);
B_244 = rconst(index,126)*var(index,99);
B_245 = rconst(index,127)*var(index,133);
B_246 = rconst(index,127)*var(index,99);
B_247 = rconst(index,128)*var(index,126);
B_248 = rconst(index,128)*var(index,68);
B_249 = 1.7e-12*var(index,126);
B_250 = 1.7e-12*var(index,111);
B_251 = 3.2e-11*var(index,126);
B_252 = 3.2e-11*var(index,64);
B_253 = rconst(index,131);
B_254 = rconst(index,132)*var(index,124);
B_255 = rconst(index,132)*var(index,106);
B_256 = rconst(index,133)*var(index,126);
B_257 = rconst(index,133)*var(index,106);
B_258 = rconst(index,134)*var(index,136);
B_259 = rconst(index,134)*var(index,106);
B_260 = rconst(index,135)*var(index,137);
B_261 = rconst(index,135)*var(index,109);
B_262 = rconst(index,136)*var(index,133);
B_263 = rconst(index,136)*var(index,109);
B_264 = 2e-12*var(index,125);
B_265 = 2e-12*var(index,109);
B_266 = 2e-12*2*var(index,109);
B_267 = 1e-10*var(index,126);
B_268 = 1e-10*var(index,66);
B_269 = 1.3e-11*var(index,126);
B_270 = 1.3e-11*var(index,91);
B_271 = rconst(index,141)*var(index,127);
B_272 = rconst(index,141)*var(index,124);
B_273 = rconst(index,142)*var(index,134);
B_274 = rconst(index,142)*var(index,131);
B_275 = rconst(index,143)*2*var(index,134);
B_276 = rconst(index,144)*2*var(index,134);
B_277 = rconst(index,145)*2*var(index,134);
B_278 = rconst(index,146)*2*var(index,134);
B_279 = rconst(index,147);
B_280 = rconst(index,148)*var(index,127);
B_281 = rconst(index,148)*var(index,97);
B_282 = rconst(index,149)*var(index,137);
B_283 = rconst(index,149)*var(index,127);
B_284 = rconst(index,150)*var(index,137);
B_285 = rconst(index,150)*var(index,127);
B_286 = rconst(index,151)*var(index,127);
B_287 = rconst(index,151)*var(index,88);
B_288 = rconst(index,152)*var(index,134);
B_289 = rconst(index,152)*var(index,126);
B_290 = rconst(index,153)*var(index,137);
B_291 = rconst(index,153)*var(index,134);
B_292 = rconst(index,154)*var(index,138);
B_293 = rconst(index,154)*var(index,126);
B_294 = rconst(index,155)*var(index,126);
B_295 = rconst(index,155)*var(index,112);
B_296 = rconst(index,156)*var(index,134);
B_297 = rconst(index,156)*var(index,133);
B_298 = rconst(index,157)*var(index,135);
B_299 = rconst(index,157)*var(index,134);
B_300 = rconst(index,158);
B_301 = rconst(index,159)*var(index,131);
B_302 = rconst(index,159)*var(index,116);
B_303 = rconst(index,160)*var(index,127);
B_304 = rconst(index,160)*var(index,116);
B_305 = rconst(index,161)*var(index,127);
B_306 = rconst(index,161)*var(index,98);
B_307 = rconst(index,162)*var(index,130);
B_308 = rconst(index,162)*var(index,127);
B_309 = 5.9e-11*var(index,127);
B_310 = 5.9e-11*var(index,104);
B_311 = rconst(index,164)*var(index,134);
B_312 = rconst(index,164)*var(index,125);
B_313 = 3.3e-10*var(index,120);
B_314 = 3.3e-10*var(index,41);
B_315 = 1.65e-10*var(index,120);
B_316 = 1.65e-10*var(index,75);
B_317 = rconst(index,167)*var(index,126);
B_318 = rconst(index,167)*var(index,75);
B_319 = 3.25e-10*var(index,120);
B_320 = 3.25e-10*var(index,57);
B_321 = rconst(index,169)*var(index,126);
B_322 = rconst(index,169)*var(index,57);
B_323 = rconst(index,170)*var(index,127);
B_324 = rconst(index,170)*var(index,103);
B_325 = 8e-11*var(index,127);
B_326 = 8e-11*var(index,119);
B_327 = 1.4e-10*var(index,120);
B_328 = 1.4e-10*var(index,42);
B_329 = 2.3e-10*var(index,120);
B_330 = 2.3e-10*var(index,43);
B_331 = rconst(index,174)*var(index,129);
B_332 = rconst(index,174)*var(index,124);
B_333 = rconst(index,175)*var(index,132);
B_334 = rconst(index,175)*var(index,131);
B_335 = 2.7e-12*2*var(index,132);
B_336 = rconst(index,177)*2*var(index,132);
B_337 = rconst(index,178)*var(index,137);
B_338 = rconst(index,178)*var(index,129);
B_339 = rconst(index,179)*var(index,137);
B_340 = rconst(index,179)*var(index,132);
B_341 = rconst(index,180)*var(index,126);
B_342 = rconst(index,180)*var(index,123);
B_343 = rconst(index,181)*var(index,131);
B_344 = rconst(index,181)*var(index,118);
B_345 = rconst(index,182)*var(index,126);
B_346 = rconst(index,182)*var(index,100);
B_347 = 4.9e-11*var(index,129);
B_348 = 4.9e-11*var(index,105);
B_349 = rconst(index,184)*var(index,133);
B_350 = rconst(index,184)*var(index,132);
B_351 = rconst(index,185)*var(index,135);
B_352 = rconst(index,185)*var(index,132);
B_353 = rconst(index,186);
B_354 = rconst(index,187)*var(index,130);
B_355 = rconst(index,187)*var(index,129);
B_356 = rconst(index,188)*var(index,129);
B_357 = rconst(index,188)*var(index,104);
B_358 = rconst(index,189)*var(index,132);
B_359 = rconst(index,189)*var(index,125);
B_360 = rconst(index,190)*var(index,132);
B_361 = rconst(index,190)*var(index,125);
B_362 = rconst(index,191)*var(index,126);
B_363 = rconst(index,191)*var(index,53);
B_364 = rconst(index,192)*var(index,129);
B_365 = rconst(index,192)*var(index,103);
B_366 = rconst(index,193)*var(index,129);
B_367 = rconst(index,193)*var(index,119);
B_368 = rconst(index,194)*var(index,126);
B_369 = rconst(index,194)*var(index,45);
B_370 = rconst(index,195)*var(index,126);
B_371 = rconst(index,195)*var(index,44);
B_372 = 3.32e-15*var(index,129);
B_373 = 3.32e-15*var(index,90);
B_374 = 1.1e-15*var(index,129);
B_375 = 1.1e-15*var(index,80);
B_376 = rconst(index,198)*var(index,127);
B_377 = rconst(index,198)*var(index,100);
B_378 = rconst(index,199)*var(index,134);
B_379 = rconst(index,199)*var(index,132);
B_380 = rconst(index,200)*var(index,134);
B_381 = rconst(index,200)*var(index,132);
B_382 = rconst(index,201)*var(index,134);
B_383 = rconst(index,201)*var(index,132);
B_384 = 1.45e-11*var(index,127);
B_385 = 1.45e-11*var(index,90);
B_386 = rconst(index,203)*var(index,126);
B_387 = rconst(index,203)*var(index,54);
B_388 = rconst(index,204)*var(index,126);
B_389 = rconst(index,204)*var(index,55);
B_390 = rconst(index,205)*var(index,126);
B_391 = rconst(index,205)*var(index,52);
B_392 = rconst(index,206)*var(index,126);
B_393 = rconst(index,206)*var(index,56);
B_394 = rconst(index,207)*var(index,126);
B_395 = rconst(index,207)*var(index,114);
B_396 = rconst(index,208)*var(index,126);
B_397 = rconst(index,208)*var(index,114);
B_398 = rconst(index,209)*var(index,136);
B_399 = rconst(index,209)*var(index,114);
B_400 = 1e-10*var(index,126);
B_401 = 1e-10*var(index,65);
B_402 = rconst(index,211);
B_403 = 3e-13*var(index,124);
B_404 = 3e-13*var(index,81);
B_405 = 5e-11*var(index,137);
B_406 = 5e-11*var(index,46);
B_407 = 3.3e-10*var(index,127);
B_408 = 3.3e-10*var(index,114);
B_409 = rconst(index,215)*var(index,129);
B_410 = rconst(index,215)*var(index,114);
B_411 = 4.4e-13*var(index,132);
B_412 = 4.4e-13*var(index,114);
B_414 = rconst(index,218);
B_415 = rconst(index,219);
B_416 = rconst(index,220);
B_417 = rconst(index,221);
B_418 = rconst(index,222);
B_419 = rconst(index,223);
B_420 = rconst(index,224);
B_421 = rconst(index,225);
B_422 = rconst(index,226);
B_423 = rconst(index,227);
B_424 = rconst(index,228);
B_425 = rconst(index,229);
B_426 = rconst(index,230);
B_427 = rconst(index,231);
B_428 = rconst(index,232);
B_429 = rconst(index,233);
B_431 = rconst(index,235);
B_432 = rconst(index,236);
B_433 = rconst(index,237);
B_434 = rconst(index,238);
B_435 = rconst(index,239);
B_436 = rconst(index,240);
B_437 = rconst(index,241);
B_438 = rconst(index,242);
B_439 = rconst(index,243);
B_440 = rconst(index,244);
B_441 = rconst(index,245);
B_442 = rconst(index,246);
B_443 = rconst(index,247);
B_444 = rconst(index,248);
B_445 = rconst(index,249);
B_446 = rconst(index,250);
B_447 = rconst(index,251);
B_448 = rconst(index,252);
B_449 = rconst(index,253);
B_450 = rconst(index,254);
B_451 = rconst(index,255);
B_452 = rconst(index,256);
B_453 = rconst(index,257);
B_454 = rconst(index,258);
B_455 = rconst(index,259);
B_456 = rconst(index,260);
B_457 = rconst(index,261);
B_458 = rconst(index,262);
B_459 = rconst(index,263);
B_460 = rconst(index,264);
B_461 = rconst(index,265);
B_462 = rconst(index,266);
B_463 = rconst(index,267);
B_464 = rconst(index,268);
B_465 = rconst(index,269);
B_466 = rconst(index,270);
B_467 = rconst(index,271);
B_468 = rconst(index,272);
B_469 = rconst(index,273);
B_470 = rconst(index,274);
B_471 = rconst(index,275);
B_472 = rconst(index,276);
B_473 = rconst(index,277);
B_474 = rconst(index,278);
B_475 = rconst(index,279);
B_476 = rconst(index,280);
B_477 = rconst(index,281);
B_478 = rconst(index,282);
B_479 = rconst(index,283);
B_480 = rconst(index,284);
B_481 = rconst(index,285)*var(index,128);
B_482 = rconst(index,285)*var(index,83);
B_483 = rconst(index,286);
B_484 = rconst(index,287)*var(index,138);
B_485 = rconst(index,287)*var(index,112);
B_486 = rconst(index,288)*var(index,138);
B_487 = rconst(index,288)*var(index,116);
B_488 = rconst(index,289)*var(index,128);
B_489 = rconst(index,289)*var(index,116);
B_490 = rconst(index,290)*var(index,138);
B_491 = rconst(index,290)*var(index,83);
B_492 = rconst(index,291)*var(index,123);
B_493 = rconst(index,291)*var(index,118);
B_494 = rconst(index,292)*var(index,128);
B_495 = rconst(index,292)*var(index,105);
B_496 = rconst(index,293)*var(index,123);
B_497 = rconst(index,293)*var(index,116);
B_498 = rconst(index,294)*var(index,138);
B_499 = rconst(index,294)*var(index,105);
B_500 = rconst(index,295)*var(index,123);
B_501 = rconst(index,295)*var(index,112);
B_502 = rconst(index,296)*var(index,138);
B_503 = rconst(index,296)*var(index,118);
B_504 = rconst(index,297);
B_505 = 2.3e-10*var(index,120);
B_506 = 2.3e-10*var(index,15);
B_507 = rconst(index,299);
B_508 = 1.4e-10*var(index,120);
B_509 = 1.4e-10*var(index,16);
B_510 = rconst(index,301);
B_511 = rconst(index,302)*var(index,120);
B_512 = rconst(index,302)*var(index,17);
B_513 = rconst(index,303)*var(index,120);
B_514 = rconst(index,303)*var(index,17);
B_515 = rconst(index,304);
B_516 = 3e-10*var(index,120);
B_517 = 3e-10*var(index,18);
B_518 = rconst(index,306)*var(index,126);
B_519 = rconst(index,306)*var(index,18);
B_520 = rconst(index,307);
B_521 = rconst(index,308);
B_522 = rconst(index,309);
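 /* Assemble the sparse Jacobian: each jcb slot is a signed combination of the
    B_* derivative terms contributing to that (row, column) of the Jacobian. */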
jcb(index,0) = - B_469;
jcb(index,1) = - B_476;
jcb(index,2) = - B_474;
jcb(index,3) = - B_480;
jcb(index,4) = - B_504;
jcb(index,5) = - B_521;
jcb(index,6) = - B_522;
jcb(index,7) = B_476;
jcb(index,8) = B_474;
jcb(index,9) = 0;
jcb(index,10) = B_313+ B_462;
jcb(index,11) = B_327+ B_465;
jcb(index,12) = B_329+ B_464;
jcb(index,13) = B_370+ B_472;
jcb(index,14) = B_368+ B_473;
jcb(index,15) = B_390+ B_477;
jcb(index,16) = B_362;
jcb(index,17) = B_386+ B_478;
jcb(index,18) = B_388+ B_479;
jcb(index,19) = 2*B_319+ 2*B_321+ 2*B_463;
jcb(index,20) = 0.9*B_315+ B_317;
jcb(index,21) = B_314+ 0.9*B_316+ 2*B_320+ B_328+ B_330;
jcb(index,22) = B_318+ 2*B_322+ B_363+ B_369+ B_371+ B_387+ B_389+ B_391;
jcb(index,23) = 2*B_476;
jcb(index,24) = 3*B_474;
jcb(index,25) = 0;
jcb(index,26) = 2*B_327+ 2*B_465;
jcb(index,27) = B_329+ B_464;
jcb(index,28) = 2*B_328+ B_330;
jcb(index,29) = 0;
jcb(index,30) = B_465;
jcb(index,31) = 2*B_464;
jcb(index,32) = B_390;
jcb(index,33) = 2*B_386;
jcb(index,34) = B_388;
jcb(index,35) = 0.09*B_315;
jcb(index,36) = 0.09*B_316;
jcb(index,37) = 2*B_387+ B_389+ B_391;
jcb(index,38) = 0;
jcb(index,39) = B_405;
jcb(index,40) = 0.4*B_400;
jcb(index,41) = 0.4*B_401;
jcb(index,42) = B_406;
jcb(index,43) = 0;
jcb(index,44) = B_392;
jcb(index,45) = B_393;
jcb(index,46) = 0;
jcb(index,47) = 2*B_483;
jcb(index,48) = 0;
jcb(index,49) = 2*B_483;
jcb(index,50) = B_521;
jcb(index,51) = B_522;
jcb(index,52) = 0;
jcb(index,53) = B_507;
jcb(index,54) = B_510;
jcb(index,55) = B_513+ B_515;
jcb(index,56) = B_520;
jcb(index,57) = B_514;
jcb(index,58) = - B_505- B_507;
jcb(index,59) = - B_506;
jcb(index,60) = - B_508- B_510;
jcb(index,61) = - B_509;
jcb(index,62) = - B_511- B_513- B_515;
jcb(index,63) = - B_512- B_514;
jcb(index,64) = - B_516- B_518- B_520;
jcb(index,65) = - B_517;
jcb(index,66) = - B_519;
jcb(index,67) = B_504;
jcb(index,68) = 0;
jcb(index,69) = 0;
jcb(index,70) = B_22;
jcb(index,71) = B_23;
jcb(index,72) = 0;
jcb(index,73) = B_33;
jcb(index,74) = B_34;
jcb(index,75) = 0;
jcb(index,76) = 2*B_454;
jcb(index,77) = B_319;
jcb(index,78) = B_41+ B_43;
jcb(index,79) = B_315;
jcb(index,80) = B_481+ 3*B_483+ 2*B_490;
jcb(index,81) = B_93;
jcb(index,82) = B_100;
jcb(index,83) = B_79+ B_89+ B_91;
jcb(index,84) = B_12;
jcb(index,85) = B_108;
jcb(index,86) = B_134;
jcb(index,87) = B_498;
jcb(index,88) = B_254+ B_258;
jcb(index,89) = B_180+ B_184;
jcb(index,90) = B_226;
jcb(index,91) = B_457+ B_484+ B_500;
jcb(index,92) = B_486+ B_496;
jcb(index,93) = B_142;
jcb(index,94) = B_343+ B_468+ B_492+ B_502;
jcb(index,95) = B_150;
jcb(index,96) = 2*B_4+ B_13+ B_33+ B_42+ B_44+ B_109+ B_316+ B_320;
jcb(index,97) = B_162;
jcb(index,98) = B_10;
jcb(index,99) = B_493+ B_497+ B_501;
jcb(index,100) = 2*B_5+ 2*B_6+ B_11+ B_16+ B_22+ B_80+ B_94+ B_135+ B_181+ B_227+ B_255;
jcb(index,101) = B_118+ B_311+ B_360;
jcb(index,102) = B_14+ B_17+ B_288;
jcb(index,103) = B_34+ B_482;
jcb(index,104) = B_126;
jcb(index,105) = 2*B_7+ B_15+ B_20+ 2*B_49+ 2*B_273+ 2*B_333+ B_344;
jcb(index,106) = 2*B_334+ 2*B_335+ 2*B_336+ B_361+ B_378+ 2*B_380+ 2*B_382;
jcb(index,107) = 2*B_274+ 2*B_275+ 2*B_276+ B_277+ B_289+ B_312+ B_379+ 2*B_381+ 2*B_383;
jcb(index,108) = 2*B_50+ B_90+ B_92+ B_101;
jcb(index,109) = B_68+ B_119+ B_127+ B_143+ B_151+ B_163+ B_185+ B_259+ 2*B_422;
jcb(index,110) = B_21+ B_23+ B_69;
jcb(index,111) = B_485+ B_487+ 2*B_491+ B_499+ B_503;
jcb(index,112) = 0;
jcb(index,113) = 0.333333*B_498;
jcb(index,114) = 0.5*B_500;
jcb(index,115) = 0.333333*B_496;
jcb(index,116) = B_343+ B_468+ B_492+ 0.5*B_502;
jcb(index,117) = B_493+ 0.333333*B_497+ 0.5*B_501;
jcb(index,118) = B_360;
jcb(index,119) = 2*B_333+ B_344;
jcb(index,120) = 2*B_334+ 2*B_335+ 2*B_336+ B_361+ 0.5*B_378+ B_380+ B_382;
jcb(index,121) = 0.5*B_379+ B_381+ B_383;
jcb(index,122) = 0.333333*B_499+ 0.5*B_503;
jcb(index,123) = 0;
jcb(index,124) = 2*B_454;
jcb(index,125) = B_319;
jcb(index,126) = B_315;
jcb(index,127) = B_490;
jcb(index,128) = 0.333333*B_498;
jcb(index,129) = B_457+ B_484+ 0.5*B_500;
jcb(index,130) = 0.5*B_486+ 0.333333*B_496;
jcb(index,131) = 0.5*B_502;
jcb(index,132) = B_316+ B_320;
jcb(index,133) = 0.333333*B_497+ 0.5*B_501;
jcb(index,134) = B_311;
jcb(index,135) = B_288;
jcb(index,136) = 2*B_273;
jcb(index,137) = 0.5*B_378+ B_380+ B_382;
jcb(index,138) = 2*B_274+ 2*B_275+ 2*B_276+ B_277+ B_289+ B_312+ 0.5*B_379+ B_381+ B_383;
jcb(index,139) = B_485+ 0.5*B_487+ B_491+ 0.333333*B_499+ 0.5*B_503;
jcb(index,140) = 0;
jcb(index,141) = B_12;
jcb(index,142) = B_13;
jcb(index,143) = B_10;
jcb(index,144) = B_11+ B_16+ B_22;
jcb(index,145) = B_14+ B_17;
jcb(index,146) = B_15+ B_20;
jcb(index,147) = B_21+ B_23;
jcb(index,148) = 0;
jcb(index,149) = B_481+ 3*B_483+ B_490;
jcb(index,150) = B_93;
jcb(index,151) = B_100;
jcb(index,152) = B_79+ B_89+ B_91;
jcb(index,153) = 0.333333*B_498;
jcb(index,154) = 0.5*B_486+ 0.333333*B_496;
jcb(index,155) = 0.333333*B_497;
jcb(index,156) = B_80+ B_94;
jcb(index,157) = B_482;
jcb(index,158) = 2*B_49;
jcb(index,159) = 2*B_50+ B_90+ B_92+ B_101;
jcb(index,160) = B_68+ 2*B_422;
jcb(index,161) = B_69;
jcb(index,162) = 0.5*B_487+ B_491+ 0.333333*B_499;
jcb(index,163) = 0;
jcb(index,164) = B_41+ B_43;
jcb(index,165) = B_108;
jcb(index,166) = 2*B_4+ B_33+ B_42+ B_44+ B_109;
jcb(index,167) = 2*B_5+ 2*B_6;
jcb(index,168) = B_34;
jcb(index,169) = 2*B_7;
jcb(index,170) = 0;
jcb(index,171) = B_134;
jcb(index,172) = B_254+ B_258;
jcb(index,173) = B_180+ B_184;
jcb(index,174) = B_226;
jcb(index,175) = B_142;
jcb(index,176) = B_150;
jcb(index,177) = B_162;
jcb(index,178) = B_135+ B_181+ B_227+ B_255;
jcb(index,179) = B_118;
jcb(index,180) = B_126;
jcb(index,181) = B_119+ B_127+ B_143+ B_151+ B_163+ B_185+ B_259;
jcb(index,182) = 0;
jcb(index,183) = B_16;
jcb(index,184) = B_17;
jcb(index,185) = 0;
jcb(index,186) = B_62;
jcb(index,187) = B_63;
jcb(index,188) = B_476;
jcb(index,189) = B_474;
jcb(index,190) = 0;
jcb(index,191) = B_362+ B_471;
jcb(index,192) = B_363;
jcb(index,193) = B_476;
jcb(index,194) = 0;
jcb(index,195) = 4*B_313+ 4*B_462;
jcb(index,196) = 2*B_327+ 2*B_465;
jcb(index,197) = 3*B_329+ 3*B_464;
jcb(index,198) = 3*B_319+ 3*B_321+ 3*B_463;
jcb(index,199) = B_315+ B_317+ B_461;
jcb(index,200) = 4*B_314+ B_316+ 3*B_320+ 2*B_328+ 3*B_330;
jcb(index,201) = B_318+ 3*B_322;
jcb(index,202) = 0;
jcb(index,203) = B_116;
jcb(index,204) = B_117;
jcb(index,205) = B_469;
jcb(index,206) = 0;
jcb(index,207) = B_458;
jcb(index,208) = B_455;
jcb(index,209) = B_37+ B_47;
jcb(index,210) = B_418;
jcb(index,211) = 0.4*B_400;
jcb(index,212) = 0.333*B_426;
jcb(index,213) = B_70;
jcb(index,214) = B_188;
jcb(index,215) = B_204;
jcb(index,216) = B_245;
jcb(index,217) = B_345;
jcb(index,218) = B_72;
jcb(index,219) = B_222;
jcb(index,220) = B_262;
jcb(index,221) = B_232;
jcb(index,222) = B_394+ B_396+ B_407+ B_409;
jcb(index,223) = B_196;
jcb(index,224) = B_140;
jcb(index,225) = B_156+ B_158;
jcb(index,226) = B_28;
jcb(index,227) = B_116;
jcb(index,228) = B_71+ B_73+ B_346+ B_395+ B_397+ 0.4*B_401;
jcb(index,229) = B_284+ B_408;
jcb(index,230) = B_410;
jcb(index,231) = B_48+ B_62+ B_117+ B_141+ B_159+ B_189+ B_197+ B_205+ B_223+ B_233+ B_246+ B_263+ B_420;
jcb(index,232) = B_29+ B_63+ B_157+ B_285;
jcb(index,233) = 0;
jcb(index,234) = B_188;
jcb(index,235) = B_204;
jcb(index,236) = B_245;
jcb(index,237) = B_222;
jcb(index,238) = B_262;
jcb(index,239) = B_232;
jcb(index,240) = B_196;
jcb(index,241) = B_140;
jcb(index,242) = B_158;
jcb(index,243) = B_141+ B_159+ B_189+ B_197+ B_205+ B_223+ B_233+ B_246+ B_263;
jcb(index,244) = 0;
jcb(index,245) = 2*B_370+ 2*B_472;
jcb(index,246) = 3*B_368+ 3*B_473;
jcb(index,247) = B_390+ B_477;
jcb(index,248) = B_386+ B_478;
jcb(index,249) = 2*B_388+ 2*B_479;
jcb(index,250) = 3*B_369+ 2*B_371+ B_387+ 2*B_389+ B_391;
jcb(index,251) = 0;
jcb(index,252) = B_477;
jcb(index,253) = 2*B_478;
jcb(index,254) = B_479;
jcb(index,255) = - B_448;
jcb(index,256) = 0.8*B_247;
jcb(index,257) = 0.8*B_248;
jcb(index,258) = - B_279- B_454;
jcb(index,259) = B_278;
jcb(index,260) = - B_216;
jcb(index,261) = - B_217;
jcb(index,262) = - B_313- B_462;
jcb(index,263) = - B_314;
jcb(index,264) = - B_327- B_465;
jcb(index,265) = - B_328;
jcb(index,266) = - B_329- B_464;
jcb(index,267) = - B_330;
jcb(index,268) = - B_370- B_472;
jcb(index,269) = - B_371;
jcb(index,270) = - B_368- B_473;
jcb(index,271) = - B_369;
jcb(index,272) = - B_405;
jcb(index,273) = B_403;
jcb(index,274) = B_404;
jcb(index,275) = - B_406;
jcb(index,276) = - B_77;
jcb(index,277) = - B_78;
jcb(index,278) = - B_132;
jcb(index,279) = - B_133;
jcb(index,280) = - B_178;
jcb(index,281) = - B_179;
jcb(index,282) = - B_458;
jcb(index,283) = B_490;
jcb(index,284) = B_491;
jcb(index,285) = - B_455;
jcb(index,286) = B_378;
jcb(index,287) = B_277+ B_379;
jcb(index,288) = - B_390- B_477;
jcb(index,289) = - B_391;
jcb(index,290) = - B_362- B_471;
jcb(index,291) = - B_363;
jcb(index,292) = - B_386- B_478;
jcb(index,293) = - B_387;
jcb(index,294) = - B_388- B_479;
jcb(index,295) = - B_389;
jcb(index,296) = - B_392;
jcb(index,297) = 0.6*B_400;
jcb(index,298) = B_402;
jcb(index,299) = - B_393+ 0.6*B_401;
jcb(index,300) = - B_319- B_321- B_463;
jcb(index,301) = - B_320;
jcb(index,302) = - B_322;
jcb(index,303) = - B_173- B_435;
jcb(index,304) = B_269;
jcb(index,305) = - B_174+ B_270;
jcb(index,306) = - B_37- B_47- B_53;
jcb(index,307) = - B_48+ B_420;
jcb(index,308) = - B_54;
jcb(index,309) = B_53;
jcb(index,310) = - B_41- B_43- B_418;
jcb(index,311) = B_89;
jcb(index,312) = - B_42- B_44;
jcb(index,313) = 0;
jcb(index,314) = B_54+ B_90;
jcb(index,315) = - B_104;
jcb(index,316) = B_98;
jcb(index,317) = B_99;
jcb(index,318) = - B_105;
jcb(index,319) = - B_214- B_442;
jcb(index,320) = 0.04*B_188;
jcb(index,321) = - B_215;
jcb(index,322) = 0.04*B_189;
jcb(index,323) = - B_171- B_434;
jcb(index,324) = B_154;
jcb(index,325) = - B_172;
jcb(index,326) = B_155;
jcb(index,327) = - B_251- B_253- B_450;
jcb(index,328) = B_234;
jcb(index,329) = - B_252;
jcb(index,330) = B_235;
jcb(index,331) = - B_400;
jcb(index,332) = B_396+ B_411;
jcb(index,333) = B_397- B_401;
jcb(index,334) = B_412;
jcb(index,335) = - B_267- B_451;
jcb(index,336) = B_260;
jcb(index,337) = - B_268;
jcb(index,338) = B_261;
jcb(index,339) = - B_198;
jcb(index,340) = B_194;
jcb(index,341) = - B_199;
jcb(index,342) = B_195;
jcb(index,343) = - B_247- B_447;
jcb(index,344) = B_243;
jcb(index,345) = - B_248;
jcb(index,346) = B_244;
jcb(index,347) = - B_192- B_437;
jcb(index,348) = B_186;
jcb(index,349) = - B_193;
jcb(index,350) = B_187;
jcb(index,351) = B_104;
jcb(index,352) = - B_98- B_102;
jcb(index,353) = B_95;
jcb(index,354) = - B_99;
jcb(index,355) = - B_103+ B_105;
jcb(index,356) = - B_146- B_432;
jcb(index,357) = B_138;
jcb(index,358) = - B_147;
jcb(index,359) = B_139;
jcb(index,360) = - B_208- B_441;
jcb(index,361) = B_202;
jcb(index,362) = - B_209;
jcb(index,363) = B_203;
jcb(index,364) = - B_74- B_75- B_426;
jcb(index,365) = - B_76;
jcb(index,366) = B_66;
jcb(index,367) = B_67;
jcb(index,368) = - B_152;
jcb(index,369) = 0.18*B_168;
jcb(index,370) = B_156+ B_166+ 0.18*B_169;
jcb(index,371) = B_167;
jcb(index,372) = - B_153;
jcb(index,373) = B_157;
jcb(index,374) = - 0.9*B_315- B_317- B_461;
jcb(index,375) = - 0.9*B_316;
jcb(index,376) = - B_318;
jcb(index,377) = - B_70- B_424;
jcb(index,378) = B_100;
jcb(index,379) = B_60- B_71;
jcb(index,380) = B_61;
jcb(index,381) = B_101;
jcb(index,382) = - B_175- B_177- B_436;
jcb(index,383) = B_160;
jcb(index,384) = - B_176;
jcb(index,385) = B_161;
jcb(index,386) = - B_130;
jcb(index,387) = 0.23125*B_134;
jcb(index,388) = 0.28*B_254;
jcb(index,389) = 0.22*B_180;
jcb(index,390) = 0.45*B_226;
jcb(index,391) = 0.23125*B_135+ 0.22*B_181+ 0.45*B_227+ 0.28*B_255;
jcb(index,392) = - B_131;
jcb(index,393) = - B_224- B_443;
jcb(index,394) = B_220;
jcb(index,395) = - B_225;
jcb(index,396) = B_221;
jcb(index,397) = - B_374- B_453;
jcb(index,398) = B_384;
jcb(index,399) = B_484;
jcb(index,400) = B_303+ B_486;
jcb(index,401) = B_304+ B_385;
jcb(index,402) = - B_375;
jcb(index,403) = B_275;
jcb(index,404) = B_485+ B_487;
jcb(index,405) = - B_402- B_403;
jcb(index,406) = B_394+ B_398+ B_407+ B_409;
jcb(index,407) = - B_404;
jcb(index,408) = B_395;
jcb(index,409) = B_408;
jcb(index,410) = B_410;
jcb(index,411) = B_399;
jcb(index,412) = - B_239- B_445;
jcb(index,413) = B_230;
jcb(index,414) = - B_240;
jcb(index,415) = B_231;
jcb(index,416) = - B_59- B_423- B_481- B_483- B_490;
jcb(index,417) = - B_482;
jcb(index,418) = B_57;
jcb(index,419) = B_58;
jcb(index,420) = - B_491;
jcb(index,421) = - B_93- B_95;
jcb(index,422) = B_79+ B_81+ B_91;
jcb(index,423) = B_80- B_94;
jcb(index,424) = B_92;
jcb(index,425) = B_82;
jcb(index,426) = 0.85*B_224+ 0.67*B_443;
jcb(index,427) = - B_241- B_446;
jcb(index,428) = 0.88*B_218+ 0.56*B_222;
jcb(index,429) = B_249+ 0.67*B_449;
jcb(index,430) = 0.88*B_219;
jcb(index,431) = 0.85*B_225- B_242+ B_250;
jcb(index,432) = 0.56*B_223;
jcb(index,433) = 0;
jcb(index,434) = B_214+ B_442;
jcb(index,435) = 0.7*B_192+ B_437;
jcb(index,436) = - B_200- B_438;
jcb(index,437) = 0.96*B_188+ B_190;
jcb(index,438) = B_191;
jcb(index,439) = 0.7*B_193- B_201+ B_215;
jcb(index,440) = 0.96*B_189;
jcb(index,441) = 0;
jcb(index,442) = - B_98+ B_102;
jcb(index,443) = 0;
jcb(index,444) = - B_96- B_99- B_100- B_106;
jcb(index,445) = B_83;
jcb(index,446) = 0;
jcb(index,447) = - B_97+ B_103;
jcb(index,448) = - B_101;
jcb(index,449) = B_84;
jcb(index,450) = - B_35- B_286- B_417;
jcb(index,451) = 0.13875*B_134;
jcb(index,452) = 0.09*B_254;
jcb(index,453) = 0.13875*B_135+ 0.09*B_255;
jcb(index,454) = - B_36;
jcb(index,455) = - B_287;
jcb(index,456) = B_32;
jcb(index,457) = - B_112;
jcb(index,458) = 0.2*B_190;
jcb(index,459) = 0.5*B_206;
jcb(index,460) = 0.18*B_218;
jcb(index,461) = 0.03*B_180;
jcb(index,462) = 0.25*B_264;
jcb(index,463) = 0.25*B_236;
jcb(index,464) = 0.25*B_144;
jcb(index,465) = 0.03*B_181;
jcb(index,466) = B_121+ 0.25*B_145+ 0.2*B_191+ 0.5*B_207+ 0.18*B_219+ 0.25*B_237+ 0.25*B_265;
jcb(index,467) = - B_113;
jcb(index,468) = B_374;
jcb(index,469) = - B_372- B_384- B_475;
jcb(index,470) = B_376;
jcb(index,471) = B_498;
jcb(index,472) = B_500;
jcb(index,473) = B_496;
jcb(index,474) = B_502;
jcb(index,475) = B_497+ B_501;
jcb(index,476) = B_377- B_385;
jcb(index,477) = - B_373+ B_375;
jcb(index,478) = B_382;
jcb(index,479) = B_383;
jcb(index,480) = B_499+ B_503;
jcb(index,481) = - B_269- B_452;
jcb(index,482) = B_258;
jcb(index,483) = 0.044*B_262;
jcb(index,484) = - B_270;
jcb(index,485) = 0.044*B_263;
jcb(index,486) = B_259;
jcb(index,487) = B_77;
jcb(index,488) = B_93;
jcb(index,489) = - B_79- B_81- B_83- B_85- B_87- B_89- B_91;
jcb(index,490) = - B_80+ B_94;
jcb(index,491) = B_78;
jcb(index,492) = - B_86- B_88;
jcb(index,493) = - B_90- B_92;
jcb(index,494) = - B_82- B_84;
jcb(index,495) = 0.82*B_178;
jcb(index,496) = 0.3*B_192;
jcb(index,497) = - B_186- B_188- B_190;
jcb(index,498) = - B_191;
jcb(index,499) = 0.82*B_179+ 0.3*B_193;
jcb(index,500) = - B_189;
jcb(index,501) = - B_187;
jcb(index,502) = 0.3*B_208;
jcb(index,503) = B_200;
jcb(index,504) = 0;
jcb(index,505) = - B_202- B_204- B_206;
jcb(index,506) = - B_207;
jcb(index,507) = B_201+ 0.3*B_209;
jcb(index,508) = - B_205;
jcb(index,509) = - B_203;
jcb(index,510) = B_173+ B_435;
jcb(index,511) = B_175;
jcb(index,512) = 0.25*B_445;
jcb(index,513) = 0;
jcb(index,514) = - B_128;
jcb(index,515) = B_212+ B_440;
jcb(index,516) = B_431;
jcb(index,517) = 0.63*B_134;
jcb(index,518) = 0.14*B_254;
jcb(index,519) = 0.31*B_180;
jcb(index,520) = 0;
jcb(index,521) = 0.22*B_226+ B_444;
jcb(index,522) = 0.25*B_232+ 0.125*B_236+ 0.5*B_238;
jcb(index,523) = B_433;
jcb(index,524) = 0;
jcb(index,525) = 0.63*B_135+ 0.31*B_181+ 0.22*B_227+ 0.14*B_255;
jcb(index,526) = 0.125*B_237;
jcb(index,527) = B_124- B_129+ B_174+ B_176+ B_213;
jcb(index,528) = B_307;
jcb(index,529) = B_354;
jcb(index,530) = B_125+ B_126+ B_308+ B_355+ B_428+ B_429;
jcb(index,531) = 0.25*B_233;
jcb(index,532) = 0;
jcb(index,533) = B_127;
jcb(index,534) = 0;
jcb(index,535) = 0.7*B_208;
jcb(index,536) = 0.5*B_445;
jcb(index,537) = 0.5*B_206;
jcb(index,538) = - B_212- B_440;
jcb(index,539) = 0.04*B_180;
jcb(index,540) = B_210;
jcb(index,541) = 0.25*B_264;
jcb(index,542) = 0.9*B_226;
jcb(index,543) = 0.5*B_232+ 0.5*B_236+ B_238;
jcb(index,544) = 0.04*B_181+ 0.9*B_227;
jcb(index,545) = 0.5*B_207+ 0.5*B_237+ 0.25*B_265;
jcb(index,546) = 0.7*B_209+ B_211- B_213;
jcb(index,547) = 0.5*B_233;
jcb(index,548) = 0;
jcb(index,549) = - B_12- B_18- B_280;
jcb(index,550) = 0.05*B_108+ 0.69*B_431;
jcb(index,551) = - B_13+ 0.05*B_109;
jcb(index,552) = B_26;
jcb(index,553) = - B_19;
jcb(index,554) = - B_281;
jcb(index,555) = B_428;
jcb(index,556) = B_27;
jcb(index,557) = - B_108- B_110- B_305- B_431;
jcb(index,558) = 0.06*B_180;
jcb(index,559) = - B_109;
jcb(index,560) = 0.06*B_181;
jcb(index,561) = - B_111;
jcb(index,562) = - B_306;
jcb(index,563) = 0.2*B_247;
jcb(index,564) = B_241;
jcb(index,565) = - B_243- B_245;
jcb(index,566) = 0;
jcb(index,567) = 0;
jcb(index,568) = 0;
jcb(index,569) = B_242+ 0.2*B_248;
jcb(index,570) = - B_246;
jcb(index,571) = - B_244;
jcb(index,572) = B_372;
jcb(index,573) = - B_345- B_376- B_466;
jcb(index,574) = B_347;
jcb(index,575) = 0;
jcb(index,576) = 0;
jcb(index,577) = B_492;
jcb(index,578) = B_493;
jcb(index,579) = - B_346;
jcb(index,580) = - B_377;
jcb(index,581) = B_348+ B_373;
jcb(index,582) = B_336;
jcb(index,583) = 0;
jcb(index,584) = 0;
jcb(index,585) = 2*B_481+ B_490;
jcb(index,586) = - B_72- B_425;
jcb(index,587) = B_494+ B_498;
jcb(index,588) = B_398;
jcb(index,589) = B_486+ B_488+ B_496;
jcb(index,590) = B_150;
jcb(index,591) = B_497;
jcb(index,592) = B_64- B_73;
jcb(index,593) = 2*B_482+ B_489+ B_495;
jcb(index,594) = B_126;
jcb(index,595) = B_65;
jcb(index,596) = B_127+ B_151+ B_399;
jcb(index,597) = B_487+ B_491+ B_499;
jcb(index,598) = B_216;
jcb(index,599) = 0.15*B_224;
jcb(index,600) = - B_218- B_220- B_222;
jcb(index,601) = - B_219;
jcb(index,602) = B_217+ 0.15*B_225;
jcb(index,603) = - B_223;
jcb(index,604) = - B_221;
jcb(index,605) = - B_134- B_136- B_323- B_364;
jcb(index,606) = - B_135;
jcb(index,607) = - B_137;
jcb(index,608) = - B_324;
jcb(index,609) = - B_365;
jcb(index,610) = - B_122- B_309- B_356- B_427;
jcb(index,611) = B_114;
jcb(index,612) = - B_123;
jcb(index,613) = - B_310;
jcb(index,614) = - B_357;
jcb(index,615) = B_115;
jcb(index,616) = - B_347- B_353- B_470- B_494- B_498;
jcb(index,617) = - B_495;
jcb(index,618) = - B_348;
jcb(index,619) = B_351;
jcb(index,620) = B_352;
jcb(index,621) = - B_499;
jcb(index,622) = - B_254- B_256- B_258;
jcb(index,623) = - B_255;
jcb(index,624) = - B_257;
jcb(index,625) = - B_259;
jcb(index,626) = - B_180- B_182- B_184;
jcb(index,627) = - B_181;
jcb(index,628) = - B_183;
jcb(index,629) = - B_185;
jcb(index,630) = B_251+ B_450;
jcb(index,631) = 0.5*B_198;
jcb(index,632) = 0.25*B_445;
jcb(index,633) = B_269;
jcb(index,634) = 0.2*B_206;
jcb(index,635) = 0;
jcb(index,636) = - B_210- B_439;
jcb(index,637) = 0.25*B_264;
jcb(index,638) = 0.25*B_232+ 0.375*B_236+ B_238;
jcb(index,639) = 0;
jcb(index,640) = 0;
jcb(index,641) = 0.2*B_207+ 0.375*B_237+ 0.25*B_265;
jcb(index,642) = 0.5*B_199- B_211+ B_252+ B_270;
jcb(index,643) = 0.25*B_233;
jcb(index,644) = 0;
jcb(index,645) = 0;
jcb(index,646) = 0;
jcb(index,647) = B_256;
jcb(index,648) = - B_260- B_262- B_264- 2*B_266;
jcb(index,649) = 0;
jcb(index,650) = - B_265;
jcb(index,651) = B_257;
jcb(index,652) = - B_263;
jcb(index,653) = 0;
jcb(index,654) = - B_261;
jcb(index,655) = B_267+ B_451;
jcb(index,656) = B_452;
jcb(index,657) = 0.65*B_254;
jcb(index,658) = 0.956*B_262+ 0.5*B_264+ 2*B_266;
jcb(index,659) = - B_226- B_228- B_444;
jcb(index,660) = - B_227+ 0.65*B_255;
jcb(index,661) = 0.5*B_265;
jcb(index,662) = - B_229+ B_268;
jcb(index,663) = 0.956*B_263;
jcb(index,664) = 0;
jcb(index,665) = 0;
jcb(index,666) = 0.015*B_245;
jcb(index,667) = 0.16*B_222;
jcb(index,668) = B_184;
jcb(index,669) = - B_249- B_449;
jcb(index,670) = 0.02*B_196;
jcb(index,671) = 0;
jcb(index,672) = 0;
jcb(index,673) = - B_250;
jcb(index,674) = 0.02*B_197+ 0.16*B_223+ 0.015*B_246;
jcb(index,675) = B_185;
jcb(index,676) = 0;
jcb(index,677) = - B_294- B_457- B_484- B_500;
jcb(index,678) = B_488;
jcb(index,679) = - B_501;
jcb(index,680) = - B_295;
jcb(index,681) = B_489;
jcb(index,682) = B_290;
jcb(index,683) = B_291;
jcb(index,684) = - B_485;
jcb(index,685) = B_253;
jcb(index,686) = B_239;
jcb(index,687) = 0.1*B_254;
jcb(index,688) = B_228;
jcb(index,689) = - B_230- B_232- B_234- B_236- 2*B_238;
jcb(index,690) = 0.1*B_255;
jcb(index,691) = - B_237;
jcb(index,692) = B_229+ B_240;
jcb(index,693) = - B_233;
jcb(index,694) = - B_235;
jcb(index,695) = 0;
jcb(index,696) = - B_231;
jcb(index,697) = - B_394- B_396- B_398- B_407- B_409- B_411;
jcb(index,698) = - B_395- B_397;
jcb(index,699) = - B_408;
jcb(index,700) = - B_410;
jcb(index,701) = - B_412;
jcb(index,702) = - B_399;
jcb(index,703) = 0.5*B_198;
jcb(index,704) = 0.666667*B_136+ 0.666667*B_323+ 0.666667*B_364;
jcb(index,705) = B_182;
jcb(index,706) = - B_194- B_196;
jcb(index,707) = 0;
jcb(index,708) = 0.666667*B_137+ B_183+ 0.5*B_199;
jcb(index,709) = 0.666667*B_324;
jcb(index,710) = 0.666667*B_365;
jcb(index,711) = - B_197;
jcb(index,712) = 0;
jcb(index,713) = - B_195;
jcb(index,714) = - B_300- B_301- B_303- B_459- B_460- B_486- B_488- B_496;
jcb(index,715) = - B_497;
jcb(index,716) = - B_304;
jcb(index,717) = - B_489;
jcb(index,718) = - B_302;
jcb(index,719) = B_298;
jcb(index,720) = B_299;
jcb(index,721) = - B_487;
jcb(index,722) = B_132;
jcb(index,723) = 0.18*B_178;
jcb(index,724) = 0.3*B_146;
jcb(index,725) = 0.33*B_443;
jcb(index,726) = B_446;
jcb(index,727) = 0.12*B_218+ 0.28*B_222;
jcb(index,728) = 0.06*B_180;
jcb(index,729) = 0.33*B_449;
jcb(index,730) = 0;
jcb(index,731) = - B_138- B_140- B_142- B_144- B_168;
jcb(index,732) = - B_169;
jcb(index,733) = 0.06*B_181;
jcb(index,734) = - B_145+ 0.12*B_219;
jcb(index,735) = B_133+ 0.3*B_147+ 0.18*B_179;
jcb(index,736) = 0;
jcb(index,737) = 0;
jcb(index,738) = - B_141+ 0.28*B_223;
jcb(index,739) = - B_143;
jcb(index,740) = - B_139;
jcb(index,741) = B_345;
jcb(index,742) = B_494;
jcb(index,743) = 0;
jcb(index,744) = 0;
jcb(index,745) = - B_343- B_468- B_492- B_502;
jcb(index,746) = - B_493;
jcb(index,747) = B_358;
jcb(index,748) = B_346;
jcb(index,749) = 0;
jcb(index,750) = B_495;
jcb(index,751) = 0;
jcb(index,752) = - B_344;
jcb(index,753) = B_339+ B_359;
jcb(index,754) = 0;
jcb(index,755) = 0;
jcb(index,756) = B_340;
jcb(index,757) = - B_503;
jcb(index,758) = B_447;
jcb(index,759) = 0.7*B_146+ B_432;
jcb(index,760) = 0.33*B_443;
jcb(index,761) = 0.985*B_245;
jcb(index,762) = 0.12*B_218+ 0.28*B_222;
jcb(index,763) = 0.47*B_180;
jcb(index,764) = 0.33*B_449;
jcb(index,765) = 0.98*B_196;
jcb(index,766) = B_140+ B_142+ 0.75*B_144+ B_168;
jcb(index,767) = - B_148- B_150- B_325- B_366- B_433;
jcb(index,768) = B_169;
jcb(index,769) = 0.47*B_181;
jcb(index,770) = 0.75*B_145+ 0.12*B_219;
jcb(index,771) = 0.7*B_147- B_149;
jcb(index,772) = - B_326;
jcb(index,773) = - B_367;
jcb(index,774) = B_141+ 0.98*B_197+ 0.28*B_223+ 0.985*B_246;
jcb(index,775) = B_143- B_151;
jcb(index,776) = 0;
jcb(index,777) = - B_313;
jcb(index,778) = - B_327;
jcb(index,779) = - B_329;
jcb(index,780) = - B_319;
jcb(index,781) = - B_41- B_43+ B_418;
jcb(index,782) = - B_315;
jcb(index,783) = 0;
jcb(index,784) = - B_12;
jcb(index,785) = - B_108;
jcb(index,786) = 0;
jcb(index,787) = - B_0- B_4- B_13- B_33- B_39- B_42- B_44- B_109- B_314- B_316- B_320- B_328- B_330;
jcb(index,788) = 0;
jcb(index,789) = - B_5+ B_414;
jcb(index,790) = 0;
jcb(index,791) = 0;
jcb(index,792) = - B_34;
jcb(index,793) = 0;
jcb(index,794) = 0;
jcb(index,795) = 0;
jcb(index,796) = 0;
jcb(index,797) = 0;
jcb(index,798) = 2*B_448;
jcb(index,799) = B_171;
jcb(index,800) = B_447;
jcb(index,801) = B_441;
jcb(index,802) = B_177+ B_436;
jcb(index,803) = 0.25*B_445;
jcb(index,804) = B_446;
jcb(index,805) = B_438;
jcb(index,806) = 0;
jcb(index,807) = B_204+ 0.3*B_206;
jcb(index,808) = B_212+ B_440;
jcb(index,809) = 0.985*B_245;
jcb(index,810) = 0;
jcb(index,811) = 0.1*B_254;
jcb(index,812) = 0.23*B_180;
jcb(index,813) = B_439;
jcb(index,814) = 0;
jcb(index,815) = 0.1*B_226+ B_444;
jcb(index,816) = 0;
jcb(index,817) = 0.25*B_232+ 0.125*B_236;
jcb(index,818) = 0;
jcb(index,819) = - B_168;
jcb(index,820) = B_148+ B_150+ B_325+ B_366;
jcb(index,821) = - B_154- B_156- B_158- B_160- B_162- B_164- B_166- B_169- 2*B_170;
jcb(index,822) = 0.23*B_181+ 0.1*B_227+ 0.1*B_255;
jcb(index,823) = - B_165- B_167+ 0.3*B_207+ 0.125*B_237;
jcb(index,824) = B_149+ B_172+ B_213;
jcb(index,825) = B_326;
jcb(index,826) = B_367;
jcb(index,827) = - B_159+ B_205+ 0.25*B_233+ 0.985*B_246;
jcb(index,828) = - B_161;
jcb(index,829) = B_151- B_163;
jcb(index,830) = - B_155- B_157;
jcb(index,831) = 0.09*B_315;
jcb(index,832) = B_128;
jcb(index,833) = 0;
jcb(index,834) = B_12+ B_18+ B_280;
jcb(index,835) = 0.4*B_108+ 0.31*B_431;
jcb(index,836) = 0;
jcb(index,837) = 0;
jcb(index,838) = 0;
jcb(index,839) = 0;
jcb(index,840) = 0;
jcb(index,841) = 0;
jcb(index,842) = 0;
jcb(index,843) = 0;
jcb(index,844) = 0;
jcb(index,845) = B_13+ 0.4*B_109+ 0.09*B_316;
jcb(index,846) = 0;
jcb(index,847) = - B_8- B_10- B_24- B_26- B_28;
jcb(index,848) = - B_11;
jcb(index,849) = 0;
jcb(index,850) = B_14+ B_19+ B_129;
jcb(index,851) = B_281;
jcb(index,852) = B_416;
jcb(index,853) = 0;
jcb(index,854) = B_429;
jcb(index,855) = B_15;
jcb(index,856) = 0;
jcb(index,857) = 0;
jcb(index,858) = 0;
jcb(index,859) = - B_25- B_27- B_29;
jcb(index,860) = B_456;
jcb(index,861) = B_364;
jcb(index,862) = B_356;
jcb(index,863) = - B_500;
jcb(index,864) = B_409;
jcb(index,865) = - B_496;
jcb(index,866) = - B_492;
jcb(index,867) = B_366;
jcb(index,868) = 0;
jcb(index,869) = - B_341- B_493- B_497- B_501;
jcb(index,870) = 0;
jcb(index,871) = 0;
jcb(index,872) = - B_342;
jcb(index,873) = 0;
jcb(index,874) = 0;
jcb(index,875) = B_337+ B_354+ B_357+ B_365+ B_367+ B_410;
jcb(index,876) = B_355;
jcb(index,877) = 0;
jcb(index,878) = 0;
jcb(index,879) = 0;
jcb(index,880) = 0;
jcb(index,881) = 0;
jcb(index,882) = 0;
jcb(index,883) = B_338;
jcb(index,884) = 0;
jcb(index,885) = - B_403;
jcb(index,886) = - B_93;
jcb(index,887) = - B_79;
jcb(index,888) = - B_134;
jcb(index,889) = - B_254;
jcb(index,890) = - B_180;
jcb(index,891) = - B_226;
jcb(index,892) = 0;
jcb(index,893) = - B_4;
jcb(index,894) = B_156;
jcb(index,895) = - B_10;
jcb(index,896) = - B_5- B_6- B_11- B_16- B_22- B_45- B_51- B_80- B_94- B_135- B_181- B_227- B_255- B_271- B_331- B_404- B_414- B_415;
jcb(index,897) = 0;
jcb(index,898) = - B_17;
jcb(index,899) = - B_272;
jcb(index,900) = 0;
jcb(index,901) = - B_332;
jcb(index,902) = 0;
jcb(index,903) = B_2- B_7;
jcb(index,904) = 0;
jcb(index,905) = - B_46;
jcb(index,906) = - B_52;
jcb(index,907) = 0;
jcb(index,908) = - B_23+ B_157;
jcb(index,909) = 0;
jcb(index,910) = B_480;
jcb(index,911) = B_471;
jcb(index,912) = B_434;
jcb(index,913) = 0.6*B_400;
jcb(index,914) = B_152;
jcb(index,915) = B_461;
jcb(index,916) = B_402;
jcb(index,917) = B_438;
jcb(index,918) = - B_190;
jcb(index,919) = - B_206;
jcb(index,920) = 0.75*B_108+ B_110+ B_305;
jcb(index,921) = - B_218;
jcb(index,922) = 0.7*B_122+ B_356;
jcb(index,923) = 0.08*B_254;
jcb(index,924) = 0.07*B_180;
jcb(index,925) = - B_264;
jcb(index,926) = - B_236;
jcb(index,927) = 0;
jcb(index,928) = - B_144+ 0.82*B_168;
jcb(index,929) = B_433;
jcb(index,930) = 0.75*B_109;
jcb(index,931) = B_158+ B_162- B_166+ 0.82*B_169+ 2*B_170;
jcb(index,932) = 0;
jcb(index,933) = 0.07*B_181+ 0.08*B_255;
jcb(index,934) = - B_114- B_116- B_118- 2*B_120- 2*B_121- B_145- B_167- B_191- B_207- B_219- B_237- B_265- B_311- B_358- B_360;
jcb(index,935) = B_111+ 0.7*B_123+ B_153+ 0.6*B_401;
jcb(index,936) = B_306;
jcb(index,937) = 0;
jcb(index,938) = B_357;
jcb(index,939) = 0;
jcb(index,940) = 0;
jcb(index,941) = - B_359- B_361;
jcb(index,942) = - B_117+ B_159;
jcb(index,943) = - B_312;
jcb(index,944) = 0;
jcb(index,945) = - B_119+ B_163;
jcb(index,946) = - B_115;
jcb(index,947) = 0;
jcb(index,948) = - B_216;
jcb(index,949) = - B_370;
jcb(index,950) = - B_368;
jcb(index,951) = - B_77;
jcb(index,952) = - B_132;
jcb(index,953) = - B_178;
jcb(index,954) = - B_390;
jcb(index,955) = - B_362;
jcb(index,956) = - B_386;
jcb(index,957) = - B_388;
jcb(index,958) = - B_392;
jcb(index,959) = B_319- B_321;
jcb(index,960) = - B_173;
jcb(index,961) = - B_104;
jcb(index,962) = - B_214;
jcb(index,963) = - B_171+ B_434;
jcb(index,964) = - B_251;
jcb(index,965) = - B_400;
jcb(index,966) = B_451;
jcb(index,967) = - 0.5*B_198;
jcb(index,968) = - 0.2*B_247+ B_447;
jcb(index,969) = - 0.3*B_192+ B_437;
jcb(index,970) = - B_102;
jcb(index,971) = - 0.3*B_146+ B_432;
jcb(index,972) = - 0.3*B_208+ B_441;
jcb(index,973) = - B_75+ 0.333*B_426;
jcb(index,974) = - B_152;
jcb(index,975) = - B_317;
jcb(index,976) = - B_70+ B_424;
jcb(index,977) = - B_175;
jcb(index,978) = - B_130;
jcb(index,979) = - 0.15*B_224+ B_443;
jcb(index,980) = 0;
jcb(index,981) = - B_239+ B_445;
jcb(index,982) = 0;
jcb(index,983) = - B_241;
jcb(index,984) = - B_200;
jcb(index,985) = - B_96;
jcb(index,986) = - B_35+ 2*B_417;
jcb(index,987) = - B_112;
jcb(index,988) = - B_269;
jcb(index,989) = B_81+ B_85;
jcb(index,990) = 0;
jcb(index,991) = 0;
jcb(index,992) = - B_128;
jcb(index,993) = - B_212;
jcb(index,994) = B_12- B_18;
jcb(index,995) = 0.75*B_108- B_110;
jcb(index,996) = 0;
jcb(index,997) = - B_345;
jcb(index,998) = - B_72+ B_425;
jcb(index,999) = 0;
jcb(index,1000) = 0.13*B_134- B_136;
jcb(index,1001) = - 0.7*B_122+ B_309+ B_427;
jcb(index,1002) = 0;
jcb(index,1003) = 0.25*B_254- B_256;
jcb(index,1004) = 0.33*B_180- B_182;
jcb(index,1005) = - B_210;
jcb(index,1006) = 0;
jcb(index,1007) = 0.19*B_226- B_228;
jcb(index,1008) = - B_249;
jcb(index,1009) = - B_294+ B_457;
jcb(index,1010) = 0;
jcb(index,1011) = - B_394- B_396;
jcb(index,1012) = 0;
jcb(index,1013) = 0;
jcb(index,1014) = 0;
jcb(index,1015) = B_343+ B_468;
jcb(index,1016) = - B_148;
jcb(index,1017) = B_13+ 2*B_33+ 0.75*B_109+ B_320;
jcb(index,1018) = 0;
jcb(index,1019) = B_10+ 2*B_24;
jcb(index,1020) = - B_341;
jcb(index,1021) = B_11- B_16+ B_22+ 0.13*B_135+ 0.33*B_181+ 0.19*B_227+ 0.25*B_255;
jcb(index,1022) = 0;
jcb(index,1023) = - B_14- B_17- B_19- B_30- B_36- B_60- B_64- B_71- B_73- B_76- B_78- B_97- B_103- B_105- B_111- B_113- 0.7*B_123- B_124- B_129- B_131- B_133- B_137- 0.3*B_147- B_149- B_153- B_172- B_174- B_176- B_179- B_183- 0.3*B_193- 0.5*B_199- B_201- 0.3*B_209- B_211- B_213- B_215- B_217- 0.15*B_225- B_229- B_240- B_242- 0.2*B_248- B_250- B_252- B_257- B_270- B_288- B_292- B_295- B_318- B_322- B_342- B_346- B_363- B_369- B_371- B_387- B_389- B_391- B_393- B_395- B_397- B_401;
jcb(index,1024) = B_284+ B_310;
jcb(index,1025) = 2*B_34+ B_416;
jcb(index,1026) = 0;
jcb(index,1027) = - B_125;
jcb(index,1028) = - B_15+ B_20+ B_344;
jcb(index,1029) = 0;
jcb(index,1030) = - B_61+ B_62+ B_86;
jcb(index,1031) = - B_289;
jcb(index,1032) = - B_65;
jcb(index,1033) = B_68;
jcb(index,1034) = B_21+ B_23+ 2*B_25- B_31+ B_63+ B_69+ B_82+ B_285;
jcb(index,1035) = - B_293;
jcb(index,1036) = B_476;
jcb(index,1037) = 2*B_454;
jcb(index,1038) = 3*B_313+ 4*B_462;
jcb(index,1039) = B_327+ B_465;
jcb(index,1040) = 2*B_329+ B_464;
jcb(index,1041) = B_458;
jcb(index,1042) = B_477;
jcb(index,1043) = 2*B_478;
jcb(index,1044) = B_479;
jcb(index,1045) = 3*B_319+ 3*B_321+ 3*B_463;
jcb(index,1046) = 0.35*B_315+ B_317+ B_461;
jcb(index,1047) = B_374+ 2*B_453;
jcb(index,1048) = 0;
jcb(index,1049) = - B_286;
jcb(index,1050) = B_372- B_384+ B_475;
jcb(index,1051) = - B_280;
jcb(index,1052) = - B_305;
jcb(index,1053) = - B_376;
jcb(index,1054) = - B_323;
jcb(index,1055) = - B_309;
jcb(index,1056) = 0;
jcb(index,1057) = 0;
jcb(index,1058) = 0;
jcb(index,1059) = B_457;
jcb(index,1060) = - B_407;
jcb(index,1061) = - B_303+ B_459;
jcb(index,1062) = 0;
jcb(index,1063) = - B_325;
jcb(index,1064) = 3*B_314+ 0.35*B_316+ 3*B_320+ B_328+ 2*B_330;
jcb(index,1065) = 0;
jcb(index,1066) = 0;
jcb(index,1067) = 0;
jcb(index,1068) = - B_271;
jcb(index,1069) = B_311;
jcb(index,1070) = 0.94*B_288+ B_292+ B_318+ 3*B_322;
jcb(index,1071) = - B_272- B_281- B_282- B_284- B_287- B_304- B_306- B_307- B_310- B_324- B_326- B_377- B_385- B_408;
jcb(index,1072) = 0;
jcb(index,1073) = B_373+ B_375;
jcb(index,1074) = - B_308;
jcb(index,1075) = B_273;
jcb(index,1076) = B_380;
jcb(index,1077) = B_296;
jcb(index,1078) = B_274+ 2*B_276+ B_277+ 0.94*B_289+ B_297+ B_312+ B_381;
jcb(index,1079) = 0;
jcb(index,1080) = 0;
jcb(index,1081) = - B_283- B_285;
jcb(index,1082) = B_293+ B_456;
jcb(index,1083) = B_216;
jcb(index,1084) = B_370;
jcb(index,1085) = B_368;
jcb(index,1086) = B_77;
jcb(index,1087) = B_132;
jcb(index,1088) = B_178;
jcb(index,1089) = B_390;
jcb(index,1090) = B_362;
jcb(index,1091) = B_386;
jcb(index,1092) = B_388;
jcb(index,1093) = B_321;
jcb(index,1094) = B_104;
jcb(index,1095) = B_171;
jcb(index,1096) = B_198;
jcb(index,1097) = B_102;
jcb(index,1098) = B_75;
jcb(index,1099) = B_152;
jcb(index,1100) = B_317;
jcb(index,1101) = B_70;
jcb(index,1102) = B_175;
jcb(index,1103) = B_130;
jcb(index,1104) = 0.85*B_224;
jcb(index,1105) = - B_481;
jcb(index,1106) = 0;
jcb(index,1107) = B_200;
jcb(index,1108) = B_96;
jcb(index,1109) = B_35;
jcb(index,1110) = B_83+ B_87+ B_89;
jcb(index,1111) = 0;
jcb(index,1112) = B_18;
jcb(index,1113) = B_110+ 1.155*B_431;
jcb(index,1114) = B_72;
jcb(index,1115) = 0;
jcb(index,1116) = 0;
jcb(index,1117) = B_122;
jcb(index,1118) = - B_494;
jcb(index,1119) = 0;
jcb(index,1120) = 0;
jcb(index,1121) = 0;
jcb(index,1122) = B_249;
jcb(index,1123) = B_294+ B_484+ B_500;
jcb(index,1124) = 0;
jcb(index,1125) = 0;
jcb(index,1126) = - B_488;
jcb(index,1127) = 0;
jcb(index,1128) = B_492+ B_502;
jcb(index,1129) = B_148;
jcb(index,1130) = - B_33;
jcb(index,1131) = 0;
jcb(index,1132) = B_28;
jcb(index,1133) = B_341+ B_493+ B_501;
jcb(index,1134) = 0;
jcb(index,1135) = 0;
jcb(index,1136) = B_19+ B_30+ B_36+ B_71+ B_73+ B_76+ B_78+ B_97+ B_103+ B_105+ B_111+ B_123+ B_124+ B_131+ B_133+ B_149+ B_153+ B_172+ B_176+ B_179+ B_199+ B_201+ B_217+ 0.85*B_225+ B_250+ B_292+ B_295+ B_318+ B_322+ B_342+ B_363+ B_369+ B_371+ B_387+ B_389+ B_391;
jcb(index,1137) = 0;
jcb(index,1138) = - B_34- B_416- B_482- B_489- B_495;
jcb(index,1139) = 0;
jcb(index,1140) = B_125;
jcb(index,1141) = 0;
jcb(index,1142) = 0;
jcb(index,1143) = B_88;
jcb(index,1144) = 0;
jcb(index,1145) = B_90;
jcb(index,1146) = 0;
jcb(index,1147) = B_29+ B_31+ B_84;
jcb(index,1148) = B_293+ B_485+ B_503;
jcb(index,1149) = B_469;
jcb(index,1150) = B_476;
jcb(index,1151) = B_474;
jcb(index,1152) = 2*B_370+ 2*B_472;
jcb(index,1153) = 3*B_368+ 3*B_473;
jcb(index,1154) = B_390+ B_477;
jcb(index,1155) = B_362+ B_471;
jcb(index,1156) = B_386+ B_478;
jcb(index,1157) = 2*B_388+ 2*B_479;
jcb(index,1158) = - B_374;
jcb(index,1159) = - B_372+ B_384+ B_475;
jcb(index,1160) = B_345+ B_376+ 2*B_466;
jcb(index,1161) = - B_364;
jcb(index,1162) = - B_356;
jcb(index,1163) = - B_347+ 0.85*B_470;
jcb(index,1164) = 0;
jcb(index,1165) = - B_409+ B_411;
jcb(index,1166) = 0;
jcb(index,1167) = B_468;
jcb(index,1168) = - B_366;
jcb(index,1169) = 0;
jcb(index,1170) = B_341;
jcb(index,1171) = - B_331;
jcb(index,1172) = B_360;
jcb(index,1173) = B_342+ B_346+ B_363+ 3*B_369+ 2*B_371+ B_387+ 2*B_389+ B_391;
jcb(index,1174) = B_377+ B_385;
jcb(index,1175) = 0;
jcb(index,1176) = - B_332- B_337- B_348- B_354- B_357- B_365- B_367- B_373- B_375- B_410;
jcb(index,1177) = - B_355;
jcb(index,1178) = B_333;
jcb(index,1179) = B_334+ 2*B_335+ B_349+ B_361+ B_378+ B_380+ B_412+ B_467;
jcb(index,1180) = B_350;
jcb(index,1181) = B_379+ B_381;
jcb(index,1182) = 0;
jcb(index,1183) = 0;
jcb(index,1184) = - B_338;
jcb(index,1185) = 0;
jcb(index,1186) = B_173+ B_435;
jcb(index,1187) = B_400;
jcb(index,1188) = B_451;
jcb(index,1189) = B_441;
jcb(index,1190) = B_175;
jcb(index,1191) = 0.75*B_445;
jcb(index,1192) = B_112;
jcb(index,1193) = B_452;
jcb(index,1194) = 0.8*B_190;
jcb(index,1195) = B_204+ 0.8*B_206;
jcb(index,1196) = 0.25*B_108;
jcb(index,1197) = 0.68*B_218;
jcb(index,1198) = 1.13875*B_134;
jcb(index,1199) = 0.3*B_122+ B_309+ B_427;
jcb(index,1200) = 0.58*B_254;
jcb(index,1201) = 0.57*B_180;
jcb(index,1202) = B_439;
jcb(index,1203) = 0.956*B_262+ 1.25*B_264+ B_266;
jcb(index,1204) = B_444;
jcb(index,1205) = 0.75*B_232+ 1.125*B_236+ 0.5*B_238;
jcb(index,1206) = B_394+ B_398+ B_407+ B_409;
jcb(index,1207) = 0.98*B_196;
jcb(index,1208) = 0.75*B_144;
jcb(index,1209) = 0.25*B_109;
jcb(index,1210) = B_164+ B_166;
jcb(index,1211) = 0;
jcb(index,1212) = 1.13875*B_135+ 0.57*B_181+ 0.58*B_255;
jcb(index,1213) = B_116+ B_118+ 2*B_120+ B_121+ 0.75*B_145+ B_165+ B_167+ 0.8*B_191+ 0.8*B_207+ 0.68*B_219+ 1.125*B_237+ 1.25*B_265+ B_311+ B_358+ B_360;
jcb(index,1214) = B_113+ 0.3*B_123- B_124+ B_174+ B_176+ B_395+ B_401;
jcb(index,1215) = - B_307+ B_310+ B_408;
jcb(index,1216) = 0;
jcb(index,1217) = - B_354+ B_410;
jcb(index,1218) = - B_125- B_126- B_308- B_355- B_428- B_429;
jcb(index,1219) = 0;
jcb(index,1220) = B_359+ B_361;
jcb(index,1221) = B_117+ 0.98*B_197+ B_205+ 0.75*B_233+ 0.956*B_263;
jcb(index,1222) = B_312;
jcb(index,1223) = 0;
jcb(index,1224) = B_119- B_127+ B_399;
jcb(index,1225) = 0;
jcb(index,1226) = 0;
jcb(index,1227) = B_455;
jcb(index,1228) = B_37+ B_47+ B_53;
jcb(index,1229) = 0.1*B_315;
jcb(index,1230) = - B_301;
jcb(index,1231) = - B_343;
jcb(index,1232) = B_0+ B_39+ 0.1*B_316;
jcb(index,1233) = B_28;
jcb(index,1234) = 0;
jcb(index,1235) = - B_6+ B_415;
jcb(index,1236) = 0;
jcb(index,1237) = - B_14;
jcb(index,1238) = 0;
jcb(index,1239) = 0;
jcb(index,1240) = 0;
jcb(index,1241) = 0;
jcb(index,1242) = - B_2- B_7- B_15- B_20- B_49- B_273- B_302- B_333- B_344;
jcb(index,1243) = - B_334+ B_467;
jcb(index,1244) = B_48+ B_420;
jcb(index,1245) = - B_274;
jcb(index,1246) = - B_50+ B_54+ B_419;
jcb(index,1247) = B_421;
jcb(index,1248) = - B_21+ B_29;
jcb(index,1249) = 0;
jcb(index,1250) = B_353+ 0.15*B_470;
jcb(index,1251) = - B_411;
jcb(index,1252) = B_343;
jcb(index,1253) = 0;
jcb(index,1254) = B_331;
jcb(index,1255) = - B_358- B_360;
jcb(index,1256) = 0;
jcb(index,1257) = 0;
jcb(index,1258) = 0;
jcb(index,1259) = B_332;
jcb(index,1260) = 0;
jcb(index,1261) = - B_333+ B_344;
jcb(index,1262) = - B_334- 2*B_335- 2*B_336- B_339- B_349- B_351- B_359- B_361- B_378- B_380- B_382- B_412- B_467;
jcb(index,1263) = - B_350;
jcb(index,1264) = - B_379- B_381- B_383;
jcb(index,1265) = - B_352;
jcb(index,1266) = 0;
jcb(index,1267) = - B_340;
jcb(index,1268) = 0;
jcb(index,1269) = B_37- B_47;
jcb(index,1270) = 2*B_41;
jcb(index,1271) = B_98;
jcb(index,1272) = B_424;
jcb(index,1273) = 0;
jcb(index,1274) = B_96+ B_99+ B_100+ B_106;
jcb(index,1275) = - B_85- B_87+ B_91;
jcb(index,1276) = - B_188;
jcb(index,1277) = - B_204;
jcb(index,1278) = - B_245;
jcb(index,1279) = - B_222;
jcb(index,1280) = - B_262;
jcb(index,1281) = 0;
jcb(index,1282) = - B_232;
jcb(index,1283) = - B_196;
jcb(index,1284) = - B_140;
jcb(index,1285) = 2*B_42;
jcb(index,1286) = - B_158;
jcb(index,1287) = 0;
jcb(index,1288) = - B_45;
jcb(index,1289) = - B_116;
jcb(index,1290) = - B_60+ B_97;
jcb(index,1291) = 0;
jcb(index,1292) = 0;
jcb(index,1293) = 0;
jcb(index,1294) = 0;
jcb(index,1295) = B_49;
jcb(index,1296) = - B_349;
jcb(index,1297) = - B_46- B_48- B_55- B_61- B_62- B_86- B_88- B_117- B_141- B_159- B_189- B_197- B_205- B_223- B_233- B_246- B_263- B_296- B_350- B_420;
jcb(index,1298) = - B_297;
jcb(index,1299) = B_50+ B_92+ B_101+ B_419;
jcb(index,1300) = - B_56+ B_422;
jcb(index,1301) = - B_63;
jcb(index,1302) = 0;
jcb(index,1303) = 2*B_279;
jcb(index,1304) = B_313;
jcb(index,1305) = B_327;
jcb(index,1306) = B_329;
jcb(index,1307) = B_455;
jcb(index,1308) = 0.46*B_315;
jcb(index,1309) = B_294;
jcb(index,1310) = B_300+ B_301+ B_460;
jcb(index,1311) = B_314+ 0.46*B_316+ B_328+ B_330;
jcb(index,1312) = 0;
jcb(index,1313) = 0;
jcb(index,1314) = B_271;
jcb(index,1315) = - B_311;
jcb(index,1316) = - B_288+ B_295;
jcb(index,1317) = B_272+ B_284;
jcb(index,1318) = 0;
jcb(index,1319) = 0;
jcb(index,1320) = 0;
jcb(index,1321) = - B_273+ B_302;
jcb(index,1322) = - B_378- B_380- B_382;
jcb(index,1323) = - B_296;
jcb(index,1324) = - B_274- 2*B_275- 2*B_276- 2*B_277- 2*B_278- B_289- B_290- B_297- B_298- B_312- B_379- B_381- B_383;
jcb(index,1325) = - B_299;
jcb(index,1326) = 0;
jcb(index,1327) = B_285- B_291;
jcb(index,1328) = 0;
jcb(index,1329) = B_469;
jcb(index,1330) = B_458;
jcb(index,1331) = B_173+ B_435;
jcb(index,1332) = - B_53;
jcb(index,1333) = B_214+ B_442;
jcb(index,1334) = B_251+ B_253+ B_450;
jcb(index,1335) = B_74+ B_75+ 0.667*B_426;
jcb(index,1336) = B_70;
jcb(index,1337) = B_175+ B_177+ B_436;
jcb(index,1338) = B_59+ B_423;
jcb(index,1339) = - B_100;
jcb(index,1340) = B_452;
jcb(index,1341) = - B_89- B_91;
jcb(index,1342) = 0.96*B_188;
jcb(index,1343) = B_204;
jcb(index,1344) = 0.985*B_245;
jcb(index,1345) = B_425;
jcb(index,1346) = 0.84*B_222;
jcb(index,1347) = B_353+ 0.15*B_470;
jcb(index,1348) = 0;
jcb(index,1349) = 0.956*B_262;
jcb(index,1350) = B_249+ B_449;
jcb(index,1351) = B_232- B_234;
jcb(index,1352) = 0;
jcb(index,1353) = 0.98*B_196;
jcb(index,1354) = B_300+ B_460;
jcb(index,1355) = B_140+ B_142;
jcb(index,1356) = 0;
jcb(index,1357) = B_158- B_160+ B_162;
jcb(index,1358) = 0;
jcb(index,1359) = B_45- B_51;
jcb(index,1360) = B_116+ B_118;
jcb(index,1361) = - B_64+ B_71+ B_76+ B_174+ B_176+ B_215+ B_250+ B_252;
jcb(index,1362) = 0;
jcb(index,1363) = 0;
jcb(index,1364) = 0;
jcb(index,1365) = 0;
jcb(index,1366) = - B_49;
jcb(index,1367) = B_349- B_351;
jcb(index,1368) = B_46+ 2*B_55+ B_62+ B_117+ B_141+ B_159+ 0.96*B_189+ 0.98*B_197+ B_205+ 0.84*B_223+ B_233+ 0.985*B_246+ 0.956*B_263+ B_296+ B_350;
jcb(index,1369) = B_297- B_298;
jcb(index,1370) = - B_50- B_52- B_54- B_57- B_65- B_66- B_90- B_92- B_101- B_161- B_235- B_299- B_352- B_419;
jcb(index,1371) = 2*B_56- B_58+ B_68+ B_119+ B_143+ B_163+ B_421;
jcb(index,1372) = B_63- B_67+ B_69;
jcb(index,1373) = 0;
jcb(index,1374) = 0.333*B_426;
jcb(index,1375) = B_59+ B_423;
jcb(index,1376) = B_72;
jcb(index,1377) = B_347+ 0.85*B_470;
jcb(index,1378) = - B_258;
jcb(index,1379) = - B_184;
jcb(index,1380) = - B_398;
jcb(index,1381) = B_301+ B_303+ B_459;
jcb(index,1382) = - B_142;
jcb(index,1383) = - B_150;
jcb(index,1384) = - B_162;
jcb(index,1385) = 0;
jcb(index,1386) = B_51;
jcb(index,1387) = - B_118;
jcb(index,1388) = B_73;
jcb(index,1389) = B_304;
jcb(index,1390) = 0;
jcb(index,1391) = B_348;
jcb(index,1392) = - B_126;
jcb(index,1393) = B_302;
jcb(index,1394) = 0;
jcb(index,1395) = - B_55;
jcb(index,1396) = 0;
jcb(index,1397) = B_52- B_57;
jcb(index,1398) = - B_56- B_58- B_68- B_119- B_127- B_143- B_151- B_163- B_185- B_259- B_399- B_421- B_422;
jcb(index,1399) = - B_69;
jcb(index,1400) = 0;
jcb(index,1401) = - B_405;
jcb(index,1402) = B_392;
jcb(index,1403) = B_442;
jcb(index,1404) = 0.4*B_400;
jcb(index,1405) = B_451;
jcb(index,1406) = B_437;
jcb(index,1407) = B_432;
jcb(index,1408) = B_74+ 0.667*B_426;
jcb(index,1409) = B_130;
jcb(index,1410) = 0.67*B_443;
jcb(index,1411) = 0;
jcb(index,1412) = 0.75*B_445;
jcb(index,1413) = B_106;
jcb(index,1414) = B_35+ B_286;
jcb(index,1415) = B_112;
jcb(index,1416) = B_452;
jcb(index,1417) = - B_81- B_83+ B_85;
jcb(index,1418) = - B_186+ 0.96*B_188+ 0.8*B_190;
jcb(index,1419) = - B_202+ 0.3*B_206;
jcb(index,1420) = B_440;
jcb(index,1421) = - B_243;
jcb(index,1422) = 1.23*B_218- B_220+ 0.56*B_222;
jcb(index,1423) = 0.13*B_134;
jcb(index,1424) = B_427;
jcb(index,1425) = 0.25*B_254;
jcb(index,1426) = 0.26*B_180;
jcb(index,1427) = B_210+ B_439;
jcb(index,1428) = - B_260+ 0.956*B_262+ B_264+ B_266;
jcb(index,1429) = 0.32*B_226+ B_444;
jcb(index,1430) = 0.67*B_449;
jcb(index,1431) = - B_230+ 0.75*B_232+ 0.875*B_236+ B_238;
jcb(index,1432) = B_396;
jcb(index,1433) = - B_194+ 0.98*B_196;
jcb(index,1434) = - B_138+ B_140+ B_142+ B_144+ 0.82*B_168;
jcb(index,1435) = B_433;
jcb(index,1436) = - B_154- B_156+ B_164+ 0.82*B_169;
jcb(index,1437) = B_8- B_24- B_26- B_28;
jcb(index,1438) = B_16- B_22+ 0.13*B_135+ 0.26*B_181+ 0.32*B_227+ 0.25*B_255;
jcb(index,1439) = - B_114+ B_116+ B_118+ 2*B_120+ B_145+ B_165+ 0.8*B_191+ 0.3*B_207+ 1.23*B_219+ 0.875*B_237+ B_265+ B_311+ B_360;
jcb(index,1440) = B_17- B_30+ B_36+ B_113+ B_124+ B_131+ B_211+ 0.94*B_288+ B_393+ B_397+ 0.4*B_401;
jcb(index,1441) = - B_282- B_284+ B_287+ B_307;
jcb(index,1442) = 0;
jcb(index,1443) = - B_337+ B_354;
jcb(index,1444) = B_125+ B_126+ B_308+ B_355+ B_429;
jcb(index,1445) = - B_20;
jcb(index,1446) = - B_339+ B_361;
jcb(index,1447) = - B_62+ B_86+ B_117+ B_141+ 0.96*B_189+ 0.98*B_197+ 0.56*B_223+ 0.75*B_233+ 0.956*B_263;
jcb(index,1448) = 0.94*B_289- B_290+ B_312;
jcb(index,1449) = - B_66;
jcb(index,1450) = - B_68+ B_119+ B_127+ B_143;
jcb(index,1451) = - B_21- B_23- B_25- B_27- B_29- B_31- 2*B_32- B_63- B_67- B_69- B_82- B_84- B_115- B_139- B_155- B_157- B_187- B_195- B_203- B_221- B_231- B_244- B_261- B_283- B_285- B_291- B_338- B_340- B_406;
jcb(index,1452) = 0;
jcb(index,1453) = - B_490;
jcb(index,1454) = B_286;
jcb(index,1455) = B_280;
jcb(index,1456) = B_305;
jcb(index,1457) = B_323;
jcb(index,1458) = B_309;
jcb(index,1459) = - B_498;
jcb(index,1460) = 0;
jcb(index,1461) = 0;
jcb(index,1462) = - B_484;
jcb(index,1463) = B_407;
jcb(index,1464) = - B_486;
jcb(index,1465) = - B_502;
jcb(index,1466) = B_325;
jcb(index,1467) = 0;
jcb(index,1468) = 0;
jcb(index,1469) = 0;
jcb(index,1470) = 0;
jcb(index,1471) = 0;
jcb(index,1472) = 0;
jcb(index,1473) = 0.06*B_288- B_292;
jcb(index,1474) = B_281+ B_282+ B_287+ B_306+ B_307+ B_310+ B_324+ B_326+ B_408;
jcb(index,1475) = 0;
jcb(index,1476) = 0;
jcb(index,1477) = B_308;
jcb(index,1478) = 0;
jcb(index,1479) = 0;
jcb(index,1480) = 0;
jcb(index,1481) = 0.06*B_289;
jcb(index,1482) = 0;
jcb(index,1483) = 0;
jcb(index,1484) = B_283;
jcb(index,1485) = - B_293- B_456- B_485- B_487- B_491- B_499- B_503;
}
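/* Fun: right-hand-side evaluation of the chemical ODE system. The A_* locals below
   hold individual reaction rates; varDot is presumably assembled from them later in
   this auto-generated function. Nfun counts RHS evaluations. */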
__device__ void Fun(double *var, const double * __restrict__ fix, const double * __restrict__ rconst, double *varDot, int &Nfun, const int VL_GLO){
int index = blockIdx.x*blockDim.x+threadIdx.x;
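/* Assumption: one chemistry box per thread; index < VL_GLO is presumably
   guaranteed or checked by the caller. */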
Nfun++;
double dummy, A_0, A_1, A_2, A_3, A_4, A_5, A_6, A_7, A_8, A_9, A_10, A_11, A_12, A_13, A_14, A_15, A_16, A_17, A_18, A_19, A_20, A_21, A_22, A_23, A_24, A_25, A_26, A_27, A_28, A_29, A_30, A_31, A_32, A_33, A_34, A_35, A_36, A_37, A_38, A_39, A_40, A_41, A_42, A_43, A_44, A_45, A_46, A_47, A_48, A_49, A_50, A_51, A_52, A_53, A_54, A_55, A_56, A_57, A_58, A_59, A_60, A_61, A_62, A_63, A_64, A_65, A_66, A_67, A_68, A_69, A_70, A_71, A_72, A_73, A_74, A_75, A_76, A_77, A_78, A_79, A_80, A_81, A_82, A_83, A_84, A_85, A_86, A_87, A_88, A_89, A_90, A_91, A_92, A_93, A_94, A_95, A_96, A_97, A_98, A_99, A_100, A_101, A_102, A_103, A_104, A_105, A_106, A_107, A_108, A_109, A_110, A_111, A_112, A_113, A_114, A_115, A_116, A_117, A_118, A_119, A_120, A_121, A_122, A_123, A_124, A_125, A_126, A_127, A_128, A_129, A_130, A_131, A_132, A_133, A_134, A_135, A_136, A_137, A_138, A_139, A_140, A_141, A_142, A_143, A_144, A_145, A_146, A_147, A_148, A_149, A_150, A_151, A_152, A_153, A_154, A_155, A_156, A_157, A_158, A_159, A_160, A_161, A_162, A_163, A_164, A_165, A_166, A_167, A_168, A_169, A_170, A_171, A_172, A_173, A_174, A_175, A_176, A_177, A_178, A_179, A_180, A_181, A_182, A_183, A_184, A_185, A_186, A_187, A_188, A_189, A_190, A_191, A_192, A_193, A_194, A_195, A_196, A_197, A_198, A_199, A_200, A_201, A_202, A_203, A_204, A_205, A_206, A_207, A_208, A_209, A_210, A_211, A_212, A_213, A_214, A_215, A_216, A_217, A_218, A_219, A_220, A_221, A_222, A_223, A_224, A_225, A_226, A_227, A_228, A_229, A_230, A_231, A_232, A_233, A_234, A_235, A_236, A_237, A_238, A_239, A_240, A_241, A_242, A_243, A_244, A_245, A_246, A_247, A_248, A_249, A_250, A_251, A_252, A_253, A_254, A_255, A_256, A_257, A_258, A_259, A_260, A_261, A_262, A_263, A_264, A_265, A_266, A_267, A_268, A_269, A_270, A_271, A_272, A_273, A_274, A_275, A_276, A_277, A_278, A_279, A_280, A_281, A_282, A_283, A_284, A_285, A_286, A_287, A_288, A_289, A_290, A_291, A_292, A_293, A_294, A_295, A_296, A_297, A_298, A_299, A_300, A_301, A_302, A_303, A_304, A_305, A_306, A_307, A_308, A_309;
{
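/* Reaction rates: A_n = k_n * [reactants], with k_n taken from rconst(index,n) or
   hard-coded; var() holds variable species and fix() holds fixed species. */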
A_0 = rconst(index,0)*var(index,120)*fix(index,0);
A_1 = rconst(index,1)*var(index,131)*fix(index,0);
A_2 = 1.2e-10*var(index,120)*var(index,124);
A_3 = rconst(index,3)*var(index,124)*var(index,131);
A_4 = rconst(index,4)*var(index,122)*fix(index,0);
A_5 = rconst(index,5)*var(index,122)*var(index,124);
A_6 = 1.2e-10*var(index,97)*var(index,120);
A_7 = rconst(index,7)*var(index,126)*var(index,131);
A_8 = rconst(index,8)*var(index,124)*var(index,126);
A_9 = rconst(index,9)*var(index,97)*var(index,126);
A_10 = rconst(index,10)*var(index,131)*var(index,137);
A_11 = rconst(index,11)*var(index,124)*var(index,137);
A_12 = 7.2e-11*var(index,122)*var(index,137);
A_13 = 6.9e-12*var(index,122)*var(index,137);
A_14 = 1.6e-12*var(index,122)*var(index,137);
A_15 = rconst(index,15)*var(index,126)*var(index,137);
A_16 = rconst(index,16)*var(index,137)*var(index,137);
A_17 = rconst(index,17)*var(index,120)*var(index,128);
A_18 = 1.8e-12*var(index,88)*var(index,126);
A_19 = rconst(index,19)*var(index,59)*fix(index,0);
A_20 = rconst(index,20)*var(index,120)*fix(index,1);
A_21 = rconst(index,21)*var(index,60)*var(index,120);
A_22 = rconst(index,22)*var(index,60)*var(index,120);
A_23 = rconst(index,23)*var(index,124)*var(index,133);
A_24 = rconst(index,24)*var(index,59)*var(index,133);
A_25 = rconst(index,25)*var(index,131)*var(index,135);
A_26 = rconst(index,26)*var(index,124)*var(index,135);
A_27 = rconst(index,27)*var(index,59)*var(index,135);
A_28 = rconst(index,28)*var(index,133)*var(index,136);
A_29 = rconst(index,29)*var(index,135)*var(index,136);
A_30 = rconst(index,30)*var(index,83);
A_31 = rconst(index,31)*var(index,126)*var(index,133);
A_32 = rconst(index,32)*var(index,133)*var(index,137);
A_33 = rconst(index,33)*var(index,126)*var(index,135);
A_34 = rconst(index,34)*var(index,135)*var(index,137);
A_35 = 3.5e-12*var(index,136)*var(index,137);
A_36 = rconst(index,36)*var(index,76)*var(index,126);
A_37 = rconst(index,37)*var(index,101)*var(index,126);
A_38 = rconst(index,38)*var(index,73);
A_39 = rconst(index,39)*var(index,73)*var(index,126);
A_40 = rconst(index,40)*var(index,47)*var(index,126);
A_41 = rconst(index,41)*var(index,92)*var(index,124);
A_42 = rconst(index,42)*var(index,92)*var(index,137);
A_43 = rconst(index,43)*var(index,92)*var(index,137);
A_44 = rconst(index,44)*var(index,92)*var(index,133);
A_45 = rconst(index,45)*var(index,92)*var(index,133);
A_46 = rconst(index,46)*var(index,92)*var(index,135);
A_47 = rconst(index,47)*var(index,92)*var(index,135);
A_48 = 1.2e-14*var(index,84)*var(index,124);
A_49 = 1300*var(index,84);
A_50 = rconst(index,50)*var(index,87)*var(index,126);
A_51 = rconst(index,51)*var(index,70)*var(index,87);
A_52 = rconst(index,52)*var(index,87)*var(index,135);
A_53 = 1.66e-12*var(index,70)*var(index,126);
A_54 = rconst(index,54)*var(index,61)*var(index,126);
A_55 = rconst(index,55)*var(index,87)*fix(index,0);
A_56 = 1.75e-10*var(index,98)*var(index,120);
A_57 = rconst(index,57)*var(index,98)*var(index,126);
A_58 = rconst(index,58)*var(index,89)*var(index,126);
A_59 = rconst(index,59)*var(index,125)*var(index,137);
A_60 = rconst(index,60)*var(index,125)*var(index,133);
A_61 = 1.3e-12*var(index,125)*var(index,136);
A_62 = rconst(index,62)*var(index,125)*var(index,125);
A_63 = rconst(index,63)*var(index,125)*var(index,125);
A_64 = rconst(index,64)*var(index,104)*var(index,126);
A_65 = rconst(index,65)*var(index,126)*var(index,130);
A_66 = rconst(index,66)*var(index,130)*var(index,136);
A_67 = rconst(index,67)*var(index,95)*var(index,126);
A_68 = 4e-13*var(index,78)*var(index,126);
A_69 = rconst(index,69)*var(index,48)*var(index,126);
A_70 = rconst(index,70)*var(index,103)*var(index,124);
A_71 = rconst(index,71)*var(index,103)*var(index,126);
A_72 = rconst(index,72)*var(index,117)*var(index,137);
A_73 = rconst(index,73)*var(index,117)*var(index,133);
A_74 = 2.3e-12*var(index,117)*var(index,136);
A_75 = rconst(index,75)*var(index,117)*var(index,125);
A_76 = rconst(index,76)*var(index,71)*var(index,126);
A_77 = rconst(index,77)*var(index,119)*var(index,126);
A_78 = rconst(index,78)*var(index,119)*var(index,136);
A_79 = rconst(index,79)*var(index,74)*var(index,126);
A_80 = rconst(index,80)*var(index,121)*var(index,137);
A_81 = rconst(index,81)*var(index,121)*var(index,137);
A_82 = rconst(index,82)*var(index,121)*var(index,133);
A_83 = rconst(index,83)*var(index,121)*var(index,135);
A_84 = 4e-12*var(index,121)*var(index,136);
A_85 = rconst(index,85)*var(index,121)*var(index,125);
A_86 = rconst(index,86)*var(index,121)*var(index,125);
A_87 = rconst(index,87)*var(index,117)*var(index,121);
A_88 = rconst(index,88)*var(index,121)*var(index,121);
A_89 = rconst(index,89)*var(index,63)*var(index,126);
A_90 = rconst(index,90)*var(index,58)*var(index,126);
A_91 = rconst(index,91)*var(index,77)*var(index,126);
A_92 = rconst(index,92)*var(index,77);
A_93 = rconst(index,93)*var(index,49)*var(index,126);
A_94 = rconst(index,94)*var(index,107)*var(index,124);
A_95 = rconst(index,95)*var(index,107)*var(index,126);
A_96 = rconst(index,96)*var(index,107)*var(index,136);
A_97 = rconst(index,97)*var(index,93)*var(index,137);
A_98 = rconst(index,98)*var(index,93)*var(index,133);
A_99 = rconst(index,99)*var(index,93)*var(index,125);
A_100 = rconst(index,100)*var(index,69)*var(index,126);
A_101 = rconst(index,101)*var(index,115)*var(index,137);
A_102 = rconst(index,102)*var(index,115)*var(index,133);
A_103 = rconst(index,103)*var(index,67)*var(index,126);
A_104 = rconst(index,104)*var(index,86)*var(index,126);
A_105 = rconst(index,105)*var(index,94)*var(index,137);
A_106 = rconst(index,106)*var(index,94)*var(index,133);
A_107 = rconst(index,107)*var(index,94)*var(index,125);
A_108 = rconst(index,108)*var(index,72)*var(index,126);
A_109 = rconst(index,109)*var(index,108)*var(index,126);
A_110 = rconst(index,110)*var(index,96)*var(index,126);
A_111 = rconst(index,111)*var(index,62)*var(index,126);
A_112 = rconst(index,112)*var(index,40)*var(index,126);
A_113 = rconst(index,113)*var(index,102)*var(index,125);
A_114 = rconst(index,114)*var(index,102)*var(index,137);
A_115 = rconst(index,115)*var(index,102)*var(index,133);
A_116 = rconst(index,116)*var(index,79)*var(index,126);
A_117 = rconst(index,117)*var(index,110)*var(index,124);
A_118 = rconst(index,118)*var(index,110)*var(index,126);
A_119 = rconst(index,119)*var(index,113)*var(index,137);
A_120 = rconst(index,120)*var(index,113)*var(index,133);
A_121 = rconst(index,121)*var(index,113)*var(index,135);
A_122 = 2e-12*var(index,113)*var(index,125);
A_123 = 2e-12*var(index,113)*var(index,113);
A_124 = 3e-11*var(index,82)*var(index,126);
A_125 = rconst(index,125)*var(index,85)*var(index,126);
A_126 = rconst(index,126)*var(index,99)*var(index,137);
A_127 = rconst(index,127)*var(index,99)*var(index,133);
A_128 = rconst(index,128)*var(index,68)*var(index,126);
A_129 = 1.7e-12*var(index,111)*var(index,126);
A_130 = 3.2e-11*var(index,64)*var(index,126);
A_131 = rconst(index,131)*var(index,64);
A_132 = rconst(index,132)*var(index,106)*var(index,124);
A_133 = rconst(index,133)*var(index,106)*var(index,126);
A_134 = rconst(index,134)*var(index,106)*var(index,136);
A_135 = rconst(index,135)*var(index,109)*var(index,137);
A_136 = rconst(index,136)*var(index,109)*var(index,133);
A_137 = 2e-12*var(index,109)*var(index,125);
A_138 = 2e-12*var(index,109)*var(index,109);
A_139 = 1e-10*var(index,66)*var(index,126);
A_140 = 1.3e-11*var(index,91)*var(index,126);
A_141 = rconst(index,141)*var(index,124)*var(index,127);
A_142 = rconst(index,142)*var(index,131)*var(index,134);
A_143 = rconst(index,143)*var(index,134)*var(index,134);
A_144 = rconst(index,144)*var(index,134)*var(index,134);
A_145 = rconst(index,145)*var(index,134)*var(index,134);
A_146 = rconst(index,146)*var(index,134)*var(index,134);
A_147 = rconst(index,147)*var(index,39);
A_148 = rconst(index,148)*var(index,97)*var(index,127);
A_149 = rconst(index,149)*var(index,127)*var(index,137);
A_150 = rconst(index,150)*var(index,127)*var(index,137);
A_151 = rconst(index,151)*var(index,88)*var(index,127);
A_152 = rconst(index,152)*var(index,126)*var(index,134);
A_153 = rconst(index,153)*var(index,134)*var(index,137);
A_154 = rconst(index,154)*var(index,126)*var(index,138);
A_155 = rconst(index,155)*var(index,112)*var(index,126);
A_156 = rconst(index,156)*var(index,133)*var(index,134);
A_157 = rconst(index,157)*var(index,134)*var(index,135);
A_158 = rconst(index,158)*var(index,116);
A_159 = rconst(index,159)*var(index,116)*var(index,131);
A_160 = rconst(index,160)*var(index,116)*var(index,127);
A_161 = rconst(index,161)*var(index,98)*var(index,127);
A_162 = rconst(index,162)*var(index,127)*var(index,130);
A_163 = 5.9e-11*var(index,104)*var(index,127);
A_164 = rconst(index,164)*var(index,125)*var(index,134);
A_165 = 3.3e-10*var(index,41)*var(index,120);
A_166 = 1.65e-10*var(index,75)*var(index,120);
A_167 = rconst(index,167)*var(index,75)*var(index,126);
A_168 = 3.25e-10*var(index,57)*var(index,120);
A_169 = rconst(index,169)*var(index,57)*var(index,126);
A_170 = rconst(index,170)*var(index,103)*var(index,127);
A_171 = 8e-11*var(index,119)*var(index,127);
A_172 = 1.4e-10*var(index,42)*var(index,120);
A_173 = 2.3e-10*var(index,43)*var(index,120);
A_174 = rconst(index,174)*var(index,124)*var(index,129);
A_175 = rconst(index,175)*var(index,131)*var(index,132);
A_176 = 2.7e-12*var(index,132)*var(index,132);
A_177 = rconst(index,177)*var(index,132)*var(index,132);
A_178 = rconst(index,178)*var(index,129)*var(index,137);
A_179 = rconst(index,179)*var(index,132)*var(index,137);
A_180 = rconst(index,180)*var(index,123)*var(index,126);
A_181 = rconst(index,181)*var(index,118)*var(index,131);
A_182 = rconst(index,182)*var(index,100)*var(index,126);
A_183 = 4.9e-11*var(index,105)*var(index,129);
A_184 = rconst(index,184)*var(index,132)*var(index,133);
A_185 = rconst(index,185)*var(index,132)*var(index,135);
A_186 = rconst(index,186)*var(index,105);
A_187 = rconst(index,187)*var(index,129)*var(index,130);
A_188 = rconst(index,188)*var(index,104)*var(index,129);
A_189 = rconst(index,189)*var(index,125)*var(index,132);
A_190 = rconst(index,190)*var(index,125)*var(index,132);
A_191 = rconst(index,191)*var(index,53)*var(index,126);
A_192 = rconst(index,192)*var(index,103)*var(index,129);
A_193 = rconst(index,193)*var(index,119)*var(index,129);
A_194 = rconst(index,194)*var(index,45)*var(index,126);
A_195 = rconst(index,195)*var(index,44)*var(index,126);
A_196 = 3.32e-15*var(index,90)*var(index,129);
A_197 = 1.1e-15*var(index,80)*var(index,129);
A_198 = rconst(index,198)*var(index,100)*var(index,127);
A_199 = rconst(index,199)*var(index,132)*var(index,134);
A_200 = rconst(index,200)*var(index,132)*var(index,134);
A_201 = rconst(index,201)*var(index,132)*var(index,134);
A_202 = 1.45e-11*var(index,90)*var(index,127);
A_203 = rconst(index,203)*var(index,54)*var(index,126);
A_204 = rconst(index,204)*var(index,55)*var(index,126);
A_205 = rconst(index,205)*var(index,52)*var(index,126);
A_206 = rconst(index,206)*var(index,56)*var(index,126);
A_207 = rconst(index,207)*var(index,114)*var(index,126);
A_208 = rconst(index,208)*var(index,114)*var(index,126);
A_209 = rconst(index,209)*var(index,114)*var(index,136);
A_210 = 1e-10*var(index,65)*var(index,126);
A_211 = rconst(index,211)*var(index,81);
A_212 = 3e-13*var(index,81)*var(index,124);
A_213 = 5e-11*var(index,46)*var(index,137);
A_214 = 3.3e-10*var(index,114)*var(index,127);
A_215 = rconst(index,215)*var(index,114)*var(index,129);
A_216 = 4.4e-13*var(index,114)*var(index,132);
A_217 = rconst(index,217)*fix(index,0);
A_218 = rconst(index,218)*var(index,124);
A_219 = rconst(index,219)*var(index,124);
A_220 = rconst(index,220)*var(index,128);
A_221 = rconst(index,221)*var(index,88);
A_222 = rconst(index,222)*var(index,60);
A_223 = rconst(index,223)*var(index,135);
A_224 = rconst(index,224)*var(index,133);
A_225 = rconst(index,225)*var(index,136);
A_226 = rconst(index,226)*var(index,136);
A_227 = rconst(index,227)*var(index,83);
A_228 = rconst(index,228)*var(index,76);
A_229 = rconst(index,229)*var(index,101);
A_230 = rconst(index,230)*var(index,73);
A_231 = rconst(index,231)*var(index,104);
A_232 = rconst(index,232)*var(index,130);
A_233 = rconst(index,233)*var(index,130);
A_234 = rconst(index,234)*fix(index,2);
A_235 = rconst(index,235)*var(index,98);
A_236 = rconst(index,236)*var(index,71);
A_237 = rconst(index,237)*var(index,119);
A_238 = rconst(index,238)*var(index,63);
A_239 = rconst(index,239)*var(index,58);
A_240 = rconst(index,240)*var(index,77);
A_241 = rconst(index,241)*var(index,69);
A_242 = rconst(index,242)*var(index,86);
A_243 = rconst(index,243)*var(index,108);
A_244 = rconst(index,244)*var(index,96);
A_245 = rconst(index,245)*var(index,72);
A_246 = rconst(index,246)*var(index,62);
A_247 = rconst(index,247)*var(index,79);
A_248 = rconst(index,248)*var(index,110);
A_249 = rconst(index,249)*var(index,82);
A_250 = rconst(index,250)*var(index,85);
A_251 = rconst(index,251)*var(index,68);
A_252 = rconst(index,252)*var(index,38);
A_253 = rconst(index,253)*var(index,111);
A_254 = rconst(index,254)*var(index,64);
A_255 = rconst(index,255)*var(index,66);
A_256 = rconst(index,256)*var(index,91);
A_257 = rconst(index,257)*var(index,80);
A_258 = rconst(index,258)*var(index,39);
A_259 = rconst(index,259)*var(index,51);
A_260 = rconst(index,260)*var(index,138);
A_261 = rconst(index,261)*var(index,112);
A_262 = rconst(index,262)*var(index,50);
A_263 = rconst(index,263)*var(index,116);
A_264 = rconst(index,264)*var(index,116);
A_265 = rconst(index,265)*var(index,75);
A_266 = rconst(index,266)*var(index,41);
A_267 = rconst(index,267)*var(index,57);
A_268 = rconst(index,268)*var(index,43);
A_269 = rconst(index,269)*var(index,42);
A_270 = rconst(index,270)*var(index,100);
A_271 = rconst(index,271)*var(index,132);
A_272 = rconst(index,272)*var(index,118);
A_273 = rconst(index,273)*var(index,0);
A_274 = rconst(index,274)*var(index,105);
A_275 = rconst(index,275)*var(index,53);
A_276 = rconst(index,276)*var(index,44);
A_277 = rconst(index,277)*var(index,45);
A_278 = rconst(index,278)*var(index,2);
A_279 = rconst(index,279)*var(index,90);
A_280 = rconst(index,280)*var(index,1);
A_281 = rconst(index,281)*var(index,52);
A_282 = rconst(index,282)*var(index,54);
A_283 = rconst(index,283)*var(index,55);
A_284 = rconst(index,284)*var(index,3);
A_285 = rconst(index,285)*var(index,83)*var(index,128);
A_286 = rconst(index,286)*var(index,83);
A_287 = rconst(index,287)*var(index,112)*var(index,138);
A_288 = rconst(index,288)*var(index,116)*var(index,138);
A_289 = rconst(index,289)*var(index,116)*var(index,128);
A_290 = rconst(index,290)*var(index,83)*var(index,138);
A_291 = rconst(index,291)*var(index,118)*var(index,123);
A_292 = rconst(index,292)*var(index,105)*var(index,128);
A_293 = rconst(index,293)*var(index,116)*var(index,123);
A_294 = rconst(index,294)*var(index,105)*var(index,138);
A_295 = rconst(index,295)*var(index,112)*var(index,123);
A_296 = rconst(index,296)*var(index,118)*var(index,138);
A_297 = rconst(index,297)*var(index,4);
A_298 = 2.3e-10*var(index,15)*var(index,120);
A_299 = rconst(index,299)*var(index,15);
A_300 = 1.4e-10*var(index,16)*var(index,120);
A_301 = rconst(index,301)*var(index,16);
A_302 = rconst(index,302)*var(index,17)*var(index,120);
A_303 = rconst(index,303)*var(index,17)*var(index,120);
A_304 = rconst(index,304)*var(index,17);
A_305 = 3e-10*var(index,18)*var(index,120);
A_306 = rconst(index,306)*var(index,18)*var(index,126);
A_307 = rconst(index,307)*var(index,18);
A_308 = rconst(index,308)*var(index,5);
A_309 = rconst(index,309)*var(index,6);
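  /* Time derivatives of the variable species: each varDot(index,i) is d(var_i)/dt,
     the stoichiometry-weighted sum of the reaction rates A_k computed above
     (production terms positive, loss terms negative). */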
varDot(index,0) = - A_273;
varDot(index,1) = - A_280;
varDot(index,2) = - A_278;
varDot(index,3) = - A_284;
varDot(index,4) = - A_297;
varDot(index,5) = - A_308;
varDot(index,6) = - A_309;
varDot(index,7) = A_165+ 0.9*A_166+ A_167+ 2*A_168+ 2*A_169+ A_172+ A_173+ A_191+ A_194+ A_195+ A_203+ A_204+ A_205+ A_266+ 2 *A_267+ A_268+ A_269+ A_276+ A_277+ A_278+ A_280+ A_281+ A_282+ A_283;
varDot(index,8) = 2*A_172+ A_173+ A_268+ 2*A_269+ 3*A_278+ 2*A_280;
varDot(index,9) = 0.09*A_166+ 2*A_203+ A_204+ A_205+ 2*A_268+ A_269;
varDot(index,10) = 0.4*A_210+ A_213;
varDot(index,11) = A_206;
varDot(index,12) = 2*A_286;
varDot(index,13) = 2*A_286;
varDot(index,14) = A_299+ A_301+ A_303+ A_304+ A_307+ A_308+ A_309;
varDot(index,15) = - A_298- A_299;
varDot(index,16) = - A_300- A_301;
varDot(index,17) = - A_302- A_303- A_304;
varDot(index,18) = - A_305- A_306- A_307;
varDot(index,19) = A_297;
varDot(index,20) = A_11;
varDot(index,21) = A_17;
varDot(index,22) = 2*A_2+ 2*A_3+ A_5+ A_6+ A_7+ A_8+ A_10+ A_11+ A_17+ A_21+ A_22+ 2*A_25+ A_35+ A_41+ A_46+ A_47+ A_48+ A_52 + A_56+ A_61+ A_66+ A_70+ A_74+ A_78+ A_84+ A_94+ A_96+ A_117+ A_132+ A_134+ 2*A_142+ 2*A_143+ 2*A_144 + A_145+ A_152+ A_164+ A_166+ A_168+ 2*A_175+ 2*A_176+ 2*A_177+ A_181+ A_190+ A_199+ 2*A_200+ 2*A_201+ 2 *A_226+ 2*A_258+ A_261+ A_272+ A_285+ 3*A_286+ A_287+ A_288+ 2*A_290+ A_291+ A_293+ A_294+ A_295+ A_296;
varDot(index,23) = 2*A_175+ 2*A_176+ 2*A_177+ A_181+ A_190+ 0.5*A_199+ A_200+ A_201+ A_272+ A_291+ 0.333333*A_293+ 0.333333 *A_294+ 0.5*A_295+ 0.5*A_296;
varDot(index,24) = 2*A_142+ 2*A_143+ 2*A_144+ A_145+ A_152+ A_164+ A_166+ A_168+ 0.5*A_199+ A_200+ A_201+ 2*A_258+ A_261 + A_287+ 0.5*A_288+ A_290+ 0.333333*A_293+ 0.333333*A_294+ 0.5*A_295+ 0.5*A_296;
varDot(index,25) = A_5+ A_6+ A_7+ A_8+ A_10+ A_11;
varDot(index,26) = 2*A_25+ A_35+ A_41+ A_46+ A_47+ A_48+ A_52+ 2*A_226+ A_285+ 3*A_286+ 0.5*A_288+ A_290+ 0.333333*A_293 + 0.333333*A_294;
varDot(index,27) = 2*A_2+ 2*A_3+ A_17+ A_21+ A_22+ A_56;
varDot(index,28) = A_61+ A_66+ A_70+ A_74+ A_78+ A_84+ A_94+ A_96+ A_117+ A_132+ A_134;
varDot(index,29) = A_8;
varDot(index,30) = A_32;
varDot(index,31) = A_191+ A_275+ A_278+ A_280;
varDot(index,32) = 4*A_165+ A_166+ A_167+ 3*A_168+ 3*A_169+ 2*A_172+ 3*A_173+ A_265+ 4*A_266+ 3*A_267+ 3*A_268+ 2*A_269 + A_280;
varDot(index,33) = A_60;
varDot(index,34) = A_14+ A_19+ A_24+ A_32+ A_36+ A_37+ A_60+ A_73+ A_81+ A_82+ A_98+ A_102+ A_106+ A_115+ A_120+ A_127+ A_136 + A_150+ A_182+ A_207+ A_208+ 0.4*A_210+ A_214+ A_215+ 2*A_217+ A_222+ A_224+ 0.333*A_230+ A_234+ A_259 + A_262+ A_273;
varDot(index,35) = A_73+ A_82+ A_98+ A_102+ A_106+ A_115+ A_120+ A_127+ A_136;
varDot(index,36) = 3*A_194+ 2*A_195+ A_203+ 2*A_204+ A_205+ 2*A_276+ 3*A_277+ A_281+ A_282+ 2*A_283;
varDot(index,37) = A_281+ 2*A_282+ A_283;
varDot(index,38) = 0.8*A_128- A_252;
varDot(index,39) = A_146- A_147- A_258;
varDot(index,40) = - A_112;
varDot(index,41) = - A_165- A_266;
varDot(index,42) = - A_172- A_269;
varDot(index,43) = - A_173- A_268;
varDot(index,44) = - A_195- A_276;
varDot(index,45) = - A_194- A_277;
varDot(index,46) = A_212- A_213;
varDot(index,47) = - A_40;
varDot(index,48) = - A_69;
varDot(index,49) = - A_93;
varDot(index,50) = - A_262+ A_290;
varDot(index,51) = A_145+ A_199- A_259;
varDot(index,52) = - A_205- A_281;
varDot(index,53) = - A_191- A_275;
varDot(index,54) = - A_203- A_282;
varDot(index,55) = - A_204- A_283;
varDot(index,56) = - A_206+ 0.6*A_210+ A_211;
varDot(index,57) = - A_168- A_169- A_267;
varDot(index,58) = - A_90+ A_140- A_239;
varDot(index,59) = - A_19- A_24- A_27+ A_224;
varDot(index,60) = - A_21- A_22+ A_27+ A_46- A_222;
varDot(index,61) = A_51- A_54;
varDot(index,62) = 0.04*A_98- A_111- A_246;
varDot(index,63) = A_80- A_89- A_238;
varDot(index,64) = A_121- A_130- A_131- A_254;
varDot(index,65) = A_208- A_210+ A_216;
varDot(index,66) = A_135- A_139- A_255;
varDot(index,67) = A_101- A_103;
varDot(index,68) = A_126- A_128- A_251;
varDot(index,69) = A_97- A_100- A_241;
varDot(index,70) = A_49- A_51- A_53+ A_54;
varDot(index,71) = A_72- A_76- A_236;
varDot(index,72) = A_105- A_108- A_245;
varDot(index,73) = A_34- A_38- A_39- A_230;
varDot(index,74) = - A_79+ A_81+ A_86+ 0.18*A_87;
varDot(index,75) = - 0.9*A_166- A_167- A_265;
varDot(index,76) = A_31- A_36+ A_52- A_228;
varDot(index,77) = A_83- A_91- A_92- A_240;
varDot(index,78) = - A_68+ 0.23125*A_70+ 0.22*A_94+ 0.45*A_117+ 0.28*A_132;
varDot(index,79) = A_114- A_116- A_247;
varDot(index,80) = A_143+ A_160- A_197+ A_202- A_257+ A_287+ A_288;
varDot(index,81) = A_207+ A_209- A_211- A_212+ A_214+ A_215;
varDot(index,82) = A_119- A_124- A_249;
varDot(index,83) = A_29- A_30- A_227- A_285- A_286- A_290;
varDot(index,84) = A_41+ A_42+ A_47- A_48- A_49;
varDot(index,85) = 0.88*A_113+ 0.56*A_115+ 0.85*A_116- A_125+ A_129+ 0.67*A_247- A_250+ 0.67*A_253;
varDot(index,86) = 0.96*A_98+ A_99+ 0.7*A_100- A_104+ A_111+ A_241- A_242+ A_246;
varDot(index,87) = A_43- A_50- A_51- A_52+ A_53- A_55;
varDot(index,88) = A_16- A_18+ 0.13875*A_70+ 0.09*A_132- A_151- A_221;
varDot(index,89) = - A_58+ A_63+ 0.25*A_75+ 0.03*A_94+ 0.2*A_99+ 0.5*A_107+ 0.18*A_113+ 0.25*A_122+ 0.25*A_137;
varDot(index,90) = - A_196+ A_197+ A_198+ A_201- A_202- A_279+ A_293+ A_294+ A_295+ A_296;
varDot(index,91) = A_134+ 0.044*A_136- A_140- A_256;
varDot(index,92) = A_40- A_41- A_42- A_43- A_44- A_45- A_46- A_47+ A_48;
varDot(index,93) = 0.82*A_93- A_97- A_98- A_99+ 0.3*A_100;
varDot(index,94) = A_104- A_105- A_106- A_107+ 0.3*A_108;
varDot(index,95) = A_65+ A_66- A_67+ 0.63*A_70+ A_90+ A_91+ 0.31*A_94+ A_110+ 0.22*A_117+ 0.25*A_120+ 0.125*A_122+ 0.5*A_123 + 0.14*A_132+ A_162+ A_187+ A_232+ A_233+ A_234+ A_235+ A_237+ A_239+ A_244+ A_248+ 0.25*A_249;
varDot(index,96) = 0.04*A_94+ 0.5*A_107+ 0.7*A_108+ A_109- A_110+ 0.9*A_117+ 0.5*A_120+ 0.5*A_122+ A_123+ 0.25*A_137- A_244 + 0.5*A_249;
varDot(index,97) = - A_6- A_9+ A_13+ 0.05*A_56- A_148+ A_232+ 0.69*A_235;
varDot(index,98) = - A_56- A_57+ 0.06*A_94- A_161- A_235;
varDot(index,99) = A_125- A_126- A_127+ 0.2*A_128;
varDot(index,100) = A_177- A_182+ A_183+ A_196- A_198- A_270+ A_291;
varDot(index,101) = A_33- A_37+ A_66+ A_78+ A_209- A_229+ 2*A_285+ A_288+ A_289+ A_290+ A_292+ A_293+ A_294;
varDot(index,102) = A_112- A_113- A_114- A_115+ 0.15*A_116;
varDot(index,103) = - A_70- A_71- A_170- A_192;
varDot(index,104) = A_59- A_64- A_163- A_188- A_231;
varDot(index,105) = - A_183+ A_185- A_186- A_274- A_292- A_294;
varDot(index,106) = - A_132- A_133- A_134;
varDot(index,107) = - A_94- A_95- A_96;
varDot(index,108) = 0.5*A_103+ 0.2*A_107- A_109+ 0.25*A_120+ 0.375*A_122+ A_123+ A_130+ 0.25*A_137+ A_140- A_243+ 0.25*A_249 + A_254;
varDot(index,109) = A_133- A_135- A_136- A_137- 2*A_138;
varDot(index,110) = - A_117- A_118+ 0.65*A_132+ 0.956*A_136+ 0.5*A_137+ 2*A_138+ A_139- A_248+ A_255+ A_256;
varDot(index,111) = A_96+ 0.02*A_102+ 0.16*A_115+ 0.015*A_127- A_129- A_253;
varDot(index,112) = A_153- A_155- A_261- A_287+ A_289- A_295;
varDot(index,113) = A_118- A_119- A_120- A_121- A_122- 2*A_123+ A_124+ A_131+ 0.1*A_132;
varDot(index,114) = - A_207- A_208- A_209- A_214- A_215- A_216;
varDot(index,115) = 0.666667*A_71+ A_95- A_101- A_102+ 0.5*A_103+ 0.666667*A_170+ 0.666667*A_192;
varDot(index,116) = A_157- A_158- A_159- A_160- A_263- A_264- A_288- A_289- A_293;
varDot(index,117) = A_69- A_72- A_73- A_74- A_75+ 0.3*A_76- A_87+ 0.18*A_93+ 0.06*A_94+ 0.12*A_113+ 0.28*A_115+ 0.33*A_247 + A_250+ 0.33*A_253;
varDot(index,118) = A_179- A_181+ A_182+ A_189- A_272- A_291+ A_292- A_296;
varDot(index,119) = A_73+ A_74+ 0.75*A_75+ 0.7*A_76- A_77- A_78+ A_87+ 0.47*A_94+ 0.98*A_102+ 0.12*A_113+ 0.28*A_115+ 0.985 *A_127- A_171- A_193+ A_236- A_237+ 0.33*A_247+ A_251+ 0.33*A_253;
varDot(index,120) = - A_0- A_2- A_6- A_17- A_20- A_21- A_22- A_56- A_165- A_166- A_168- A_172- A_173+ A_218+ A_222;
varDot(index,121) = A_77+ A_78- A_80- A_81- A_82- A_83- A_84- A_85- A_86- A_87- 2*A_88+ A_89+ A_92+ 0.23*A_94+ A_106+ 0.3 *A_107+ A_110+ 0.1*A_117+ 0.25*A_120+ 0.125*A_122+ 0.985*A_127+ 0.1*A_132+ A_171+ A_193+ A_240+ A_242 + A_243+ A_244+ A_245+ A_248+ 0.25*A_249+ A_250+ A_251+ 2*A_252;
varDot(index,122) = - A_4- A_5+ A_6+ A_7+ A_9- A_12- A_13- A_14+ 0.4*A_56+ A_67+ A_148+ 0.09*A_166+ A_220+ A_233+ 0.31*A_235 + A_260;
varDot(index,123) = A_178- A_180+ A_187+ A_188+ A_192+ A_193+ A_215- A_291- A_293- A_295;
varDot(index,124) = A_1- A_2- A_3- A_5- A_8- A_11- A_23- A_26- A_41- A_48- A_70+ A_81- A_94- A_117- A_132- A_141- A_174 - A_212- A_218- A_219;
varDot(index,125) = 0.75*A_56+ A_57- A_59- A_60- A_61- 2*A_62- 2*A_63+ 0.7*A_64- A_75+ A_79+ A_82+ A_84- A_86+ 0.82*A_87+ 2 *A_88+ 0.07*A_94- A_99- A_107- A_113- A_122+ 0.08*A_132- A_137+ A_161- A_164+ A_188- A_189- A_190+ 0.6 *A_210+ A_211+ A_237+ A_238+ A_242+ A_265+ A_275+ A_284;
varDot(index,126) = A_5+ A_6- A_7- A_8- A_9+ A_10+ A_11+ 2*A_12- A_15+ 2*A_17- A_18- A_31+ A_32- A_33+ A_35- A_36- A_37 - A_39- A_40+ A_42+ A_44- A_50- A_53- A_54+ 0.75*A_56- A_57- A_58- 0.7*A_64- A_65- A_67- A_68- A_69+ 0.13 *A_70- A_71- 0.3*A_76- A_77- A_79- A_89- A_90- A_91- A_93+ 0.33*A_94- A_95- 0.3*A_100- 0.5*A_103- A_104 - 0.3*A_108- A_109- A_110- A_111- A_112- 0.15*A_116+ 0.19*A_117- A_118- A_124- A_125- 0.2*A_128- A_129 - A_130+ 0.25*A_132- A_133- A_140+ A_150- A_152- A_154- A_155+ A_163- A_167+ A_168- A_169- A_180+ A_181 - A_182- A_191- A_194- A_195- A_203- A_204- A_205- A_206- A_207- A_208- A_210+ A_220+ 2*A_221+ A_228+ A_229 + 0.333*A_230+ A_231+ A_236+ A_238+ A_241+ A_245+ A_247+ A_249+ A_251+ A_255+ A_261+ A_272;
varDot(index,127) = - A_141+ A_142+ 2*A_144+ A_145- A_148- A_149- A_150- A_151+ 0.94*A_152+ A_154+ A_156- A_160- A_161- A_162 - A_163+ A_164+ 3*A_165+ 0.35*A_166+ A_167+ 3*A_168+ 3*A_169- A_170- A_171+ A_172+ 2*A_173+ A_196+ A_197 - A_198+ A_200- A_202- A_214+ 2*A_257+ 2*A_258+ A_260+ A_261+ A_262+ A_263+ A_265+ 4*A_266+ 3*A_267+ A_268 + A_269+ A_279+ A_280+ A_281+ 2*A_282+ A_283;
varDot(index,128) = A_9+ A_14+ A_15- A_17+ A_18+ A_36+ A_37+ A_39+ A_40+ A_43+ A_45+ A_46+ A_50+ A_53+ A_54+ A_57+ A_64 + A_65+ A_68+ A_69+ A_77+ A_79+ A_89+ A_91+ A_93+ A_103+ A_104+ A_112+ 0.85*A_116+ A_129+ A_154+ A_155 + A_167+ A_169+ A_180+ A_191+ A_194+ A_195+ A_203+ A_204+ A_205- A_220+ 1.155*A_235- A_285+ A_287- A_289 + A_291- A_292+ A_295+ A_296;
varDot(index,129) = - A_174+ A_175+ 2*A_176- A_178+ A_180+ A_182- A_183+ A_184- A_187- A_188+ A_190+ A_191- A_192- A_193+ 3 *A_194+ 2*A_195- A_196- A_197+ A_198+ A_199+ A_200+ A_202+ A_203+ 2*A_204+ A_205- A_215+ A_216+ 2*A_270 + A_271+ A_272+ A_273+ 0.85*A_274+ A_275+ 2*A_276+ 3*A_277+ A_278+ A_279+ A_280+ A_281+ A_282+ 2*A_283;
varDot(index,130) = 0.25*A_56+ A_58+ A_60+ A_61+ 2*A_62+ A_63+ 0.3*A_64- A_65- A_66+ 1.13875*A_70+ 0.75*A_75+ A_85+ A_86 + A_90+ A_91+ 0.57*A_94+ 0.8*A_99+ 0.98*A_102+ A_106+ 0.8*A_107+ 0.68*A_113+ 0.75*A_120+ 1.125*A_122+ 0.5 *A_123+ 0.58*A_132+ 0.956*A_136+ 1.25*A_137+ A_138- A_162+ A_163+ A_164- A_187+ A_189+ A_190+ A_207+ A_209 + A_210+ A_214+ A_215+ A_231- A_232- A_233+ A_239+ A_243+ A_245+ A_248+ 0.75*A_249+ A_255+ A_256;
varDot(index,131) = A_0- A_1- A_3- A_7- A_10+ A_14+ A_19+ A_20+ A_24- A_25+ A_27- A_142- A_159+ 0.1*A_166- A_175- A_181+ 2 *A_217+ A_219+ A_223+ A_224+ A_225+ A_234+ A_259+ A_271;
varDot(index,132) = A_174- A_175- 2*A_176- 2*A_177- A_179+ A_181- A_184- A_185+ A_186- A_189- A_190- A_199- A_200- A_201 - A_216- A_271+ 0.15*A_274;
varDot(index,133) = A_19+ 2*A_21- A_23- A_24+ A_25- A_28- A_31- A_32- A_44- A_45+ A_47+ A_50+ A_51+ A_52+ A_55- A_60- A_73 - A_82- A_98- A_102- A_106- A_115- A_120- A_127- A_136- A_156- A_184+ A_223- A_224+ A_226+ A_228;
varDot(index,134) = A_141- A_142- 2*A_143- 2*A_144- 2*A_145- 2*A_146+ 2*A_147+ A_150- A_152- A_153+ A_155- A_156- A_157 + A_158+ A_159- A_164+ A_165+ 0.46*A_166+ A_172+ A_173- A_199- A_200- A_201+ A_259+ A_264;
varDot(index,135) = A_23- A_25- A_26- A_27+ 2*A_28- A_29+ A_30+ A_32- A_33- A_34+ A_35+ A_36+ A_38+ A_39- A_46- A_47- A_52 + A_60+ A_61+ A_73+ A_74+ A_82- A_83+ A_84+ A_90+ A_91+ A_92+ 0.96*A_98+ 0.98*A_102+ A_106+ A_111+ 0.84 *A_115+ A_120- A_121+ 0.985*A_127+ A_129+ A_130+ A_131+ 0.956*A_136+ A_156- A_157+ A_158+ A_184- A_185 + A_186- A_223+ A_225+ A_227+ A_229+ 0.667*A_230+ A_239+ A_240+ A_246+ A_253+ A_254+ A_256+ A_262+ A_264 + A_273+ 0.15*A_274;
varDot(index,136) = A_26- A_28- A_29+ A_30- A_35+ A_37- A_61- A_66- A_74- A_78- A_84- A_96- A_134+ A_159+ A_160+ A_183- A_209 - A_225- A_226+ A_227+ 0.333*A_230+ A_263+ 0.85*A_274;
varDot(index,137) = A_4+ A_8- A_10- A_11- A_12- A_13- A_14- A_15- 2*A_16+ A_18- A_32- A_34- A_35+ A_38- A_42- A_43+ A_44 + A_55+ A_58- A_59+ A_60+ A_61+ 2*A_62+ A_65+ A_66+ A_68+ 0.13*A_70- A_72+ A_73+ A_74+ A_75- A_80- A_81 + A_85+ 0.82*A_87+ 0.26*A_94- A_97+ 0.96*A_98+ 0.8*A_99- A_101+ 0.98*A_102- A_105+ 0.3*A_107+ A_109+ 1.23 *A_113- A_114+ 0.56*A_115+ 0.32*A_117- A_119+ 0.75*A_120+ 0.875*A_122+ A_123- A_126+ 0.25*A_132- A_135 + 0.956*A_136+ A_137+ A_138- A_149- A_150+ A_151+ 0.94*A_152- A_153+ A_162+ A_164- A_178- A_179+ A_187 + A_190+ A_206+ A_208+ 0.4*A_210- A_213+ 0.667*A_230+ A_231+ A_233+ A_236+ A_237+ A_241+ A_243+ A_244 + A_246+ 0.67*A_247+ A_248+ 0.75*A_249+ 0.67*A_253+ A_255+ A_256;
varDot(index,138) = A_148+ A_149+ A_151+ 0.06*A_152- A_154+ A_161+ A_162+ A_163+ A_170+ A_171+ A_214- A_260- A_287- A_288 - A_290- A_294- A_296;
}
}
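/* Finite-difference approximation of the explicit time derivative of the ODE
   right-hand side, dF/dT ~= (F(T+delta) - F(T))/delta, used by the Rosenbrock
   stages only in the non-autonomous case. Note: rconst is not recomputed inside
   this routine; khet_st, khet_tr and jx are carried for interface compatibility. */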
__device__ void ros_FunTimeDerivative(const double T, double roundoff, double * __restrict__ var, const double * __restrict__ fix,
const double * __restrict__ rconst, double *dFdT, double *Fcn0, int &Nfun,
const double * __restrict__ khet_st, const double * __restrict__ khet_tr,
const double * __restrict__ jx,
const int VL_GLO)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
const double DELTAMIN = 1.0E-6;
double delta,one_over_delta;
delta = sqrt(roundoff)*fmax(DELTAMIN,fabs(T));
one_over_delta = 1.0/delta;
Fun(var, fix, rconst, dFdT, Nfun, VL_GLO);
for (int i=0; i < NVAR; i++){
dFdT(index,i) = (dFdT(index,i) - Fcn0(index,i)) * one_over_delta;
}
}
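/* Core Rosenbrock integrator: advances var from Tstart to Tend with adaptive
   step-size control. Return codes: 0 = success, -6 = maximum number of steps
   exceeded, -7 = step size fell below the roundoff limit. The coefficient arrays
   (ros_A, ros_C, ros_M, ros_E, ros_Alpha, ros_Gamma, ros_NewF) select the
   particular Rosenbrock scheme; see the ros[] lookup table further below. */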
__device__ static int ros_Integrator(double * __restrict__ var, const double * __restrict__ fix, const double Tstart, const double Tend, double &T,
// Rosenbrock method coefficients
const int ros_S, const double * __restrict__ ros_M, const double * __restrict__ ros_E, const double * __restrict__ ros_A, const double * __restrict__ ros_C,
const double * __restrict__ ros_Alpha, const double * __restrict__ ros_Gamma, const double ros_ELO, const int * ros_NewF,
// Integration parameters
const int autonomous, const int vectorTol, const int Max_no_steps,
const double roundoff, const double Hmin, const double Hmax, const double Hstart, double &Hexit,
const double FacMin, const double FacMax, const double FacRej, const double FacSafe,
// Status parameters
int &Nfun, int &Njac, int &Nstp, int &Nacc, int &Nrej, int &Ndec, int &Nsol, int &Nsng,
// cuda global mem buffers
const double * __restrict__ rconst, const double * __restrict__ absTol, const double * __restrict__ relTol, double * __restrict__ varNew, double * __restrict__ Fcn0,
double * __restrict__ K, double * __restrict__ dFdT, double * __restrict__ jac0, double * __restrict__ Ghimj, double * __restrict__ varErr,
// for update_rconst
const double * __restrict__ khet_st, const double * __restrict__ khet_tr,
const double * __restrict__ jx,
// VL_GLO
const int VL_GLO)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
double H, Hnew, HC, HG, Fac; // Tau - not used
double Err; //*varErr;
int direction;
int rejectLastH, rejectMoreH;
const double DELTAMIN = 1.0E-5;
// ~~~> Initial preparations
T = Tstart;
Hexit = 0.0;
H = fmin(Hstart,Hmax);
if (fabs(H) <= 10.0*roundoff)
H = DELTAMIN;
if (Tend >= Tstart)
{
direction = + 1;
}
else
{
direction = - 1;
}
rejectLastH=0;
rejectMoreH=0;
// ~~~> Time loop begins below
// TimeLoop:
    while( ((direction > 0) && ((T- Tend)+ roundoff <= ZERO)) || ((direction < 0) && ((Tend-T)+ roundoff <= ZERO)) )
{
if (Nstp > Max_no_steps) // Too many steps
return -6;
// Step size too small
if (H <= roundoff){ // Step size too small
//if (((T+ 0.1*H) == T) || (H <= roundoff)) {
return -7;
}
// ~~~> Limit H if necessary to avoid going beyond Tend
Hexit = H;
H = fmin(H,fabs(Tend-T));
// ~~~> Compute the function at current time
Fun(var, fix, rconst, Fcn0, Nfun, VL_GLO); /// VAR READ - Fcn0 Write
// ~~~> Compute the function derivative with respect to T
if (!autonomous)
ros_FunTimeDerivative(T, roundoff, var, fix, rconst, dFdT, Fcn0, Nfun, khet_st, khet_tr, jx, VL_GLO); /// VAR READ - fcn0 read
// ~~~> Compute the Jacobian at current time
Jac_sp(var, fix, rconst, jac0, Njac, VL_GLO); /// VAR READ
// ~~~> Repeat step calculation until current step accepted
// UntilAccepted:
while(1)
{
ros_PrepareMatrix(H, direction, ros_Gamma[0], jac0, Ghimj, Nsng, Ndec, VL_GLO);
// ~~~> Compute the stages
// Stage:
for (int istage=0; istage < ros_S; istage++)
{
// For the 1st istage the function has been computed previously
if (istage == 0)
{
for (int i=0; i<NVAR; i++){
varNew(index,i) = Fcn0(index,i); // FCN0 Read
}
}
else if(ros_NewF[istage])
{
for (int i=0; i<NVAR; i++){
varNew(index,i) = var(index,i);
}
for (int j=0; j < (istage); j++){
for (int i=0; i<NVAR; i++){
varNew(index,i) = K(index,j,i)*ros_A[(istage)*(istage-1)/2 + j] + varNew(index,i);
}
}
Fun(varNew, fix, rconst, varNew, Nfun,VL_GLO); // FCN <- varNew / not overlap
}
for (int i=0; i<NVAR; i++)
K(index,istage,i) = varNew(index,i);
for (int j=0; j<(istage); j++)
{
HC = ros_C[(istage)*(istage-1)/2 + j]/(direction*H);
for (int i=0; i<NVAR; i++){
double tmp = K(index,j,i);
K(index,istage,i) += tmp*HC;
}
}
if ((!autonomous) && (ros_Gamma[istage] ))
{
HG = direction*H*ros_Gamma[istage];
for (int i=0; i<NVAR; i++){
K(index,istage,i) += dFdT(index,i)*HG;
}
}
            // argument access: Ghimj (R), K (RW), Nsol (RW), istage (R), ros_S (R)
ros_Solve(Ghimj, K, Nsol, istage, ros_S);
} // Stage
// ~~~> Compute the new solution
for (int i=0; i<NVAR; i++){
double tmpNew = var(index,i); /// VAR READ
double tmpErr = ZERO;
for (int j=0; j<ros_S; j++){
double tmp = K(index,j,i);
#ifdef DEBUG
if (isnan(tmp)){
printf("Solver detected NAN!");
tmp = 0;
}
#endif
tmpNew += tmp*ros_M[j];
tmpErr += tmp*ros_E[j];
}
varNew(index,i) = tmpNew; // varNew is killed
varErr(index,i) = tmpErr;
}
Err = ros_ErrorNorm(var, varNew, varErr, absTol, relTol, vectorTol); /// VAR-varNew READ
// ~~~> New step size is bounded by FacMin <= Hnew/H <= FacMax
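            // Classical controller: Fac = FacSafe * Err^(-1/ELO), clipped to [FacMin, FacMax]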
Fac = fmin(FacMax,fmax(FacMin,FacSafe/pow(Err,ONE/ros_ELO)));
Hnew = H*Fac;
// ~~~> Check the error magnitude and adjust step size
Nstp = Nstp+ 1;
if((Err <= ONE) || (H <= Hmin)) // ~~~> Accept step
{
Nacc = Nacc + 1;
for (int j=0; j<NVAR ; j++)
var(index,j) = fmax(varNew(index,j),ZERO); /////////// VAR WRITE - last VarNew read
T = T + direction*H;
Hnew = fmax(Hmin,fmin(Hnew,Hmax));
if (rejectLastH) // No step size increase after a rejected step
Hnew = fmin(Hnew,H);
rejectLastH = 0;
rejectMoreH = 0;
H = Hnew;
break; // EXIT THE LOOP: WHILE STEP NOT ACCEPTED
}
else // ~~~> Reject step
{
if (rejectMoreH)
Hnew = H*FacRej;
rejectMoreH = rejectLastH;
rejectLastH = 1;
H = Hnew;
if (Nacc >= 1)
Nrej += 1;
} // Err <= 1
} // UntilAccepted
} // TimeLoop
    // ~~~> Successful exit
return 0; // ~~~> The integration was successful
}
typedef struct {
double ros_A[15];
double ros_C[15];
int ros_NewF[8];
double ros_M[6];
double ros_E[6];
double ros_Alpha[6];
double ros_Gamma[6];
double ros_ELO;
int ros_S;
} ros_t;
/*
 * Lookup tables for the different Rosenbrock (ROS) methods, kept in constant memory
 * so that method selection does not require divergent branches; this is much faster
 * on the GPU.
 */
__device__ __constant__ ros_t ros[5] = {
{
{.58578643762690495119831127579030,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, /* ros_A */
{-1.17157287525380990239662255158060,0,0,0,0,0,0,0,0,0,0,0,0,0,0}, /* ros_C */
{1,1,0,0,0,0,0,0}, /* ros_NewF */
{.87867965644035742679746691368545,.29289321881345247559915563789515,0,0,0,0}, /* ros_M */
{.29289321881345247559915563789515,.29289321881345247559915563789515,0,0,0,0}, /* ros_E */
{0,1.0,0,0,0,0}, /* ros_Alpha */
{1.70710678118654752440084436210485,-1.70710678118654752440084436210485,0,0,0,0}, /* ros_Gamma */
2.0, /* ros_ELO */
2, /* ros_S*/
}, /* Ros2 */
{
{1.0,1.0,0,0,0,0,0,0,0,0,0,0,0,0,0}, /* ros_A */
{-0.10156171083877702091975600115545E+01, 0.40759956452537699824805835358067E+01,0.92076794298330791242156818474003E+01,0,0,0,0,0,0,0,0,0,0,0,0}, /* ros_C */
{1,1,0,0,0,0,0,0}, /* ros_NewF */
{0.1E+01,0.61697947043828245592553615689730E+01,-0.42772256543218573326238373806514E+00,0,0,0}, /* ros_M */
{0.5E+00,- 0.29079558716805469821718236208017E+01,0.22354069897811569627360909276199E+00,0,0,0}, /* ros_E */
{0.0E+00,0.43586652150845899941601945119356E+00,0.43586652150845899941601945119356E+00,0,0,0}, /* ros_Alpha */
{0.43586652150845899941601945119356E+00,0.24291996454816804366592249683314E+00,0.21851380027664058511513169485832E+01,0,0,0}, /* ros_Gamma */
3.0, /* ros_ELO */
3
}, /* Ros3 */
{
{0.2000000000000000E+01, 0.1867943637803922E+01, 0.2344449711399156E+00, 0.1867943637803922E+01, 0.2344449711399156E+00,0,0,0,0,0,0,0,0,0,0}, /* ros_A */
{-0.7137615036412310E+01,0.2580708087951457E+01,0.6515950076447975E+00, - 0.2137148994382534E+01, - 0.3214669691237626E+00, - 0.6949742501781779E+00 ,0,0,0,0,0,0,0,0,0}, /* ros_C */
{1,1,1,0,0,0,0,0}, /* ros_NewF */
{0.2255570073418735E+01, 0.2870493262186792E+00, 0.4353179431840180E+00, 0.1093502252409163E+01,0,0}, /* ros_M */
{ -0.2815431932141155E+00, -0.7276199124938920E-01, -0.1082196201495311E+00, -0.1093502252409163E+01, 0, 0}, /* ros_E */
{0.0, 0.1145640000000000E+01, 0.6552168638155900E+00, 0.6552168638155900E+00,0,0}, /* ros_Alpha */
{ 0.5728200000000000E+00, -0.1769193891319233E+01, 0.7592633437920482E+00, -0.1049021087100450E+00,0,0}, /* ros_Gamma */
4.0, /* ros_ELO */
4
}, /* Ros4 */
{
{ 0.0E+00, 2.0E+00, 0.0E+00, 2.0E+00, 0.0E+00, 1.0E+00, 0,0,0,0,0,0,0,0,0}, /* ros_A */
{ 4.0E+00, 1.0E+00, - 1.0E+00, 1.0E+00, - 1.0E+00, - 2.66666666666666666666666666666666, 0,0,0,0,0,0,0,0,0}, /* ros_C */
{1,0,1,1,0,0,0,0}, /* ros_NewF */
{2.0,0,1.0,1.0,0,0}, /* ros_M */
{0,0,0,1.0,0,0}, /* ros_E */
{0,0,1.0,1.0,0,0}, /* ros_Alpha */
{0.5,1.5,0,0,0,0}, /* ros_Gamma */
3.0, /* ros_ELO */
4
}, /* Rodas3 */
{
{
0.1544000000000000E+01, 0.9466785280815826E+00, 0.2557011698983284E+00, 0.3314825187068521E+01,
0.2896124015972201E+01, 0.9986419139977817E+00, 0.1221224509226641E+01, 0.6019134481288629E+01,
0.1253708332932087E+02, -0.6878860361058950E+00, 0.1221224509226641E+01, 0.6019134481288629E+01,
0.1253708332932087E+02, -0.6878860361058950E+00, 1.0E+00}, /* ros_A */
{
-0.5668800000000000E+01, -0.2430093356833875E+01, -0.2063599157091915E+00, -0.1073529058151375E+00,
-0.9594562251023355E+01, -0.2047028614809616E+02, 0.7496443313967647E+01, -0.1024680431464352E+02,
-0.3399990352819905E+02, 0.1170890893206160E+02, 0.8083246795921522E+01, -0.7981132988064893E+01,
-0.3152159432874371E+02, 0.1631930543123136E+02, -0.6058818238834054E+01}, /* ros_C */
{1,1,1,1,1,1,0,0}, /* ros_NewF */
{0.1221224509226641E+01,0.6019134481288629E+01,0.1253708332932087E+02,- 0.6878860361058950E+00,1,1}, /* ros_M */
{0,0,0,0,0,1.0}, /* ros_E */
{0.000, 0.386, 0.210, 0.630, 1.000, 1.000}, /* ros_Alpha */
{0.2500000000000000E+00, -0.1043000000000000E+00, 0.1035000000000000E+00, 0.3620000000000023E-01, 0, 0}, /* ros_Gamma */
4.0, /* ros_ELO */
6
} /* Rodas4 */
};
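/* Illustration only (a sketch, not part of the generated solver): a driver selects
 * one coefficient set by method index and forwards its members to ros_Integrator,
 * e.g. for ros[method] with method in 0..4 (Ros2, Ros3, Ros4, Rodas3, Rodas4):
 *
 *   ros_Integrator(var, fix, Tstart, Tend, T,
 *                  ros[method].ros_S, ros[method].ros_M, ros[method].ros_E,
 *                  ros[method].ros_A, ros[method].ros_C,
 *                  ros[method].ros_Alpha, ros[method].ros_Gamma,
 *                  ros[method].ros_ELO, ros[method].ros_NewF,
 *                  ... control, statistics and work-buffer arguments ...);
 *
 * The actual call site is defined elsewhere in this file; the argument order above
 * simply mirrors the ros_Integrator signature, and "method" is a placeholder index.
 */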
//__device__ double rconst_local[MAX_VL_GLO*NREACT];
/* Initialize rconst local */
//__device__ double * rconst_local;
__device__ double k_3rd(double temp, double cair, double k0_300K, double n, double kinf_300K, double m, double fc)
/*
*
* temp temperature [K]
* cair air concentration [molecules/cm3]
* k0_300K low pressure limit at 300 K
* n exponent for low pressure limit
* kinf_300K high pressure limit at 300 K
* m exponent for high pressure limit
* fc broadening factor (usually fc=0.6)
*
*/
{
double zt_help, k0_T, kinf_T, k_ratio, k_3rd_r;
zt_help = 300.0/temp;
k0_T = k0_300K *pow(zt_help,n) *cair;
kinf_T = kinf_300K *pow(zt_help,m);
k_ratio = k0_T/kinf_T;
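  /* Troe formula: k = k0/(1 + k0/kinf) * fc^( 1/(1 + log10(k0/kinf)^2) ),
     blending the low- and high-pressure limits with broadening factor fc
     (JPL-style formulation). */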
k_3rd_r = k0_T/(1.0+ k_ratio)*pow(fc,1.0/(1.0+ pow(log10(k_ratio),2)));
return k_3rd_r;
}
__device__ double k_3rd_iupac(double temp, double cair, double k0_300K, double n, double kinf_300K, double m, double fc)
/*
*
* temp temperature [K]
* cair air concentration [molecules/cm3]
* k0_300K low pressure limit at 300 K
* n exponent for low pressure limit
* kinf_300K high pressure limit at 300 K
* m exponent for high pressure limit
* fc broadening factor (e.g. 0.45 or 0.6...)
 * nu          N exponent of the IUPAC broadening factor (computed from fc below)
*
*/
{
double zt_help, k0_T, kinf_T, k_ratio, nu, k_3rd_iupac_r;
zt_help = 300.0/temp;
k0_T = k0_300K *pow(zt_help,n) *cair;
kinf_T = kinf_300K *pow(zt_help,m);
k_ratio = k0_T/kinf_T;
nu = 0.75- 1.27*log10(fc);
k_3rd_iupac_r = k0_T/(1.0+ k_ratio)*pow(fc,1.0/(1.0+ pow(log10(k_ratio)/nu,2)));
return k_3rd_iupac_r;
}
double * temp_gpu;
double * press_gpu;
double * cair_gpu;
double * Ghimj;
double * K;
double * varNew;
double * Fcn0;
double * dFdT;
double * jac0;
double * varErr;
double * var;
double * fix;
double * rconst;
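/* Host-side global pointers; presumably they hold device buffers allocated by the
   host wrapper (e.g. via cudaMalloc) and reused across kernel launches: inputs
   (temp, press, cair, var, fix, rconst) and solver work arrays (Ghimj, K, varNew,
   Fcn0, dFdT, jac0, varErr). */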
__device__ void update_rconst(const double * __restrict__ var,
const double * __restrict__ khet_st, const double * __restrict__ khet_tr,
const double * __restrict__ jx, double * __restrict__ rconst,
const double * __restrict__ temp_gpu,
const double * __restrict__ press_gpu,
const double * __restrict__ cair_gpu,
const int VL_GLO)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
/* Set local buffer */
{
const double temp_loc = temp_gpu[index];
const double press_loc = press_gpu[index];
const double cair_loc = cair_gpu[index];
double k_HO2_HO2, k_NO3_NO2, k_NO2_HO2, k_HNO3_OH, k_CH3OOH_OH, k_ClO_ClO, k_BrO_NO2, k_I_NO2, k_DMS_OH, k_CH2OO_SO2, k_O3s, beta_null_CH3NO3, beta_inf_CH3NO3, beta_CH3NO3, k_NO2_CH3O2, k_C6H5O2_NO2, k_CH2OO_NO2, beta_C2H5NO3, alpha_NO_HO2, beta_NO_HO2, k0_NO_HO2, k2d_NO_HO2, k1d_NO_HO2, k2w_NO_HO2, k1w_NO_HO2, k_PrO2_HO2, k_PrO2_NO, k_PrO2_CH3O2, G7402a_yield, k_CH3CO3_NO2, k_PAN_M, KRO2NO, KRO2HO2[12], KAPNO, KRO2NO3, KNO3AL, KAPHO2, k_CH3O2, k_RO2RCO3, k_RO2pRO2, k_RO2sRO2, k_RO2tRO2, k_RO2pORO2, k_RO2sORO2, k_RO2tORO2, k_RO2LISOPACO2, k_RO2ISOPBO2, k_RO2ISOPDO2, k_p, k_s, k_t, k_rohro, k_co2h, k_adp, k_ads, k_adt, KHSB, KHSD, K16HSZ14, K16HSZ41, K16HS, K15HSDHB, K14HSAL, K15HS24VYNAL, K15HS42VYNAL, KHYDEC, k_CH2CHOH_OH_HCOOH, k_CH2CHOH_OH_ALD, k_CH2CHOH_HCOOH, k_ALD_HCOOH, J_IC3H7NO3, J_ACETOL, J_HPALD, J_KETENE, RO2, k1_RO2RCO3, k1_RO2pRO2, k1_RO2sRO2, k1_RO2tRO2, k1_RO2pORO2, k1_RO2sORO2, k1_RO2tORO2, k1_RO2LISOPACO2, k1_RO2ISOPBO2, k1_RO2ISOPDO2;
k_HO2_HO2 = (3.0E-13 *exp(460. / temp_loc)+ 2.1E-33 *exp(920. / temp_loc) *cair_loc) * (1.+ 1.4E-21 *exp(2200. / temp_loc) *var(index,ind_H2O));
k_NO3_NO2 = k_3rd(temp_loc , cair_loc , 2.4E-30 , 3.0 , 1.6E-12 , - 0.1 , 0.6);
k_NO2_HO2 = k_3rd(temp_loc , cair_loc , 1.9E-31 , 3.4 , 4.0E-12 , 0.3 , 0.6);
k_HNO3_OH = 1.32E-14 *exp(527. / temp_loc) + 1. / (1. / (7.39E-32 *exp(453. / temp_loc) *cair_loc) + 1. / (9.73E-17 *exp(1910. / temp_loc)));
k_CH3OOH_OH = 5.3E-12 *exp(190. / temp_loc);
k_ClO_ClO = k_3rd(temp_loc , cair_loc , 1.9E-32 , 3.6 , 3.7E-12 , 1.6 , 0.6);
k_BrO_NO2 = k_3rd_iupac(temp_loc , cair_loc , 4.7E-31 , 3.1 , 1.8E-11 , 0.0 , 0.4);
k_I_NO2 = k_3rd_iupac(temp_loc , cair_loc , 3.0E-31 , 1.0 , 6.6E-11 , 0.0 , 0.63);
k_DMS_OH = 1.E-9 *exp(5820. / temp_loc) *var(index,ind_O2) / (1.E30+ 5. *exp(6280. / temp_loc) *var(index,ind_O2));
k_CH2OO_SO2 = 3.66E-11;
k_O3s = (1.7E-12 *exp(- 940. / temp_loc)) *var(index,ind_OH) + (1.E-14 *exp(- 490. / temp_loc)) *var(index,ind_HO2) + jx(index,ip_O1D) *2.2E-10 *var(index,ind_H2O) / (3.2E-11 *exp(70. / temp_loc) *var(index,ind_O2) + 1.8E-11 *exp(110. / temp_loc) *var(index,ind_N2) + 2.2E-10 *var(index,ind_H2O));
beta_null_CH3NO3 = 0.00295 + 5.15E-22 *cair_loc * pow(temp_loc / 298, 7.4);
beta_inf_CH3NO3 = 0.022;
beta_CH3NO3 = (beta_null_CH3NO3 *beta_inf_CH3NO3) / (beta_null_CH3NO3 + beta_inf_CH3NO3) / 10.;
k_NO2_CH3O2 = k_3rd(temp_loc , cair_loc , 1.0E-30 , 4.8 , 7.2E-12 , 2.1 , 0.6);
k_C6H5O2_NO2 = k_NO2_CH3O2;
k_CH2OO_NO2 = 4.25E-12;
beta_C2H5NO3 = (1- 1 / (1+ 1.E-2 *(3.88e-3 *cair_loc / 2.46e19 *760.+ .365) *(1+ 1500. *(1 / temp_loc - 1 / 298.))));
alpha_NO_HO2 = var(index,ind_H2O) *6.6E-27 *temp_loc *exp(3700. / temp_loc);
beta_NO_HO2 = max(((530. / temp_loc)+ (press_loc *4.8004E-6)- 1.73) *0.01 , 0.);
k0_NO_HO2 = 3.5E-12 *exp(250. / temp_loc);
k2d_NO_HO2 = (beta_NO_HO2 *k0_NO_HO2) / (1.+ beta_NO_HO2);
k1d_NO_HO2 = k0_NO_HO2 - k2d_NO_HO2;
k2w_NO_HO2 = (beta_NO_HO2 *k0_NO_HO2 *(1.+ 42. *alpha_NO_HO2))/ ((1.+ alpha_NO_HO2) *(1.+ beta_NO_HO2));
k1w_NO_HO2 = k0_NO_HO2 - k2w_NO_HO2;
k_PrO2_HO2 = 1.9E-13 *exp(1300. / temp_loc);
k_PrO2_NO = 2.7E-12 *exp(360. / temp_loc);
k_PrO2_CH3O2 = 9.46E-14 *exp(431. / temp_loc);
G7402a_yield = 0.8 / 1.1;
k_CH3CO3_NO2 = k_3rd(temp_loc , cair_loc , 9.7E-29 , 5.6 , 9.3E-12 , 1.5 , 0.6);
k_PAN_M = k_CH3CO3_NO2 / (9.0E-29 *exp(14000. / temp_loc));
KRO2NO = 2.54E-12 *exp(360. / temp_loc);
/*KRO2HO2(:) = 2.91E-13 *exp(1300. / temp_loc) *(1.- exp(- 0.245 *(nC(:))));*/
for (int ii=0;ii<12;ii++) {
KRO2HO2[ii] = 2.91E-13 *exp(1300. / temp_loc) * (1.- exp(- 0.245 *float(ii+1)));
}
KAPNO = 8.10E-12 *exp(270. / temp_loc);
KRO2NO3 = 2.50E-12;
KNO3AL = 1.4E-12 *exp(- 1900. / temp_loc);
KAPHO2 = 5.20E-13 *exp(980. / temp_loc) *1.865;
k_CH3O2 = 1.03E-13 *exp(365. / temp_loc);
k_RO2RCO3 = 2. *2.E-12 *exp(500. / temp_loc);
k_RO2pRO2 = 2. * pow(1.E-12 *k_CH3O2, .5);
k_RO2sRO2 = 2. * pow(1.6E-12 *exp(- 2200. / temp_loc) *k_CH3O2, .5);
k_RO2tRO2 = 2. *3.8E-13 *exp(- 1430. / temp_loc);
k_RO2pORO2 = 2. *7.5E-13 *exp(500. / temp_loc);
k_RO2sORO2 = 2. * pow(7.7E-15 *exp(1330. / temp_loc) *k_CH3O2, .5);
k_RO2tORO2 = 2. * pow(4.7E-13 *exp(- 1420. / temp_loc) *k_CH3O2, .5);
k_RO2LISOPACO2 = 2. * pow((2.8E-12+ 3.9E-12) / 2. *k_CH3O2, .5);
k_RO2ISOPBO2 = 2. * pow(6.9E-14 *k_CH3O2, .5);
k_RO2ISOPDO2 = 2. * pow(4.8E-12 *k_CH3O2, .5);
k_p = 4.49E-18 *temp_loc *temp_loc *exp(- 320. / temp_loc);
k_s = 4.50E-18 *temp_loc *temp_loc *exp(253. / temp_loc);
k_t = 2.12E-18 *temp_loc *temp_loc *exp(696. / temp_loc);
k_rohro = 2.1E-18 *temp_loc *temp_loc *exp(- 85. / temp_loc);
k_co2h = .7 *4.2E-14 *exp(850. / temp_loc);
k_adp = 4.5E-12 * pow(temp_loc / 300., - 0.85);
k_ads = .25 *(1.1E-11 *exp(485. / temp_loc)+ 1.0E-11 *exp(553. / temp_loc));
k_adt = 1.922E-11 *exp(450. / temp_loc) - k_ads;
KHSB = 1.52E11 *exp(- 9512. / temp_loc) *1.;
KHSD = 6.08E10 *exp(- 8893. / temp_loc) *1.;
K16HSZ14 = 2.28E9 *exp(- 6764 / temp_loc) *0.28;
K16HSZ41 = 1.23E9 *exp(- 6186 / temp_loc) *0.28;
K16HS = pow(K16HSZ14 *K16HSZ41, .5);
K15HSDHB = 5.;
K14HSAL = 2.9E7 *exp(- 1 *(5297+ 705) / temp_loc);
K15HS24VYNAL = K16HSZ14 *exp(- 3500 / (1.987 *temp_loc));
K15HS42VYNAL = K16HSZ41 *exp(- 3500 / (1.987 *temp_loc));
KHYDEC = 6.e14 *exp(- 16000. / (1.98588 *temp_loc));
k_CH2CHOH_OH_HCOOH = 4.3E-11;
k_CH2CHOH_OH_ALD = 2.4E-11;
k_CH2CHOH_HCOOH = 4.67E-26 * pow(temp_loc, 3.286 *exp(4509. / (1.987 *temp_loc)));
k_ALD_HCOOH = 1.17E-19 * pow(temp_loc, 1.209 *exp(- 556. / (1.987 *temp_loc)));
J_IC3H7NO3 = 3.7 *jx(index,ip_PAN);
J_ACETOL = 0.65 *0.11 *jx(index,ip_CHOH);
J_HPALD = (jx(index,ip_CH3OOH)+ jx(index,ip_MACR) / (2. *1.95E-3));
J_KETENE = jx(index,ip_MVK) / 0.004;
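        /* RO2 is the total concentration of organic peroxy radicals: every RO2-type
           species present in the mechanism is added in. The ind_* species indices
           are mechanism constants, so each (ind_X > 0) guard simply drops species
           that are not part of the compiled mechanism. */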
RO2 = 0.;
if (ind_LISOPACO2>0) RO2 = RO2 + var(index,ind_LISOPACO2);
if (ind_LDISOPACO2>0) RO2 = RO2 + var(index,ind_LDISOPACO2);
if (ind_ISOPBO2>0) RO2 = RO2 + var(index,ind_ISOPBO2);
if (ind_ISOPDO2>0) RO2 = RO2 + var(index,ind_ISOPDO2);
if (ind_LISOPEFO2>0) RO2 = RO2 + var(index,ind_LISOPEFO2);
if (ind_NISOPO2>0) RO2 = RO2 + var(index,ind_NISOPO2);
if (ind_LHC4ACCO3>0) RO2 = RO2 + var(index,ind_LHC4ACCO3);
if (ind_LC578O2>0) RO2 = RO2 + var(index,ind_LC578O2);
if (ind_C59O2>0) RO2 = RO2 + var(index,ind_C59O2);
if (ind_LNISO3>0) RO2 = RO2 + var(index,ind_LNISO3);
if (ind_CH3O2>0) RO2 = RO2 + var(index,ind_CH3O2);
if (ind_HOCH2O2>0) RO2 = RO2 + var(index,ind_HOCH2O2);
if (ind_CH3CO3>0) RO2 = RO2 + var(index,ind_CH3CO3);
if (ind_C2H5O2>0) RO2 = RO2 + var(index,ind_C2H5O2);
if (ind_HOCH2CO3>0) RO2 = RO2 + var(index,ind_HOCH2CO3);
if (ind_HYPROPO2>0) RO2 = RO2 + var(index,ind_HYPROPO2);
if (ind_LBUT1ENO2>0) RO2 = RO2 + var(index,ind_LBUT1ENO2);
if (ind_BUT2OLO2>0) RO2 = RO2 + var(index,ind_BUT2OLO2);
if (ind_HCOCO3>0) RO2 = RO2 + var(index,ind_HCOCO3);
if (ind_CO2H3CO3>0) RO2 = RO2 + var(index,ind_CO2H3CO3);
if (ind_LHMVKABO2>0) RO2 = RO2 + var(index,ind_LHMVKABO2);
if (ind_MACO3>0) RO2 = RO2 + var(index,ind_MACO3);
if (ind_MACRO2>0) RO2 = RO2 + var(index,ind_MACRO2);
if (ind_PRONO3BO2>0) RO2 = RO2 + var(index,ind_PRONO3BO2);
if (ind_HOCH2CH2O2>0) RO2 = RO2 + var(index,ind_HOCH2CH2O2);
if (ind_CH3COCH2O2>0) RO2 = RO2 + var(index,ind_CH3COCH2O2);
if (ind_IC3H7O2>0) RO2 = RO2 + var(index,ind_IC3H7O2);
if (ind_NC3H7O2>0) RO2 = RO2 + var(index,ind_NC3H7O2);
if (ind_LC4H9O2>0) RO2 = RO2 + var(index,ind_LC4H9O2);
if (ind_TC4H9O2>0) RO2 = RO2 + var(index,ind_TC4H9O2);
if (ind_LMEKO2>0) RO2 = RO2 + var(index,ind_LMEKO2);
if (ind_HCOCH2O2>0) RO2 = RO2 + var(index,ind_HCOCH2O2);
if (ind_EZCH3CO2CHCHO>0) RO2 = RO2 + var(index,ind_EZCH3CO2CHCHO);
if (ind_EZCHOCCH3CHO2>0) RO2 = RO2 + var(index,ind_EZCHOCCH3CHO2);
if (ind_CH3COCHO2CHO>0) RO2 = RO2 + var(index,ind_CH3COCHO2CHO);
if (ind_HCOCO2CH3CHO>0) RO2 = RO2 + var(index,ind_HCOCO2CH3CHO);
if (ind_C1ODC3O2C4OOH>0) RO2 = RO2 + var(index,ind_C1ODC3O2C4OOH);
if (ind_C1OOHC2O2C4OD>0) RO2 = RO2 + var(index,ind_C1OOHC2O2C4OD);
if (ind_C1ODC2O2C4OD>0) RO2 = RO2 + var(index,ind_C1ODC2O2C4OD);
if (ind_ISOPBDNO3O2>0) RO2 = RO2 + var(index,ind_ISOPBDNO3O2);
if (ind_LISOPACNO3O2>0) RO2 = RO2 + var(index,ind_LISOPACNO3O2);
if (ind_DB1O2>0) RO2 = RO2 + var(index,ind_DB1O2);
if (ind_DB2O2>0) RO2 = RO2 + var(index,ind_DB2O2);
if (ind_LME3FURANO2>0) RO2 = RO2 + var(index,ind_LME3FURANO2);
if (ind_NO3CH2CO3>0) RO2 = RO2 + var(index,ind_NO3CH2CO3);
if (ind_CH3COCO3>0) RO2 = RO2 + var(index,ind_CH3COCO3);
if (ind_ZCO3C23DBCOD>0) RO2 = RO2 + var(index,ind_ZCO3C23DBCOD);
if (ind_IBUTOLBO2>0) RO2 = RO2 + var(index,ind_IBUTOLBO2);
if (ind_IPRCO3>0) RO2 = RO2 + var(index,ind_IPRCO3);
if (ind_IC4H9O2>0) RO2 = RO2 + var(index,ind_IC4H9O2);
if (ind_LMBOABO2>0) RO2 = RO2 + var(index,ind_LMBOABO2);
if (ind_IPRHOCO3>0) RO2 = RO2 + var(index,ind_IPRHOCO3);
if (ind_LNMBOABO2>0) RO2 = RO2 + var(index,ind_LNMBOABO2);
if (ind_NC4OHCO3>0) RO2 = RO2 + var(index,ind_NC4OHCO3);
if (ind_LAPINABO2>0) RO2 = RO2 + var(index,ind_LAPINABO2);
if (ind_C96O2>0) RO2 = RO2 + var(index,ind_C96O2);
if (ind_C97O2>0) RO2 = RO2 + var(index,ind_C97O2);
if (ind_C98O2>0) RO2 = RO2 + var(index,ind_C98O2);
if (ind_C85O2>0) RO2 = RO2 + var(index,ind_C85O2);
if (ind_C86O2>0) RO2 = RO2 + var(index,ind_C86O2);
if (ind_PINALO2>0) RO2 = RO2 + var(index,ind_PINALO2);
if (ind_C96CO3>0) RO2 = RO2 + var(index,ind_C96CO3);
if (ind_C89CO3>0) RO2 = RO2 + var(index,ind_C89CO3);
if (ind_C85CO3>0) RO2 = RO2 + var(index,ind_C85CO3);
if (ind_OHMENTHEN6ONEO2>0) RO2 = RO2 + var(index,ind_OHMENTHEN6ONEO2);
if (ind_C511O2>0) RO2 = RO2 + var(index,ind_C511O2);
if (ind_C106O2>0) RO2 = RO2 + var(index,ind_C106O2);
if (ind_CO235C6CO3>0) RO2 = RO2 + var(index,ind_CO235C6CO3);
if (ind_CHOC3COCO3>0) RO2 = RO2 + var(index,ind_CHOC3COCO3);
if (ind_CO235C6O2>0) RO2 = RO2 + var(index,ind_CO235C6O2);
if (ind_C716O2>0) RO2 = RO2 + var(index,ind_C716O2);
if (ind_C614O2>0) RO2 = RO2 + var(index,ind_C614O2);
if (ind_HCOCH2CO3>0) RO2 = RO2 + var(index,ind_HCOCH2CO3);
if (ind_BIACETO2>0) RO2 = RO2 + var(index,ind_BIACETO2);
if (ind_CO23C4CO3>0) RO2 = RO2 + var(index,ind_CO23C4CO3);
if (ind_C109O2>0) RO2 = RO2 + var(index,ind_C109O2);
if (ind_C811CO3>0) RO2 = RO2 + var(index,ind_C811CO3);
if (ind_C89O2>0) RO2 = RO2 + var(index,ind_C89O2);
if (ind_C812O2>0) RO2 = RO2 + var(index,ind_C812O2);
if (ind_C813O2>0) RO2 = RO2 + var(index,ind_C813O2);
if (ind_C721CO3>0) RO2 = RO2 + var(index,ind_C721CO3);
if (ind_C721O2>0) RO2 = RO2 + var(index,ind_C721O2);
if (ind_C722O2>0) RO2 = RO2 + var(index,ind_C722O2);
if (ind_C44O2>0) RO2 = RO2 + var(index,ind_C44O2);
if (ind_C512O2>0) RO2 = RO2 + var(index,ind_C512O2);
if (ind_C513O2>0) RO2 = RO2 + var(index,ind_C513O2);
if (ind_CHOC3COO2>0) RO2 = RO2 + var(index,ind_CHOC3COO2);
if (ind_C312COCO3>0) RO2 = RO2 + var(index,ind_C312COCO3);
if (ind_HOC2H4CO3>0) RO2 = RO2 + var(index,ind_HOC2H4CO3);
if (ind_LNAPINABO2>0) RO2 = RO2 + var(index,ind_LNAPINABO2);
if (ind_C810O2>0) RO2 = RO2 + var(index,ind_C810O2);
if (ind_C514O2>0) RO2 = RO2 + var(index,ind_C514O2);
if (ind_CHOCOCH2O2>0) RO2 = RO2 + var(index,ind_CHOCOCH2O2);
if (ind_ROO6R1O2>0) RO2 = RO2 + var(index,ind_ROO6R1O2);
if (ind_ROO6R3O2>0) RO2 = RO2 + var(index,ind_ROO6R3O2);
if (ind_RO6R1O2>0) RO2 = RO2 + var(index,ind_RO6R1O2);
if (ind_RO6R3O2>0) RO2 = RO2 + var(index,ind_RO6R3O2);
if (ind_BPINAO2>0) RO2 = RO2 + var(index,ind_BPINAO2);
if (ind_C8BCO2>0) RO2 = RO2 + var(index,ind_C8BCO2);
if (ind_NOPINDO2>0) RO2 = RO2 + var(index,ind_NOPINDO2);
if (ind_LNBPINABO2>0) RO2 = RO2 + var(index,ind_LNBPINABO2);
if (ind_BZBIPERO2>0) RO2 = RO2 + var(index,ind_BZBIPERO2);
if (ind_C6H5CH2O2>0) RO2 = RO2 + var(index,ind_C6H5CH2O2);
if (ind_TLBIPERO2>0) RO2 = RO2 + var(index,ind_TLBIPERO2);
if (ind_BZEMUCCO3>0) RO2 = RO2 + var(index,ind_BZEMUCCO3);
if (ind_BZEMUCO2>0) RO2 = RO2 + var(index,ind_BZEMUCO2);
if (ind_C5DIALO2>0) RO2 = RO2 + var(index,ind_C5DIALO2);
if (ind_NPHENO2>0) RO2 = RO2 + var(index,ind_NPHENO2);
if (ind_PHENO2>0) RO2 = RO2 + var(index,ind_PHENO2);
if (ind_CRESO2>0) RO2 = RO2 + var(index,ind_CRESO2);
if (ind_NCRESO2>0) RO2 = RO2 + var(index,ind_NCRESO2);
if (ind_TLEMUCCO3>0) RO2 = RO2 + var(index,ind_TLEMUCCO3);
if (ind_TLEMUCO2>0) RO2 = RO2 + var(index,ind_TLEMUCO2);
if (ind_C615CO2O2>0) RO2 = RO2 + var(index,ind_C615CO2O2);
if (ind_MALDIALCO3>0) RO2 = RO2 + var(index,ind_MALDIALCO3);
if (ind_EPXDLCO3>0) RO2 = RO2 + var(index,ind_EPXDLCO3);
if (ind_C3DIALO2>0) RO2 = RO2 + var(index,ind_C3DIALO2);
if (ind_MALDIALO2>0) RO2 = RO2 + var(index,ind_MALDIALO2);
if (ind_C6H5O2>0) RO2 = RO2 + var(index,ind_C6H5O2);
if (ind_C6H5CO3>0) RO2 = RO2 + var(index,ind_C6H5CO3);
if (ind_OXYL1O2>0) RO2 = RO2 + var(index,ind_OXYL1O2);
if (ind_C5CO14O2>0) RO2 = RO2 + var(index,ind_C5CO14O2);
if (ind_NBZFUO2>0) RO2 = RO2 + var(index,ind_NBZFUO2);
if (ind_BZFUO2>0) RO2 = RO2 + var(index,ind_BZFUO2);
if (ind_HCOCOHCO3>0) RO2 = RO2 + var(index,ind_HCOCOHCO3);
if (ind_CATEC1O2>0) RO2 = RO2 + var(index,ind_CATEC1O2);
if (ind_MCATEC1O2>0) RO2 = RO2 + var(index,ind_MCATEC1O2);
if (ind_C5DICARBO2>0) RO2 = RO2 + var(index,ind_C5DICARBO2);
if (ind_NTLFUO2>0) RO2 = RO2 + var(index,ind_NTLFUO2);
if (ind_TLFUO2>0) RO2 = RO2 + var(index,ind_TLFUO2);
if (ind_NPHEN1O2>0) RO2 = RO2 + var(index,ind_NPHEN1O2);
if (ind_NNCATECO2>0) RO2 = RO2 + var(index,ind_NNCATECO2);
if (ind_NCATECO2>0) RO2 = RO2 + var(index,ind_NCATECO2);
if (ind_NBZQO2>0) RO2 = RO2 + var(index,ind_NBZQO2);
if (ind_PBZQO2>0) RO2 = RO2 + var(index,ind_PBZQO2);
if (ind_NPTLQO2>0) RO2 = RO2 + var(index,ind_NPTLQO2);
if (ind_PTLQO2>0) RO2 = RO2 + var(index,ind_PTLQO2);
if (ind_NCRES1O2>0) RO2 = RO2 + var(index,ind_NCRES1O2);
if (ind_MNNCATECO2>0) RO2 = RO2 + var(index,ind_MNNCATECO2);
if (ind_MNCATECO2>0) RO2 = RO2 + var(index,ind_MNCATECO2);
if (ind_MECOACETO2>0) RO2 = RO2 + var(index,ind_MECOACETO2);
if (ind_CO2H3CO3>0) RO2 = RO2 + var(index,ind_CO2H3CO3);
if (ind_MALANHYO2>0) RO2 = RO2 + var(index,ind_MALANHYO2);
if (ind_NDNPHENO2>0) RO2 = RO2 + var(index,ind_NDNPHENO2);
if (ind_DNPHENO2>0) RO2 = RO2 + var(index,ind_DNPHENO2);
if (ind_NDNCRESO2>0) RO2 = RO2 + var(index,ind_NDNCRESO2);
if (ind_DNCRESO2>0) RO2 = RO2 + var(index,ind_DNCRESO2);
if (ind_C5CO2OHCO3>0) RO2 = RO2 + var(index,ind_C5CO2OHCO3);
if (ind_C6CO2OHCO3>0) RO2 = RO2 + var(index,ind_C6CO2OHCO3);
if (ind_MMALANHYO2>0) RO2 = RO2 + var(index,ind_MMALANHYO2);
if (ind_ACCOMECO3>0) RO2 = RO2 + var(index,ind_ACCOMECO3);
if (ind_C4CO2DBCO3>0) RO2 = RO2 + var(index,ind_C4CO2DBCO3);
if (ind_C5CO2DBCO3>0) RO2 = RO2 + var(index,ind_C5CO2DBCO3);
if (ind_NSTYRENO2>0) RO2 = RO2 + var(index,ind_NSTYRENO2);
if (ind_STYRENO2>0) RO2 = RO2 + var(index,ind_STYRENO2);
k1_RO2RCO3 = RO2 *k_RO2RCO3;
k1_RO2pRO2 = RO2 *k_RO2pRO2;
k1_RO2sRO2 = RO2 *k_RO2sRO2;
k1_RO2tRO2 = RO2 *k_RO2tRO2;
k1_RO2pORO2 = RO2 *k_RO2pORO2;
k1_RO2sORO2 = RO2 *k_RO2sORO2;
k1_RO2tORO2 = RO2 *k_RO2tORO2;
k1_RO2LISOPACO2 = RO2 *k_RO2LISOPACO2;
k1_RO2ISOPBO2 = RO2 *k_RO2ISOPBO2;
k1_RO2ISOPDO2 = RO2 *k_RO2ISOPDO2;
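        /* Reaction rate coefficients: rconst(index,j) uses the 0-based reaction
           number j. Temperature/pressure-dependent expressions are assigned here,
           photolysis rates come from jx(...), heterogeneous rates from
           khet_st/khet_tr, and reactions with plain constant coefficients are
           assigned in the block at the end of this routine. */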
rconst(index,0) = (3.3E-11 *exp(55. / temp_loc));
rconst(index,1) = (6.0E-34 *( pow(temp_loc / 300., - 2.4) )*cair_loc);
rconst(index,3) = (8.0E-12 *exp(- 2060. / temp_loc));
rconst(index,4) = (k_3rd(temp_loc , cair_loc , 4.4E-32 , 1.3 , 7.5E-11 , - 0.2 , 0.6));
rconst(index,5) = (1.4E-10 *exp(- 470. / temp_loc));
rconst(index,7) = (1.8E-11 *exp(180. / temp_loc));
rconst(index,8) = (1.7E-12 *exp(- 940. / temp_loc));
rconst(index,9) = (2.8E-12 *exp(- 1800. / temp_loc));
rconst(index,10) = (3.E-11 *exp(200. / temp_loc));
rconst(index,11) = (1.E-14 *exp(- 490. / temp_loc));
rconst(index,15) = (4.8E-11 *exp(250. / temp_loc));
rconst(index,16) = (k_HO2_HO2);
rconst(index,17) = (1.63E-10 *exp(60. / temp_loc));
rconst(index,19) = (1.5E-11 *exp(- 3600. / temp_loc));
rconst(index,20) = (2.15E-11 *exp(110. / temp_loc));
rconst(index,21) = (7.259E-11 *exp(20. / temp_loc));
rconst(index,22) = (4.641E-11 *exp(20. / temp_loc));
rconst(index,23) = (3.0E-12 *exp(- 1500. / temp_loc));
rconst(index,24) = (2.1E-11 *exp(100. / temp_loc));
rconst(index,25) = (5.1E-12 *exp(210. / temp_loc));
rconst(index,26) = (1.2E-13 *exp(- 2450. / temp_loc));
rconst(index,27) = (5.8E-12 *exp(220. / temp_loc));
rconst(index,28) = (1.5E-11 *exp(170. / temp_loc));
rconst(index,29) = (k_NO3_NO2);
rconst(index,30) = (k_NO3_NO2 / (5.8E-27 *exp(10840. / temp_loc)));
rconst(index,31) = (k_3rd(temp_loc , cair_loc , 7.0E-31 , 2.6 , 3.6E-11 , 0.1 , 0.6));
rconst(index,32) = (3.3E-12 *exp(270. / temp_loc));
rconst(index,33) = (k_3rd(temp_loc , cair_loc , 1.8E-30 , 3.0 , 2.8E-11 , 0. , 0.6));
rconst(index,34) = (k_NO2_HO2);
rconst(index,36) = (1.8E-11 *exp(- 390. / temp_loc));
rconst(index,37) = (k_HNO3_OH);
rconst(index,38) = (k_NO2_HO2 / (2.1E-27 *exp(10900. / temp_loc)));
rconst(index,39) = (1.3E-12 *exp(380. / temp_loc));
rconst(index,40) = (1.7E-12 *exp(- 710. / temp_loc));
rconst(index,41) = (4.3E-12 *exp(- 930. / temp_loc));
rconst(index,42) = (4.8E-07 *exp(- 628. / temp_loc) * pow(temp_loc, - 1.32) );
rconst(index,43) = (9.4E-09 *exp(- 356. / temp_loc) * pow(temp_loc, - 1.12) );
rconst(index,44) = (1.92E-12 *( pow(temp_loc / 298., - 1.5) ));
rconst(index,45) = (1.41E-11 *( pow(temp_loc / 298., - 1.5) ));
rconst(index,46) = (1.2E-11 *( pow(temp_loc / 298., - 2.0) ));
rconst(index,47) = (0.8E-11 *( pow(temp_loc / 298., - 2.0) ));
rconst(index,50) = (8.0E-11 *exp(- 500. / temp_loc));
rconst(index,51) = (1.66E-12 *exp(- 1500. / temp_loc));
rconst(index,52) = (1.0E-12 *exp(- 1000. / temp_loc));
rconst(index,54) = (4.13E-11 *exp(- 2138. / temp_loc));
rconst(index,55) = (3.65E-14 *exp(- 4600. / temp_loc));
rconst(index,57) = (1.85E-20 *exp(2.82 *log(temp_loc)- 987. / temp_loc));
rconst(index,58) = (2.9E-12 *exp(- 345. / temp_loc));
rconst(index,59) = (4.1E-13 *exp(750. / temp_loc));
rconst(index,60) = (2.8E-12 *exp(300. / temp_loc));
rconst(index,62) = (9.5E-14 *exp(390. / temp_loc) / (1.+ 1. / 26.2 *exp(1130. / temp_loc)));
rconst(index,63) = (9.5E-14 *exp(390. / temp_loc) / (1.+ 26.2 *exp(- 1130. / temp_loc)));
rconst(index,64) = (k_CH3OOH_OH);
rconst(index,65) = (9.52E-18 *exp(2.03 *log(temp_loc)+ 636. / temp_loc));
rconst(index,66) = (3.4E-13 *exp(- 1900. / temp_loc));
rconst(index,67) = ((1.57E-13+ cair_loc *3.54E-33));
rconst(index,69) = (1.49E-17 *temp_loc *temp_loc *exp(- 499. / temp_loc));
rconst(index,70) = (1.2E-14 *exp(- 2630. / temp_loc));
rconst(index,71) = (k_3rd(temp_loc , cair_loc , 1.0E-28 , 4.5 , 7.5E-12 , 0.85 , 0.6));
rconst(index,72) = (7.5E-13 *exp(700. / temp_loc));
rconst(index,73) = (2.6E-12 *exp(365. / temp_loc));
rconst(index,75) = (1.6E-13 *exp(195. / temp_loc));
rconst(index,76) = (k_CH3OOH_OH);
rconst(index,77) = (4.4E-12 *exp(365. / temp_loc));
rconst(index,78) = (1.4E-12 *exp(- 1900. / temp_loc));
rconst(index,79) = (4.2E-14 *exp(855. / temp_loc));
rconst(index,80) = (4.3E-13 *exp(1040. / temp_loc) / (1.+ 1. / 37. *exp(660. / temp_loc)));
rconst(index,81) = (4.3E-13 *exp(1040. / temp_loc) / (1.+ 37. *exp(- 660. / temp_loc)));
rconst(index,82) = (8.1E-12 *exp(270. / temp_loc));
rconst(index,83) = (k_CH3CO3_NO2);
rconst(index,85) = (0.9 *2.0E-12 *exp(500. / temp_loc));
rconst(index,86) = (0.1 *2.0E-12 *exp(500. / temp_loc));
rconst(index,87) = (4.9E-12 *exp(211. / temp_loc));
rconst(index,88) = (2.5E-12 *exp(500. / temp_loc));
rconst(index,89) = (0.6 *k_CH3OOH_OH);
rconst(index,90) = (5.6E-12 *exp(270. / temp_loc));
rconst(index,91) = (9.50E-13 *exp(- 650. / temp_loc));
rconst(index,92) = (k_PAN_M);
rconst(index,93) = (1.65E-17 *temp_loc *temp_loc *exp(- 87. / temp_loc));
rconst(index,94) = (6.5E-15 *exp(- 1900. / temp_loc));
rconst(index,95) = (k_3rd(temp_loc , cair_loc , 8.E-27 , 3.5 , 3.E-11 , 0. , 0.5));
rconst(index,96) = (4.6E-13 *exp(- 1155. / temp_loc));
rconst(index,97) = (k_PrO2_HO2);
rconst(index,98) = (k_PrO2_NO);
rconst(index,99) = (k_PrO2_CH3O2);
rconst(index,100) = (k_CH3OOH_OH);
rconst(index,101) = (6.5E-13 *exp(650. / temp_loc));
rconst(index,102) = (4.2E-12 *exp(180. / temp_loc));
rconst(index,103) = (3.8E-12 *exp(200. / temp_loc));
rconst(index,104) = (1.33E-13+ 3.82E-11 *exp(- 2000. / temp_loc));
rconst(index,105) = (8.6E-13 *exp(700. / temp_loc));
rconst(index,106) = (2.9E-12 *exp(300. / temp_loc));
rconst(index,107) = (7.5E-13 *exp(500. / temp_loc));
rconst(index,108) = (k_CH3OOH_OH);
rconst(index,109) = (2.15E-12 *exp(305. / temp_loc));
rconst(index,110) = (8.4E-13 *exp(830. / temp_loc));
rconst(index,111) = (6.2E-13 *exp(- 230. / temp_loc));
rconst(index,112) = (1.81E-17 *temp_loc *temp_loc *exp(114. / temp_loc));
rconst(index,113) = (k_PrO2_CH3O2);
rconst(index,114) = (k_PrO2_HO2);
rconst(index,115) = (k_PrO2_NO);
rconst(index,116) = (k_CH3OOH_OH);
rconst(index,117) = (.5 *(1.36E-15 *exp(- 2112. / temp_loc)+ 7.51E-16 *exp(- 1521. / temp_loc)));
rconst(index,118) = (.5 *(4.1E-12 *exp(452. / temp_loc)+ 1.9E-11 *exp(175. / temp_loc)));
rconst(index,119) = (1.82E-13 *exp(1300. / temp_loc));
rconst(index,120) = (2.54E-12 *exp(360. / temp_loc));
rconst(index,121) = (.25 *k_3rd(temp_loc , cair_loc , 9.7E-29 , 5.6 , 9.3E-12 , 1.5 , 0.6));
rconst(index,125) = (1.3E-12 *exp(- 25. / temp_loc));
rconst(index,126) = (k_PrO2_HO2);
rconst(index,127) = (k_PrO2_NO);
rconst(index,128) = (k_CH3OOH_OH);
rconst(index,131) = (k_PAN_M);
rconst(index,132) = (7.86E-15 *exp(- 1913. / temp_loc));
rconst(index,133) = (2.54E-11 *exp(410. / temp_loc));
rconst(index,134) = (3.03E-12 *exp(- 446. / temp_loc));
rconst(index,135) = (2.22E-13 *exp(1300. / temp_loc));
rconst(index,136) = (2.54E-12 *exp(360. / temp_loc));
rconst(index,141) = (2.8E-11 *exp(- 250. / temp_loc));
rconst(index,142) = (2.5E-11 *exp(110. / temp_loc));
rconst(index,143) = (1.0E-12 *exp(- 1590. / temp_loc));
rconst(index,144) = (3.0E-11 *exp(- 2450. / temp_loc));
rconst(index,145) = (3.5E-13 *exp(- 1370. / temp_loc));
rconst(index,146) = (k_ClO_ClO);
rconst(index,147) = (k_ClO_ClO / (2.16E-27 *exp(8537. / temp_loc)));
rconst(index,148) = (3.9E-11 *exp(- 2310. / temp_loc));
rconst(index,149) = (4.4E-11- 7.5E-11 *exp(- 620. / temp_loc));
rconst(index,150) = (7.5E-11 *exp(- 620. / temp_loc));
rconst(index,151) = (1.1E-11 *exp(- 980. / temp_loc));
rconst(index,152) = (7.3E-12 *exp(300. / temp_loc));
rconst(index,153) = (2.2E-12 *exp(340. / temp_loc));
rconst(index,154) = (1.7E-12 *exp(- 230. / temp_loc));
rconst(index,155) = (3.0E-12 *exp(- 500. / temp_loc));
rconst(index,156) = (6.2E-12 *exp(295. / temp_loc));
rconst(index,157) = (k_3rd_iupac(temp_loc , cair_loc , 1.6E-31 , 3.4 , 7.E-11 , 0. , 0.4));
rconst(index,158) = (6.918E-7 *exp(- 10909. / temp_loc) *cair_loc);
rconst(index,159) = (4.5E-12 *exp(- 900. / temp_loc));
rconst(index,160) = (6.2E-12 *exp(145. / temp_loc));
rconst(index,161) = (6.6E-12 *exp(- 1240. / temp_loc));
rconst(index,162) = (8.1E-11 *exp(- 34. / temp_loc));
rconst(index,164) = (1.8E-12 *exp(- 600. / temp_loc));
rconst(index,167) = (1.96E-12 *exp(- 1200. / temp_loc));
rconst(index,169) = (1.64E-12 *exp(- 1520. / temp_loc));
rconst(index,170) = (k_3rd_iupac(temp_loc , cair_loc , 1.85E-29 , 3.3 , 6.0E-10 , 0.0 , 0.4));
rconst(index,174) = (1.7E-11 *exp(- 800. / temp_loc));
rconst(index,175) = (1.9E-11 *exp(230. / temp_loc));
rconst(index,177) = (2.9E-14 *exp(840. / temp_loc));
rconst(index,178) = (7.7E-12 *exp(- 450. / temp_loc));
rconst(index,179) = (4.5E-12 *exp(500. / temp_loc));
rconst(index,180) = (6.7E-12 *exp(155. / temp_loc));
rconst(index,181) = (1.2E-10 *exp(- 430. / temp_loc));
rconst(index,182) = (2.0E-11 *exp(240. / temp_loc));
rconst(index,184) = (8.7E-12 *exp(260. / temp_loc));
rconst(index,185) = (k_BrO_NO2);
rconst(index,186) = (k_BrO_NO2 / (5.44E-9 *exp(14192. / temp_loc) *1.E6 *R_gas *temp_loc / (atm2Pa *N_A)));
rconst(index,187) = (7.7E-12 *exp(- 580. / temp_loc));
rconst(index,188) = (2.6E-12 *exp(- 1600. / temp_loc));
rconst(index,189) = (G7402a_yield *5.7E-12);
rconst(index,190) = ((1.- G7402a_yield) *5.7E-12);
rconst(index,191) = (1.42E-12 *exp(- 1150. / temp_loc));
rconst(index,192) = (2.8E-13 *exp(224. / temp_loc) / (1.+ 1.13E24 *exp(- 3200. / temp_loc) / var(index,ind_O2)));
rconst(index,193) = (1.8e-11 *exp(- 460. / temp_loc));
rconst(index,194) = (9.0E-13 *exp(- 360. / temp_loc));
rconst(index,195) = (2.0E-12 *exp(- 840. / temp_loc));
rconst(index,198) = (2.3E-10 *exp(135. / temp_loc));
rconst(index,199) = (1.6E-12 *exp(430. / temp_loc));
rconst(index,200) = (2.9E-12 *exp(220. / temp_loc));
rconst(index,201) = (5.8E-13 *exp(170. / temp_loc));
rconst(index,203) = (2.0E-12 *exp(- 840. / temp_loc));
rconst(index,204) = (2.0E-12 *exp(- 840. / temp_loc));
rconst(index,205) = (2.1E-12 *exp(- 880. / temp_loc));
rconst(index,206) = (k_3rd(temp_loc , cair_loc , 3.3E-31 , 4.3 , 1.6E-12 , 0. , 0.6));
rconst(index,207) = (1.13E-11 *exp(- 253. / temp_loc));
rconst(index,208) = (k_DMS_OH);
rconst(index,209) = (1.9E-13 *exp(520. / temp_loc));
rconst(index,211) = (1.8E13 *exp(- 8661. / temp_loc));
rconst(index,215) = (9.E-11 *exp(- 2386. / temp_loc));
rconst(index,217) = (jx(index,ip_O2));
rconst(index,218) = (jx(index,ip_O1D));
rconst(index,219) = (jx(index,ip_O3P));
rconst(index,220) = (jx(index,ip_H2O));
rconst(index,221) = (jx(index,ip_H2O2));
rconst(index,222) = (jx(index,ip_N2O));
rconst(index,223) = (jx(index,ip_NO2));
rconst(index,224) = (jx(index,ip_NO));
rconst(index,225) = (jx(index,ip_NO2O));
rconst(index,226) = (jx(index,ip_NOO2));
rconst(index,227) = (jx(index,ip_N2O5));
rconst(index,228) = (jx(index,ip_HONO));
rconst(index,229) = (jx(index,ip_HNO3));
rconst(index,230) = (jx(index,ip_HNO4));
rconst(index,231) = (jx(index,ip_CH3OOH));
rconst(index,232) = (jx(index,ip_COH2));
rconst(index,233) = (jx(index,ip_CHOH));
rconst(index,234) = (jx(index,ip_CO2));
rconst(index,235) = (jx(index,ip_CH4));
rconst(index,236) = (jx(index,ip_CH3OOH));
rconst(index,237) = (jx(index,ip_CH3CHO));
rconst(index,238) = (jx(index,ip_CH3CO3H));
rconst(index,239) = (0.19 *jx(index,ip_CHOH));
rconst(index,240) = (jx(index,ip_PAN));
rconst(index,241) = (jx(index,ip_CH3OOH));
rconst(index,242) = (jx(index,ip_CH3COCH3));
rconst(index,243) = (0.074 *jx(index,ip_CHOH));
rconst(index,244) = (jx(index,ip_MGLYOX));
rconst(index,245) = (jx(index,ip_CH3OOH));
rconst(index,246) = (3.7 *jx(index,ip_PAN));
rconst(index,247) = (jx(index,ip_CH3OOH));
rconst(index,248) = (0.019 *jx(index,ip_COH2)+ .015 *jx(index,ip_MGLYOX));
rconst(index,249) = (jx(index,ip_CH3OOH));
rconst(index,250) = (0.42 *jx(index,ip_CHOH));
rconst(index,251) = (jx(index,ip_CH3OOH));
rconst(index,252) = (2.15 *jx(index,ip_MGLYOX));
rconst(index,253) = (3.7 *jx(index,ip_PAN));
rconst(index,254) = (jx(index,ip_PAN));
rconst(index,255) = (jx(index,ip_CH3OOH));
rconst(index,256) = (3.7 *jx(index,ip_PAN));
rconst(index,257) = (jx(index,ip_Cl2));
rconst(index,258) = (jx(index,ip_Cl2O2));
rconst(index,259) = (jx(index,ip_OClO));
rconst(index,260) = (jx(index,ip_HCl));
rconst(index,261) = (jx(index,ip_HOCl));
rconst(index,262) = (jx(index,ip_ClNO2));
rconst(index,263) = (jx(index,ip_ClNO3));
rconst(index,264) = (jx(index,ip_ClONO2));
rconst(index,265) = (jx(index,ip_CH3Cl));
rconst(index,266) = (jx(index,ip_CCl4));
rconst(index,267) = (jx(index,ip_CH3CCl3));
rconst(index,268) = (jx(index,ip_CFCl3));
rconst(index,269) = (jx(index,ip_CF2Cl2));
rconst(index,270) = (jx(index,ip_Br2));
rconst(index,271) = (jx(index,ip_BrO));
rconst(index,272) = (jx(index,ip_HOBr));
rconst(index,273) = (jx(index,ip_BrNO2));
rconst(index,274) = (jx(index,ip_BrNO3));
rconst(index,275) = (jx(index,ip_CH3Br));
rconst(index,276) = (jx(index,ip_CH2Br2));
rconst(index,277) = (jx(index,ip_CHBr3));
rconst(index,278) = (jx(index,ip_CF3Br));
rconst(index,279) = (jx(index,ip_BrCl));
rconst(index,280) = (jx(index,ip_CF2ClBr));
rconst(index,281) = (jx(index,ip_CH2ClBr));
rconst(index,282) = (jx(index,ip_CHCl2Br));
rconst(index,283) = (jx(index,ip_CHClBr2));
rconst(index,284) = (jx(index,ip_CH3I));
rconst(index,285) = (khet_st(index,ihs_N2O5_H2O));
rconst(index,286) = (khet_tr(index,iht_N2O5));
rconst(index,287) = (khet_st(index,ihs_HOCl_HCl));
rconst(index,288) = (khet_st(index,ihs_ClNO3_HCl));
rconst(index,289) = (khet_st(index,ihs_ClNO3_H2O));
rconst(index,290) = (khet_st(index,ihs_N2O5_HCl));
rconst(index,291) = (khet_st(index,ihs_HOBr_HBr));
rconst(index,292) = (khet_st(index,ihs_BrNO3_H2O));
rconst(index,293) = (khet_st(index,ihs_ClNO3_HBr));
rconst(index,294) = (khet_st(index,ihs_BrNO3_HCl));
rconst(index,295) = (khet_st(index,ihs_HOCl_HBr));
rconst(index,296) = (khet_st(index,ihs_HOBr_HCl));
rconst(index,297) = (k_O3s);
rconst(index,299) = (jx(index,ip_CFCl3));
rconst(index,301) = (jx(index,ip_CF2Cl2));
rconst(index,302) = (7.25E-11 *exp(20. / temp_loc));
rconst(index,303) = (4.63E-11 *exp(20. / temp_loc));
rconst(index,304) = (jx(index,ip_N2O));
rconst(index,306) = (1.64E-12 *exp(- 1520. / temp_loc));
rconst(index,307) = (jx(index,ip_CH3CCl3));
rconst(index,308) = (jx(index,ip_CF2ClBr));
rconst(index,309) = (jx(index,ip_CF3Br));
rconst(index,(3)-1) = 1.2e-10;
rconst(index,(7)-1) = 1.2e-10;
rconst(index,(13)-1) = 7.2e-11;
rconst(index,(14)-1) = 6.9e-12;
rconst(index,(15)-1) = 1.6e-12;
rconst(index,(19)-1) = 1.8e-12;
rconst(index,(36)-1) = 3.5e-12;
rconst(index,(49)-1) = 1.2e-14;
rconst(index,(50)-1) = 1300;
rconst(index,(54)-1) = 1.66e-12;
rconst(index,(57)-1) = 1.75e-10;
rconst(index,(62)-1) = 1.3e-12;
rconst(index,(69)-1) = 4e-13;
rconst(index,(75)-1) = 2.3e-12;
rconst(index,(85)-1) = 4e-12;
rconst(index,(123)-1) = 2e-12;
rconst(index,(124)-1) = 2e-12;
rconst(index,(125)-1) = 3e-11;
rconst(index,(130)-1) = 1.7e-12;
rconst(index,(131)-1) = 3.2e-11;
rconst(index,(138)-1) = 2e-12;
rconst(index,(139)-1) = 2e-12;
rconst(index,(140)-1) = 1e-10;
rconst(index,(141)-1) = 1.3e-11;
rconst(index,(164)-1) = 5.9e-11;
rconst(index,(166)-1) = 3.3e-10;
rconst(index,(167)-1) = 1.65e-10;
rconst(index,(169)-1) = 3.25e-10;
rconst(index,(172)-1) = 8e-11;
rconst(index,(173)-1) = 1.4e-10;
rconst(index,(174)-1) = 2.3e-10;
rconst(index,(177)-1) = 2.7e-12;
rconst(index,(184)-1) = 4.9e-11;
rconst(index,(197)-1) = 3.32e-15;
rconst(index,(198)-1) = 1.1e-15;
rconst(index,(203)-1) = 1.45e-11;
rconst(index,(211)-1) = 1e-10;
rconst(index,(213)-1) = 3e-13;
rconst(index,(214)-1) = 5e-11;
rconst(index,(215)-1) = 3.3e-10;
rconst(index,(217)-1) = 4.4e-13;
rconst(index,(299)-1) = 2.3e-10;
rconst(index,(301)-1) = 1.4e-10;
rconst(index,(306)-1) = 3e-10;
}
}
__global__
void Rosenbrock(double * __restrict__ conc, const double Tstart, const double Tend, double * __restrict__ rstatus, int * __restrict__ istatus,
// values calculated from icntrl and rcntrl at host
const int autonomous, const int vectorTol, const int UplimTol, const int method, const int Max_no_steps,
const double Hmin, const double Hmax, const double Hstart, const double FacMin, const double FacMax, const double FacRej, const double FacSafe, const double roundoff,
// cuda global mem buffers
const double * __restrict__ absTol, const double * __restrict__ relTol,
// for update_rconst
const double * __restrict__ khet_st, const double * __restrict__ khet_tr,
const double * __restrict__ jx,
// global input
const double * __restrict__ temp_gpu,
const double * __restrict__ press_gpu,
const double * __restrict__ cair_gpu,
// extra
const int VL_GLO)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
/* Temporary arrays allocated in stack */
/*
* Optimization NOTE: runs faster on Tesla/Fermi
* when temporaries are allocated on the stack instead of the heap.
* In theory someone can aggregate accesses together,
* however due to algorithm, threads access
* different parts of memory, making it harder to
* optimize accesses.
*
*/
double varNew_stack[NVAR];
double var_stack[NSPEC];
double varErr_stack[NVAR];
double fix_stack[NFIX];
double Fcn0_stack[NVAR];
double jac0_stack[LU_NONZERO];
double dFdT_stack[NVAR];
double Ghimj_stack[LU_NONZERO];
double K_stack[6*NVAR];
/* Allocated in Global mem */
double rconst_stack[NREACT];
/* Allocated in stack */
double *Ghimj = Ghimj_stack;
double *K = K_stack;
double *varNew = varNew_stack;
double *Fcn0 = Fcn0_stack;
double *dFdT = dFdT_stack;
double *jac0 = jac0_stack;
double *varErr = varErr_stack;
double *var = var_stack;
double *fix = fix_stack;
double *rconst = rconst_stack;
if (index < VL_GLO)
{
int Nfun,Njac,Nstp,Nacc,Nrej,Ndec,Nsol,Nsng;
double Texit, Hexit;
Nfun = 0;
Njac = 0;
Nstp = 0;
Nacc = 0;
Nrej = 0;
Ndec = 0;
Nsol = 0;
Nsng = 0;
/* FIXME: add check for method */
const double *ros_A = &ros[method-1].ros_A[0];
const double *ros_C = &ros[method-1].ros_C[0];
const double *ros_M = &ros[method-1].ros_M[0];
const double *ros_E = &ros[method-1].ros_E[0];
const double *ros_Alpha = &ros[method-1].ros_Alpha[0];
const double *ros_Gamma = &ros[method-1].ros_Gamma[0];
const int *ros_NewF = &ros[method-1].ros_NewF[0];
const int ros_S = ros[method-1].ros_S;
const double ros_ELO = ros[method-1].ros_ELO;
/* Copy data from global memory to temporary array */
/*
* Optimization note: if we ever have enough constant
* memory, we could use it for storing the data.
* In current architectures if we use constant memory
* only a few threads will be able to run on the fly.
*
*/
for (int i=0; i<NSPEC; i++)
var(index,i) = conc(index,i);
for (int i=0; i<NFIX; i++)
fix(index,i) = conc(index,NVAR+i);
update_rconst(var, khet_st, khet_tr, jx, rconst, temp_gpu, press_gpu, cair_gpu, VL_GLO);
ros_Integrator(var, fix, Tstart, Tend, Texit,
// Rosenbrock method coefficients
ros_S, ros_M, ros_E, ros_A, ros_C,
ros_Alpha, ros_Gamma, ros_ELO, ros_NewF,
// Integration parameters
autonomous, vectorTol, Max_no_steps,
roundoff, Hmin, Hmax, Hstart, Hexit,
FacMin, FacMax, FacRej, FacSafe,
// Status parameters
Nfun, Njac, Nstp, Nacc, Nrej, Ndec, Nsol, Nsng,
// cuda global mem buffers
rconst, absTol, relTol, varNew, Fcn0,
K, dFdT, jac0, Ghimj, varErr,
// For update rconst
khet_st, khet_tr, jx,
VL_GLO
);
for (int i=0; i<NVAR; i++)
conc(index,i) = var(index,i);
/* Statistics */
istatus(index,ifun) = Nfun;
istatus(index,ijac) = Njac;
istatus(index,istp) = Nstp;
istatus(index,iacc) = Nacc;
istatus(index,irej) = Nrej;
istatus(index,idec) = Ndec;
istatus(index,isol) = Nsol;
istatus(index,isng) = Nsng;
// Last T and H
rstatus(index,itexit) = Texit;
rstatus(index,ihexit) = Hexit;
}
}
__device__ static int ros_Integrator_ros3(double * __restrict__ var, const double * __restrict__ fix, const double Tstart, const double Tend, double &T,
// Integration parameters
const int autonomous, const int vectorTol, const int Max_no_steps,
const double roundoff, const double Hmin, const double Hmax, const double Hstart, double &Hexit,
const double FacMin, const double FacMax, const double FacRej, const double FacSafe,
// Status parameters
int &Nfun, int &Njac, int &Nstp, int &Nacc, int &Nrej, int &Ndec, int &Nsol, int &Nsng,
// cuda global mem buffers
const double * __restrict__ rconst, const double * __restrict__ absTol, const double * __restrict__ relTol, double * __restrict__ varNew, double * __restrict__ Fcn0,
double * __restrict__ K, double * __restrict__ dFdT, double * __restrict__ jac0, double * __restrict__ Ghimj, double * __restrict__ varErr,
// for update_rconst
const double * __restrict__ khet_st, const double * __restrict__ khet_tr,
const double * __restrict__ jx,
// VL_GLO
const int VL_GLO)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
double H, Hnew, HC, HC0,HC1, HG, Fac; // Tau - not used
double Err; //*varErr;
int direction;
int rejectLastH, rejectMoreH;
const double DELTAMIN = 1.0E-5;
const int ros_S = 3;
// ~~~> Initial preparations
T = Tstart;
Hexit = 0.0;
H = fmin(Hstart,Hmax);
if (fabs(H) <= 10.0*roundoff)
H = DELTAMIN;
if (Tend >= Tstart)
{
direction = + 1;
}
else
{
direction = - 1;
}
rejectLastH=0;
rejectMoreH=0;
// TimeLoop:
while(((direction > 0) && ((T- Tend)+ roundoff <= ZERO)) || ((direction < 0) && ((Tend-T)+ roundoff <= ZERO)))
{
if (Nstp > Max_no_steps) // Too many steps
return -6;
// Step size too small
if (H <= roundoff){ // Step size too small
//if (((T+ 0.1*H) == T) || (H <= roundoff)) {
return -7;
}
// ~~~> Limit H if necessary to avoid going beyond Tend
Hexit = H;
H = fmin(H,fabs(Tend-T));
// ~~~> Compute the function at current time
Fun(var, fix, rconst, Fcn0, Nfun, VL_GLO);
// ~~~> Compute the function derivative with respect to T
if (!autonomous)
ros_FunTimeDerivative(T, roundoff, var, fix, rconst, dFdT, Fcn0, Nfun, khet_st, khet_tr, jx, VL_GLO); /// VAR READ - fcn0 read
// ~~~> Compute the Jacobian at current time
Jac_sp(var, fix, rconst, jac0, Njac, VL_GLO); /// VAR READ
// ~~~> Repeat step calculation until current step accepted
// UntilAccepted:
while(1)
{
ros_PrepareMatrix(H, direction, 0.43586652150845899941601945119356E+00 , jac0, Ghimj, Nsng, Ndec, VL_GLO);
{ // istage=0
for (int i=0; i<NVAR; i++){
K(index,0,i) = Fcn0(index,i); // FCN0 Read
}
if ((!autonomous))
{
HG = direction*H*0.43586652150845899941601945119356E+00;
for (int i=0; i<NVAR; i++){
K(index,0,i) += dFdT(index,i)*HG;
}
}
ros_Solve(Ghimj, K, Nsol, 0, ros_S);
} // Stage
{ // istage = 1
for (int i=0; i<NVAR; i++){
varNew(index,i) = K(index,0,i) + var(index,i);
}
Fun(varNew, fix, rconst, varNew, Nfun,VL_GLO); // FCN <- varNew / not overlap
HC = -0.10156171083877702091975600115545E+01/(direction*H);
for (int i=0; i<NVAR; i++){
double tmp = K(index,0,i);
K(index,1,i) = tmp*HC + varNew(index,i);
}
if ((!autonomous))
{
HG = direction*H*0.24291996454816804366592249683314E+00;
for (int i=0; i<NVAR; i++){
K(index,1,i) += dFdT(index,i)*HG;
}
}
// R ,RW, RW, R, R
ros_Solve(Ghimj, K, Nsol, 1, ros_S);
} // Stage
{
int istage = 2;
HC0 = 0.40759956452537699824805835358067E+01/(direction*H);
HC1 = 0.92076794298330791242156818474003E+01/(direction*H);
for (int i=0; i<NVAR; i++){
K(index,2,i) = K(index,1,i)*HC1 + K(index,0,i)*HC0 + varNew(index,i);
}
if ((!autonomous) )
{
HG = direction*H*0.21851380027664058511513169485832E+01;
for (int i=0; i<NVAR; i++){
K(index,istage,i) += dFdT(index,i)*HG;
}
}
ros_Solve(Ghimj, K, Nsol, istage, ros_S);
} // Stage
// ~~~> Compute the new solution
for (int i=0; i<NVAR; i++){
varNew(index,i) = K(index,0,i) + K(index,1,i)*0.61697947043828245592553615689730E+01 + K(index,2,i)*(-0.42772256543218573326238373806514) + var(index,i) ;
varErr(index,i) = K(index,0,i)/2 + K(index,1,i)*(-0.29079558716805469821718236208017E+01) + K(index,2,i)*(0.22354069897811569627360909276199);
}
Err = ros_ErrorNorm(var, varNew, varErr, absTol, relTol, vectorTol);
// ~~~> New step size is bounded by FacMin <= Hnew/H <= FacMax
Fac = fmin(FacMax,fmax(FacMin,FacSafe/pow(Err,ONE/3.0)));
Hnew = H*Fac;
// ~~~> Check the error magnitude and adjust step size
Nstp = Nstp+ 1;
if((Err <= ONE) || (H <= Hmin)) // ~~~> Accept step
{
Nacc = Nacc + 1;
for (int j=0; j<NVAR ; j++)
var(index,j) = fmax(varNew(index,j),ZERO); /////////// VAR WRITE - last VarNew read
T = T + direction*H;
Hnew = fmax(Hmin,fmin(Hnew,Hmax));
if (rejectLastH) // No step size increase after a rejected step
Hnew = fmin(Hnew,H);
rejectLastH = 0;
rejectMoreH = 0;
H = Hnew;
break; // EXIT THE LOOP: WHILE STEP NOT ACCEPTED
}
else // ~~~> Reject step
{
if (rejectMoreH)
Hnew = H*FacRej;
rejectMoreH = rejectLastH;
rejectLastH = 1;
H = Hnew;
if (Nacc >= 1)
Nrej += 1;
} // Err <= 1
} // UntilAccepted
} // TimeLoop
// ~~~> Successful exit
return 0; // ~~~> The integration was successful
}
__global__
void Rosenbrock_ros3(double * __restrict__ conc, const double Tstart, const double Tend, double * __restrict__ rstatus, int * __restrict__ istatus,
const int autonomous, const int vectorTol, const int UplimTol, const int Max_no_steps,
const double Hmin, const double Hmax, const double Hstart, const double FacMin, const double FacMax, const double FacRej, const double FacSafe, const double roundoff,
const double * __restrict__ absTol, const double * __restrict__ relTol,
const double * __restrict__ khet_st, const double * __restrict__ khet_tr,
const double * __restrict__ jx,
const double * __restrict__ temp_gpu,
const double * __restrict__ press_gpu,
const double * __restrict__ cair_gpu,
const int VL_GLO,
double * __restrict__ Ghimj,
double * __restrict__ K,
double * __restrict__ varNew,
double * __restrict__ Fcn0,
double * __restrict__ dFdT,
double * __restrict__ jac0,
double * __restrict__ varErr,
double * __restrict__ var,
double * __restrict__ fix,
double * __restrict__ rconst)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
/* Temporary arrays allocated in stack */
/*
* Optimization NOTE: runs faster on Tesla/Fermi
* when temporaries are allocated on the stack instead of the heap.
* In theory someone can aggregate accesses together,
* however due to algorithm, threads access
* different parts of memory, making it harder to
* optimize accesses.
*
*/
// double varNew_stack[NVAR];
// double var_stack[NVAR];
// double varErr_stack[NVAR];
// double fix_stack[NFIX];
// double Fcn0_stack[NVAR];
// double jac0_stack[LU_NONZERO];
// double dFdT_stack[NVAR];
// double Ghimj_stack[LU_NONZERO];
// double K_stack[3*NVAR];
// double rconst_stack[NREACT];
// /* Allocated in stack */
// double *Ghimj = Ghimj_stack;
// double *K = K_stack;
// double *varNew = varNew_stack;
// double *Fcn0 = Fcn0_stack;
// double *dFdT = dFdT_stack;
// double *jac0 = jac0_stack;
// double *varErr = varErr_stack;
// double *var = var_stack;
// double *fix = fix_stack;
// double *rconst = rconst_stack;
const int method = 2;
if (index < VL_GLO)
{
int Nfun,Njac,Nstp,Nacc,Nrej,Ndec,Nsol,Nsng;
double Texit, Hexit;
Nfun = 0;
Njac = 0;
Nstp = 0;
Nacc = 0;
Nrej = 0;
Ndec = 0;
Nsol = 0;
Nsng = 0;
/* Copy data from global memory to temporary array */
/*
* Optimization note: if we ever have enough constant
* memory, we could use it for storing the data.
* In current architectures if we use constant memory
* only a few threads will be able to run on the fly.
*
*/
for (int i=0; i<NSPEC; i++)
var(index,i) = conc(index,i);
for (int i=0; i<NFIX; i++)
fix(index,i) = conc(index,NVAR+i);
//update_rconst(var, khet_st, khet_tr, jx, VL_GLO);
update_rconst(var, khet_st, khet_tr, jx, rconst, temp_gpu, press_gpu, cair_gpu, VL_GLO);
ros_Integrator_ros3(var, fix, Tstart, Tend, Texit,
// Integration parameters
autonomous, vectorTol, Max_no_steps,
roundoff, Hmin, Hmax, Hstart, Hexit,
FacMin, FacMax, FacRej, FacSafe,
// Status parameters
Nfun, Njac, Nstp, Nacc, Nrej, Ndec, Nsol, Nsng,
// cuda global mem buffers
rconst, absTol, relTol, varNew, Fcn0,
K, dFdT, jac0, Ghimj, varErr,
// For update rconst
khet_st, khet_tr, jx,
VL_GLO
);
for (int i=0; i<NVAR; i++)
conc(index,i) = var(index,i);
/* Statistics */
istatus(index,ifun) = Nfun;
istatus(index,ijac) = Njac;
istatus(index,istp) = Nstp;
istatus(index,iacc) = Nacc;
istatus(index,irej) = Nrej;
istatus(index,idec) = Ndec;
istatus(index,isol) = Nsol;
istatus(index,isng) = Nsng;
// Last T and H
rstatus(index,itexit) = Texit;
rstatus(index,ihexit) = Hexit;
}
}
// no int8 in CUDA :(
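// Stage 1 of a two-stage sum reduction: each block accumulates the eight per-cell
// integrator counters from istatus into two int4 partial sums (also scattering the
// per-cell Nacc/Nrej values into xNacc/xNrej), reduces them in shared memory, and
// writes one partial sum per block to tmp_out_1/tmp_out_2 for reduce_istatus_2.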
__global__ void reduce_istatus_1(int *istatus, int4 *tmp_out_1, int4 *tmp_out_2, int VL_GLO, int *xNacc, int *xNrej)
{
int index = blockIdx.x*blockDim.x+threadIdx.x;
int idx_1 = threadIdx.x;
int global_size = blockDim.x*gridDim.x;
int foo;
//no int8 in CUDA :(
int4 accumulator_1 = make_int4(0,0,0,0);
int4 accumulator_2 = make_int4(0,0,0,0);
while (index < VL_GLO)
{
accumulator_1.x += istatus(index,0);
accumulator_1.y += istatus(index,1);
accumulator_1.z += istatus(index,2);
//some dirty work on the side...
foo = istatus(index,3);
xNacc[index] = foo;
accumulator_1.w += foo;
foo = istatus(index,4);
xNrej[index] = foo;
accumulator_2.x += foo;
accumulator_2.y += istatus(index,5);
accumulator_2.z += istatus(index,6);
accumulator_2.w += istatus(index,7);
index += global_size;
}
//no int8 in CUDA :(
__shared__ int4 buffer_1[REDUCTION_SIZE_1];
__shared__ int4 buffer_2[REDUCTION_SIZE_1];
buffer_1[idx_1] = accumulator_1;
buffer_2[idx_1] = accumulator_2;
__syncthreads();
int idx_2, active_threads = blockDim.x;
int4 tmp_1, tmp_2;
while (active_threads != 1)
{
active_threads /= 2;
if (idx_1 < active_threads)
{
idx_2 = idx_1+active_threads;
tmp_1 = buffer_1[idx_1];
tmp_2 = buffer_1[idx_2];
tmp_1.x += tmp_2.x;
tmp_1.y += tmp_2.y;
tmp_1.z += tmp_2.z;
tmp_1.w += tmp_2.w;
buffer_1[idx_1] = tmp_1;
tmp_1 = buffer_2[idx_1];
tmp_2 = buffer_2[idx_2];
tmp_1.x += tmp_2.x;
tmp_1.y += tmp_2.y;
tmp_1.z += tmp_2.z;
tmp_1.w += tmp_2.w;
buffer_2[idx_1] = tmp_1;
}
__syncthreads();
}
if (idx_1 == 0)
{
tmp_out_1[blockIdx.x] = buffer_1[0];
tmp_out_2[blockIdx.x] = buffer_2[0];
}
}
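// Stage 2: a single block sums the per-block partials produced by reduce_istatus_1
// and writes the final eight integrator counters to out[0..7].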
__global__ void reduce_istatus_2(int4 *tmp_out_1, int4 *tmp_out_2, int *out)
{
int idx_1 = threadIdx.x;
//no int8 in CUDA :(
__shared__ int4 buffer_1[REDUCTION_SIZE_2];
__shared__ int4 buffer_2[REDUCTION_SIZE_2];
buffer_1[idx_1] = tmp_out_1[idx_1];
buffer_2[idx_1] = tmp_out_2[idx_1];
__syncthreads();
int idx_2, active_threads = blockDim.x;
int4 tmp_1, tmp_2;
while (active_threads != 1)
{
active_threads /= 2;
if (idx_1 < active_threads)
{
idx_2 = idx_1+active_threads;
tmp_1 = buffer_1[idx_1];
tmp_2 = buffer_1[idx_2];
tmp_1.x += tmp_2.x;
tmp_1.y += tmp_2.y;
tmp_1.z += tmp_2.z;
tmp_1.w += tmp_2.w;
buffer_1[idx_1] = tmp_1;
tmp_1 = buffer_2[idx_1];
tmp_2 = buffer_2[idx_2];
tmp_1.x += tmp_2.x;
tmp_1.y += tmp_2.y;
tmp_1.z += tmp_2.z;
tmp_1.w += tmp_2.w;
buffer_2[idx_1] = tmp_1;
}
__syncthreads();
}
if (idx_1 == 0)
{
tmp_1 = buffer_1[0];
tmp_2 = buffer_2[0];
out[0] = tmp_1.x;
out[1] = tmp_1.y;
out[2] = tmp_1.z;
out[3] = tmp_1.w;
out[4] = tmp_2.x;
out[5] = tmp_2.y;
out[6] = tmp_2.z;
out[7] = tmp_2.w;
}
}
/* Assuming different processes */
enum { TRUE=1, FALSE=0 } ;
double *d_conc, *d_temp, *d_press, *d_cair, *d_khet_st, *d_khet_tr, *d_jx;
int initialized = FALSE;
/* Device pointers pointing to GPU */
double *d_rstatus, *d_absTol, *d_relTol;
int *d_istatus, *d_istatus_rd, *d_xNacc, *d_xNrej;
int4 *d_tmp_out_1, *d_tmp_out_2;
/* Allocate arrays on device for Rosenbrock */
__host__ void init_first_time(int pe, int VL_GLO, int size_khet_st, int size_khet_tr, int size_jx ){
/* Select the proper GPU CARD */
int deviceCount, device;
gpuErrchk( cudaGetDeviceCount(&deviceCount) );
device = pe % deviceCount;
gpuErrchk( cudaSetDevice(device) );
printf("PE[%d]: selected %d of total %d\n",pe,device,deviceCount);
cudaDeviceSetCacheConfig(cudaFuncCachePreferL1);
gpuErrchk( cudaMalloc ((void **) &d_conc , sizeof(double)*VL_GLO*(NSPEC)) );
gpuErrchk( cudaMalloc ((void **) &d_khet_st, sizeof(double)*VL_GLO*size_khet_st) );
gpuErrchk( cudaMalloc ((void **) &d_khet_tr, sizeof(double)*VL_GLO*size_khet_tr) );
gpuErrchk( cudaMalloc ((void **) &d_jx , sizeof(double)*VL_GLO*size_jx) );
gpuErrchk( cudaMalloc ((void **) &d_rstatus , sizeof(double)*VL_GLO*2) );
gpuErrchk( cudaMalloc ((void **) &d_istatus , sizeof(int)*VL_GLO*8) );
gpuErrchk( cudaMalloc ((void **) &d_absTol , sizeof(double)*NVAR) );
gpuErrchk( cudaMalloc ((void **) &d_relTol , sizeof(double)*NVAR) );
/* Allocate input arrays */
gpuErrchk( cudaMalloc ((void **) &temp_gpu , sizeof(double)*VL_GLO) );
gpuErrchk( cudaMalloc ((void **) &press_gpu , sizeof(double)*VL_GLO) );
gpuErrchk( cudaMalloc ((void **) &cair_gpu , sizeof(double)*VL_GLO) );
/* Allocate arrays on device for reduce_foo */
gpuErrchk( cudaMalloc ((void **) &d_istatus_rd , sizeof(int)*8));
gpuErrchk( cudaMalloc ((void **) &d_tmp_out_1 , sizeof(int4)*64));
gpuErrchk( cudaMalloc ((void **) &d_tmp_out_2 , sizeof(int4)*64));
gpuErrchk( cudaMalloc ((void **) &d_xNacc , sizeof(int)*VL_GLO));
gpuErrchk( cudaMalloc ((void **) &d_xNrej , sizeof(int)*VL_GLO));
gpuErrchk( cudaMalloc ((void **) &Ghimj, sizeof(double) * VL_GLO * LU_NONZERO));
gpuErrchk( cudaMalloc ((void **) &K, sizeof(double) * VL_GLO * 6*NVAR));
gpuErrchk( cudaMalloc ((void **) &varNew, sizeof(double) * VL_GLO * NVAR));
gpuErrchk( cudaMalloc ((void **) &Fcn0, sizeof(double) * VL_GLO * NVAR));
gpuErrchk( cudaMalloc ((void **) &dFdT, sizeof(double) * VL_GLO * NVAR));
gpuErrchk( cudaMalloc ((void **) &jac0, sizeof(double) * VL_GLO * LU_NONZERO));
gpuErrchk( cudaMalloc ((void **) &varErr, sizeof(double) * VL_GLO * NVAR));
gpuErrchk( cudaMalloc ((void **) &var, sizeof(double) * VL_GLO * NSPEC));
gpuErrchk( cudaMalloc ((void **) &fix, sizeof(double) * VL_GLO * NFIX));
gpuErrchk( cudaMalloc ((void **) &rconst, sizeof(double) * VL_GLO * NREACT));
initialized = TRUE;
}
/*
 * TODO: This should be called at some point to release device memory. Note that the
 * per-cell work buffers allocated in init_first_time (Ghimj, K, varNew, Fcn0, dFdT,
 * jac0, varErr, var, fix, rconst) are not freed here yet.
*/
extern "C" void finalize_cuda(){
/* Free memory on the device */
gpuErrchk( cudaFree(d_conc ) );
gpuErrchk( cudaFree(d_temp ) );
gpuErrchk( cudaFree(d_press ) );
gpuErrchk( cudaFree(d_cair ) );
gpuErrchk( cudaFree(d_khet_st ) );
gpuErrchk( cudaFree(d_khet_tr ) );
gpuErrchk( cudaFree(d_jx ) );
gpuErrchk( cudaFree(d_rstatus ) );
gpuErrchk( cudaFree(d_istatus ) );
gpuErrchk( cudaFree(d_absTol ) );
gpuErrchk( cudaFree(d_relTol ) );
gpuErrchk( cudaFree(d_istatus_rd ) );
gpuErrchk( cudaFree(d_tmp_out_1 ) );
gpuErrchk( cudaFree(d_tmp_out_2 ) );
gpuErrchk( cudaFree(d_xNacc ) );
gpuErrchk( cudaFree(d_xNrej ) );
gpuErrchk( cudaFree(temp_gpu ) );
gpuErrchk( cudaFree(press_gpu ) );
gpuErrchk( cudaFree(cair_gpu ) );
}
extern "C" void kpp_integrate_cuda_( int *pe_p, int *sizes, double *time_step_len_p, double *conc, double *temp, double *press, double *cair,
double *khet_st, double *khet_tr, double *jx, double *absTol, double *relTol, int *ierr, int *istatus,
int *xNacc, int *xNrej, double *rndoff, int *icntrl=NULL, double *rcntrl=NULL
)
/* // TODO
* Parameters:
* pe_p: scalar int - processor element
* VL_GLO: scalar int - size of the system
* NSPEC: scalar int - number of species
* NREACT: scalar int - number of reactions
* NVAR: scalar int -
*
* Input data:
* conc: 2D array of doubles - size: vl_glo x number of species
* temp: 1D array of doubles - size: vl_glo
* press: 1D array of doubles - size: vl_glo
* cair: 1D array of doubles - size: vl_glo
* khet_st: 2D array of doubles - size: vl_glo x number of species
* khet_tr: 2D array of doubles - size: vl_glo x number of species
* jx: 2D array of doubles - size: vl_glo x number of species
* absTol: 1D array of doubles - size: number of species
* relTol: 1D array of doubles - size: number of species
* Control:
* icntrl: 1D array of ints - size: 4
* sizes: 1D array of ints - size: 4
* rcntrl: 1D array of doubles - size: 7
*
*
*/
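/*
 * Illustrative call sketch (added for clarity; not part of the original source).
 * A hypothetical C driver whose host arrays are already filled could invoke the
 * integrator roughly as follows; the variable names (pe, dt, ierr, ...) are
 * assumptions, and icntrl/rcntrl are omitted so the default handling below applies:
 *
 *   int sizes[4] = {VL_GLO, size_khet_st, size_khet_tr, size_jx};
 *   kpp_integrate_cuda_(&pe, sizes, &dt, conc, temp, press, cair,
 *                       khet_st, khet_tr, jx, absTol, relTol,
 *                       &ierr, istatus, xNacc, xNrej, &roundoff);
 */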
{
const double DELTAMIN = 1.0E-5;
int VL_GLO = sizes[0];
int size_khet_st = sizes[1];
int size_khet_tr = sizes[2];
int size_jx = sizes[3];
double roundoff = *rndoff;
double Tstart,Tend;
Tstart = ZERO;
Tend = *time_step_len_p;
int pe = *pe_p;
// variables from rcntrl and icntrl
int autonomous, vectorTol, UplimTol, method, Max_no_steps;
double Hmin, Hmax, Hstart, FacMin, FacMax, FacRej, FacSafe;
//int rcntrl_bool = 0, icntrl_bool=0;
// Fall back to zero-filled local control arrays when none are supplied
double rcntrl_default[7] = {0.0};
int icntrl_default[4] = {0};
if (rcntrl == NULL)
{
rcntrl = rcntrl_default;
}
if (icntrl == NULL)
{
icntrl = icntrl_default;
}
/* Allocate arrays on device for update_rconst kernel*/
if (initialized == FALSE) init_first_time(pe, VL_GLO, size_khet_st, size_khet_tr, size_jx);
/* Copy data from host memory to device memory */
gpuErrchk( cudaMemcpy(d_conc , conc , sizeof(double)*VL_GLO*NSPEC , cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(temp_gpu , temp , sizeof(double)*VL_GLO , cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(press_gpu , press , sizeof(double)*VL_GLO , cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(cair_gpu , cair , sizeof(double)*VL_GLO , cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(d_khet_st, khet_st , sizeof(double)*VL_GLO*size_khet_st , cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(d_khet_tr, khet_tr , sizeof(double)*VL_GLO*size_khet_tr , cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(d_jx , jx , sizeof(double)*VL_GLO*size_jx , cudaMemcpyHostToDevice) );
/* Copy arrays from host memory to device memory for Rosenbrock */
gpuErrchk( cudaMemcpy(d_absTol, absTol, sizeof(double)*NVAR, cudaMemcpyHostToDevice) );
gpuErrchk( cudaMemcpy(d_relTol, relTol, sizeof(double)*NVAR, cudaMemcpyHostToDevice) );
/* Compute execution configuration for update_rconst */
int block_size, grid_size;
block_size = BLOCKSIZE;
grid_size = (VL_GLO + block_size - 1)/block_size;
dim3 dimBlock(block_size);
dim3 dimGrid(grid_size);
/* Execute the kernel */
//update_rconst<<<dimGrid,dimBlock>>>(d_conc, d_khet_st, d_khet_tr, d_jx, VL_GLO);
GPU_DEBUG();
// *------------------------------------------------------*
// | Default values vs input settings (icntrl, rcntrl) |
// *------------------------------------------------------*
int ierr_tmp=0;
{
// autonomous or time dependent ODE. Default is time dependent.
autonomous = !(icntrl[0] == 0);
// For Scalar tolerances (icntrl[1].NE.0) the code uses absTol(0) and relTol(0)
// For Vector tolerances (icntrl[1] == 0) the code uses absTol(0:NVAR) and relTol(0:NVAR)
if (icntrl[1] == 0)
{
vectorTol = 1; //bool
UplimTol = NVAR;
}
else
{
vectorTol = 0;
UplimTol = 1;
}
// The particular Rosenbrock method chosen
if (icntrl[2] == 0)
{
method = 4;
}
else if ((icntrl[2] >= 1) && (icntrl[2] <= 5))
{
method = icntrl[2];
}
else
{
printf("User-selected Rosenbrock method: icntrl[2]=%d\n",method);
ierr_tmp = -2;
}
// The maximum number of steps admitted
if (icntrl[3] == 0)
{
Max_no_steps = 100000;
}
else if (icntrl[3] > 0)
{
Max_no_steps=icntrl[3];
}
else
{
printf("User-selected max no. of steps: icntrl[3]=%d\n",icntrl[3]);
ierr_tmp = -1;
}
// Unit roundoff (1+ roundoff>1)
roundoff = machine_eps_flt();
// Lower bound on the step size: (positive value)
if (rcntrl[0] == ZERO)
{
Hmin = ZERO;
}
else if (rcntrl[0] > ZERO)
{
Hmin = rcntrl[0];
}
else
{
printf("User-selected Hmin: rcntrl[0]=%f\n",rcntrl[0]);
ierr_tmp = -3;
}
// Upper bound on the step size: (positive value)
if (rcntrl[1] == ZERO)
{
Hmax = fabs(Tend-Tstart);
}
else if (rcntrl[1] > ZERO)
{
Hmax = fmin(fabs(rcntrl[1]),fabs(Tend-Tstart));
}
else
{
printf("User-selected Hmax: rcntrl[1]=%f\n",rcntrl[1]);
ierr_tmp = -3;
}
// Starting step size: (positive value)
if (rcntrl[2] == ZERO)
{
Hstart = fmax(Hmin,DELTAMIN);
}
else if (rcntrl[2] > ZERO)
{
Hstart = fmin(fabs(rcntrl[2]),fabs(Tend-Tstart));
}
else
{
printf("User-selected Hstart: rcntrl[2]=%f\n",rcntrl[2]);
ierr_tmp = -3;
}
// Step size can be changed s.t. FacMin < Hnew/Hexit < FacMax
if (rcntrl[3] == ZERO)
{
FacMin = 0.2;
}
else if (rcntrl[3] > ZERO)
{
FacMin = rcntrl[3];
}
else
{
printf("User-selected FacMin: rcntrl[3]=%f\n",rcntrl[3]);
ierr_tmp = -4;
}
if (rcntrl[4] == ZERO)
{
FacMax = 6.0;
}
else if (rcntrl[4] > ZERO)
{
FacMax = rcntrl[4];
}
else
{
printf("User-selected FacMax: rcntrl[4]=%f\n",rcntrl[4]);
ierr_tmp = -4;
}
// FacRej: Factor to decrease step after 2 successive rejections
if (rcntrl[5] == ZERO)
{
FacRej = 0.1;
}
else if (rcntrl[5] > ZERO)
{
FacRej = rcntrl[5];
}
else
{
printf("User-selected FacRej: rcntrl[5]=%f\n",rcntrl[5]);
ierr_tmp = -4;
}
// FacSafe: Safety Factor in the computation of new step size
if (rcntrl[6] == ZERO)
{
FacSafe = 0.9;
}
else if (rcntrl[6] > ZERO)
{
FacSafe = rcntrl[6];
}
else
{
printf("User-selected FacSafe: rcntrl[6]=%f\n",rcntrl[6]);
ierr_tmp = -4;
}
// Check if tolerances are reasonable
for (int i=0; i < UplimTol; i++)
{
if ((absTol[i] <= ZERO) || (relTol[i] <= 10.0*roundoff) || (relTol[i] >= 1.0))
{
printf("CCC absTol(%d) = %f \n",i,absTol[i]);
printf("CCC relTol(%d) = %f \n",i,relTol[i]);
ierr_tmp = -5;
}
}
}
switch (method){
case 2:
Rosenbrock_ros3<<<dimGrid,dimBlock>>>(d_conc, Tstart, Tend, d_rstatus, d_istatus,
autonomous, vectorTol, UplimTol, Max_no_steps,
Hmin, Hmax, Hstart, FacMin, FacMax, FacRej, FacSafe, roundoff,
d_absTol, d_relTol,
d_khet_st, d_khet_tr, d_jx,
temp_gpu, press_gpu, cair_gpu,
VL_GLO,
Ghimj, K,varNew,Fcn0,dFdT,jac0,varErr,var,fix,rconst);
break;
default:
Rosenbrock<<<dimGrid,dimBlock>>>(d_conc, Tstart, Tend, d_rstatus, d_istatus,
// values calculated from icntrl and rcntrl at host
autonomous, vectorTol, UplimTol, method, Max_no_steps,
Hmin, Hmax, Hstart, FacMin, FacMax, FacRej, FacSafe, roundoff,
// cuda global mem buffers
d_absTol, d_relTol,
d_khet_st, d_khet_tr, d_jx,
// Global input arrays
temp_gpu, press_gpu, cair_gpu,
// extra - vector lenght and processor
VL_GLO);
break;
}
GPU_DEBUG();
reduce_istatus_1<<<REDUCTION_SIZE_2,REDUCTION_SIZE_1>>>(d_istatus, d_tmp_out_1, d_tmp_out_2, VL_GLO, d_xNacc, d_xNrej);
GPU_DEBUG();
reduce_istatus_2<<<1,REDUCTION_SIZE_2>>>(d_tmp_out_1, d_tmp_out_2, d_istatus_rd);
GPU_DEBUG();
/* Copy the result back */
gpuErrchk( cudaMemcpy( conc , d_conc , sizeof(double)*VL_GLO*NVAR, cudaMemcpyDeviceToHost) );
gpuErrchk( cudaMemcpy( xNacc , d_xNacc , sizeof(int)*VL_GLO , cudaMemcpyDeviceToHost) );
gpuErrchk( cudaMemcpy( xNrej , d_xNrej , sizeof(int)*VL_GLO , cudaMemcpyDeviceToHost) );
return;
}
|
38d914eb2e65f91d47d7a0ee33f152352228d4c5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green, and Blue is in it.
//The 'A' stands for Alpha and is used for transparency; it will be
//ignored in this homework.
//Each channel Red, Blue, Green, and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
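//Worked example (illustrative, not part of the assignment text): for a pure white
//pixel (R=G=B=255) the weights sum to 1.0, so I = .299f*255 + .587f*255 + .114f*255 = 255;
//for (R,G,B) = (100, 150, 200), I = 29.9 + 88.05 + 22.8 = 140.75, which truncates to 140.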
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
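//Note: with the 1-thread blocks launched below, each block maps to exactly one
//pixel, so (blockIdx.x, blockIdx.y) is already the pixel coordinate.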
int idx_x = blockIdx.x;
int idx_y = blockIdx.y;
uchar4 rgba = rgbaImage[idx_y * numCols + idx_x];
float fcolor = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
unsigned char ucolor = (unsigned char)(fcolor);
greyImage[idx_y * numCols + idx_x] = ucolor;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(1, 1, 1); //TODO
const dim3 gridSize(numCols, numRows, 1); //TODO
rgba_to_greyscale << <gridSize, blockSize >> >(d_rgbaImage, d_greyImage, numRows, numCols);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
}
|
38d914eb2e65f91d47d7a0ee33f152352228d4c5.cu
|
// Homework 1
// Color to Greyscale Conversion
//A common way to represent color images is known as RGBA - the color
//is specified by how much Red, Green, and Blue is in it.
//The 'A' stands for Alpha and is used for transparency; it will be
//ignored in this homework.
//Each channel Red, Blue, Green, and Alpha is represented by one byte.
//Since we are using one byte for each color there are 256 different
//possible values for each color. This means we use 4 bytes per pixel.
//Greyscale images are represented by a single intensity value per pixel
//which is one byte in size.
//To convert an image from color to grayscale one simple method is to
//set the intensity to the average of the RGB channels. But we will
//use a more sophisticated method that takes into account how the eye
//perceives color and weights the channels unequally.
//The eye responds most strongly to green followed by red and then blue.
//The NTSC (National Television System Committee) recommends the following
//formula for color to greyscale conversion:
//I = .299f * R + .587f * G + .114f * B
//Notice the trailing f's on the numbers which indicate that they are
//single precision floating point constants and not double precision
//constants.
//You should fill in the kernel as well as set the block and grid sizes
//so that the entire image is processed.
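//Worked example (illustrative, not part of the assignment text): for a pure white
//pixel (R=G=B=255) the weights sum to 1.0, so I = .299f*255 + .587f*255 + .114f*255 = 255;
//for (R,G,B) = (100, 150, 200), I = 29.9 + 88.05 + 22.8 = 140.75, which truncates to 140.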
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
__global__
void rgba_to_greyscale(const uchar4* const rgbaImage,
unsigned char* const greyImage,
int numRows, int numCols)
{
//TODO
//Fill in the kernel to convert from color to greyscale
//the mapping from components of a uchar4 to RGBA is:
// .x -> R ; .y -> G ; .z -> B ; .w -> A
//
//The output (greyImage) at each pixel should be the result of
//applying the formula: output = .299f * R + .587f * G + .114f * B;
//Note: We will be ignoring the alpha channel for this conversion
//First create a mapping from the 2D block and grid locations
//to an absolute 2D location in the image, then use that to
//calculate a 1D offset
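//Note: with the 1-thread blocks launched below, each block maps to exactly one
//pixel, so (blockIdx.x, blockIdx.y) is already the pixel coordinate.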
int idx_x = blockIdx.x;
int idx_y = blockIdx.y;
uchar4 rgba = rgbaImage[idx_y * numCols + idx_x];
float fcolor = .299f * rgba.x + .587f * rgba.y + .114f * rgba.z;
unsigned char ucolor = (unsigned char)(fcolor);
greyImage[idx_y * numCols + idx_x] = ucolor;
}
void your_rgba_to_greyscale(const uchar4 * const h_rgbaImage, uchar4 * const d_rgbaImage,
unsigned char* const d_greyImage, size_t numRows, size_t numCols)
{
//You must fill in the correct sizes for the blockSize and gridSize
//currently only one block with one thread is being launched
const dim3 blockSize(1, 1, 1); //TODO
const dim3 gridSize(numCols, numRows, 1); //TODO
rgba_to_greyscale << <gridSize, blockSize >> >(d_rgbaImage, d_greyImage, numRows, numCols);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
}
|
d879d8a4d683ed67a74448f1e00f9195dee0ee9e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
/*
 * Perform an element-wise fused multiply-add over vectors of length n: result[i] = a[i] + b[i] * c.
*/
extern "C"
__global__ void addProduct_vs(int n, float *a, float *b, float c, float *result)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
result[i] = a[i] + b[i] * c;
}
}
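/*
 * Minimal host-side launch sketch (illustrative only; the device buffers d_a, d_b,
 * d_result and the length n are assumptions, not part of this file):
 *
 *   int threads = 256;
 *   int blocks = (n + threads - 1) / threads;
 *   hipLaunchKernelGGL(addProduct_vs, dim3(blocks), dim3(threads), 0, 0,
 *                      n, d_a, d_b, 2.0f, d_result);
 */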
|
d879d8a4d683ed67a74448f1e00f9195dee0ee9e.cu
|
#include "includes.h"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
extern "C"
/*
 * Perform an element-wise fused multiply-add over vectors of length n: result[i] = a[i] + b[i] * c.
*/
extern "C"
__global__ void addProduct_vs(int n, float *a, float *b, float c, float *result)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i<n)
{
result[i] = a[i] + b[i] * c;
}
}
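/*
 * Minimal host-side launch sketch (illustrative only; the device buffers d_a, d_b,
 * d_result and the length n are assumptions, not part of this file):
 *
 *   int threads = 256;
 *   int blocks = (n + threads - 1) / threads;
 *   addProduct_vs<<<blocks, threads>>>(n, d_a, d_b, 2.0f, d_result);
 */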
|
90fdaffc857f2cb9b4d95937863ee9f346872643.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <THHUNN/THHUNN.h>
#include <TH/THHalf.h>
#include <THH/THHNumerics.cuh>
#include <THH/THHApply.cuh>
template <typename T>
struct absupdateOutput_functor
{
__device__ void operator()(T* output, const T* input) const
{
*output = THCNumerics<T>::abs(*input);
}
};
template <typename T>
struct absupdateGradInput_functor
{
__device__ void operator()(T* gradInput, const T* input, const T* gradOutput) const
{
*gradInput = *input < 0 ? - *gradOutput : *gradOutput;
}
};
#include <THHUNN/generic/Abs.hip>
#include <THH/THHGenerateFloatTypes.h>
|
90fdaffc857f2cb9b4d95937863ee9f346872643.cu
|
#include <THCUNN/THCUNN.h>
#include <TH/THHalf.h>
#include <THC/THCNumerics.cuh>
#include <THC/THCApply.cuh>
template <typename T>
struct absupdateOutput_functor
{
__device__ void operator()(T* output, const T* input) const
{
*output = THCNumerics<T>::abs(*input);
}
};
template <typename T>
struct absupdateGradInput_functor
{
__device__ void operator()(T* gradInput, const T* input, const T* gradOutput) const
{
*gradInput = *input < 0 ? - *gradOutput : *gradOutput;
}
};
#include <THCUNN/generic/Abs.cu>
#include <THC/THCGenerateFloatTypes.h>
|
e1f887f19c84b7430bb3ebc658fdf9e420b4c266.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <opencv2/cudafeatures2d.hpp>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
#define BLOCK_X 8
#define BLOCK_Y 4
#define BLOCK_Z 4
using namespace cv;
namespace {
// Only use it with unsigned numeric types
template <typename T>
__device__ __forceinline__ unsigned char HasBit(T bitmap, unsigned char pos) {
return (bitmap >> pos) & 1;
}
// Only use it with unsigned numeric types
//template <typename T>
//__device__ __forceinline__ void SetBit(T &bitmap, unsigned char pos) {
// bitmap |= (1 << pos);
//}
// Walks up to the root of the tree starting from one of its nodes n
__device__ unsigned Find(const int *s_buf, unsigned n) {
while (s_buf[n] != n) {
n = s_buf[n];
}
return n;
}
__device__ unsigned FindAndCompress(int *s_buf, unsigned n) {
unsigned id = n;
while (s_buf[n] != n) {
n = s_buf[n];
s_buf[id] = n;
}
return n;
}
// Merges the trees containing nodes a and b by linking their roots
__device__ void Union(int *s_buf, unsigned a, unsigned b) {
bool done;
do {
a = Find(s_buf, a);
b = Find(s_buf, b);
if (a < b) {
int old = atomicMin(s_buf + b, a);
done = (old == b);
b = old;
}
else if (b < a) {
int old = atomicMin(s_buf + a, b);
done = (old == a);
a = old;
}
else {
done = true;
}
} while (!done);
}
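// Worked example (illustrative): with s_buf = {0, 0, 2, 2}, Union(s_buf, 1, 3)
// finds roots 0 and 2, links the larger root under the smaller via atomicMin,
// and leaves s_buf = {0, 0, 0, 2}, so nodes 1 and 3 now share root 0.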
__global__ void InitLabeling(cuda::PtrStepSz3i labels) {
unsigned x = (blockIdx.x * BLOCK_X + threadIdx.x) * 2;
unsigned y = (blockIdx.y * BLOCK_Y + threadIdx.y) * 2;
unsigned z = (blockIdx.z * BLOCK_Z + threadIdx.z) * 2;
unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x;
if (x < labels.x && y < labels.y && z < labels.z) {
labels[labels_index] = labels_index;
}
}
__global__ void Merge(const cuda::PtrStepSz3b img, cuda::PtrStepSz3i labels, unsigned char* last_cube_fg) {
unsigned x = (blockIdx.x * BLOCK_X + threadIdx.x) * 2;
unsigned y = (blockIdx.y * BLOCK_Y + threadIdx.y) * 2;
unsigned z = (blockIdx.z * BLOCK_Z + threadIdx.z) * 2;
unsigned img_index = z * (img.stepz / img.elem_size) + y * (img.stepy / img.elem_size) + x;
unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x;
if (x < labels.x && y < labels.y && z < labels.z) {
const unsigned long long P0 = 0x77707770777;
unsigned long long P = 0ULL;
unsigned char foreground = 0;
unsigned short buffer;
{
if (x + 1 < img.x) {
buffer = *reinterpret_cast<unsigned short *>(img.data + img_index);
if (buffer & 1) {
P |= P0;
foreground |= 1;
}
if (buffer & (1 << 8)) {
P |= (P0 << 1);
foreground |= (1 << 1);
}
if (y + 1 < img.y) {
buffer = *reinterpret_cast<unsigned short *>(img.data + img_index + img.stepy / img.elem_size);
if (buffer & 1) {
P |= (P0 << 4);
foreground |= (1 << 2);
}
if (buffer & (1 << 8)) {
P |= (P0 << 5);
foreground |= (1 << 3);
}
}
if (z + 1 < img.z) {
buffer = *reinterpret_cast<unsigned short *>(img.data + img_index + img.stepz / img.elem_size);
if (buffer & 1) {
P |= (P0 << 16);
foreground |= (1 << 4);
}
if (buffer & (1 << 8)) {
P |= (P0 << 17);
foreground |= (1 << 5);
}
if (y + 1 < img.y) {
buffer = *reinterpret_cast<unsigned short *>(img.data + img_index + img.stepz / img.elem_size + img.stepy / img.elem_size);
if (buffer & 1) {
P |= (P0 << 20);
foreground |= (1 << 6);
}
if (buffer & (1 << 8)) {
P |= (P0 << 21);
foreground |= (1 << 7);
}
}
}
}
else {
if (img[img_index]) {
P |= P0;
foreground |= 1;
}
if (y + 1 < labels.y) {
if (img[img_index + img.stepy / img.elem_size]) {
P |= (P0 << 4);
foreground |= (1 << 2);
}
}
if (z + 1 < labels.z) {
if (img[img_index + img.stepz / img.elem_size]) {
P |= (P0 << 16);
foreground |= (1 << 4);
}
if (y + 1 < labels.y) {
if (img[img_index + img.stepz / img.elem_size + img.stepy / img.elem_size]) {
P |= (P0 << 20);
foreground |= (1 << 6);
}
}
}
}
}
/* {
if (img[img_index]) {
P |= P0;
foreground |= 1;
}
if (x + 1 < img.x) {
if (img[img_index + 1]) {
P |= (P0 << 1);
foreground |= (1 << 1);
}
if (y + 1 < img.y && img[img_index + img.stepy / img.elem_size + 1]) {
P |= (P0 << 5);
foreground |= (1 << 3);
}
}
if (y + 1 < img.y) {
if (img[img_index + img.stepy / img.elem_size]) {
P |= (P0 << 4);
foreground |= (1 << 2);
}
}
if (z + 1 < img.z) {
if (img[img_index + img.stepz / img.elem_size]) {
P |= (P0 << 16);
foreground |= (1 << 4);
}
if (x + 1 < img.x) {
if (img[img_index + img.stepz / img.elem_size + 1]) {
P |= (P0 << 17);
foreground |= (1 << 5);
}
if (y + 1 < img.y && img[img_index + img.stepz / img.elem_size + img.stepy / img.elem_size + 1]) {
P |= (P0 << 21);
foreground |= (1 << 7);
}
}
if (y + 1 < img.y) {
if (img[img_index + img.stepz / img.elem_size + img.stepy / img.elem_size]) {
P |= (P0 << 20);
foreground |= (1 << 6);
}
}
}
}*/
// Store foreground voxels bitmask into memory
if (x + 1 < labels.x) {
labels[labels_index + 1] = foreground;
}
else if (y + 1 < labels.y) {
labels[labels_index + labels.stepy / labels.elem_size] = foreground;
}
else if (z + 1 < labels.z) {
labels[labels_index + labels.stepz / labels.elem_size] = foreground;
}
else {
*last_cube_fg = foreground;
}
// checks on borders
if (x == 0) {
P &= 0xEEEEEEEEEEEEEEEE;
}
if (x + 1 >= img.x) {
P &= 0x3333333333333333;
}
else if (x + 2 >= img.x) {
P &= 0x7777777777777777;
}
if (y == 0) {
P &= 0xFFF0FFF0FFF0FFF0;
}
if (y + 1 >= img.y) {
P &= 0x00FF00FF00FF00FF;
}
else if (y + 2 >= img.y) {
P &= 0x0FFF0FFF0FFF0FFF;
}
if (z == 0) {
P &= 0xFFFFFFFFFFFF0000;
}
if (z + 1 >= img.z) {
P &= 0x00000000FFFFFFFF;
}
//else if (z + 2 >= img.z) {
// P &= 0x0000FFFFFFFFFFFF;
//}
// P is now ready to be used to find neighbour blocks
// P value avoids range errors
if (P > 0) {
// Lower plane
unsigned char * plane_data = img.data + img_index - img.stepz;
unsigned lower_plane_index = labels_index - 2 * (labels.stepz / labels.elem_size);
if (HasBit(P, 0) && plane_data[0 - img.stepy - 1]) {
Union(labels.data, labels_index, lower_plane_index - 2 * (labels.stepy / labels.elem_size + 1));
}
if ((HasBit(P, 1) && plane_data[0 - img.stepy]) || (HasBit(P, 2) && plane_data[0 - img.stepy + 1])) {
Union(labels.data, labels_index, lower_plane_index - 2 * (labels.stepy / labels.elem_size));
}
if (HasBit(P, 3) && plane_data[0 - img.stepy + 2]) {
Union(labels.data, labels_index, lower_plane_index - 2 * (labels.stepy / labels.elem_size - 1));
}
if ((HasBit(P, 4) && plane_data[-1]) || (HasBit(P, 8) && plane_data[img.stepy - 1])) {
Union(labels.data, labels_index, lower_plane_index - 2);
}
if ((HasBit(P, 5) && plane_data[0]) || (HasBit(P, 6) && plane_data[1]) || (HasBit(P, 9) && plane_data[img.stepy]) || (HasBit(P, 10) && plane_data[img.stepy + 1])) {
Union(labels.data, labels_index, lower_plane_index);
}
if ((HasBit(P, 7) && plane_data[2]) || (HasBit(P, 11) && plane_data[img.stepy + 2])) {
Union(labels.data, labels_index, lower_plane_index + 2);
}
if (HasBit(P, 12) && plane_data[2 * img.stepy - 1]) {
Union(labels.data, labels_index, lower_plane_index + 2 * (labels.stepy / labels.elem_size - 1));
}
if ((HasBit(P, 13) && plane_data[2 * img.stepy]) || (HasBit(P, 14) && plane_data[2 * img.stepy + 1])) {
Union(labels.data, labels_index, lower_plane_index + 2 * (labels.stepy / labels.elem_size));
}
if (HasBit(P, 15) && plane_data[2 * img.stepy + 2]) {
Union(labels.data, labels_index, lower_plane_index + 2 * (labels.stepy / labels.elem_size + 1));
}
// Current planes
plane_data += img.stepz;
if ((HasBit(P, 16) && plane_data[0 - img.stepy - 1]) || (HasBit(P, 32) && plane_data[img.stepz - img.stepy - 1])) {
Union(labels.data, labels_index, labels_index - 2 * (labels.stepy / labels.elem_size + 1));
}
if ((HasBit(P, 17) && plane_data[0 - img.stepy]) || (HasBit(P, 18) && plane_data[0 - img.stepy + 1]) || (HasBit(P, 33) && plane_data[img.stepz - img.stepy]) || (HasBit(P, 34) && plane_data[img.stepz - img.stepy + 1])) {
Union(labels.data, labels_index, labels_index - 2 * (labels.stepy / labels.elem_size));
}
if ((HasBit(P, 19) && plane_data[0 - img.stepy + 2]) || (HasBit(P, 35) && plane_data[img.stepz - img.stepy + 2])) {
Union(labels.data, labels_index, labels_index - 2 * (labels.stepy / labels.elem_size - 1));
}
if ((HasBit(P, 20) && plane_data[-1]) || (HasBit(P, 24) && plane_data[img.stepy - 1]) || (HasBit(P, 36) && plane_data[img.stepz - 1]) || (HasBit(P, 40) && plane_data[img.stepz + img.stepy - 1])) {
Union(labels.data, labels_index, labels_index - 2);
}
}
}
}
__global__ void PathCompression(cuda::PtrStepSz3i labels) {
unsigned x = 2 * (blockIdx.x * BLOCK_X + threadIdx.x);
unsigned y = 2 * (blockIdx.y * BLOCK_Y + threadIdx.y);
unsigned z = 2 * (blockIdx.z * BLOCK_Z + threadIdx.z);
unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x;
if (x < labels.x && y < labels.y && z < labels.z) {
FindAndCompress(labels.data, labels_index);
}
}
__global__ void FinalLabeling(const cuda::PtrStepSz3b img, cuda::PtrStepSz3i labels, unsigned char* last_cube_fg) {
unsigned x = 2 * (blockIdx.x * BLOCK_X + threadIdx.x);
unsigned y = 2 * (blockIdx.y * BLOCK_Y + threadIdx.y);
unsigned z = 2 * (blockIdx.z * BLOCK_Z + threadIdx.z);
unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x;
if (x < labels.x && y < labels.y && z < labels.z) {
int label;
unsigned char foreground;
unsigned long long buffer;
if (x + 1 < labels.x) {
buffer = *reinterpret_cast<unsigned long long *>(labels.data + labels_index);
label = (buffer & (0xFFFFFFFF)) + 1;
foreground = (buffer >> 32) & 0xFFFFFFFF;
}
else {
label = labels[labels_index] + 1;
if (y + 1 < labels.y) {
foreground = labels[labels_index + labels.stepy / labels.elem_size];
}
else if (z + 1 < labels.z) {
foreground = labels[labels_index + labels.stepz / labels.elem_size];
}
else {
foreground = *last_cube_fg;
}
}
if (x + 1 < labels.x) {
*reinterpret_cast<unsigned long long *>(labels.data + labels_index) =
(static_cast<unsigned long long>(((foreground >> 1) & 1) * label) << 32) | (((foreground >> 0) & 1) * label);
if (y + 1 < labels.y) {
*reinterpret_cast<unsigned long long *>(labels.data + labels_index + labels.stepy / labels.elem_size) =
(static_cast<unsigned long long>(((foreground >> 3) & 1) * label) << 32) | (((foreground >> 2) & 1) * label);
}
if (z + 1 < labels.z) {
*reinterpret_cast<unsigned long long *>(labels.data + labels_index + labels.stepz / labels.elem_size) =
(static_cast<unsigned long long>(((foreground >> 5) & 1) * label) << 32) | (((foreground >> 4) & 1) * label);
if (y + 1 < labels.y) {
*reinterpret_cast<unsigned long long *>(labels.data + labels_index + labels.stepz / labels.elem_size + (labels.stepy / labels.elem_size)) =
(static_cast<unsigned long long>(((foreground >> 7) & 1) * label) << 32) | (((foreground >> 6) & 1) * label);
}
}
}
else {
labels[labels_index] = ((foreground >> 0) & 1) * label;
if (y + 1 < labels.y) {
labels[labels_index + (labels.stepy / labels.elem_size)] = ((foreground >> 2) & 1) * label;
}
if (z + 1 < labels.z) {
labels[labels_index + labels.stepz / labels.elem_size] = ((foreground >> 4) & 1) * label;
if (y + 1 < labels.y) {
labels[labels_index + labels.stepz / labels.elem_size + (labels.stepy / labels.elem_size)] = ((foreground >> 6) & 1) * label;
}
}
}
}
}
}
class BUF_IC_3D : public GpuLabeling3D<CONN_26> {
private:
dim3 grid_size_;
dim3 block_size_;
unsigned char* last_cube_fg_;
bool allocated_last_cude_fg_;
public:
BUF_IC_3D() {}
void PerformLabeling() {
d_img_labels_.create(d_img_.x, d_img_.y, d_img_.z, CV_32SC1);
allocated_last_cude_fg_ = false;
if ((d_img_.x % 2 == 1) && (d_img_.y % 2 == 1) && (d_img_.z % 2 == 1)) {
if (d_img_.x > 1 && d_img_.y > 1) {
last_cube_fg_ = reinterpret_cast<unsigned char*>(d_img_labels_.data + (d_img_labels_.z - 1) * d_img_labels_.stepz + (d_img_labels_.y - 2) * d_img_labels_.stepy) + d_img_labels_.x - 2;
}
else if (d_img_.x > 1 && d_img_.z > 1) {
last_cube_fg_ = reinterpret_cast<unsigned char*>(d_img_labels_.data + (d_img_labels_.z - 2) * d_img_labels_.stepz + (d_img_labels_.y - 1) * d_img_labels_.stepy) + d_img_labels_.x - 2;
}
else if (d_img_.y > 1 && d_img_.z > 1) {
last_cube_fg_ = reinterpret_cast<unsigned char*>(d_img_labels_.data + (d_img_labels_.z - 2) * d_img_labels_.stepz + (d_img_labels_.y - 2) * d_img_labels_.stepy) + d_img_labels_.x - 1;
}
else {
hipMalloc(&last_cube_fg_, sizeof(unsigned char));
allocated_last_cude_fg_ = true;
}
}
grid_size_ = dim3(((d_img_.x + 1) / 2 + BLOCK_X - 1) / BLOCK_X, ((d_img_.y + 1) / 2 + BLOCK_Y - 1) / BLOCK_Y, ((d_img_.z + 1) / 2 + BLOCK_Z - 1) / BLOCK_Z);
block_size_ = dim3(BLOCK_X, BLOCK_Y, BLOCK_Z);
InitLabeling << <grid_size_, block_size_ >> > (d_img_labels_);
//cuda::GpuMat d_expanded_connections;
//d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1);
//ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections);
//Mat1b expanded_connections;
//d_expanded_connections.download(expanded_connections);
//d_expanded_connections.release();
//Mat1i init_labels;
//d_block_labels_.download(init_labels);
Merge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_cube_fg_);
//Mat1i block_info_final;
//d_img_labels_.download(block_info_final);
PathCompression << <grid_size_, block_size_ >> > (d_img_labels_);
FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_cube_fg_);
if (allocated_last_cude_fg_) {
hipFree(last_cube_fg_);
}
// d_img_labels_.download(img_labels_);
hipDeviceSynchronize();
}
private:
void Alloc() {
d_img_labels_.create(d_img_.x, d_img_.y, d_img_.z, CV_32SC1);
allocated_last_cude_fg_ = false;
if ((d_img_.x % 2 == 1) && (d_img_.y % 2 == 1) && (d_img_.z % 2 == 1)) {
if (d_img_.x > 1 && d_img_.y > 1) {
last_cube_fg_ = reinterpret_cast<unsigned char*>(d_img_labels_.data + (d_img_labels_.z - 1) * d_img_labels_.stepz + (d_img_labels_.y - 2) * d_img_labels_.stepy) + d_img_labels_.x - 2;
}
else if (d_img_.x > 1 && d_img_.z > 1) {
last_cube_fg_ = reinterpret_cast<unsigned char*>(d_img_labels_.data + (d_img_labels_.z - 2) * d_img_labels_.stepz + (d_img_labels_.y - 1) * d_img_labels_.stepy) + d_img_labels_.x - 2;
}
else if (d_img_.y > 1 && d_img_.z > 1) {
last_cube_fg_ = reinterpret_cast<unsigned char*>(d_img_labels_.data + (d_img_labels_.z - 2) * d_img_labels_.stepz + (d_img_labels_.y - 2) * d_img_labels_.stepy) + d_img_labels_.x - 1;
}
else {
hipMalloc(&last_cube_fg_, sizeof(unsigned char));
allocated_last_cude_fg_ = true;
}
}
}
void Dealloc() {
if (allocated_last_cude_fg_) {
hipFree(last_cube_fg_);
}
}
double MemoryTransferHostToDevice() {
perf_.start();
d_img_.upload(img_);
perf_.stop();
return perf_.last();
}
void MemoryTransferDeviceToHost() {
d_img_labels_.download(img_labels_);
}
void AllScans() {
grid_size_ = dim3(((d_img_.x + 1) / 2 + BLOCK_X - 1) / BLOCK_X, ((d_img_.y + 1) / 2 + BLOCK_Y - 1) / BLOCK_Y, ((d_img_.z + 1) / 2 + BLOCK_Z - 1) / BLOCK_Z);
block_size_ = dim3(BLOCK_X, BLOCK_Y, BLOCK_Z);
InitLabeling << <grid_size_, block_size_ >> > (d_img_labels_);
//cuda::GpuMat d_expanded_connections;
//d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1);
//ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections);
//Mat1b expanded_connections;
//d_expanded_connections.download(expanded_connections);
//d_expanded_connections.release();
//Mat1i init_labels;
//d_block_labels_.download(init_labels);
Merge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_cube_fg_);
//Mat1i block_info_final;
//d_img_labels_.download(block_info_final);
PathCompression << <grid_size_, block_size_ >> > (d_img_labels_);
FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_cube_fg_);
hipDeviceSynchronize();
}
public:
void PerformLabelingWithSteps()
{
perf_.start();
Alloc();
perf_.stop();
double alloc_timing = perf_.last();
perf_.start();
AllScans();
perf_.stop();
perf_.store(Step(StepType::ALL_SCANS), perf_.last());
perf_.start();
Dealloc();
perf_.stop();
double dealloc_timing = perf_.last();
perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
}
};
REGISTER_LABELING(BUF_IC_3D);
|
e1f887f19c84b7430bb3ebc658fdf9e420b4c266.cu
|
#include <opencv2/cudafeatures2d.hpp>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "labeling_algorithms.h"
#include "register.h"
#define BLOCK_X 8
#define BLOCK_Y 4
#define BLOCK_Z 4
using namespace cv;
namespace {
// Only use it with unsigned numeric types
template <typename T>
__device__ __forceinline__ unsigned char HasBit(T bitmap, unsigned char pos) {
return (bitmap >> pos) & 1;
}
// Only use it with unsigned numeric types
//template <typename T>
//__device__ __forceinline__ void SetBit(T &bitmap, unsigned char pos) {
// bitmap |= (1 << pos);
//}
// Walks up to the root of the tree starting from one of its nodes n
__device__ unsigned Find(const int *s_buf, unsigned n) {
while (s_buf[n] != n) {
n = s_buf[n];
}
return n;
}
__device__ unsigned FindAndCompress(int *s_buf, unsigned n) {
unsigned id = n;
while (s_buf[n] != n) {
n = s_buf[n];
s_buf[id] = n;
}
return n;
}
// Merges the trees containing nodes a and b by linking their roots
__device__ void Union(int *s_buf, unsigned a, unsigned b) {
bool done;
do {
a = Find(s_buf, a);
b = Find(s_buf, b);
if (a < b) {
int old = atomicMin(s_buf + b, a);
done = (old == b);
b = old;
}
else if (b < a) {
int old = atomicMin(s_buf + a, b);
done = (old == a);
a = old;
}
else {
done = true;
}
} while (!done);
}
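// Worked example (illustrative): with s_buf = {0, 0, 2, 2}, Union(s_buf, 1, 3)
// finds roots 0 and 2, links the larger root under the smaller via atomicMin,
// and leaves s_buf = {0, 0, 0, 2}, so nodes 1 and 3 now share root 0.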
__global__ void InitLabeling(cuda::PtrStepSz3i labels) {
unsigned x = (blockIdx.x * BLOCK_X + threadIdx.x) * 2;
unsigned y = (blockIdx.y * BLOCK_Y + threadIdx.y) * 2;
unsigned z = (blockIdx.z * BLOCK_Z + threadIdx.z) * 2;
unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x;
if (x < labels.x && y < labels.y && z < labels.z) {
labels[labels_index] = labels_index;
}
}
__global__ void Merge(const cuda::PtrStepSz3b img, cuda::PtrStepSz3i labels, unsigned char* last_cube_fg) {
unsigned x = (blockIdx.x * BLOCK_X + threadIdx.x) * 2;
unsigned y = (blockIdx.y * BLOCK_Y + threadIdx.y) * 2;
unsigned z = (blockIdx.z * BLOCK_Z + threadIdx.z) * 2;
unsigned img_index = z * (img.stepz / img.elem_size) + y * (img.stepy / img.elem_size) + x;
unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x;
if (x < labels.x && y < labels.y && z < labels.z) {
const unsigned long long P0 = 0x77707770777;
unsigned long long P = 0ULL;
unsigned char foreground = 0;
unsigned short buffer;
{
if (x + 1 < img.x) {
buffer = *reinterpret_cast<unsigned short *>(img.data + img_index);
if (buffer & 1) {
P |= P0;
foreground |= 1;
}
if (buffer & (1 << 8)) {
P |= (P0 << 1);
foreground |= (1 << 1);
}
if (y + 1 < img.y) {
buffer = *reinterpret_cast<unsigned short *>(img.data + img_index + img.stepy / img.elem_size);
if (buffer & 1) {
P |= (P0 << 4);
foreground |= (1 << 2);
}
if (buffer & (1 << 8)) {
P |= (P0 << 5);
foreground |= (1 << 3);
}
}
if (z + 1 < img.z) {
buffer = *reinterpret_cast<unsigned short *>(img.data + img_index + img.stepz / img.elem_size);
if (buffer & 1) {
P |= (P0 << 16);
foreground |= (1 << 4);
}
if (buffer & (1 << 8)) {
P |= (P0 << 17);
foreground |= (1 << 5);
}
if (y + 1 < img.y) {
buffer = *reinterpret_cast<unsigned short *>(img.data + img_index + img.stepz / img.elem_size + img.stepy / img.elem_size);
if (buffer & 1) {
P |= (P0 << 20);
foreground |= (1 << 6);
}
if (buffer & (1 << 8)) {
P |= (P0 << 21);
foreground |= (1 << 7);
}
}
}
}
else {
if (img[img_index]) {
P |= P0;
foreground |= 1;
}
if (y + 1 < labels.y) {
if (img[img_index + img.stepy / img.elem_size]) {
P |= (P0 << 4);
foreground |= (1 << 2);
}
}
if (z + 1 < labels.z) {
if (img[img_index + img.stepz / img.elem_size]) {
P |= (P0 << 16);
foreground |= (1 << 4);
}
if (y + 1 < labels.y) {
if (img[img_index + img.stepz / img.elem_size + img.stepy / img.elem_size]) {
P |= (P0 << 20);
foreground |= (1 << 6);
}
}
}
}
}
/* {
if (img[img_index]) {
P |= P0;
foreground |= 1;
}
if (x + 1 < img.x) {
if (img[img_index + 1]) {
P |= (P0 << 1);
foreground |= (1 << 1);
}
if (y + 1 < img.y && img[img_index + img.stepy / img.elem_size + 1]) {
P |= (P0 << 5);
foreground |= (1 << 3);
}
}
if (y + 1 < img.y) {
if (img[img_index + img.stepy / img.elem_size]) {
P |= (P0 << 4);
foreground |= (1 << 2);
}
}
if (z + 1 < img.z) {
if (img[img_index + img.stepz / img.elem_size]) {
P |= (P0 << 16);
foreground |= (1 << 4);
}
if (x + 1 < img.x) {
if (img[img_index + img.stepz / img.elem_size + 1]) {
P |= (P0 << 17);
foreground |= (1 << 5);
}
if (y + 1 < img.y && img[img_index + img.stepz / img.elem_size + img.stepy / img.elem_size + 1]) {
P |= (P0 << 21);
foreground |= (1 << 7);
}
}
if (y + 1 < img.y) {
if (img[img_index + img.stepz / img.elem_size + img.stepy / img.elem_size]) {
P |= (P0 << 20);
foreground |= (1 << 6);
}
}
}
}*/
// Store foreground voxels bitmask into memory
if (x + 1 < labels.x) {
labels[labels_index + 1] = foreground;
}
else if (y + 1 < labels.y) {
labels[labels_index + labels.stepy / labels.elem_size] = foreground;
}
else if (z + 1 < labels.z) {
labels[labels_index + labels.stepz / labels.elem_size] = foreground;
}
else {
*last_cube_fg = foreground;
}
// checks on borders
if (x == 0) {
P &= 0xEEEEEEEEEEEEEEEE;
}
if (x + 1 >= img.x) {
P &= 0x3333333333333333;
}
else if (x + 2 >= img.x) {
P &= 0x7777777777777777;
}
if (y == 0) {
P &= 0xFFF0FFF0FFF0FFF0;
}
if (y + 1 >= img.y) {
P &= 0x00FF00FF00FF00FF;
}
else if (y + 2 >= img.y) {
P &= 0x0FFF0FFF0FFF0FFF;
}
if (z == 0) {
P &= 0xFFFFFFFFFFFF0000;
}
if (z + 1 >= img.z) {
P &= 0x00000000FFFFFFFF;
}
//else if (z + 2 >= img.z) {
// P &= 0x0000FFFFFFFFFFFF;
//}
// P is now ready to be used to find neighbour blocks
// P value avoids range errors
if (P > 0) {
// Lower plane
unsigned char * plane_data = img.data + img_index - img.stepz;
unsigned lower_plane_index = labels_index - 2 * (labels.stepz / labels.elem_size);
if (HasBit(P, 0) && plane_data[0 - img.stepy - 1]) {
Union(labels.data, labels_index, lower_plane_index - 2 * (labels.stepy / labels.elem_size + 1));
}
if ((HasBit(P, 1) && plane_data[0 - img.stepy]) || (HasBit(P, 2) && plane_data[0 - img.stepy + 1])) {
Union(labels.data, labels_index, lower_plane_index - 2 * (labels.stepy / labels.elem_size));
}
if (HasBit(P, 3) && plane_data[0 - img.stepy + 2]) {
Union(labels.data, labels_index, lower_plane_index - 2 * (labels.stepy / labels.elem_size - 1));
}
if ((HasBit(P, 4) && plane_data[-1]) || (HasBit(P, 8) && plane_data[img.stepy - 1])) {
Union(labels.data, labels_index, lower_plane_index - 2);
}
if ((HasBit(P, 5) && plane_data[0]) || (HasBit(P, 6) && plane_data[1]) || (HasBit(P, 9) && plane_data[img.stepy]) || (HasBit(P, 10) && plane_data[img.stepy + 1])) {
Union(labels.data, labels_index, lower_plane_index);
}
if ((HasBit(P, 7) && plane_data[2]) || (HasBit(P, 11) && plane_data[img.stepy + 2])) {
Union(labels.data, labels_index, lower_plane_index + 2);
}
if (HasBit(P, 12) && plane_data[2 * img.stepy - 1]) {
Union(labels.data, labels_index, lower_plane_index + 2 * (labels.stepy / labels.elem_size - 1));
}
if ((HasBit(P, 13) && plane_data[2 * img.stepy]) || (HasBit(P, 14) && plane_data[2 * img.stepy + 1])) {
Union(labels.data, labels_index, lower_plane_index + 2 * (labels.stepy / labels.elem_size));
}
if (HasBit(P, 15) && plane_data[2 * img.stepy + 2]) {
Union(labels.data, labels_index, lower_plane_index + 2 * (labels.stepy / labels.elem_size + 1));
}
// Current planes
plane_data += img.stepz;
if ((HasBit(P, 16) && plane_data[0 - img.stepy - 1]) || (HasBit(P, 32) && plane_data[img.stepz - img.stepy - 1])) {
Union(labels.data, labels_index, labels_index - 2 * (labels.stepy / labels.elem_size + 1));
}
if ((HasBit(P, 17) && plane_data[0 - img.stepy]) || (HasBit(P, 18) && plane_data[0 - img.stepy + 1]) || (HasBit(P, 33) && plane_data[img.stepz - img.stepy]) || (HasBit(P, 34) && plane_data[img.stepz - img.stepy + 1])) {
Union(labels.data, labels_index, labels_index - 2 * (labels.stepy / labels.elem_size));
}
if ((HasBit(P, 19) && plane_data[0 - img.stepy + 2]) || (HasBit(P, 35) && plane_data[img.stepz - img.stepy + 2])) {
Union(labels.data, labels_index, labels_index - 2 * (labels.stepy / labels.elem_size - 1));
}
if ((HasBit(P, 20) && plane_data[-1]) || (HasBit(P, 24) && plane_data[img.stepy - 1]) || (HasBit(P, 36) && plane_data[img.stepz - 1]) || (HasBit(P, 40) && plane_data[img.stepz + img.stepy - 1])) {
Union(labels.data, labels_index, labels_index - 2);
}
}
}
}
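// PathCompression: flatten the union-find trees so every block points directly to its root label.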
__global__ void PathCompression(cuda::PtrStepSz3i labels) {
unsigned x = 2 * (blockIdx.x * BLOCK_X + threadIdx.x);
unsigned y = 2 * (blockIdx.y * BLOCK_Y + threadIdx.y);
unsigned z = 2 * (blockIdx.z * BLOCK_Z + threadIdx.z);
unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x;
if (x < labels.x && y < labels.y && z < labels.z) {
FindAndCompress(labels.data, labels_index);
}
}
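// FinalLabeling: recover the foreground bitmask saved by Merge and write root_label + 1 to every
// foreground voxel of the 2x2x2 block and 0 to the background voxels.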
__global__ void FinalLabeling(const cuda::PtrStepSz3b img, cuda::PtrStepSz3i labels, unsigned char* last_cube_fg) {
unsigned x = 2 * (blockIdx.x * BLOCK_X + threadIdx.x);
unsigned y = 2 * (blockIdx.y * BLOCK_Y + threadIdx.y);
unsigned z = 2 * (blockIdx.z * BLOCK_Z + threadIdx.z);
unsigned labels_index = z * (labels.stepz / labels.elem_size) + y * (labels.stepy / labels.elem_size) + x;
if (x < labels.x && y < labels.y && z < labels.z) {
int label;
unsigned char foreground;
unsigned long long buffer;
if (x + 1 < labels.x) {
buffer = *reinterpret_cast<unsigned long long *>(labels.data + labels_index);
label = (buffer & (0xFFFFFFFF)) + 1;
foreground = (buffer >> 32) & 0xFFFFFFFF;
}
else {
label = labels[labels_index] + 1;
if (y + 1 < labels.y) {
foreground = labels[labels_index + labels.stepy / labels.elem_size];
}
else if (z + 1 < labels.z) {
foreground = labels[labels_index + labels.stepz / labels.elem_size];
}
else {
foreground = *last_cube_fg;
}
}
if (x + 1 < labels.x) {
*reinterpret_cast<unsigned long long *>(labels.data + labels_index) =
(static_cast<unsigned long long>(((foreground >> 1) & 1) * label) << 32) | (((foreground >> 0) & 1) * label);
if (y + 1 < labels.y) {
*reinterpret_cast<unsigned long long *>(labels.data + labels_index + labels.stepy / labels.elem_size) =
(static_cast<unsigned long long>(((foreground >> 3) & 1) * label) << 32) | (((foreground >> 2) & 1) * label);
}
if (z + 1 < labels.z) {
*reinterpret_cast<unsigned long long *>(labels.data + labels_index + labels.stepz / labels.elem_size) =
(static_cast<unsigned long long>(((foreground >> 5) & 1) * label) << 32) | (((foreground >> 4) & 1) * label);
if (y + 1 < labels.y) {
*reinterpret_cast<unsigned long long *>(labels.data + labels_index + labels.stepz / labels.elem_size + (labels.stepy / labels.elem_size)) =
(static_cast<unsigned long long>(((foreground >> 7) & 1) * label) << 32) | (((foreground >> 6) & 1) * label);
}
}
}
else {
labels[labels_index] = ((foreground >> 0) & 1) * label;
if (y + 1 < labels.y) {
labels[labels_index + (labels.stepy / labels.elem_size)] = ((foreground >> 2) & 1) * label;
}
if (z + 1 < labels.z) {
labels[labels_index + labels.stepz / labels.elem_size] = ((foreground >> 4) & 1) * label;
if (y + 1 < labels.y) {
labels[labels_index + labels.stepz / labels.elem_size + (labels.stepy / labels.elem_size)] = ((foreground >> 6) & 1) * label;
}
}
}
}
}
}
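// BUF_IC_3D: 3D block-based union-find labeling with 26-connectivity, built from the kernels
// above (InitLabeling, Merge, PathCompression, FinalLabeling) launched on a half-resolution grid.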
class BUF_IC_3D : public GpuLabeling3D<CONN_26> {
private:
dim3 grid_size_;
dim3 block_size_;
unsigned char* last_cube_fg_;
bool allocated_last_cude_fg_;
public:
BUF_IC_3D() {}
void PerformLabeling() {
d_img_labels_.create(d_img_.x, d_img_.y, d_img_.z, CV_32SC1);
allocated_last_cude_fg_ = false;
if ((d_img_.x % 2 == 1) && (d_img_.y % 2 == 1) && (d_img_.z % 2 == 1)) {
if (d_img_.x > 1 && d_img_.y > 1) {
last_cube_fg_ = reinterpret_cast<unsigned char*>(d_img_labels_.data + (d_img_labels_.z - 1) * d_img_labels_.stepz + (d_img_labels_.y - 2) * d_img_labels_.stepy) + d_img_labels_.x - 2;
}
else if (d_img_.x > 1 && d_img_.z > 1) {
last_cube_fg_ = reinterpret_cast<unsigned char*>(d_img_labels_.data + (d_img_labels_.z - 2) * d_img_labels_.stepz + (d_img_labels_.y - 1) * d_img_labels_.stepy) + d_img_labels_.x - 2;
}
else if (d_img_.y > 1 && d_img_.z > 1) {
last_cube_fg_ = reinterpret_cast<unsigned char*>(d_img_labels_.data + (d_img_labels_.z - 2) * d_img_labels_.stepz + (d_img_labels_.y - 2) * d_img_labels_.stepy) + d_img_labels_.x - 1;
}
else {
cudaMalloc(&last_cube_fg_, sizeof(unsigned char));
allocated_last_cude_fg_ = true;
}
}
grid_size_ = dim3(((d_img_.x + 1) / 2 + BLOCK_X - 1) / BLOCK_X, ((d_img_.y + 1) / 2 + BLOCK_Y - 1) / BLOCK_Y, ((d_img_.z + 1) / 2 + BLOCK_Z - 1) / BLOCK_Z);
block_size_ = dim3(BLOCK_X, BLOCK_Y, BLOCK_Z);
InitLabeling << <grid_size_, block_size_ >> > (d_img_labels_);
//cuda::GpuMat d_expanded_connections;
//d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1);
//ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections);
//Mat1b expanded_connections;
//d_expanded_connections.download(expanded_connections);
//d_expanded_connections.release();
//Mat1i init_labels;
//d_block_labels_.download(init_labels);
Merge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_cube_fg_);
//Mat1i block_info_final;
//d_img_labels_.download(block_info_final);
PathCompression << <grid_size_, block_size_ >> > (d_img_labels_);
FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_cube_fg_);
if (allocated_last_cude_fg_) {
cudaFree(last_cube_fg_);
}
// d_img_labels_.download(img_labels_);
cudaDeviceSynchronize();
}
private:
void Alloc() {
d_img_labels_.create(d_img_.x, d_img_.y, d_img_.z, CV_32SC1);
allocated_last_cude_fg_ = false;
if ((d_img_.x % 2 == 1) && (d_img_.y % 2 == 1) && (d_img_.z % 2 == 1)) {
if (d_img_.x > 1 && d_img_.y > 1) {
last_cube_fg_ = reinterpret_cast<unsigned char*>(d_img_labels_.data + (d_img_labels_.z - 1) * d_img_labels_.stepz + (d_img_labels_.y - 2) * d_img_labels_.stepy) + d_img_labels_.x - 2;
}
else if (d_img_.x > 1 && d_img_.z > 1) {
last_cube_fg_ = reinterpret_cast<unsigned char*>(d_img_labels_.data + (d_img_labels_.z - 2) * d_img_labels_.stepz + (d_img_labels_.y - 1) * d_img_labels_.stepy) + d_img_labels_.x - 2;
}
else if (d_img_.y > 1 && d_img_.z > 1) {
last_cube_fg_ = reinterpret_cast<unsigned char*>(d_img_labels_.data + (d_img_labels_.z - 2) * d_img_labels_.stepz + (d_img_labels_.y - 2) * d_img_labels_.stepy) + d_img_labels_.x - 1;
}
else {
cudaMalloc(&last_cube_fg_, sizeof(unsigned char));
allocated_last_cude_fg_ = true;
}
}
}
void Dealloc() {
if (allocated_last_cude_fg_) {
cudaFree(last_cube_fg_);
}
}
double MemoryTransferHostToDevice() {
perf_.start();
d_img_.upload(img_);
perf_.stop();
return perf_.last();
}
void MemoryTransferDeviceToHost() {
d_img_labels_.download(img_labels_);
}
void AllScans() {
grid_size_ = dim3(((d_img_.x + 1) / 2 + BLOCK_X - 1) / BLOCK_X, ((d_img_.y + 1) / 2 + BLOCK_Y - 1) / BLOCK_Y, ((d_img_.z + 1) / 2 + BLOCK_Z - 1) / BLOCK_Z);
block_size_ = dim3(BLOCK_X, BLOCK_Y, BLOCK_Z);
InitLabeling << <grid_size_, block_size_ >> > (d_img_labels_);
//cuda::GpuMat d_expanded_connections;
//d_expanded_connections.create(d_connections_.rows * 3, d_connections_.cols * 3, CV_8UC1);
//ExpandConnections << <grid_size_, block_size_ >> > (d_connections_, d_expanded_connections);
//Mat1b expanded_connections;
//d_expanded_connections.download(expanded_connections);
//d_expanded_connections.release();
//Mat1i init_labels;
//d_block_labels_.download(init_labels);
Merge << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_cube_fg_);
//Mat1i block_info_final;
//d_img_labels_.download(block_info_final);
PathCompression << <grid_size_, block_size_ >> > (d_img_labels_);
FinalLabeling << <grid_size_, block_size_ >> > (d_img_, d_img_labels_, last_cube_fg_);
cudaDeviceSynchronize();
}
public:
void PerformLabelingWithSteps()
{
perf_.start();
Alloc();
perf_.stop();
double alloc_timing = perf_.last();
perf_.start();
AllScans();
perf_.stop();
perf_.store(Step(StepType::ALL_SCANS), perf_.last());
perf_.start();
Dealloc();
perf_.stop();
double dealloc_timing = perf_.last();
perf_.store(Step(StepType::ALLOC_DEALLOC), alloc_timing + dealloc_timing);
}
};
REGISTER_LABELING(BUF_IC_3D);
|
40c1f4f98619841510e85f3222fc6a9b5f9d5bb8.hip
|
// !!! This is a file automatically generated by hipify!!!
///*
// * File: BucketSort.c
// * Author: vijay manoharan
// *. C program for running bucket sort on CUDA.
// * Created on November 8, 2014, 8:19 PM
// */
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#include <stdint.h>
//header file for random value generation.
#include "rnd.h"
#include "rnd.c"
//header file for cuda
#include <hip/hip_runtime.h>
int size;
/*
* Using quickselect to partially sort the array
* algorithm referenced from Wikipedia "http://en.wikipedia.org/wiki/Quickselect"
* Implementation is done by me.
*/
int partition(float *a, int left, int right, int pivot) {
float pivotValue = a[pivot], temp;
int index = left;
int i;
//move pivot to rightmost of the array
a[pivot] = a[right];
a[right] = pivotValue;
/* We walk the array from the left and compare each value with the pivot.
* Values smaller than the pivot are swapped towards the front of the range,
* so everything left of the pivot's final position is less than the pivot.
*/
for (i = left; i < right; i++) {
if (a[i] < pivotValue) {
temp = a[i];
a[i] = a[index];
a[index] = temp;
index++;
}
}
/* move the pivot into its final position */
temp = a[index];
a[index] = a[right];
a[right] = temp;
return index;
}
/* Returns the value that would occupy index `pivot` if the array were fully sorted.
* As a side effect the range is partially ordered: every element left of that index
* is smaller and every element to its right is greater or equal.
*/
float quickSelect(float *a, int left, int right, int pivot) {
//base condition if list contains one element return that
if (left == right)
return a[left];
/* select the pivot index between left and right
* i am selecting the mid value
*/
int pivotindex = (right + left) / 2;
pivotindex = partition(a, left, right, pivotindex);
//int size_leftarray = pivotposition - low + 1;
//pivot in its correct position
if (pivot == pivotindex)
return a[pivotindex];
else if (pivot < pivotindex)
return quickSelect(a, left, pivotindex - 1, pivot);
else
return quickSelect(a, pivotindex + 1, right, pivot);
}
//merging the sorted left and right of the merge sort
__device__ void merge(float *a, int low, int mid, int high) {
//printf("inside merge\n");
int i = 0;
int left = low, right = mid + 1;
int j;
float *temp =(float*) malloc((high - low + 1 )* sizeof(*temp) );
/* Create a temp sorted list
* get the min of left part and right part
* if min is in the left part iterate left or else iterate right
*/
while ((left <= mid)&&(right <= high)) {
if (a[left] < a[right])
temp[i++] = a[left++];
else
temp[i++] = a[right++];
}
//left part contains larger values
while (left <= mid)
temp[i++] = a[left++];
//right part contains larger values
while (right <= high)
temp[i++] = a[right++];
//copy the sorted values
for (j = 0; j < i; j++) {
a[low + j] = temp[j];
}
free(temp); // release the scratch buffer allocated from the device heap
}
//host-side merge step of the mergesort (used by Quickselect below)
void Quick(float *a, int low, int mid, int high) {
//printf("inside merge\n");
int i = 0;
int left = low, right = mid + 1;
int j;
float *temp =(float*) malloc((high - low + 1 )* sizeof(*temp) );
/* Create a temp sorted list
* get the min of left part and right part
* if min is in the left part iterate left or else iterate right
*/
while ((left <= mid)&&(right <= high)) {
if (a[left] < a[right])
temp[i++] = a[left++];
else
temp[i++] = a[right++];
}
//left part contains larger values
while (left <= mid)
temp[i++] = a[left++];
//right part contains larger values
while (right <= high)
temp[i++] = a[right++];
//copy the sorted values
for (j = 0; j < i; j++) {
a[low + j] = temp[j];
}
free(temp); // release the scratch buffer used for merging
}
__device__ void Mergesort(float *a, int low, int high) {
int mid;
if (low < high) {
mid = (low + high) / 2;
Mergesort(a, low, mid);
Mergesort(a, mid + 1, high);
merge(a, low, mid, high);
}
}
void Quickselect(float *a, int low, int high) {
int mid;
if (low < high) {
mid = (low + high) / 2;
Quickselect(a, low, mid);
Quickselect(a, mid + 1, high);
Quick(a, low, mid, high);
}
}
int isnumber(int size) {
int flag = 0;
return flag;
}
__global__ void cudaBucketSort(float *array, int *pivots) {
/* 1.call merge sort for each threads
* 2.Range is given based on the pivot values.
* 3.Each thread will sort its own data set.
*/
if(threadIdx.x==0){
int start = pivots[blockIdx.x],i;
int end = pivots[blockIdx.x +1];
float value=array[start];
printf("pivot index %d\t start %d \t end %d\n", (blockIdx.x ) , start, end);
Mergesort(array, start, end);
if(blockIdx.x==9)
array[start]=value;
}
}
extern "C" void cuda_main(float *array,int size ,int p_id) {
//variables for size and number of threads, blocks etc.
int BlockNum=10;
int Range=size/10;
int ThreadSize ;
if(size<1000)
ThreadSize=size;
else
ThreadSize=1000;
//variables for pivots
int pivots[11];
float pivots_value[11];
struct timeval tv1, tv2;
printf("the size is %d", size);
int i;
//cuda device variables
float *dev_array;
int *dev_pivots;
hipMalloc((void **) &dev_array, size * sizeof (float));
hipMalloc((void **) &dev_pivots, (11) * sizeof (int));
// for (i = 0; i < size; i++) {
// printf("%d-->> %f\t", i, array[i]);
// }
// set up bucket boundaries: pivots[k] is the first index of bucket k (pivots[10] == size)
gettimeofday(&tv1, NULL);
for (i = 0; i < 10; i++) {
pivots[i] = Range*i;
}
pivots[i] = size;
pivots_value[0] = 0;
pivots_value[i] = array[size - 1];
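/* The loop below uses quickSelect to put each boundary value in its sorted position, so bucket k
* (indices pivots[k]..pivots[k+1]-1) only contains values no larger than those of bucket k+1.
* Each of the 10 device blocks then merge-sorts one bucket; for inputs above 10000 elements the
* host additionally merge-sorts each bucket with Quickselect.
*/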
for (i = 1; i < 10; i++) {
pivots_value[i] = quickSelect(array, pivots[i - 1], size - 1, (pivots[i]));
if(size>10000)
Quickselect(array, pivots[i-1], pivots[i]);
//printf("is the %d largest value %f\n", pivots[i], pivots_value[i]);
//find 5th largest element
}
/* MergeSort here.
*
*/
hipMemcpy(dev_array, array, size * sizeof (float), hipMemcpyHostToDevice);
hipMemcpy(dev_pivots, pivots, (11) * sizeof (int), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( cudaBucketSort) , dim3(BlockNum), dim3(ThreadSize), 0, 0, dev_array, dev_pivots);
if(size<=10000)
hipMemcpy(array, dev_array, size * sizeof (float), hipMemcpyDeviceToHost);
//cuda function here.
printf("\nmergesort starts \n");
gettimeofday(&tv2, NULL);
//sort(array);
printf("sorted list is \n");
for (i = 0; i < size; i++) {
printf("%d-->> %f\n", i + p_id * size , array[i]);
}
// release device buffers
hipFree(dev_array);
hipFree(dev_pivots);
}
|
40c1f4f98619841510e85f3222fc6a9b5f9d5bb8.cu
|
///*
// * File: BucketSort.c
// * Author: vijay manoharan
// *. C program for running bucket sort on CUDA.
// * Created on November 8, 2014, 8:19 PM
// */
#include<stdio.h>
#include<stdlib.h>
#include<sys/time.h>
#include <stdint.h>
//header file for random value generation.
#include "rnd.h"
#include "rnd.c"
//header file for cuda
#include <cuda.h>
int size;
/*
* Using quickselect to partially sort the array
* algorithm referenced from Wikipedia "http://en.wikipedia.org/wiki/Quickselect"
* Implementation is done by me.
*/
int partition(float *a, int left, int right, int pivot) {
float pivotValue = a[pivot], temp;
int index = left;
int i;
//move pivot to rightmost of the array
a[pivot] = a[right];
a[right] = pivotValue;
/* We walk the array from the left and compare each value with the pivot.
* Values smaller than the pivot are swapped towards the front of the range,
* so everything left of the pivot's final position is less than the pivot.
*/
for (i = left; i < right; i++) {
if (a[i] < pivotValue) {
temp = a[i];
a[i] = a[index];
a[index] = temp;
index++;
}
}
/* move the pivot into its final position */
temp = a[index];
a[index] = a[right];
a[right] = temp;
return index;
}
/* Returns the value that would occupy index `pivot` if the array were fully sorted.
* As a side effect the range is partially ordered: every element left of that index
* is smaller and every element to its right is greater or equal.
*/
float quickSelect(float *a, int left, int right, int pivot) {
//base condition if list contains one element return that
if (left == right)
return a[left];
/* select the pivot index between left and right
* i am selecting the mid value
*/
int pivotindex = (right + left) / 2;
pivotindex = partition(a, left, right, pivotindex);
//int size_leftarray = pivotposition - low + 1;
//pivot in its correct position
if (pivot == pivotindex)
return a[pivotindex];
else if (pivot < pivotindex)
return quickSelect(a, left, pivotindex - 1, pivot);
else
return quickSelect(a, pivotindex + 1, right, pivot);
}
//merging the sorted left and right of the merge sort
__device__ void merge(float *a, int low, int mid, int high) {
//printf("inside merge\n");
int i = 0;
int left = low, right = mid + 1;
int j;
float *temp =(float*) malloc((high - low + 1 )* sizeof(*temp) );
/* Create a temp sorted list
* get the min of left part and right part
* if min is in the left part iterate left or else iterate right
*/
while ((left <= mid)&&(right <= high)) {
if (a[left] < a[right])
temp[i++] = a[left++];
else
temp[i++] = a[right++];
}
//left part contains larger values
while (left <= mid)
temp[i++] = a[left++];
//right part contains larger values
while (right <= high)
temp[i++] = a[right++];
//copy the sorted values
for (j = 0; j < i; j++) {
a[low + j] = temp[j];
}
free(temp); // release the scratch buffer allocated from the device heap
}
//host-side merge step of the mergesort (used by Quickselect below)
void Quick(float *a, int low, int mid, int high) {
//printf("inside merge\n");
int i = 0;
int left = low, right = mid + 1;
int j;
float *temp =(float*) malloc((high - low + 1 )* sizeof(*temp) );
/* Create a temp sorted list
* get the min of left part and right part
* if min is in the left part iterate left or else iterate right
*/
while ((left <= mid)&&(right <= high)) {
if (a[left] < a[right])
temp[i++] = a[left++];
else
temp[i++] = a[right++];
}
//left part contains larger values
while (left <= mid)
temp[i++] = a[left++];
//right part contains larger values
while (right <= high)
temp[i++] = a[right++];
//copy the sorted values
for (j = 0; j < i; j++) {
a[low + j] = temp[j];
}
free(temp); // release the scratch buffer used for merging
}
__device__ void Mergesort(float *a, int low, int high) {
int mid;
if (low < high) {
mid = (low + high) / 2;
Mergesort(a, low, mid);
Mergesort(a, mid + 1, high);
merge(a, low, mid, high);
}
}
void Quickselect(float *a, int low, int high) {
int mid;
if (low < high) {
mid = (low + high) / 2;
Quickselect(a, low, mid);
Quickselect(a, mid + 1, high);
Quick(a, low, mid, high);
}
}
int isnumber(int size) {
int flag = 0;
return flag;
}
__global__ void cudaBucketSort(float *array, int *pivots) {
/* 1.call merge sort for each threads
* 2.Range is given based on the pivot values.
* 3.Each thread will sort its own data set.
*/
if(threadIdx.x==0){
int start = pivots[blockIdx.x],i;
int end = pivots[blockIdx.x +1];
float value=array[start];
printf("pivot index %d\t start %d \t end %d\n", (blockIdx.x ) , start, end);
Mergesort(array, start, end);
if(blockIdx.x==9)
array[start]=value;
}
}
extern "C" void cuda_main(float *array,int size ,int p_id) {
//variables for size and number of threads, blocks etc.
int BlockNum=10;
int Range=size/10;
int ThreadSize ;
if(size<1000)
ThreadSize=size;
else
ThreadSize=1000;
//variables for pivots
int pivots[11];
float pivots_value[11];
struct timeval tv1, tv2;
printf("the size is %d", size);
int i;
//cuda device variables
float *dev_array;
int *dev_pivots;
cudaMalloc((void **) &dev_array, size * sizeof (float));
cudaMalloc((void **) &dev_pivots, (11) * sizeof (int));
// for (i = 0; i < size; i++) {
// printf("%d-->> %f\t", i, array[i]);
// }
// set up bucket boundaries: pivots[k] is the first index of bucket k (pivots[10] == size)
gettimeofday(&tv1, NULL);
for (i = 0; i < 10; i++) {
pivots[i] = Range*i;
}
pivots[i] = size;
pivots_value[0] = 0;
pivots_value[i] = array[size - 1];
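/* The loop below uses quickSelect to put each boundary value in its sorted position, so bucket k
* (indices pivots[k]..pivots[k+1]-1) only contains values no larger than those of bucket k+1.
* Each of the 10 device blocks then merge-sorts one bucket; for inputs above 10000 elements the
* host additionally merge-sorts each bucket with Quickselect.
*/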
for (i = 1; i < 10; i++) {
pivots_value[i] = quickSelect(array, pivots[i - 1], size - 1, (pivots[i]));
if(size>10000)
Quickselect(array, pivots[i-1], pivots[i]);
//printf("is the %d largest value %f\n", pivots[i], pivots_value[i]);
//find 5th largest element
}
/* MergeSort here.
*
*/
cudaMemcpy(dev_array, array, size * sizeof (float), cudaMemcpyHostToDevice);
cudaMemcpy(dev_pivots, pivots, (11) * sizeof (int), cudaMemcpyHostToDevice);
cudaBucketSort <<<BlockNum, ThreadSize>>>(dev_array, dev_pivots);
if(size<=10000)
cudaMemcpy(array, dev_array, size * sizeof (float), cudaMemcpyDeviceToHost);
//cuda function here.
printf("\nmergesort starts \n");
gettimeofday(&tv2, NULL);
//sort(array);
printf("sorted list is \n");
for (i = 0; i < size; i++) {
printf("%d-->> %f\n", i + p_id * size , array[i]);
}
// release device buffers
cudaFree(dev_array);
cudaFree(dev_pivots);
}
|
9dc3a9fe8df751eddc33984a7e0cb6c6dc7d2eea.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 20.04.2018
//
#include<ops/declarable/helpers/transforms.h>
#include <array/ResultSet.h>
#include <helpers/ShapeUtils.h>
#include <numeric>
#include <array/NDArrayFactory.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
//////////////////////////////////////////////////////////////////////////
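// Grid-stride kernel: each thread visits elements e = tid, tid + step, ... and stores, for every
// output position, the index of the input array holding the maximum value at that position.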
template <typename T, typename Z>
static __global__ void global_mergeMaxIndex_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
auto output = reinterpret_cast<Z*>(voutput);
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (Nd4jLong e = tid; e < length; e += step) {
T mVal = -DataTypeUtils::max<T>();
Z mIdx(0);
for (int i = 0; i < numArrays; i++) {
auto x = reinterpret_cast<T*>(inArrs[i]);
auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
auto val = x[shape::getIndexOffset(e, xShape)];
if (mVal < val) {
mIdx = static_cast<Z>(i);
mVal = val;
}
}
__syncthreads();
output[shape::getIndexOffset(e, outputShape)] = mIdx;
}
}
template <typename T, typename Z>
static void mergeMaxIndex_(sd::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
std::vector<void *> inBuffers(inArrs.size());
std::vector<void *> inShapes(inArrs.size());
for (int e = 0; e < inArrs.size(); e++) {
inBuffers[e] = inArrs[e]->getSpecialBuffer();
inShapes[e] = inArrs[e]->getSpecialShapeInfo();
}
PointersManager manager(context, "mergeMaxIndex");
auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *)));
auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *)));
auto length = output.lengthOf();
hipLaunchKernelGGL(( global_mergeMaxIndex_<T,Z>), dim3(512), dim3(512), 512, *context->getCudaStream(), pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length);
manager.synchronize();
}
void mergeMaxIndex(sd::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
NDArray::prepareSpecialUse({&output}, {});
for (auto v:inArrs)
v->syncToDevice();
BUILD_DOUBLE_SELECTOR(inArrs[0]->dataType(), output.dataType(), mergeMaxIndex_, (context, inArrs, output), LIBND4J_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({&output}, {});
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static __global__ void global_mergeMax_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
auto output = reinterpret_cast<T*>(voutput);
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (Nd4jLong e = tid; e < length; e += step) {
T mVal = -DataTypeUtils::max<T>();
for (int i = 0; i < numArrays; i++) {
auto x = reinterpret_cast<T*>(inArrs[i]);
auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
auto val = x[shape::getIndexOffset(e, xShape)];
if (mVal < val)
mVal = val;
}
__syncthreads();
output[shape::getIndexOffset(e, outputShape)] = mVal;
}
}
template<typename T>
static void mergeMax_(sd::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
std::vector<void *> inBuffers(inArrs.size());
std::vector<void *> inShapes(inArrs.size());
for (int e = 0; e < inArrs.size(); e++) {
inBuffers[e] = inArrs[e]->getSpecialBuffer();
inShapes[e] = inArrs[e]->getSpecialShapeInfo();
}
PointersManager manager(context, "mergeMax");
auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *)));
auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *)));
auto length = output.lengthOf();
hipLaunchKernelGGL(( global_mergeMax_<T>), dim3(512), dim3(512), 512, *context->getCudaStream(), pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length);
manager.synchronize();
}
void mergeMax(sd::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
NDArray::prepareSpecialUse({&output}, {});
for (auto v:inArrs)
v->syncToDevice();
BUILD_SINGLE_SELECTOR(output.dataType(), mergeMax_, (context, inArrs, output), LIBND4J_TYPES);
NDArray::registerSpecialUse({&output}, {});
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static __global__ void global_mergeAvg_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
auto output = reinterpret_cast<T*>(voutput);
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (Nd4jLong e = tid; e < length; e += step) {
T sum(0.0f);
for (int i = 0; i < numArrays; i++) {
auto x = reinterpret_cast<T*>(inArrs[i]);
auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
sum += x[shape::getIndexOffset(e, xShape)];
}
output[shape::getIndexOffset(e, outputShape)] = sum / numArrays;
}
}
template<typename T>
static void mergeAvg_(sd::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
std::vector<void *> inBuffers(inArrs.size());
std::vector<void *> inShapes(inArrs.size());
for (int e = 0; e < inArrs.size(); e++) {
inBuffers[e] = inArrs[e]->getSpecialBuffer();
inShapes[e] = inArrs[e]->getSpecialShapeInfo();
}
PointersManager manager(context, "mergeAvg");
auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *)));
auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *)));
auto length = output.lengthOf();
hipLaunchKernelGGL(( global_mergeAvg_<T>), dim3(512), dim3(512), 512, *context->getCudaStream(), pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length);
manager.synchronize();
}
void mergeAvg(sd::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
NDArray::prepareSpecialUse({&output}, {});
for (auto v:inArrs)
v->syncToDevice();
BUILD_SINGLE_SELECTOR(output.dataType(), mergeAvg_, (context, inArrs, output), FLOAT_TYPES);
NDArray::registerSpecialUse({&output}, {});
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static __global__ void global_mergeAdd_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
auto output = reinterpret_cast<T*>(voutput);
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (Nd4jLong e = tid; e < length; e += step) {
T sum(0.0f);
for (int i = 0; i < numArrays; i++) {
auto x = reinterpret_cast<T*>(inArrs[i]);
auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
sum += x[shape::getIndexOffset(e, xShape)];
}
output[shape::getIndexOffset(e, outputShape)] = sum;
}
}
template<typename T>
static void mergeAdd_(sd::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
std::vector<void *> inBuffers(inArrs.size());
std::vector<void *> inShapes(inArrs.size());
for (int e = 0; e < inArrs.size(); e++) {
inBuffers[e] = inArrs[e]->getSpecialBuffer();
inShapes[e] = inArrs[e]->getSpecialShapeInfo();
}
PointersManager manager(context, "mergeAdd");
auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *)));
auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *)));
auto length = output.lengthOf();
hipLaunchKernelGGL(( global_mergeAdd_<T>), dim3(512), dim3(512), 512, *context->getCudaStream(), pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length);
manager.synchronize();
}
BUILD_SINGLE_TEMPLATE(template void mergeAdd_, (sd::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output), NUMERIC_TYPES);
void mergeAdd(sd::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
NDArray::prepareSpecialUse({&output}, {});
for (auto v:inArrs)
v->syncToDevice();
BUILD_SINGLE_SELECTOR(output.dataType(), mergeAdd_, (context, inArrs, output), NUMERIC_TYPES);
NDArray::registerSpecialUse({&output}, {});
}
}
}
}
|
9dc3a9fe8df751eddc33984a7e0cb6c6dc7d2eea.cu
|
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 20.04.2018
//
#include<ops/declarable/helpers/transforms.h>
#include <array/ResultSet.h>
#include <helpers/ShapeUtils.h>
#include <numeric>
#include <array/NDArrayFactory.h>
#include <helpers/TAD.h>
#include <exceptions/cuda_exception.h>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
//////////////////////////////////////////////////////////////////////////
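// Grid-stride kernel: each thread visits elements e = tid, tid + step, ... and stores, for every
// output position, the index of the input array holding the maximum value at that position.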
template <typename T, typename Z>
static __global__ void global_mergeMaxIndex_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
auto output = reinterpret_cast<Z*>(voutput);
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (Nd4jLong e = tid; e < length; e += step) {
T mVal = -DataTypeUtils::max<T>();
Z mIdx(0);
for (int i = 0; i < numArrays; i++) {
auto x = reinterpret_cast<T*>(inArrs[i]);
auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
auto val = x[shape::getIndexOffset(e, xShape)];
if (mVal < val) {
mIdx = static_cast<Z>(i);
mVal = val;
}
}
__syncthreads();
output[shape::getIndexOffset(e, outputShape)] = mIdx;
}
}
template <typename T, typename Z>
static void mergeMaxIndex_(sd::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
std::vector<void *> inBuffers(inArrs.size());
std::vector<void *> inShapes(inArrs.size());
for (int e = 0; e < inArrs.size(); e++) {
inBuffers[e] = inArrs[e]->getSpecialBuffer();
inShapes[e] = inArrs[e]->getSpecialShapeInfo();
}
PointersManager manager(context, "mergeMaxIndex");
auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *)));
auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *)));
auto length = output.lengthOf();
global_mergeMaxIndex_<T,Z><<<512, 512, 512, *context->getCudaStream()>>>(pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length);
manager.synchronize();
}
void mergeMaxIndex(sd::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
NDArray::prepareSpecialUse({&output}, {});
for (auto v:inArrs)
v->syncToDevice();
BUILD_DOUBLE_SELECTOR(inArrs[0]->dataType(), output.dataType(), mergeMaxIndex_, (context, inArrs, output), LIBND4J_TYPES, INDEXING_TYPES);
NDArray::registerSpecialUse({&output}, {});
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static __global__ void global_mergeMax_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
auto output = reinterpret_cast<T*>(voutput);
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (Nd4jLong e = tid; e < length; e += step) {
T mVal = -DataTypeUtils::max<T>();
for (int i = 0; i < numArrays; i++) {
auto x = reinterpret_cast<T*>(inArrs[i]);
auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
auto val = x[shape::getIndexOffset(e, xShape)];
if (mVal < val)
mVal = val;
}
__syncthreads();
output[shape::getIndexOffset(e, outputShape)] = mVal;
}
}
template<typename T>
static void mergeMax_(sd::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
std::vector<void *> inBuffers(inArrs.size());
std::vector<void *> inShapes(inArrs.size());
for (int e = 0; e < inArrs.size(); e++) {
inBuffers[e] = inArrs[e]->getSpecialBuffer();
inShapes[e] = inArrs[e]->getSpecialShapeInfo();
}
PointersManager manager(context, "mergeMax");
auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *)));
auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *)));
auto length = output.lengthOf();
global_mergeMax_<T><<<512, 512, 512, *context->getCudaStream()>>>(pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length);
manager.synchronize();
}
void mergeMax(sd::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
NDArray::prepareSpecialUse({&output}, {});
for (auto v:inArrs)
v->syncToDevice();
BUILD_SINGLE_SELECTOR(output.dataType(), mergeMax_, (context, inArrs, output), LIBND4J_TYPES);
NDArray::registerSpecialUse({&output}, {});
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static __global__ void global_mergeAvg_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
auto output = reinterpret_cast<T*>(voutput);
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (Nd4jLong e = tid; e < length; e += step) {
T sum(0.0f);
for (int i = 0; i < numArrays; i++) {
auto x = reinterpret_cast<T*>(inArrs[i]);
auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
sum += x[shape::getIndexOffset(e, xShape)];
}
output[shape::getIndexOffset(e, outputShape)] = sum / numArrays;
}
}
template<typename T>
static void mergeAvg_(sd::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
std::vector<void *> inBuffers(inArrs.size());
std::vector<void *> inShapes(inArrs.size());
for (int e = 0; e < inArrs.size(); e++) {
inBuffers[e] = inArrs[e]->getSpecialBuffer();
inShapes[e] = inArrs[e]->getSpecialShapeInfo();
}
PointersManager manager(context, "mergeAvg");
auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *)));
auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *)));
auto length = output.lengthOf();
global_mergeAvg_<T><<<512, 512, 512, *context->getCudaStream()>>>(pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length);
manager.synchronize();
}
void mergeAvg(sd::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
NDArray::prepareSpecialUse({&output}, {});
for (auto v:inArrs)
v->syncToDevice();
BUILD_SINGLE_SELECTOR(output.dataType(), mergeAvg_, (context, inArrs, output), FLOAT_TYPES);
NDArray::registerSpecialUse({&output}, {});
}
//////////////////////////////////////////////////////////////////////////
template <typename T>
static __global__ void global_mergeAdd_(void **inArrs, void **inShapes, const int numArrays, void *voutput, Nd4jLong *outputShape, Nd4jLong length) {
auto output = reinterpret_cast<T*>(voutput);
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
const auto step = gridDim.x * blockDim.x;
for (Nd4jLong e = tid; e < length; e += step) {
T sum(0.0f);
for (int i = 0; i < numArrays; i++) {
auto x = reinterpret_cast<T*>(inArrs[i]);
auto xShape = reinterpret_cast<Nd4jLong *>(inShapes[i]);
sum += x[shape::getIndexOffset(e, xShape)];
}
output[shape::getIndexOffset(e, outputShape)] = sum;
}
}
template<typename T>
static void mergeAdd_(sd::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
std::vector<void *> inBuffers(inArrs.size());
std::vector<void *> inShapes(inArrs.size());
for (int e = 0; e < inArrs.size(); e++) {
inBuffers[e] = inArrs[e]->getSpecialBuffer();
inShapes[e] = inArrs[e]->getSpecialShapeInfo();
}
PointersManager manager(context, "mergeAdd");
auto pInBuffers = reinterpret_cast<void **>(manager.replicatePointer(inBuffers.data(), inBuffers.size() * sizeof(void *)));
auto pInShapes = reinterpret_cast<void **>(manager.replicatePointer(inShapes.data(), inShapes.size() * sizeof(void *)));
auto length = output.lengthOf();
global_mergeAdd_<T><<<512, 512, 512, *context->getCudaStream()>>>(pInBuffers, pInShapes, (int) inArrs.size(), output.getSpecialBuffer(), output.getSpecialShapeInfo(), length);
manager.synchronize();
}
BUILD_SINGLE_TEMPLATE(template void mergeAdd_, (sd::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output), NUMERIC_TYPES);
void mergeAdd(sd::LaunchContext * context, const std::vector<NDArray*>& inArrs, NDArray& output) {
NDArray::prepareSpecialUse({&output}, {});
for (auto v:inArrs)
v->syncToDevice();
BUILD_SINGLE_SELECTOR(output.dataType(), mergeAdd_, (context, inArrs, output), NUMERIC_TYPES);
NDArray::registerSpecialUse({&output}, {});
}
}
}
}
|
1481d4bb4d90009dcafec5bc69cd543ecdf182d5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "nbody_helper.h"
#include "nbody_helper_cuda.h"
//__global__ void bodyBodyInteraction (float3 ai, float *o_r, float *m, unsigned int j, unsigned int i)
//{
// float3 r;
// r.x = o_r[ND*j] - o_r[ND*i];
// r.y = o_r[ND*j+1] - o_r[ND*i+1];
// r.z = o_r[ND*j+2] - o_r[ND*i+2];
//
// float rDistSquared = r.x*r.x + r.y*r.y + r.z*r.z + SOFTENING;
// float MinvDistCubed = m[j] * rsqrtf(rDistSquared*rDistSquared*rDistSquared);
//
// ai.x = r.x * MinvDistCubed;
// ai.y = r.y * MinvDistCubed;
// ai.z = r.z * MinvDistCubed;
//}
int main (int argc, char *argv[])
{
if (argc > 3) {
printf("Error: Wrong number of arguments.\n");
exit(EXIT_FAILURE);
}
unsigned long nElem = 16384;
unsigned long nIter = 100;
char *ptr1, *ptr2;
if (argc > 1)
nElem = strtoul(argv[1], &ptr1, 10);
if (argc > 2)
nIter = strtoul(argv[2], &ptr2, 10);
////////////////////////////////////////////////////////////////
/// SETTING UP DEVICE
////////////////////////////////////////////////////////////////
int dev = 0, driverVersion = 0, runtimeVersion = 0;
hipDeviceProp_t deviceProp;
checkCudaErrors (hipGetDeviceProperties (&deviceProp, dev));
checkCudaErrors (hipSetDevice (dev));
checkCudaErrors (hipDriverGetVersion (&driverVersion));
checkCudaErrors (hipRuntimeGetVersion (&runtimeVersion));
print_deviceProperties (dev, driverVersion, runtimeVersion, deviceProp);
print_simulationParameters (nElem, nIter, NUM_CPU_THREADS);
////////////////////////////////////////////////////////////////
/// INITIALIZING SIMULATION
////////////////////////////////////////////////////////////////
float4 *h_r[2], *h_v, *h_a;
float4 *d_r[2], *d_v, *d_a;
float4 *h_dref_r, *h_dref_v, *h_dref_a;
size_t nBytes = nElem * sizeof(float4);
h_r[0] = (float4 *) malloc(nBytes);
h_r[1] = (float4 *) malloc(nBytes);
h_v = (float4 *) malloc(nBytes);
h_a = (float4 *) malloc(nBytes);
checkCudaErrors (hipMalloc ((void**) &(d_r[0]), nBytes));
checkCudaErrors (hipMalloc ((void**) &(d_r[1]), nBytes));
checkCudaErrors (hipMalloc ((void**) &(d_v), nBytes));
checkCudaErrors (hipMalloc ((void**) &(d_a), nBytes));
// allocating page-locked memory for higher communication bandwidth during real-time vis.
checkCudaErrors (hipHostMalloc ((void**) &h_dref_r, nBytes));
checkCudaErrors (hipHostMalloc ((void**) &h_dref_v, nBytes));
checkCudaErrors (hipHostMalloc ((void**) &h_dref_a, nBytes));
memset (h_r[0], 0, nBytes);
memset (h_r[1], 0, nBytes);
memset (h_v, 0, nBytes);
memset (h_a, 0, nBytes);
// initialize data on the host side and then transfer to the device
printf("Initializing bodies on HOST. Time taken: ");
double time0 = getTimeStamp();
init_MassPositionVelocity();
// for portability, explicitly create threads in a joinable state
pthread_t threads [NUM_CPU_THREADS];
pthread_attr_t attr;
pthread_attr_init (&attr);
pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_JOINABLE);
// creating the threads to calculate initial body accelerations on HOST
unsigned long i;
int rc;
void *status;
for (i=0; i<NUM_CPU_THREADS; i++) {
rc = pthread_create (&threads[i], &attr, init_Acceleration_SMT, (void *) i);
if (rc) {
printf("Error; return code from pthread_create() is %d.\n", rc);
exit(EXIT_FAILURE);
}
}
// wait on the other threads after initial body accelerations on HOST
for (i=0; i<NUM_CPU_THREADS; i++) {
rc = pthread_join (threads[i], &status);
if (rc) {
printf("ERROR; return code from pthread_join() is %d.\n", rc);
exit(EXIT_FAILURE);
}
}
printf ("%lfs\n", getTimeStamp()-time0);
//print_BodyStats(h_m, h_r1, h_v1, h_a1);
// copying initialized data from host to device
checkCudaErrors (hipMemcpy (d_r[0], h_r[0], nBytes, hipMemcpyHostToDevice));
checkCudaErrors (hipMemcpy (d_r[1], h_r[1], nBytes, hipMemcpyHostToDevice));
checkCudaErrors (hipMemcpy (d_v, h_v, nBytes, hipMemcpyHostToDevice));
checkCudaErrors (hipMemcpy (d_a, h_a, nBytes, hipMemcpyHostToDevice));
////////////////////////////////////////////////////////////////
/// PERFORMING SIMULATION ON DEVICE
////////////////////////////////////////////////////////////////
dim3 block_size (1024);
dim3 grid_size ((nElem + block_size.x-1)/(block_size.x));
unsigned int nTiles = (nElem + block_size.x-1)/block_size.x;
double timestamp_GPU_start = getTimeStamp();
for (unsigned iter=0; iter<nIter; iter++) {
hipLaunchKernelGGL(( calcIntegration) , dim3(grid_size), dim3(block_size), 0, 0,
d_r[(iter+1)%2], // pointer to new positions
d_r[iter%2], // pointer to curr positions
d_v, // pointer to curr velocities
d_a, // pointer to curr accelerations
nElem, // number of bodies in simulation
nTiles); // number of shared memory sections per block
hipDeviceSynchronize ();
hipMemcpy(h_dref_r, d_r[(iter+1)%2], nBytes, hipMemcpyDeviceToHost); // copy updated positions back each step
// hipMemcpy(gref_r, d_r2, nBytes*2, hipMemcpyDeviceToHost);
// hipMemcpy(gref_v, d_v2, nBytes*2, hipMemcpyDeviceToHost);
// hipMemcpy(gref_a, d_a2, nBytes*2, hipMemcpyDeviceToHost);
// if (iter%1000 == 0)
// print_BodyStats (gref_m, gref_r, gref_v, gref_a);
}
double timestamp_GPU_end = getTimeStamp();
double elapsedTime = timestamp_GPU_end - timestamp_GPU_start;
////////////////////////////////////////////////////////////////
/// SIMULATION COMPLETE
////////////////////////////////////////////////////////////////
hipFree (d_r[0]); hipFree (d_r[1]);
hipFree (d_v);
hipFree (d_a);
hipHostFree (h_dref_r);
hipHostFree (h_dref_v);
hipHostFree (h_dref_a);
checkCudaErrors (hipDeviceReset());
printf("Device successfully reset.\n");
printf("\nElapsed Time: %lfs\n", elapsedTime);
printf("Average timestep simulation duration: %lfs\n", elapsedTime/nIter);
free (h_r[0]); free (h_r[1]);
free (h_v);
free (h_a);
return 0;
}
__device__ float3 bodyBodyInteraction (float3 ai, float4 bi, float4 bj)
{
float3 dist;
dist.x = bj.x - bi.x;
dist.y = bj.y - bi.y;
dist.z = bj.z - bi.z;
float distSqr = dot(dist, dist) + SOFTENING;
float invDistCube = rsqrtf(distSqr * distSqr * distSqr);
float s = bj.w * invDistCube;
ai.x += s * dist.x;
ai.y += s * dist.y;
ai.z += s * dist.z;
return ai;
}
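// Tiled all-pairs acceleration: each tile stages blockDim.x body positions in shared memory, then
// every thread accumulates its own body's interactions with that tile before the next tile loads.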
__device__ float4 calcAcceleration (const float4 *devX, unsigned nTiles)
{
unsigned gtid = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ float4 shPosition4[];
float4 myPosition4;
float3 acc3 = {0.0f, 0.0f, 0.0f};
myPosition4 = devX[gtid];
for (unsigned tile=0; tile<nTiles; tile++) {
shPosition4[threadIdx.x] = devX[ tile*blockDim.x + threadIdx.x ];
__syncthreads(); // Wait for all threads in block to load data
// ... into shared memory
#pragma unroll 4
for (unsigned j=0; j<blockDim.x; j++)
acc3 = bodyBodyInteraction(acc3, myPosition4, shPosition4[j]);
__syncthreads(); // wait for all threads in block to complete their
// ... computations to not overwrite sh. mem.
}
float4 acc4 = {acc3.x, acc3.y, acc3.z, 0.0f};
return acc4;
}
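// One velocity-Verlet step per body: advance the position with the old velocity and acceleration,
// recompute the acceleration, then update the velocity with the average of old and new acceleration.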
__global__ void calcIntegration (float4 *devX_ip1, const float4 *devX_i,
float4 *devV_i, float4 *devA_i, const unsigned nElem, const unsigned nTiles)
{
unsigned gtid = blockIdx.x * blockDim.x + threadIdx.x;
if (gtid < nElem) {
float4 old_acc = devA_i[gtid];
float4 old_vel = devV_i[gtid];
float4 old_pos = devX_i[gtid];
float4 new_pos = old_pos + scalevec(old_vel, DT) + scalevec(old_acc, DTSQd2);
float4 new_acc = calcAcceleration (devX_i, nTiles);
float4 new_vel = old_vel + scalevec(old_acc + new_acc, DTd2); // assuming the same scalevec helper used above
devA_i [gtid] = new_acc;
devV_i [gtid] = new_vel;
devX_ip1[gtid] = new_pos;
}
}
|
1481d4bb4d90009dcafec5bc69cd543ecdf182d5.cu
|
#include "nbody_helper.h"
#include "nbody_helper_cuda.h"
//__global__ void bodyBodyInteraction (float3 ai, float *o_r, float *m, unsigned int j, unsigned int i)
//{
// float3 r;
// r.x = o_r[ND*j] - o_r[ND*i];
// r.y = o_r[ND*j+1] - o_r[ND*i+1];
// r.z = o_r[ND*j+2] - o_r[ND*i+2];
//
// float rDistSquared = r.x*r.x + r.y*r.y + r.z*r.z + SOFTENING;
// float MinvDistCubed = m[j] * rsqrtf(rDistSquared*rDistSquared*rDistSquared);
//
// ai.x = r.x * MinvDistCubed;
// ai.y = r.y * MinvDistCubed;
// ai.z = r.z * MinvDistCubed;
//}
int main (int argc, char *argv[])
{
if (argc > 3) {
printf("Error: Wrong number of arguments.\n");
exit(EXIT_FAILURE);
}
unsigned long nElem = 16384;
unsigned long nIter = 100;
char *ptr1, *ptr2;
if (argc > 1)
nElem = strtoul(argv[1], &ptr1, 10);
if (argc > 2)
nIter = strtoul(argv[2], &ptr2, 10);
////////////////////////////////////////////////////////////////
/// SETTING UP DEVICE
////////////////////////////////////////////////////////////////
int dev = 0, driverVersion = 0, runtimeVersion = 0;
cudaDeviceProp deviceProp;
checkCudaErrors (cudaGetDeviceProperties (&deviceProp, dev));
checkCudaErrors (cudaSetDevice (dev));
checkCudaErrors (cudaDriverGetVersion (&driverVersion));
checkCudaErrors (cudaRuntimeGetVersion (&runtimeVersion));
print_deviceProperties (dev, driverVersion, runtimeVersion, deviceProp);
print_simulationParameters (nElem, nIter, NUM_CPU_THREADS);
////////////////////////////////////////////////////////////////
/// INITIALIZING SIMULATION
////////////////////////////////////////////////////////////////
float4 *h_r[2], *h_v, *h_a;
float4 *d_r[2], *d_v, *d_a;
float4 *h_dref_r, *h_dref_v, *h_dref_a;
size_t nBytes = nElem * sizeof(float4);
h_r[0] = (float4 *) malloc(nBytes);
h_r[1] = (float4 *) malloc(nBytes);
h_v = (float4 *) malloc(nBytes);
h_a = (float4 *) malloc(nBytes);
checkCudaErrors (cudaMalloc ((void**) &(d_r[0]), nBytes));
checkCudaErrors (cudaMalloc ((void**) &(d_r[1]), nBytes));
checkCudaErrors (cudaMalloc ((void**) &(d_v), nBytes));
checkCudaErrors (cudaMalloc ((void**) &(d_a), nBytes));
// allocating page-locked memory for higher communication bandwidth during real-time vis.
checkCudaErrors (cudaMallocHost ((void**) &h_dref_r, nBytes));
checkCudaErrors (cudaMallocHost ((void**) &h_dref_v, nBytes));
checkCudaErrors (cudaMallocHost ((void**) &h_dref_a, nBytes));
memset (h_r[0], 0, nBytes);
memset (h_r[1], 0, nBytes);
memset (h_v, 0, nBytes);
memset (h_a, 0, nBytes);
// initialize data on the host side and then transfer to the device
printf("Initializing bodies on HOST. Time taken: ");
double time0 = getTimeStamp();
init_MassPositionVelocity();
// for portability, explicitly create threads in a joinable state
pthread_t threads [NUM_CPU_THREADS];
pthread_attr_t attr;
pthread_attr_init (&attr);
pthread_attr_setdetachstate (&attr, PTHREAD_CREATE_JOINABLE);
// creating the threads to calculate initial body accelerations on HOST
unsigned long i;
int rc;
void *status;
for (i=0; i<NUM_CPU_THREADS; i++) {
rc = pthread_create (&threads[i], &attr, init_Acceleration_SMT, (void *) i);
if (rc) {
printf("Error; return code from pthread_create() is %d.\n", rc);
exit(EXIT_FAILURE);
}
}
// wait on the other threads after initial body accelerations on HOST
for (i=0; i<NUM_CPU_THREADS; i++) {
rc = pthread_join (threads[i], &status);
if (rc) {
printf("ERROR; return code from pthread_join() is %d.\n", rc);
exit(EXIT_FAILURE);
}
}
printf ("%lfs\n", getTimeStamp()-time0);
//print_BodyStats(h_m, h_r1, h_v1, h_a1);
// copying initialized data from host to device
checkCudaErrors (cudaMemcpy (d_r[0], h_r[0], nBytes, cudaMemcpyHostToDevice));
checkCudaErrors (cudaMemcpy (d_r[1], h_r[1], nBytes, cudaMemcpyHostToDevice));
checkCudaErrors (cudaMemcpy (d_v, h_v, nBytes, cudaMemcpyHostToDevice));
checkCudaErrors (cudaMemcpy (d_a, h_a, nBytes, cudaMemcpyHostToDevice));
////////////////////////////////////////////////////////////////
/// PERFORMING SIMULATION ON DEVICE
////////////////////////////////////////////////////////////////
dim3 block_size (1024);
dim3 grid_size ((nElem + block_size.x-1)/(block_size.x));
unsigned int nTiles = (nElem + block_size.x-1)/block_size.x;
double timestamp_GPU_start = getTimeStamp();
for (unsigned iter=0; iter<nIter; iter++) {
calcIntegration <<<grid_size, block_size, 0, 0>>> (
d_r[(iter+1)%2], // pointer to new positions
d_r[iter%2], // pointer to curr positions
d_v, // pointer to curr velocities
d_a, // pointer to curr accelerations
nElem, // number of bodies in simulation
nTiles); // number of shared memory sections per block
cudaDeviceSynchronize ();
cudaMemcpy(h_dref_r, d_r[(iter+1)%2], nBytes, cudaMemcpyDeviceToHost);
// cudaMemcpy(h_dref_v, d_v, nBytes, cudaMemcpyDeviceToHost);
// cudaMemcpy(h_dref_a, d_a, nBytes, cudaMemcpyDeviceToHost);
// if (iter%1000 == 0)
// print_BodyStats (h_dref_r, h_dref_v, h_dref_a);
}
double timestamp_GPU_end = getTimeStamp();
double elapsedTime = timestamp_GPU_end - timestamp_GPU_start;
////////////////////////////////////////////////////////////////
/// SIMULATION COMPLETE
////////////////////////////////////////////////////////////////
cudaFree (d_r[0]); cudaFree (d_r[1]);
cudaFree (d_v);
cudaFree (d_a);
cudaFreeHost (h_dref_r);
cudaFreeHost (h_dref_v);
cudaFreeHost (h_dref_a);
checkCudaErrors (cudaDeviceReset());
printf("Device successfully reset.\n");
printf("\nElapsed Time: %lfs\n", elapsedTime);
printf("Average timestep simulation duration: %lfs\n", elapsedTime/nIter);
free (h_r[0]); free (h_r[1]);
free (h_v);
free (h_a);
return 0;
}
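// Pairwise softened gravitational interaction: with d = bj - bi this adds
//   a_i += m_j * d / (|d|^2 + SOFTENING)^(3/2)
// to the accumulator; the body mass is carried in the w component of each
// position float4, so no separate mass array is needed here.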
__device__ float3 bodyBodyInteraction (float3 ai, float4 bi, float4 bj)
{
float3 dist;
dist.x = bj.x - bi.x;
dist.y = bj.y - bi.y;
dist.z = bj.z - bi.z;
float distSqr = dot(dist, dist) + SOFTENING;
float invDistCube = rsqrtf(distSqr * distSqr * distSqr);
float s = bj.w * invDistCube;
ai.x += s * dist.x;
ai.y += s * dist.y;
ai.z += s * dist.z;
return ai;
}
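// Tiled all-pairs acceleration: each block stages blockDim.x positions into
// dynamic shared memory per tile (sized at launch as blockDim.x*sizeof(float4)),
// so every thread reuses the same tile from fast memory while accumulating its
// partial acceleration over all nTiles tiles.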
__device__ float4 calcAcceleration (const float4 *devX, unsigned nTiles)
{
unsigned gtid = blockIdx.x * blockDim.x + threadIdx.x;
extern __shared__ float4 shPosition4[];
float4 myPosition4;
float3 acc3 = {0.0f, 0.0f, 0.0f};
myPosition4 = devX[gtid];
for (unsigned tile=0; tile<nTiles; tile++) {
shPosition4[threadIdx.x] = devX[ tile*blockDim.x + threadIdx.x ];
__syncthreads(); // Wait for all threads in block to load data
// ... into shared memory
#pragma unroll 4
for (unsigned j=0; j<blockDim.x; j++)
acc3 = bodyBodyInteraction(acc3, myPosition4, shPosition4[j]);
__syncthreads(); // wait for all threads in block to complete their
// ... computations to not overwrite sh. mem.
}
float4 acc4 = {acc3.x, acc3.y, acc3.z, 0.0f};
return acc4;
}
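// One velocity-Verlet step per body:
//   x_{n+1} = x_n + v_n*dt + a_n*dt^2/2
//   v_{n+1} = v_n + (a_n + a_{n+1})*dt/2
// DT, DTd2 and DTSQd2 are assumed to be dt, dt/2 and dt^2/2, and scalevec()
// plus the float4 operator+ are assumed to come from the project headers.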
__global__ void calcIntegration (float4 *devX_ip1, const float4 *devX_i,
float4 *devV_i, float4 *devA_i, const unsigned nElem, const unsigned nTiles)
{
unsigned gtid = blockIdx.x * blockDim.x + threadIdx.x;
if (gtid < nElem) {
float4 old_acc = devA_i[gtid];
float4 old_vel = devV_i[gtid];
float4 old_pos = devX_i[gtid];
float4 new_pos = old_pos + scalevec(old_vel, DT) + scalevec(old_acc, DTSQd2);
float4 new_acc = calcAcceleration (devX_i, nTiles);
float4 new_vel = old_vel + scalevec(old_acc + new_acc, DTd2);
devA_i [gtid] = new_acc;
devV_i [gtid] = new_vel;
devX_ip1[gtid] = new_pos;
}
}
|
583e629c6a503e358527d2b5ef45a4f814013995.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* MonteCarloKernel.cu
* Monte Carlo methods in CUDA
* Dissertation project
* Created on: 06/feb/2018
* Author: Marco Matteo Buzzulini
*/
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include "MonteCarlo.h"
#define max(a,b) \
({ __typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a > _b ? _a : _b; })
// Struct for Monte Carlo methods
typedef struct{
OptionValue *h_CallValue, *d_CallValue;
OptionValue callValue;
OptionData sopt;
MultiOptionData mopt;
hiprandState_t *RNG;
int numBlocks, numThreads, numOpt, path;
} dev_MonteCarloData;
// Memory initialization for MC
void MonteCarlo_init(dev_MonteCarloData *data);
// Freeing memory after MC
void MonteCarlo_closing(dev_MonteCarloData *data);
// Monte Carlo method for Option Pricing
void MonteCarlo(dev_MonteCarloData *data);
// Monte Carlo method for CVA - 1 black-scholes option
void cvaMonteCarlo(dev_MonteCarloData *data, double intdef, double lgd, int n_grid);
/*
* Error handling from Cuda programming - shane cook
*/
void cuda_error_check(const char * prefix, const char * postfix){
if (hipPeekAtLastError() != hipSuccess){
printf("\n%s%s%s", prefix, hipGetErrorString(hipGetLastError()), postfix);
hipDeviceReset();
//wait_exit();
exit(1);
}
}
////////////////////////////////////////////////////////////////
//////////////// CONSTANT MEMORY ////////////////////////
////////////////////////////////////////////////////////////////
// Basket Option
__device__ __constant__ MultiOptionData MOPTION;
// Vanilla Call Option
__device__ __constant__ OptionData OPTION;
// Number of underlyings, num simulations per block and the sims for CVA
__device__ __constant__ int N_OPTION, N_PATH, N_GRID;
// Financial parameters for CVA: Default intensity and Loss given default
__device__ __constant__ double INTDEF, LGD;
////////////////////////////////////////////////////////////////
//////////////// KERNEL FUNCTIONS ////////////////////////
////////////////////////////////////////////////////////////////
/* * * * * ONLY DEVICE * * * * */
// Call Option payoff
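// Terminal price under risk-neutral geometric Brownian motion:
//   S_T = S_0 * exp((r - sigma^2/2)*T + sigma*sqrt(T)*Z),  Z ~ N(0,1)
// The payoff max(S_T - K, 0) is returned undiscounted; discounting by
// exp(-r*T) is applied on the host in MonteCarlo().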
__device__ double callPayoff(hiprandState_t *threadState){
double z = hiprand_normal(threadState);
double s = OPTION.s * exp((OPTION.r - 0.5 * OPTION.v * OPTION.v) * OPTION.t + OPTION.v * sqrt(OPTION.t) * z);
return max(s - OPTION.k,0);
}
// Basket option random number
__device__ void brownianVect(double *bt, hiprandState_t *threadState){
int i,j;
double g[N];
for(i=0;i<N_OPTION;i++)
g[i]=hiprand_normal(threadState);
for(i=0;i<N_OPTION;i++){
double somma = 0;
for(j=0;j<N_OPTION;j++)
somma += MOPTION.p[i][j] * g[j];
bt[i] = somma;
}
for(i=0;i<N_OPTION;i++)
bt[i] += MOPTION.d[i];
}
// Basket option payoff
__device__ double basketPayoff(double *bt){
int j;
double s[N], st_sum=0, price;
for(j=0;j<N_OPTION;j++)
s[j] = MOPTION.s[j] * exp((MOPTION.r - 0.5 * MOPTION.v[j] * MOPTION.v[j])*MOPTION.t+MOPTION.v[j] * bt[j] * sqrt(MOPTION.t));
// Third step: Mean price
for(j=0;j<N_OPTION;j++)
st_sum += s[j] * MOPTION.w[j];
// Fourth step: Option payoff
price = st_sum - MOPTION.k;
return max(price,0);
}
// Simulating Geometric Brownian path
__device__ double geomBrownian( double s, double t, double z ){
double x = (OPTION.r - 0.5 * OPTION.v * OPTION.v) * t + OPTION.v * sqrt(t) * z;
return s * exp(x);
}
// Hastings approximation of cumulative normal distribution
__device__ double cnd(double d){
const double A1 = 0.31938153;
const double A2 = -0.356563782;
const double A3 = 1.781477937;
const double A4 = -1.821255978;
const double A5 = 1.330274429;
const double ONEOVER2PI = 0.39894228040143267793994605993438;
double K = 1.0 / (1.0 + 0.2316419 * fabs(d));
double cnd = ONEOVER2PI * exp(- 0.5 * d * d) * (K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5)))));
if (d > 0)
return 1.0 - cnd;
else
return cnd;
}
// Black & Scholes price formula for vanilla options
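// C(S,t) = S*N(d1) - K*exp(-r*t)*N(d2), with
//   d1 = (ln(S/K) + (r + sigma^2/2)*t) / (sigma*sqrt(t)),  d2 = d1 - sigma*sqrt(t)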
__device__ double device_bsCall ( double s, double t){
double d1 = ( log(s / OPTION.k) + (OPTION.r + 0.5 * OPTION.v * OPTION.v) * t) / (OPTION.v * sqrt(t));
double d2 = d1 - OPTION.v * sqrt(t);
return s * cnd(d1) - OPTION.k * exp(- OPTION.r * t) * cnd(d2);
}
/* * * * * GLOBAL * * * * */
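// All three kernels below share the same layout: each thread accumulates the
// sum and the sum of squares of its simulated values into an OptionValue,
// stores them in shared memory at sumIndex and blockDim.x + sumIndex, the block
// then performs a tree reduction, and thread 0 writes one partial result per
// block to d_CallValue for the host to combine.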
// Basket Option Kernel
__global__ void basketOptMonteCarlo(hiprandState_t * randseed, OptionValue *d_CallValue){
// Parameters for shared memory
int sumIndex = threadIdx.x;
int sum2Index = sumIndex + blockDim.x;
/* - SHARED MEMORY - */
extern __shared__ double s_Sum[];
// Global thread index
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// Copy random number state to local memory
hiprandState_t threadState = randseed[tid];
int i;
OptionValue sum = {0, 0};
for( i=sumIndex; i<N_PATH; i+=blockDim.x){
double price=0.0f, bt[N];
// Random Number Generation
brownianVect(bt,&threadState);
// Price simulation with the basket call option payoff function
price=basketPayoff(bt);
// Mean sum
sum.Expected += price;
sum.Confidence += price*price;
}
// Copy to the shared memory
s_Sum[sumIndex] = sum.Expected;
s_Sum[sum2Index] = sum.Confidence;
__syncthreads();
// Reduce shared memory accumulators and write final result to global memory
int halfblock = blockDim.x/2;
// Reduction in log2(threadBlocks) steps, so threadBlock must be power of 2
do{
if ( sumIndex < halfblock ){
s_Sum[sumIndex] += s_Sum[sumIndex+halfblock];
s_Sum[sum2Index] += s_Sum[sum2Index+halfblock];
}
__syncthreads();
halfblock /= 2;
}while ( halfblock != 0 );
// Copy to the global memory
if (sumIndex == 0){
d_CallValue[blockIdx.x].Expected = s_Sum[sumIndex];
d_CallValue[blockIdx.x].Confidence = s_Sum[sum2Index];
}
}
// Vanilla Option call Kernel
__global__ void vanillaOptMonteCarlo(hiprandState_t * randseed, OptionValue *d_CallValue){
// Parameters for shared memory
int sumIndex = threadIdx.x;
int sum2Index = sumIndex + blockDim.x;
/* - SHARED MEMORY - */
extern __shared__ double s_Sum[];
// Global thread index
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// Copy random number state to local memory
hiprandState_t threadState = randseed[tid];
OptionValue sum = {0, 0};
int i;
for( i=sumIndex; i<N_PATH; i+=blockDim.x){
double price=0.0f;
// Price simulation with the vanilla call option payoff function
price = callPayoff(&threadState);
sum.Expected += price;
sum.Confidence += price*price;
}
// Copy to the shared memory
s_Sum[sumIndex] = sum.Expected;
s_Sum[sum2Index] = sum.Confidence;
__syncthreads();
// Reduce shared memory accumulators and write final result to global memory
int halfblock = blockDim.x/2;
// Reduction in log2(threadBlocks) steps, so threadBlock must be power of 2
do{
if ( sumIndex < halfblock ){
s_Sum[sumIndex] += s_Sum[sumIndex+halfblock];
s_Sum[sum2Index] += s_Sum[sum2Index+halfblock];
}
__syncthreads();
halfblock /= 2;
}while ( halfblock != 0 );
// Copy to the global memory
if (sumIndex == 0){
d_CallValue[blockIdx.x].Expected = s_Sum[sumIndex];
d_CallValue[blockIdx.x].Confidence = s_Sum[sum2Index];
}
}
__global__ void cvaCallOptMC(hiprandState_t * randseed, OptionValue *d_CallValue){
// Parameters for shared memory
int sumIndex = threadIdx.x;
int sum2Index = sumIndex + blockDim.x;
/* - SHARED MEMORY - */
extern __shared__ double s_Sum[];
// Global thread index
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// Copy random number state to local memory
hiprandState_t threadState = randseed[tid];
double dt = OPTION.t / N_GRID;
// CVA computation
// Step 1: simulate the underlying path; at every time step dt compute the discounted option price with B&S
// Step 2: compute the CVA of each path and add it to the mean_price variable
// Step 3: store the computed CVA values in shared memory
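// Per-path discretised unilateral CVA (INTDEF is the default intensity lambda):
//   CVA ~= LGD * sum_j [exp(-lambda*t_{j-1}) - exp(-lambda*t_j)] * EE(t_j)
// where EE(t_j) is the Black & Scholes value of the option over its remaining life.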
OptionValue sum = {0, 0};
int i,j;
for( i=sumIndex; i<N_PATH; i+=blockDim.x){
double s, ee, t;
double mean_price = 0;
s = OPTION.s;
t = OPTION.t;
ee = device_bsCall(s,t);
for(j=1; j <= N_GRID; j++){
double dp = exp(-(dt*(j-1)) * INTDEF) - exp(-(dt*j) * INTDEF);
if( (t -= dt)>=0 ){
double z = hiprand_normal(&threadState);
s = geomBrownian(s, dt, z);
ee = device_bsCall(s,t);
}
else{
ee = 0;
}
mean_price += dp * ee;
}
mean_price *= LGD;
sum.Expected += mean_price;
sum.Confidence += mean_price * mean_price;
}
// Copy to the shared memory
s_Sum[sumIndex] = sum.Expected;
s_Sum[sum2Index] = sum.Confidence;
__syncthreads();
// Reduce shared memory accumulators and write final result to global memory
int halfblock = blockDim.x/2;
// Reduction in log2(threadBlocks) steps, so threadBlock must be power of 2
do{
if ( sumIndex < halfblock ){
s_Sum[sumIndex] += s_Sum[sumIndex+halfblock];
s_Sum[sum2Index] += s_Sum[sum2Index+halfblock];
}
__syncthreads();
halfblock /= 2;
}while ( halfblock != 0 );
// Copy to the global memory
if (sumIndex == 0){
d_CallValue[blockIdx.x].Expected = s_Sum[sumIndex];
d_CallValue[blockIdx.x].Confidence = s_Sum[sum2Index];
}
}
__global__ void randomSetup( hiprandState_t *randSeed ){
// Global thread index
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// Each thread block gets different seed, threads within a thread block get different sequence numbers
hiprand_init(blockIdx.x + gridDim.x, threadIdx.x, 0, &randSeed[tid]);
}
////////////////////////////////////////////////////////////////
//////////////// HOST FUNCTIONS ////////////////////////////
////////////////////////////////////////////////////////////////
void MonteCarlo_init(dev_MonteCarloData *data){
hipEvent_t start, stop;
CudaCheck( hipEventCreate( &start ));
CudaCheck( hipEventCreate( &stop ));
float time;
/*--------------- CONSTANT MEMORY ----------------*/
if( data->numOpt > 1){
int n_option = data->numOpt;
CudaCheck(hipMemcpyToSymbol(N_OPTION,&n_option,sizeof(int)));
}
int n_path = data->path;
printf("Number of simulations per block: \t %d\n",n_path);
printf("Number of simulations per thread: \t %d\n",n_path/data->numThreads);
CudaCheck(hipMemcpyToSymbol(N_PATH,&n_path,sizeof(int)));
// RANDOM NUMBER GENERATION KERNEL
//Allocate states for pseudo random number generators
CudaCheck(hipMalloc((void **) &data->RNG, data->numBlocks * data->numThreads * sizeof(hiprandState_t)));
//Setup for the random number sequence
CudaCheck( hipEventRecord( start, 0 ));
hipLaunchKernelGGL(( randomSetup), dim3(data->numBlocks), dim3(data->numThreads), 0, 0, data->RNG);
cuda_error_check("\nError launching randomSetup: ","\n");
CudaCheck( hipEventRecord( stop, 0));
CudaCheck( hipEventSynchronize( stop ));
CudaCheck( hipEventElapsedTime( &time, start, stop ));
printf( "RNG done in ms \t %f\n", time);
// Host Memory Allocation
CudaCheck( hipEventRecord( start, 0 ));
CudaCheck(hipHostMalloc(&data->h_CallValue, sizeof(OptionValue)*data->numBlocks));
CudaCheck( hipEventRecord( stop, 0));
CudaCheck( hipEventSynchronize( stop ));
CudaCheck( hipEventElapsedTime( &time, start, stop ));
printf( "Host memory allocation done in ms \t %f\n", time);
// Device Memory Allocation
CudaCheck( hipEventRecord( start, 0 ));
CudaCheck(hipMalloc(&data->d_CallValue, sizeof(OptionValue)*data->numBlocks));
CudaCheck( hipEventRecord( stop, 0));
CudaCheck( hipEventSynchronize( stop ));
CudaCheck( hipEventElapsedTime( &time, start, stop ));
printf( "Device memory allocation done in ms \t %f\n", time);
CudaCheck( hipEventDestroy( start ));
CudaCheck( hipEventDestroy( stop ));
}
void MonteCarlo_closing(dev_MonteCarloData *data){
hipEvent_t start, stop;
CudaCheck( hipEventCreate( &start ));
CudaCheck( hipEventCreate( &stop ));
float time;
CudaCheck( hipEventRecord( start, 0 ));
//Free memory space
CudaCheck(hipFree(data->RNG));
CudaCheck(hipHostFree(data->h_CallValue));
CudaCheck(hipFree(data->d_CallValue));
CudaCheck( hipEventRecord( stop, 0));
CudaCheck( hipEventSynchronize( stop ));
CudaCheck( hipEventElapsedTime( &time, start, stop ));
printf( "Free memory done in ms \t %f\n", time);
CudaCheck( hipEventDestroy( start ));
CudaCheck( hipEventDestroy( stop ));
}
void MonteCarlo(dev_MonteCarloData *data){
hipEvent_t start, stop;
CudaCheck( hipEventCreate( &start ));
CudaCheck( hipEventCreate( &stop ));
float time, r,t;
/*----------------- SHARED MEMORY -------------------*/
int i, numShared = sizeof(double) * data->numThreads * 2;
/*--------------- CONSTANT MEMORY ----------------*/
if( data->numOpt == 1){
r = data->sopt.r;
t = data->sopt.t;
CudaCheck(hipMemcpyToSymbol(OPTION,&data->sopt,sizeof(OptionData)));
// Time
CudaCheck( hipEventRecord( start, 0 ));
hipLaunchKernelGGL(( vanillaOptMonteCarlo), dim3(data->numBlocks), dim3(data->numThreads), numShared, 0, data->RNG,(OptionValue *)(data->d_CallValue));
cuda_error_check("\nError launching vanillaOptMonteCarlo: ","\n");
CudaCheck( hipEventRecord( stop, 0));
CudaCheck( hipEventSynchronize( stop ));
CudaCheck( hipEventElapsedTime( &time, start, stop ));
printf( "Kernel done in ms \t %f\n", time);
}
else{
r = data->mopt.r;
t = data->mopt.t;
CudaCheck(hipMemcpyToSymbol(MOPTION,&data->mopt,sizeof(MultiOptionData)));
// Time
CudaCheck( hipEventRecord( start, 0 ));
hipLaunchKernelGGL(( basketOptMonteCarlo), dim3(data->numBlocks), dim3(data->numThreads), numShared, 0, data->RNG,(OptionValue *)(data->d_CallValue));
cuda_error_check("\nError launching basketOptMonteCarlo: ","\n");
CudaCheck( hipEventRecord( stop, 0));
CudaCheck( hipEventSynchronize( stop ));
CudaCheck( hipEventElapsedTime( &time, start, stop ));
printf( "Kernel done in ms \t %f\n", time);
}
//MEMORY CPY: prices per block
// Time
CudaCheck( hipEventRecord( start, 0 ));
CudaCheck(hipMemcpy(data->h_CallValue, data->d_CallValue, data->numBlocks * sizeof(OptionValue), hipMemcpyDeviceToHost));
CudaCheck( hipEventRecord( stop, 0));
CudaCheck( hipEventSynchronize( stop ));
CudaCheck( hipEventElapsedTime( &time, start, stop ));
printf( "Copy from device-to-host done in ms \t %f\n", time);
// Closing Monte Carlo
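// The estimate is the discounted sample mean of all simulated payoffs; the
// Confidence field holds the half-width of the 95% interval, 1.96*s/sqrt(nSim),
// where s is the sample standard deviation rebuilt from the sum and the sum of
// squares accumulated on the device.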
double sum=0, sum2=0, price, empstd;
long int nSim = data->numBlocks * data->path;
// Time
CudaCheck( hipEventRecord( start, 0 ));
for ( i = 0; i < data->numBlocks; i++ ){
sum += data->h_CallValue[i].Expected;
sum2 += data->h_CallValue[i].Confidence;
}
price = exp(-r*t) * (sum/(double)nSim);
empstd = sqrt((double)((double)nSim * sum2 - sum * sum)/((double)nSim * (double)(nSim - 1)));
data->callValue.Confidence = 1.96 * empstd / (double)sqrt((double)nSim);
data->callValue.Expected = price;
CudaCheck( hipEventRecord( stop, 0));
CudaCheck( hipEventSynchronize( stop ));
CudaCheck( hipEventElapsedTime( &time, start, stop ));
printf( "Call price done in ms \t %f\n", time);
CudaCheck( hipEventDestroy( start ));
CudaCheck( hipEventDestroy( stop ));
}
void cvaMonteCarlo(dev_MonteCarloData *data, double intdef, double lgd, int n_grid){
hipEvent_t start, stop;
CudaCheck( hipEventCreate( &start ));
CudaCheck( hipEventCreate( &stop ));
float time;
/*----------------- SHARED MEMORY -------------------*/
int i, numShared = sizeof(double) * data->numThreads * 2;
/*--------------- CONSTANT MEMORY ----------------*/
CudaCheck(hipMemcpyToSymbol(INTDEF, &intdef, sizeof(double)));
CudaCheck(hipMemcpyToSymbol(LGD, &lgd, sizeof(double)));
CudaCheck(hipMemcpyToSymbol(N_GRID, &n_grid, sizeof(int)));
CudaCheck(hipMemcpyToSymbol(OPTION, &data->sopt, sizeof(OptionData)));
//Time
CudaCheck( hipEventRecord( start, 0 ));
hipLaunchKernelGGL(( cvaCallOptMC), dim3(data->numBlocks), dim3(data->numThreads), numShared, 0, data->RNG,(OptionValue *)(data->d_CallValue));
cuda_error_check("\nError launching cvaCallOptMC: ","\n");
CudaCheck( hipEventRecord( stop, 0));
CudaCheck( hipEventSynchronize( stop ));
CudaCheck( hipEventElapsedTime( &time, start, stop ));
printf( "Kernel done in ms \t %f\n", time);
//MEMORY CPY: prices per block
CudaCheck(hipMemcpy(data->h_CallValue, data->d_CallValue, data->numBlocks * sizeof(OptionValue), hipMemcpyDeviceToHost));
// Closing Monte Carlo
double sum=0, sum2=0, price, empstd;
long int nSim = data->numBlocks * data->path;
CudaCheck( hipEventRecord( start, 0 ));
for ( i = 0; i < data->numBlocks; i++ ){
sum += data->h_CallValue[i].Expected;
sum2 += data->h_CallValue[i].Confidence;
}
price = sum/(double)nSim;
empstd = sqrt((double)((double)nSim * sum2 - sum * sum)/((double)nSim * (double)(nSim - 1)));
data->callValue.Confidence = 1.96 * empstd / (double)sqrt((double)nSim);
data->callValue.Expected = price;
CudaCheck( hipEventRecord( stop, 0));
CudaCheck( hipEventSynchronize( stop ));
CudaCheck( hipEventElapsedTime( &time, start, stop ));
printf( "CVA price done in ms \t %f\n", time);
CudaCheck( hipEventDestroy( start ));
CudaCheck( hipEventDestroy( stop ));
}
////////////////////////////////////////////////
//////////////// WRAPPERS ////////////////
////////////////////////////////////////////////
extern "C" OptionValue dev_basketOpt(MultiOptionData *option, int numBlocks, int numThreads, int sims){
dev_MonteCarloData data;
// Option
data.mopt = *option;
// Kernel parameters
data.numBlocks = numBlocks;
data.numThreads = numThreads;
data.numOpt = N;
data.path = sims / numBlocks;
// Core
MonteCarlo_init(&data);
MonteCarlo(&data);
MonteCarlo_closing(&data);
return data.callValue;
}
extern "C" OptionValue dev_vanillaOpt(OptionData *opt, int numBlocks, int numThreads, int sims){
dev_MonteCarloData data;
// Option
data.sopt = *opt;
// Kernel parameters
data.numBlocks = numBlocks;
data.numThreads = numThreads;
data.numOpt = 1;
data.path = sims / numBlocks;
// Core
MonteCarlo_init(&data);
MonteCarlo(&data);
MonteCarlo_closing(&data);
return data.callValue;
}
extern "C" OptionValue dev_cvaEquityOption(CVA *cva, int numBlocks, int numThreads, int sims){
dev_MonteCarloData data;
data.sopt = cva->option;
// Kernel parameters
data.numBlocks = numBlocks;
data.numThreads = numThreads;
data.numOpt = 1;
data.path = sims / numBlocks;
// Core
MonteCarlo_init(&data);
cvaMonteCarlo(&data, (double)cva->defInt, (double)cva->lgd, cva->n);
// Closing
MonteCarlo_closing(&data);
return data.callValue;
}
|
583e629c6a503e358527d2b5ef45a4f814013995.cu
|
/*
* MonteCarloKernel.cu
* Monte Carlo methods in CUDA
* Dissertation project
* Created on: 06/feb/2018
* Author: Marco Matteo Buzzulini
*/
#include <curand.h>
#include <curand_kernel.h>
#include "MonteCarlo.h"
#define max(a,b) \
({ __typeof__ (a) _a = (a); \
__typeof__ (b) _b = (b); \
_a > _b ? _a : _b; })
// Struct for Monte Carlo methods
typedef struct{
OptionValue *h_CallValue, *d_CallValue;
OptionValue callValue;
OptionData sopt;
MultiOptionData mopt;
curandState *RNG;
int numBlocks, numThreads, numOpt, path;
} dev_MonteCarloData;
// Memory initialization for MC
void MonteCarlo_init(dev_MonteCarloData *data);
// Freeing memory after MC
void MonteCarlo_closing(dev_MonteCarloData *data);
// Monte Carlo method for Option Pricing
void MonteCarlo(dev_MonteCarloData *data);
// Monte Carlo method for CVA - 1 black-scholes option
void cvaMonteCarlo(dev_MonteCarloData *data, double intdef, double lgd, int n_grid);
/*
* Error handling from Cuda programming - shane cook
*/
void cuda_error_check(const char * prefix, const char * postfix){
if (cudaPeekAtLastError() != cudaSuccess){
printf("\n%s%s%s", prefix, cudaGetErrorString(cudaGetLastError()), postfix);
cudaDeviceReset();
//wait_exit();
exit(1);
}
}
////////////////////////////////////////////////////////////////
//////////////// CONSTANT MEMORY ////////////////////////
////////////////////////////////////////////////////////////////
// Basket Option
__device__ __constant__ MultiOptionData MOPTION;
// Vanilla Call Option
__device__ __constant__ OptionData OPTION;
// Number of underlyings, num simulations per block and the sims for CVA
__device__ __constant__ int N_OPTION, N_PATH, N_GRID;
// Financial parameters for CVA: Default intensity and Loss given default
__device__ __constant__ double INTDEF, LGD;
////////////////////////////////////////////////////////////////
//////////////// KERNEL FUNCTIONS ////////////////////////
////////////////////////////////////////////////////////////////
/* * * * * ONLY DEVICE * * * * */
// Call Option payoff
__device__ double callPayoff(curandState *threadState){
double z = curand_normal(threadState);
double s = OPTION.s * exp((OPTION.r - 0.5 * OPTION.v * OPTION.v) * OPTION.t + OPTION.v * sqrt(OPTION.t) * z);
return max(s - OPTION.k,0);
}
// Basket option random number
__device__ void brownianVect(double *bt, curandState *threadState){
int i,j;
double g[N];
for(i=0;i<N_OPTION;i++)
g[i]=curand_normal(threadState);
for(i=0;i<N_OPTION;i++){
double somma = 0;
for(j=0;j<N_OPTION;j++)
somma += MOPTION.p[i][j] * g[j];
bt[i] = somma;
}
for(i=0;i<N_OPTION;i++)
bt[i] += MOPTION.d[i];
}
// Basket option payoff
__device__ double basketPayoff(double *bt){
int j;
double s[N], st_sum=0, price;
for(j=0;j<N_OPTION;j++)
s[j] = MOPTION.s[j] * exp((MOPTION.r - 0.5 * MOPTION.v[j] * MOPTION.v[j])*MOPTION.t+MOPTION.v[j] * bt[j] * sqrt(MOPTION.t));
// Third step: Mean price
for(j=0;j<N_OPTION;j++)
st_sum += s[j] * MOPTION.w[j];
// Fourth step: Option payoff
price = st_sum - MOPTION.k;
return max(price,0);
}
// Simulating Geometric Brownian path
__device__ double geomBrownian( double s, double t, double z ){
double x = (OPTION.r - 0.5 * OPTION.v * OPTION.v) * t + OPTION.v * sqrt(t) * z;
return s * exp(x);
}
// Hastings approximation of cumulative normal distribution
__device__ double cnd(double d){
const double A1 = 0.31938153;
const double A2 = -0.356563782;
const double A3 = 1.781477937;
const double A4 = -1.821255978;
const double A5 = 1.330274429;
const double ONEOVER2PI = 0.39894228040143267793994605993438;
double K = 1.0 / (1.0 + 0.2316419 * fabs(d));
double cnd = ONEOVER2PI * exp(- 0.5 * d * d) * (K * (A1 + K * (A2 + K * (A3 + K * (A4 + K * A5)))));
if (d > 0)
return 1.0 - cnd;
else
return cnd;
}
// Black & Scholes price formula for vanilla options
__device__ double device_bsCall ( double s, double t){
double d1 = ( log(s / OPTION.k) + (OPTION.r + 0.5 * OPTION.v * OPTION.v) * t) / (OPTION.v * sqrt(t));
double d2 = d1 - OPTION.v * sqrt(t);
return s * cnd(d1) - OPTION.k * exp(- OPTION.r * t) * cnd(d2);
}
/* * * * * GLOBAL * * * * */
// Basket Option Kernel
__global__ void basketOptMonteCarlo(curandState * randseed, OptionValue *d_CallValue){
// Parameters for shared memory
int sumIndex = threadIdx.x;
int sum2Index = sumIndex + blockDim.x;
/* - SHARED MEMORY - */
extern __shared__ double s_Sum[];
// Global thread index
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// Copy random number state to local memory
curandState threadState = randseed[tid];
int i;
OptionValue sum = {0, 0};
for( i=sumIndex; i<N_PATH; i+=blockDim.x){
double price=0.0f, bt[N];
// Random Number Generation
brownianVect(bt,&threadState);
// Price simulation with the basket call option payoff function
price=basketPayoff(bt);
// Mean sum
sum.Expected += price;
sum.Confidence += price*price;
}
// Copy to the shared memory
s_Sum[sumIndex] = sum.Expected;
s_Sum[sum2Index] = sum.Confidence;
__syncthreads();
// Reduce shared memory accumulators and write final result to global memory
int halfblock = blockDim.x/2;
// Reduction in log2(threadBlocks) steps, so threadBlock must be power of 2
do{
if ( sumIndex < halfblock ){
s_Sum[sumIndex] += s_Sum[sumIndex+halfblock];
s_Sum[sum2Index] += s_Sum[sum2Index+halfblock];
}
__syncthreads();
halfblock /= 2;
}while ( halfblock != 0 );
// Copy to the global memory
if (sumIndex == 0){
d_CallValue[blockIdx.x].Expected = s_Sum[sumIndex];
d_CallValue[blockIdx.x].Confidence = s_Sum[sum2Index];
}
}
// Vanilla Option call Kernel
__global__ void vanillaOptMonteCarlo(curandState * randseed, OptionValue *d_CallValue){
// Parameters for shared memory
int sumIndex = threadIdx.x;
int sum2Index = sumIndex + blockDim.x;
/* - SHARED MEMORY - */
extern __shared__ double s_Sum[];
// Global thread index
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// Copy random number state to local memory
curandState threadState = randseed[tid];
OptionValue sum = {0, 0};
int i;
for( i=sumIndex; i<N_PATH; i+=blockDim.x){
double price=0.0f;
// Price simulation with the vanilla call option payoff function
price = callPayoff(&threadState);
sum.Expected += price;
sum.Confidence += price*price;
}
// Copy to the shared memory
s_Sum[sumIndex] = sum.Expected;
s_Sum[sum2Index] = sum.Confidence;
__syncthreads();
// Reduce shared memory accumulators and write final result to global memory
int halfblock = blockDim.x/2;
// Reduction in log2(threadBlocks) steps, so threadBlock must be power of 2
do{
if ( sumIndex < halfblock ){
s_Sum[sumIndex] += s_Sum[sumIndex+halfblock];
s_Sum[sum2Index] += s_Sum[sum2Index+halfblock];
}
__syncthreads();
halfblock /= 2;
}while ( halfblock != 0 );
// Copy to the global memory
if (sumIndex == 0){
d_CallValue[blockIdx.x].Expected = s_Sum[sumIndex];
d_CallValue[blockIdx.x].Confidence = s_Sum[sum2Index];
}
}
__global__ void cvaCallOptMC(curandState * randseed, OptionValue *d_CallValue){
// Parameters for shared memory
int sumIndex = threadIdx.x;
int sum2Index = sumIndex + blockDim.x;
/* - SHARED MEMORY - */
extern __shared__ double s_Sum[];
// Global thread index
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// Copy random number state to local memory
curandState threadState = randseed[tid];
double dt = OPTION.t / N_GRID;
// CVA computation
// Step 1: simulate the underlying path; at every time step dt compute the discounted option price with B&S
// Step 2: compute the CVA of each path and add it to the mean_price variable
// Step 3: store the computed CVA values in shared memory
OptionValue sum = {0, 0};
int i,j;
for( i=sumIndex; i<N_PATH; i+=blockDim.x){
double s, ee, t;
double mean_price = 0;
s = OPTION.s;
t = OPTION.t;
ee = device_bsCall(s,t);
for(j=1; j <= N_GRID; j++){
double dp = exp(-(dt*(j-1)) * INTDEF) - exp(-(dt*j) * INTDEF);
if( (t -= dt)>=0 ){
double z = curand_normal(&threadState);
s = geomBrownian(s, dt, z);
ee = device_bsCall(s,t);
}
else{
ee = 0;
}
mean_price += dp * ee;
}
mean_price *= LGD;
sum.Expected += mean_price;
sum.Confidence += mean_price * mean_price;
}
// Copy to the shared memory
s_Sum[sumIndex] = sum.Expected;
s_Sum[sum2Index] = sum.Confidence;
__syncthreads();
// Reduce shared memory accumulators and write final result to global memory
int halfblock = blockDim.x/2;
// Reduction in log2(threadBlocks) steps, so threadBlock must be power of 2
do{
if ( sumIndex < halfblock ){
s_Sum[sumIndex] += s_Sum[sumIndex+halfblock];
s_Sum[sum2Index] += s_Sum[sum2Index+halfblock];
}
__syncthreads();
halfblock /= 2;
}while ( halfblock != 0 );
// Copy to the global memory
if (sumIndex == 0){
d_CallValue[blockIdx.x].Expected = s_Sum[sumIndex];
d_CallValue[blockIdx.x].Confidence = s_Sum[sum2Index];
}
}
__global__ void randomSetup( curandState *randSeed ){
// Global thread index
int tid = threadIdx.x + blockIdx.x * blockDim.x;
// Each thread block gets different seed, threads within a thread block get different sequence numbers
curand_init(blockIdx.x + gridDim.x, threadIdx.x, 0, &randSeed[tid]);
}
////////////////////////////////////////////////////////////////
//////////////// HOST FUNCTIONS ////////////////////////////
////////////////////////////////////////////////////////////////
void MonteCarlo_init(dev_MonteCarloData *data){
cudaEvent_t start, stop;
CudaCheck( cudaEventCreate( &start ));
CudaCheck( cudaEventCreate( &stop ));
float time;
/*--------------- CONSTANT MEMORY ----------------*/
if( data->numOpt > 1){
int n_option = data->numOpt;
CudaCheck(cudaMemcpyToSymbol(N_OPTION,&n_option,sizeof(int)));
}
int n_path = data->path;
printf("Number of simulations per block: \t %d\n",n_path);
printf("Number of simulations per thread: \t %d\n",n_path/data->numThreads);
CudaCheck(cudaMemcpyToSymbol(N_PATH,&n_path,sizeof(int)));
// RANDOM NUMBER GENERATION KERNEL
//Allocate states for pseudo random number generators
CudaCheck(cudaMalloc((void **) &data->RNG, data->numBlocks * data->numThreads * sizeof(curandState)));
//Setup for the random number sequence
CudaCheck( cudaEventRecord( start, 0 ));
randomSetup<<<data->numBlocks, data->numThreads>>>(data->RNG);
cuda_error_check("\nError launching randomSetup: ","\n");
CudaCheck( cudaEventRecord( stop, 0));
CudaCheck( cudaEventSynchronize( stop ));
CudaCheck( cudaEventElapsedTime( &time, start, stop ));
printf( "RNG done in ms \t %f\n", time);
// Host Memory Allocation
CudaCheck( cudaEventRecord( start, 0 ));
CudaCheck(cudaMallocHost(&data->h_CallValue, sizeof(OptionValue)*data->numBlocks));
CudaCheck( cudaEventRecord( stop, 0));
CudaCheck( cudaEventSynchronize( stop ));
CudaCheck( cudaEventElapsedTime( &time, start, stop ));
printf( "Host memory allocation done in ms \t %f\n", time);
// Device Memory Allocation
CudaCheck( cudaEventRecord( start, 0 ));
CudaCheck(cudaMalloc(&data->d_CallValue, sizeof(OptionValue)*data->numBlocks));
CudaCheck( cudaEventRecord( stop, 0));
CudaCheck( cudaEventSynchronize( stop ));
CudaCheck( cudaEventElapsedTime( &time, start, stop ));
printf( "Device memory allocation done in ms \t %f\n", time);
CudaCheck( cudaEventDestroy( start ));
CudaCheck( cudaEventDestroy( stop ));
}
void MonteCarlo_closing(dev_MonteCarloData *data){
cudaEvent_t start, stop;
CudaCheck( cudaEventCreate( &start ));
CudaCheck( cudaEventCreate( &stop ));
float time;
CudaCheck( cudaEventRecord( start, 0 ));
//Free memory space
CudaCheck(cudaFree(data->RNG));
CudaCheck(cudaFreeHost(data->h_CallValue));
CudaCheck(cudaFree(data->d_CallValue));
CudaCheck( cudaEventRecord( stop, 0));
CudaCheck( cudaEventSynchronize( stop ));
CudaCheck( cudaEventElapsedTime( &time, start, stop ));
printf( "Free memory done in ms \t %f\n", time);
CudaCheck( cudaEventDestroy( start ));
CudaCheck( cudaEventDestroy( stop ));
}
void MonteCarlo(dev_MonteCarloData *data){
cudaEvent_t start, stop;
CudaCheck( cudaEventCreate( &start ));
CudaCheck( cudaEventCreate( &stop ));
float time, r,t;
/*----------------- SHARED MEMORY -------------------*/
int i, numShared = sizeof(double) * data->numThreads * 2;
/*--------------- CONSTANT MEMORY ----------------*/
if( data->numOpt == 1){
r = data->sopt.r;
t = data->sopt.t;
CudaCheck(cudaMemcpyToSymbol(OPTION,&data->sopt,sizeof(OptionData)));
// Time
CudaCheck( cudaEventRecord( start, 0 ));
vanillaOptMonteCarlo<<<data->numBlocks, data->numThreads, numShared>>>(data->RNG,(OptionValue *)(data->d_CallValue));
cuda_error_check("\nError launching vanillaOptMonteCarlo: ","\n");
CudaCheck( cudaEventRecord( stop, 0));
CudaCheck( cudaEventSynchronize( stop ));
CudaCheck( cudaEventElapsedTime( &time, start, stop ));
printf( "Kernel done in ms \t %f\n", time);
}
else{
r = data->mopt.r;
t = data->mopt.t;
CudaCheck(cudaMemcpyToSymbol(MOPTION,&data->mopt,sizeof(MultiOptionData)));
// Time
CudaCheck( cudaEventRecord( start, 0 ));
basketOptMonteCarlo<<<data->numBlocks, data->numThreads, numShared>>>(data->RNG,(OptionValue *)(data->d_CallValue));
cuda_error_check("\nError launching basketOptMonteCarlo: ","\n");
CudaCheck( cudaEventRecord( stop, 0));
CudaCheck( cudaEventSynchronize( stop ));
CudaCheck( cudaEventElapsedTime( &time, start, stop ));
printf( "Kernel done in ms \t %f\n", time);
}
//MEMORY CPY: prices per block
// Time
CudaCheck( cudaEventRecord( start, 0 ));
CudaCheck(cudaMemcpy(data->h_CallValue, data->d_CallValue, data->numBlocks * sizeof(OptionValue), cudaMemcpyDeviceToHost));
CudaCheck( cudaEventRecord( stop, 0));
CudaCheck( cudaEventSynchronize( stop ));
CudaCheck( cudaEventElapsedTime( &time, start, stop ));
printf( "Copy from device-to-host done in ms \t %f\n", time);
// Closing Monte Carlo
double sum=0, sum2=0, price, empstd;
long int nSim = data->numBlocks * data->path;
// Time
CudaCheck( cudaEventRecord( start, 0 ));
for ( i = 0; i < data->numBlocks; i++ ){
sum += data->h_CallValue[i].Expected;
sum2 += data->h_CallValue[i].Confidence;
}
price = exp(-r*t) * (sum/(double)nSim);
empstd = sqrt((double)((double)nSim * sum2 - sum * sum)/((double)nSim * (double)(nSim - 1)));
data->callValue.Confidence = 1.96 * empstd / (double)sqrt((double)nSim);
data->callValue.Expected = price;
CudaCheck( cudaEventRecord( stop, 0));
CudaCheck( cudaEventSynchronize( stop ));
CudaCheck( cudaEventElapsedTime( &time, start, stop ));
printf( "Call price done in ms \t %f\n", time);
CudaCheck( cudaEventDestroy( start ));
CudaCheck( cudaEventDestroy( stop ));
}
void cvaMonteCarlo(dev_MonteCarloData *data, double intdef, double lgd, int n_grid){
cudaEvent_t start, stop;
CudaCheck( cudaEventCreate( &start ));
CudaCheck( cudaEventCreate( &stop ));
float time;
/*----------------- SHARED MEMORY -------------------*/
int i, numShared = sizeof(double) * data->numThreads * 2;
/*--------------- CONSTANT MEMORY ----------------*/
CudaCheck(cudaMemcpyToSymbol(INTDEF, &intdef, sizeof(double)));
CudaCheck(cudaMemcpyToSymbol(LGD, &lgd, sizeof(double)));
CudaCheck(cudaMemcpyToSymbol(N_GRID, &n_grid, sizeof(int)));
CudaCheck(cudaMemcpyToSymbol(OPTION, &data->sopt, sizeof(OptionData)));
//Time
CudaCheck( cudaEventRecord( start, 0 ));
cvaCallOptMC<<<data->numBlocks, data->numThreads, numShared>>>(data->RNG,(OptionValue *)(data->d_CallValue));
cuda_error_check("\nError launching cvaCallOptMC: ","\n");
CudaCheck( cudaEventRecord( stop, 0));
CudaCheck( cudaEventSynchronize( stop ));
CudaCheck( cudaEventElapsedTime( &time, start, stop ));
printf( "Kernel done in ms \t %f\n", time);
//MEMORY CPY: prices per block
CudaCheck(cudaMemcpy(data->h_CallValue, data->d_CallValue, data->numBlocks * sizeof(OptionValue), cudaMemcpyDeviceToHost));
// Closing Monte Carlo
double sum=0, sum2=0, price, empstd;
long int nSim = data->numBlocks * data->path;
CudaCheck( cudaEventRecord( start, 0 ));
for ( i = 0; i < data->numBlocks; i++ ){
sum += data->h_CallValue[i].Expected;
sum2 += data->h_CallValue[i].Confidence;
}
price = sum/(double)nSim;
empstd = sqrt((double)((double)nSim * sum2 - sum * sum)/((double)nSim * (double)(nSim - 1)));
data->callValue.Confidence = 1.96 * empstd / (double)sqrt((double)nSim);
data->callValue.Expected = price;
CudaCheck( cudaEventRecord( stop, 0));
CudaCheck( cudaEventSynchronize( stop ));
CudaCheck( cudaEventElapsedTime( &time, start, stop ));
printf( "CVA price done in ms \t %f\n", time);
CudaCheck( cudaEventDestroy( start ));
CudaCheck( cudaEventDestroy( stop ));
}
////////////////////////////////////////////////
//////////////// WRAPPERS ////////////////
////////////////////////////////////////////////
extern "C" OptionValue dev_basketOpt(MultiOptionData *option, int numBlocks, int numThreads, int sims){
dev_MonteCarloData data;
// Option
data.mopt = *option;
// Kernel parameters
data.numBlocks = numBlocks;
data.numThreads = numThreads;
data.numOpt = N;
data.path = sims / numBlocks;
// Core
MonteCarlo_init(&data);
MonteCarlo(&data);
MonteCarlo_closing(&data);
return data.callValue;
}
extern "C" OptionValue dev_vanillaOpt(OptionData *opt, int numBlocks, int numThreads, int sims){
dev_MonteCarloData data;
// Option
data.sopt = *opt;
// Kernel parameters
data.numBlocks = numBlocks;
data.numThreads = numThreads;
data.numOpt = 1;
data.path = sims / numBlocks;
// Core
MonteCarlo_init(&data);
MonteCarlo(&data);
MonteCarlo_closing(&data);
return data.callValue;
}
extern "C" OptionValue dev_cvaEquityOption(CVA *cva, int numBlocks, int numThreads, int sims){
dev_MonteCarloData data;
data.sopt = cva->option;
// Kernel parameters
data.numBlocks = numBlocks;
data.numThreads = numThreads;
data.numOpt = 1;
data.path = sims / numBlocks;
// Core
MonteCarlo_init(&data);
cvaMonteCarlo(&data, (double)cva->defInt, (double)cva->lgd, cva->n);
// Closing
MonteCarlo_closing(&data);
return data.callValue;
}
|
800f9d21366eef1af79bd67c4d2937dd0cb139d2.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <chrono>
#include <algorithm>
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include "bbcu/bbcu.h"
#include "bbcu/bbcu_util.h"
// -------------------------------------------------
// Forward
// -------------------------------------------------
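// A stochastic 6-input LUT treats each input x_i as the probability of that
// input being 1 and outputs the expected table value under independent inputs:
//   y = sum_{k=0}^{63} W[k] * prod_{i=0}^{5} (bit_i(k) ? x_i : 1 - x_i)
// The x0_/x1_/x2_ products in the kernel are the pairwise factors of that
// expansion, combined two inputs at a time.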
__global__ void kernal_fp32_StochasticLut6_Forward(
float const *x_buf,
float *y_buf,
int const *input_index,
float const *W_buf,
int frame_size,
int frame_stride,
int binary_mode
)
{
int node = blockIdx.x;
int id = threadIdx.x;
int id_step = blockDim.x;
// read W
__shared__ float W[64];
for ( int i = id; i < 64; i += id_step ) {
W[i] = W_buf[node * 64 + i];
if ( binary_mode ) {
W[i] = W[i] > 0.5 ? 1.0 : 0.0;
}
}
// read input index
__shared__ float const *x_ptr[6];
for ( int i = id; i < 6; i += id_step ) {
x_ptr[i] = &x_buf[frame_stride * input_index[6*node + i]];
}
float *y_ptr = &y_buf[node * frame_stride];
__syncthreads();
for (int frame = id; frame < frame_size; frame += id_step) {
float xp[6], xn[6];
for ( int i = 0; i < 6; ++i) {
xp[i] = x_ptr[i][frame];
xn[i] = 1.0 - xp[i];
}
float x0_00 = xn[1] * xn[0];
float x0_01 = xn[1] * xp[0];
float x0_10 = xp[1] * xn[0];
float x0_11 = xp[1] * xp[0];
float x1_00 = xn[3] * xn[2];
float x1_01 = xn[3] * xp[2];
float x1_10 = xp[3] * xn[2];
float x1_11 = xp[3] * xp[2];
float x2_00 = xn[5] * xn[4];
float x2_01 = xn[5] * xp[4];
float x2_10 = xp[5] * xn[4];
float x2_11 = xp[5] * xp[4];
float y = 0;
y += W[0 ] * x2_00 * x1_00 * x0_00;
y += W[1 ] * x2_00 * x1_00 * x0_01;
y += W[2 ] * x2_00 * x1_00 * x0_10;
y += W[3 ] * x2_00 * x1_00 * x0_11;
y += W[4 ] * x2_00 * x1_01 * x0_00;
y += W[5 ] * x2_00 * x1_01 * x0_01;
y += W[6 ] * x2_00 * x1_01 * x0_10;
y += W[7 ] * x2_00 * x1_01 * x0_11;
y += W[8 ] * x2_00 * x1_10 * x0_00;
y += W[9 ] * x2_00 * x1_10 * x0_01;
y += W[10] * x2_00 * x1_10 * x0_10;
y += W[11] * x2_00 * x1_10 * x0_11;
y += W[12] * x2_00 * x1_11 * x0_00;
y += W[13] * x2_00 * x1_11 * x0_01;
y += W[14] * x2_00 * x1_11 * x0_10;
y += W[15] * x2_00 * x1_11 * x0_11;
y += W[16] * x2_01 * x1_00 * x0_00;
y += W[17] * x2_01 * x1_00 * x0_01;
y += W[18] * x2_01 * x1_00 * x0_10;
y += W[19] * x2_01 * x1_00 * x0_11;
y += W[20] * x2_01 * x1_01 * x0_00;
y += W[21] * x2_01 * x1_01 * x0_01;
y += W[22] * x2_01 * x1_01 * x0_10;
y += W[23] * x2_01 * x1_01 * x0_11;
y += W[24] * x2_01 * x1_10 * x0_00;
y += W[25] * x2_01 * x1_10 * x0_01;
y += W[26] * x2_01 * x1_10 * x0_10;
y += W[27] * x2_01 * x1_10 * x0_11;
y += W[28] * x2_01 * x1_11 * x0_00;
y += W[29] * x2_01 * x1_11 * x0_01;
y += W[30] * x2_01 * x1_11 * x0_10;
y += W[31] * x2_01 * x1_11 * x0_11;
y += W[32] * x2_10 * x1_00 * x0_00;
y += W[33] * x2_10 * x1_00 * x0_01;
y += W[34] * x2_10 * x1_00 * x0_10;
y += W[35] * x2_10 * x1_00 * x0_11;
y += W[36] * x2_10 * x1_01 * x0_00;
y += W[37] * x2_10 * x1_01 * x0_01;
y += W[38] * x2_10 * x1_01 * x0_10;
y += W[39] * x2_10 * x1_01 * x0_11;
y += W[40] * x2_10 * x1_10 * x0_00;
y += W[41] * x2_10 * x1_10 * x0_01;
y += W[42] * x2_10 * x1_10 * x0_10;
y += W[43] * x2_10 * x1_10 * x0_11;
y += W[44] * x2_10 * x1_11 * x0_00;
y += W[45] * x2_10 * x1_11 * x0_01;
y += W[46] * x2_10 * x1_11 * x0_10;
y += W[47] * x2_10 * x1_11 * x0_11;
y += W[48] * x2_11 * x1_00 * x0_00;
y += W[49] * x2_11 * x1_00 * x0_01;
y += W[50] * x2_11 * x1_00 * x0_10;
y += W[51] * x2_11 * x1_00 * x0_11;
y += W[52] * x2_11 * x1_01 * x0_00;
y += W[53] * x2_11 * x1_01 * x0_01;
y += W[54] * x2_11 * x1_01 * x0_10;
y += W[55] * x2_11 * x1_01 * x0_11;
y += W[56] * x2_11 * x1_10 * x0_00;
y += W[57] * x2_11 * x1_10 * x0_01;
y += W[58] * x2_11 * x1_10 * x0_10;
y += W[59] * x2_11 * x1_10 * x0_11;
y += W[60] * x2_11 * x1_11 * x0_00;
y += W[61] * x2_11 * x1_11 * x0_01;
y += W[62] * x2_11 * x1_11 * x0_10;
y += W[63] * x2_11 * x1_11 * x0_11;
// clamp
y = max(0.0, y);
y = min(1.0, y);
y_ptr[frame] = y;
}
}
int bbcu_fp32_StochasticLut6_Forward
(
const float *dev_x_buf,
float *dev_y_buf,
int const *dev_input_index,
float const *dev_W,
int node_size,
int frame_size,
int frame_stride,
int binary_mode,
hipStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
dim3 block(512);
dim3 grid(node_size);
while ( frame_size < (int)block.x / 2 ) {
block.x /= 2;
}
hipLaunchKernelGGL(( kernal_fp32_StochasticLut6_Forward), dim3(grid), dim3(block), 0, streamId,
dev_x_buf,
dev_y_buf,
dev_input_index,
dev_W,
frame_size,
frame_stride,
binary_mode
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
// -------------------------------------------------
// Backward
// -------------------------------------------------
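// Backward pass: for every frame, dW[k] accumulates grad times the same product
// term used in the forward expansion, and the gradient w.r.t. each input is
// dy/dx_i = dy/dxp_i - dy/dxn_i (since xn_i = 1 - xp_i). device_fp32_LocalSum
// then tree-reduces the per-thread dW partials across the block.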
__device__ __forceinline__ float device_fp32_LocalSum(float v, float *buf)
{
buf[threadIdx.x] = v;
__syncthreads();
//
int comb = 1;
while (comb < blockDim.x) {
int next = comb * 2;
int mask = next - 1;
if ((threadIdx.x & mask) == 0) {
buf[threadIdx.x] += buf[threadIdx.x + comb];
}
comb = next;
__syncthreads();
}
float sum = buf[0];
__syncthreads();
return sum;
}
// kernel
template<int THREAD_SIZE=256>
__global__ void kernal_fp32_StochasticLut6_Backward
(
float const *x_buf,
float const *dy_buf,
float *dx_buf,
int const *input_index,
float const *W_buf,
float *dW_buf,
int frame_size,
int frame_stride,
int binary_mode
)
{
__shared__ float buf[THREAD_SIZE];
int node = blockIdx.x;
int id = threadIdx.x;
int id_step = blockDim.x;
// initialize dW
float dW[64];
for ( int i = 0; i < 64; ++i) {
dW[i] = 0;
}
// read W
__shared__ float W[64];
for ( int i = id; i < 64; i += id_step ) {
W[i] = W_buf[node * 64 + i];
if ( binary_mode ) {
W[i] = W[i] > 0.5 ? 1.0 : 0.0;
}
}
// init pointer
__shared__ float const *x_ptr[6];
for ( int i = id; i < 6; i += id_step ) {
int input_node = input_index[6*node + i];
x_ptr[i] = &x_buf[frame_stride * input_node];
}
float const *dy_ptr = &dy_buf[node*frame_stride];
__syncthreads();
for ( int frame = id; frame < frame_size; frame += id_step ) {
float xp[6], xn[6];
for ( int i = 0; i < 6; ++i) {
xp[i] = x_ptr[i][frame];
xn[i] = 1.0 - xp[i];
}
float x0_00 = xn[1] * xn[0];
float x0_01 = xn[1] * xp[0];
float x0_10 = xp[1] * xn[0];
float x0_11 = xp[1] * xp[0];
float x1_00 = xn[3] * xn[2];
float x1_01 = xn[3] * xp[2];
float x1_10 = xp[3] * xn[2];
float x1_11 = xp[3] * xp[2];
float x2_00 = xn[5] * xn[4];
float x2_01 = xn[5] * xp[4];
float x2_10 = xp[5] * xn[4];
float x2_11 = xp[5] * xp[4];
float grad = dy_ptr[frame];
dW[0] += x2_00 * x1_00 * x0_00 * grad;
dW[1] += x2_00 * x1_00 * x0_01 * grad;
dW[2] += x2_00 * x1_00 * x0_10 * grad;
dW[3] += x2_00 * x1_00 * x0_11 * grad;
dW[4] += x2_00 * x1_01 * x0_00 * grad;
dW[5] += x2_00 * x1_01 * x0_01 * grad;
dW[6] += x2_00 * x1_01 * x0_10 * grad;
dW[7] += x2_00 * x1_01 * x0_11 * grad;
dW[8] += x2_00 * x1_10 * x0_00 * grad;
dW[9] += x2_00 * x1_10 * x0_01 * grad;
dW[10] += x2_00 * x1_10 * x0_10 * grad;
dW[11] += x2_00 * x1_10 * x0_11 * grad;
dW[12] += x2_00 * x1_11 * x0_00 * grad;
dW[13] += x2_00 * x1_11 * x0_01 * grad;
dW[14] += x2_00 * x1_11 * x0_10 * grad;
dW[15] += x2_00 * x1_11 * x0_11 * grad;
dW[16] += x2_01 * x1_00 * x0_00 * grad;
dW[17] += x2_01 * x1_00 * x0_01 * grad;
dW[18] += x2_01 * x1_00 * x0_10 * grad;
dW[19] += x2_01 * x1_00 * x0_11 * grad;
dW[20] += x2_01 * x1_01 * x0_00 * grad;
dW[21] += x2_01 * x1_01 * x0_01 * grad;
dW[22] += x2_01 * x1_01 * x0_10 * grad;
dW[23] += x2_01 * x1_01 * x0_11 * grad;
dW[24] += x2_01 * x1_10 * x0_00 * grad;
dW[25] += x2_01 * x1_10 * x0_01 * grad;
dW[26] += x2_01 * x1_10 * x0_10 * grad;
dW[27] += x2_01 * x1_10 * x0_11 * grad;
dW[28] += x2_01 * x1_11 * x0_00 * grad;
dW[29] += x2_01 * x1_11 * x0_01 * grad;
dW[30] += x2_01 * x1_11 * x0_10 * grad;
dW[31] += x2_01 * x1_11 * x0_11 * grad;
dW[32] += x2_10 * x1_00 * x0_00 * grad;
dW[33] += x2_10 * x1_00 * x0_01 * grad;
dW[34] += x2_10 * x1_00 * x0_10 * grad;
dW[35] += x2_10 * x1_00 * x0_11 * grad;
dW[36] += x2_10 * x1_01 * x0_00 * grad;
dW[37] += x2_10 * x1_01 * x0_01 * grad;
dW[38] += x2_10 * x1_01 * x0_10 * grad;
dW[39] += x2_10 * x1_01 * x0_11 * grad;
dW[40] += x2_10 * x1_10 * x0_00 * grad;
dW[41] += x2_10 * x1_10 * x0_01 * grad;
dW[42] += x2_10 * x1_10 * x0_10 * grad;
dW[43] += x2_10 * x1_10 * x0_11 * grad;
dW[44] += x2_10 * x1_11 * x0_00 * grad;
dW[45] += x2_10 * x1_11 * x0_01 * grad;
dW[46] += x2_10 * x1_11 * x0_10 * grad;
dW[47] += x2_10 * x1_11 * x0_11 * grad;
dW[48] += x2_11 * x1_00 * x0_00 * grad;
dW[49] += x2_11 * x1_00 * x0_01 * grad;
dW[50] += x2_11 * x1_00 * x0_10 * grad;
dW[51] += x2_11 * x1_00 * x0_11 * grad;
dW[52] += x2_11 * x1_01 * x0_00 * grad;
dW[53] += x2_11 * x1_01 * x0_01 * grad;
dW[54] += x2_11 * x1_01 * x0_10 * grad;
dW[55] += x2_11 * x1_01 * x0_11 * grad;
dW[56] += x2_11 * x1_10 * x0_00 * grad;
dW[57] += x2_11 * x1_10 * x0_01 * grad;
dW[58] += x2_11 * x1_10 * x0_10 * grad;
dW[59] += x2_11 * x1_10 * x0_11 * grad;
dW[60] += x2_11 * x1_11 * x0_00 * grad;
dW[61] += x2_11 * x1_11 * x0_01 * grad;
dW[62] += x2_11 * x1_11 * x0_10 * grad;
dW[63] += x2_11 * x1_11 * x0_11 * grad;
float dxi;
float dx0_00 = 0;
float dx0_01 = 0;
float dx0_10 = 0;
float dx0_11 = 0;
float dx1_00 = 0;
float dx1_01 = 0;
float dx1_10 = 0;
float dx1_11 = 0;
float dx2_00 = 0;
float dx2_01 = 0;
float dx2_10 = 0;
float dx2_11 = 0;
dxi = W[ 0] * grad; dx0_00 += dxi * x2_00 * x1_00; dx1_00 += dxi * x2_00 * x0_00; dx2_00 += dxi * x1_00 * x0_00;
dxi = W[ 1] * grad; dx0_01 += dxi * x2_00 * x1_00; dx1_00 += dxi * x2_00 * x0_01; dx2_00 += dxi * x1_00 * x0_01;
dxi = W[ 2] * grad; dx0_10 += dxi * x2_00 * x1_00; dx1_00 += dxi * x2_00 * x0_10; dx2_00 += dxi * x1_00 * x0_10;
dxi = W[ 3] * grad; dx0_11 += dxi * x2_00 * x1_00; dx1_00 += dxi * x2_00 * x0_11; dx2_00 += dxi * x1_00 * x0_11;
dxi = W[ 4] * grad; dx0_00 += dxi * x2_00 * x1_01; dx1_01 += dxi * x2_00 * x0_00; dx2_00 += dxi * x1_01 * x0_00;
dxi = W[ 5] * grad; dx0_01 += dxi * x2_00 * x1_01; dx1_01 += dxi * x2_00 * x0_01; dx2_00 += dxi * x1_01 * x0_01;
dxi = W[ 6] * grad; dx0_10 += dxi * x2_00 * x1_01; dx1_01 += dxi * x2_00 * x0_10; dx2_00 += dxi * x1_01 * x0_10;
dxi = W[ 7] * grad; dx0_11 += dxi * x2_00 * x1_01; dx1_01 += dxi * x2_00 * x0_11; dx2_00 += dxi * x1_01 * x0_11;
dxi = W[ 8] * grad; dx0_00 += dxi * x2_00 * x1_10; dx1_10 += dxi * x2_00 * x0_00; dx2_00 += dxi * x1_10 * x0_00;
dxi = W[ 9] * grad; dx0_01 += dxi * x2_00 * x1_10; dx1_10 += dxi * x2_00 * x0_01; dx2_00 += dxi * x1_10 * x0_01;
dxi = W[10] * grad; dx0_10 += dxi * x2_00 * x1_10; dx1_10 += dxi * x2_00 * x0_10; dx2_00 += dxi * x1_10 * x0_10;
dxi = W[11] * grad; dx0_11 += dxi * x2_00 * x1_10; dx1_10 += dxi * x2_00 * x0_11; dx2_00 += dxi * x1_10 * x0_11;
dxi = W[12] * grad; dx0_00 += dxi * x2_00 * x1_11; dx1_11 += dxi * x2_00 * x0_00; dx2_00 += dxi * x1_11 * x0_00;
dxi = W[13] * grad; dx0_01 += dxi * x2_00 * x1_11; dx1_11 += dxi * x2_00 * x0_01; dx2_00 += dxi * x1_11 * x0_01;
dxi = W[14] * grad; dx0_10 += dxi * x2_00 * x1_11; dx1_11 += dxi * x2_00 * x0_10; dx2_00 += dxi * x1_11 * x0_10;
dxi = W[15] * grad; dx0_11 += dxi * x2_00 * x1_11; dx1_11 += dxi * x2_00 * x0_11; dx2_00 += dxi * x1_11 * x0_11;
dxi = W[16] * grad; dx0_00 += dxi * x2_01 * x1_00; dx1_00 += dxi * x2_01 * x0_00; dx2_01 += dxi * x1_00 * x0_00;
dxi = W[17] * grad; dx0_01 += dxi * x2_01 * x1_00; dx1_00 += dxi * x2_01 * x0_01; dx2_01 += dxi * x1_00 * x0_01;
dxi = W[18] * grad; dx0_10 += dxi * x2_01 * x1_00; dx1_00 += dxi * x2_01 * x0_10; dx2_01 += dxi * x1_00 * x0_10;
dxi = W[19] * grad; dx0_11 += dxi * x2_01 * x1_00; dx1_00 += dxi * x2_01 * x0_11; dx2_01 += dxi * x1_00 * x0_11;
dxi = W[20] * grad; dx0_00 += dxi * x2_01 * x1_01; dx1_01 += dxi * x2_01 * x0_00; dx2_01 += dxi * x1_01 * x0_00;
dxi = W[21] * grad; dx0_01 += dxi * x2_01 * x1_01; dx1_01 += dxi * x2_01 * x0_01; dx2_01 += dxi * x1_01 * x0_01;
dxi = W[22] * grad; dx0_10 += dxi * x2_01 * x1_01; dx1_01 += dxi * x2_01 * x0_10; dx2_01 += dxi * x1_01 * x0_10;
dxi = W[23] * grad; dx0_11 += dxi * x2_01 * x1_01; dx1_01 += dxi * x2_01 * x0_11; dx2_01 += dxi * x1_01 * x0_11;
dxi = W[24] * grad; dx0_00 += dxi * x2_01 * x1_10; dx1_10 += dxi * x2_01 * x0_00; dx2_01 += dxi * x1_10 * x0_00;
dxi = W[25] * grad; dx0_01 += dxi * x2_01 * x1_10; dx1_10 += dxi * x2_01 * x0_01; dx2_01 += dxi * x1_10 * x0_01;
dxi = W[26] * grad; dx0_10 += dxi * x2_01 * x1_10; dx1_10 += dxi * x2_01 * x0_10; dx2_01 += dxi * x1_10 * x0_10;
dxi = W[27] * grad; dx0_11 += dxi * x2_01 * x1_10; dx1_10 += dxi * x2_01 * x0_11; dx2_01 += dxi * x1_10 * x0_11;
dxi = W[28] * grad; dx0_00 += dxi * x2_01 * x1_11; dx1_11 += dxi * x2_01 * x0_00; dx2_01 += dxi * x1_11 * x0_00;
dxi = W[29] * grad; dx0_01 += dxi * x2_01 * x1_11; dx1_11 += dxi * x2_01 * x0_01; dx2_01 += dxi * x1_11 * x0_01;
dxi = W[30] * grad; dx0_10 += dxi * x2_01 * x1_11; dx1_11 += dxi * x2_01 * x0_10; dx2_01 += dxi * x1_11 * x0_10;
dxi = W[31] * grad; dx0_11 += dxi * x2_01 * x1_11; dx1_11 += dxi * x2_01 * x0_11; dx2_01 += dxi * x1_11 * x0_11;
dxi = W[32] * grad; dx0_00 += dxi * x2_10 * x1_00; dx1_00 += dxi * x2_10 * x0_00; dx2_10 += dxi * x1_00 * x0_00;
dxi = W[33] * grad; dx0_01 += dxi * x2_10 * x1_00; dx1_00 += dxi * x2_10 * x0_01; dx2_10 += dxi * x1_00 * x0_01;
dxi = W[34] * grad; dx0_10 += dxi * x2_10 * x1_00; dx1_00 += dxi * x2_10 * x0_10; dx2_10 += dxi * x1_00 * x0_10;
dxi = W[35] * grad; dx0_11 += dxi * x2_10 * x1_00; dx1_00 += dxi * x2_10 * x0_11; dx2_10 += dxi * x1_00 * x0_11;
dxi = W[36] * grad; dx0_00 += dxi * x2_10 * x1_01; dx1_01 += dxi * x2_10 * x0_00; dx2_10 += dxi * x1_01 * x0_00;
dxi = W[37] * grad; dx0_01 += dxi * x2_10 * x1_01; dx1_01 += dxi * x2_10 * x0_01; dx2_10 += dxi * x1_01 * x0_01;
dxi = W[38] * grad; dx0_10 += dxi * x2_10 * x1_01; dx1_01 += dxi * x2_10 * x0_10; dx2_10 += dxi * x1_01 * x0_10;
dxi = W[39] * grad; dx0_11 += dxi * x2_10 * x1_01; dx1_01 += dxi * x2_10 * x0_11; dx2_10 += dxi * x1_01 * x0_11;
dxi = W[40] * grad; dx0_00 += dxi * x2_10 * x1_10; dx1_10 += dxi * x2_10 * x0_00; dx2_10 += dxi * x1_10 * x0_00;
dxi = W[41] * grad; dx0_01 += dxi * x2_10 * x1_10; dx1_10 += dxi * x2_10 * x0_01; dx2_10 += dxi * x1_10 * x0_01;
dxi = W[42] * grad; dx0_10 += dxi * x2_10 * x1_10; dx1_10 += dxi * x2_10 * x0_10; dx2_10 += dxi * x1_10 * x0_10;
dxi = W[43] * grad; dx0_11 += dxi * x2_10 * x1_10; dx1_10 += dxi * x2_10 * x0_11; dx2_10 += dxi * x1_10 * x0_11;
dxi = W[44] * grad; dx0_00 += dxi * x2_10 * x1_11; dx1_11 += dxi * x2_10 * x0_00; dx2_10 += dxi * x1_11 * x0_00;
dxi = W[45] * grad; dx0_01 += dxi * x2_10 * x1_11; dx1_11 += dxi * x2_10 * x0_01; dx2_10 += dxi * x1_11 * x0_01;
dxi = W[46] * grad; dx0_10 += dxi * x2_10 * x1_11; dx1_11 += dxi * x2_10 * x0_10; dx2_10 += dxi * x1_11 * x0_10;
dxi = W[47] * grad; dx0_11 += dxi * x2_10 * x1_11; dx1_11 += dxi * x2_10 * x0_11; dx2_10 += dxi * x1_11 * x0_11;
dxi = W[48] * grad; dx0_00 += dxi * x2_11 * x1_00; dx1_00 += dxi * x2_11 * x0_00; dx2_11 += dxi * x1_00 * x0_00;
dxi = W[49] * grad; dx0_01 += dxi * x2_11 * x1_00; dx1_00 += dxi * x2_11 * x0_01; dx2_11 += dxi * x1_00 * x0_01;
dxi = W[50] * grad; dx0_10 += dxi * x2_11 * x1_00; dx1_00 += dxi * x2_11 * x0_10; dx2_11 += dxi * x1_00 * x0_10;
dxi = W[51] * grad; dx0_11 += dxi * x2_11 * x1_00; dx1_00 += dxi * x2_11 * x0_11; dx2_11 += dxi * x1_00 * x0_11;
dxi = W[52] * grad; dx0_00 += dxi * x2_11 * x1_01; dx1_01 += dxi * x2_11 * x0_00; dx2_11 += dxi * x1_01 * x0_00;
dxi = W[53] * grad; dx0_01 += dxi * x2_11 * x1_01; dx1_01 += dxi * x2_11 * x0_01; dx2_11 += dxi * x1_01 * x0_01;
dxi = W[54] * grad; dx0_10 += dxi * x2_11 * x1_01; dx1_01 += dxi * x2_11 * x0_10; dx2_11 += dxi * x1_01 * x0_10;
dxi = W[55] * grad; dx0_11 += dxi * x2_11 * x1_01; dx1_01 += dxi * x2_11 * x0_11; dx2_11 += dxi * x1_01 * x0_11;
dxi = W[56] * grad; dx0_00 += dxi * x2_11 * x1_10; dx1_10 += dxi * x2_11 * x0_00; dx2_11 += dxi * x1_10 * x0_00;
dxi = W[57] * grad; dx0_01 += dxi * x2_11 * x1_10; dx1_10 += dxi * x2_11 * x0_01; dx2_11 += dxi * x1_10 * x0_01;
dxi = W[58] * grad; dx0_10 += dxi * x2_11 * x1_10; dx1_10 += dxi * x2_11 * x0_10; dx2_11 += dxi * x1_10 * x0_10;
dxi = W[59] * grad; dx0_11 += dxi * x2_11 * x1_10; dx1_10 += dxi * x2_11 * x0_11; dx2_11 += dxi * x1_10 * x0_11;
dxi = W[60] * grad; dx0_00 += dxi * x2_11 * x1_11; dx1_11 += dxi * x2_11 * x0_00; dx2_11 += dxi * x1_11 * x0_00;
dxi = W[61] * grad; dx0_01 += dxi * x2_11 * x1_11; dx1_11 += dxi * x2_11 * x0_01; dx2_11 += dxi * x1_11 * x0_01;
dxi = W[62] * grad; dx0_10 += dxi * x2_11 * x1_11; dx1_11 += dxi * x2_11 * x0_10; dx2_11 += dxi * x1_11 * x0_10;
dxi = W[63] * grad; dx0_11 += dxi * x2_11 * x1_11; dx1_11 += dxi * x2_11 * x0_11; dx2_11 += dxi * x1_11 * x0_11;
float *dx_ptr = &dx_buf[(node*6)*frame_stride + frame];
float dxn;
float dxp;
dxn = dx0_00 * xn[1]; dxn += dx0_10 * xp[1];
dxp = dx0_01 * xn[1]; dxp += dx0_11 * xp[1];
dx_ptr[0 * frame_stride] = (dxp - dxn);
dxn = dx0_00 * xn[0];
dxn += dx0_01 * xp[0];
dxp = dx0_10 * xn[0];
dxp += dx0_11 * xp[0];
dx_ptr[1 * frame_stride] = (dxp - dxn);
dxn = dx1_00 * xn[3];
dxp = dx1_01 * xn[3];
dxn += dx1_10 * xp[3];
dxp += dx1_11 * xp[3];
dx_ptr[2 * frame_stride] = (dxp - dxn);
dxn = dx1_00 * xn[2];
dxn += dx1_01 * xp[2];
dxp = dx1_10 * xn[2];
dxp += dx1_11 * xp[2];
dx_ptr[3 * frame_stride] = (dxp - dxn);
dxn = dx2_00 * xn[5];
dxp = dx2_01 * xn[5];
dxn += dx2_10 * xp[5];
dxp += dx2_11 * xp[5];
dx_ptr[4 * frame_stride] = (dxp - dxn);
dxn = dx2_00 * xn[4];
dxn += dx2_01 * xp[4];
dxp = dx2_10 * xn[4];
dxp += dx2_11 * xp[4];
dx_ptr[5 * frame_stride] = (dxp - dxn);
}
for ( int i = 0; i < 64; ++i) {
dW[i] = device_fp32_LocalSum(dW[i], buf);
if ( threadIdx.x == 0 ) {
dW_buf[node*64 + i] = dW[i];
}
}
}
__global__ void kernal_fp32_StochasticLut6_BackwardMarge(
const float* src_buf,
float* dst_buf,
const int* input_index,
int node_size,
int frame_size,
int frame_stride
)
{
int frame = blockDim.x * blockIdx.x + threadIdx.x;
for ( int node = 0; node < node_size; ++node ) {
if ( frame < frame_size ) {
for ( int n = 0; n < 6; ++n ) {
int in_idx = input_index[node*6 + n];
float* dst_buf_ptr = &dst_buf[frame_stride * in_idx];
float prev_data = dst_buf_ptr[frame];
const float* src_buf_ptr = &src_buf[(6 * node + n) * frame_stride];
dst_buf_ptr[frame] = prev_data + src_buf_ptr[frame];
}
}
__syncthreads();
}
}
int bbcu_fp32_StochasticLut6_Backward(
float const *dev_x_buf,
float const *dev_dy_buf,
float *dev_dx_buf,
float *dev_dx_tmp,
int const *dev_input_index,
float const *dev_W,
float *dev_dW,
int input_node_size,
int output_node_size,
int frame_size,
int frame_stride,
int binary_mode,
hipStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
{
int const thread_size = 256;
dim3 block(thread_size);
dim3 grid(output_node_size);
while ( frame_size < (int)block.x / 2 ) {
block.x /= 2;
}
hipLaunchKernelGGL(( kernal_fp32_StochasticLut6_Backward<thread_size>), dim3(grid), dim3(block), 0, streamId,
dev_x_buf,
dev_dy_buf,
dev_dx_tmp,
dev_input_index,
dev_W,
dev_dW,
frame_size,
frame_stride,
binary_mode
);
BB_CUDA_CHECK_LAST_ERROR();
}
{
BB_CUDA_SAFE_CALL(hipMemset(dev_dx_buf, 0, input_node_size * frame_stride * sizeof(float)));
int block_x = frame_size;
while ( block_x > 1024 ) { block_x /= 2; }
dim3 grid((frame_size + block_x - 1) /block_x, 1);
dim3 block(block_x, 1, 1);
hipLaunchKernelGGL(( kernal_fp32_StochasticLut6_BackwardMarge), dim3(grid), dim3(block), 0, 0,
dev_dx_tmp,
dev_dx_buf,
dev_input_index,
output_node_size,
frame_size,
frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
}
return 0;
}
// end of file
|
800f9d21366eef1af79bd67c4d2937dd0cb139d2.cu
|
#include <iostream>
#include <chrono>
#include <algorithm>
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include "bbcu/bbcu.h"
#include "bbcu/bbcu_util.h"
// -------------------------------------------------
// Forward
// -------------------------------------------------
__global__ void kernal_fp32_StochasticLut6_Forward(
float const *x_buf,
float *y_buf,
int const *input_index,
float const *W_buf,
int frame_size,
int frame_stride,
int binary_mode
)
{
int node = blockIdx.x;
int id = threadIdx.x;
int id_step = blockDim.x;
// read W
__shared__ float W[64];
for ( int i = id; i < 64; i += id_step ) {
W[i] = W_buf[node * 64 + i];
if ( binary_mode ) {
W[i] = W[i] > 0.5 ? 1.0 : 0.0;
}
}
// read input index
__shared__ float const *x_ptr[6];
for ( int i = id; i < 6; i += id_step ) {
x_ptr[i] = &x_buf[frame_stride * input_index[6*node + i]];
}
float *y_ptr = &y_buf[node * frame_stride];
__syncthreads();
for (int frame = id; frame < frame_size; frame += id_step) {
float xp[6], xn[6];
for ( int i = 0; i < 6; ++i) {
xp[i] = x_ptr[i][frame];
xn[i] = 1.0 - xp[i];
}
float x0_00 = xn[1] * xn[0];
float x0_01 = xn[1] * xp[0];
float x0_10 = xp[1] * xn[0];
float x0_11 = xp[1] * xp[0];
float x1_00 = xn[3] * xn[2];
float x1_01 = xn[3] * xp[2];
float x1_10 = xp[3] * xn[2];
float x1_11 = xp[3] * xp[2];
float x2_00 = xn[5] * xn[4];
float x2_01 = xn[5] * xp[4];
float x2_10 = xp[5] * xn[4];
float x2_11 = xp[5] * xp[4];
float y = 0;
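// The 64 weights form a truth table for the 6 inputs: W[k] is the LUT output for the
// input pattern whose bits are k (input 0 = LSB ... input 5 = MSB), and each product
// below is the probability of that pattern under the per-input probabilities xp/xn.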
y += W[0 ] * x2_00 * x1_00 * x0_00;
y += W[1 ] * x2_00 * x1_00 * x0_01;
y += W[2 ] * x2_00 * x1_00 * x0_10;
y += W[3 ] * x2_00 * x1_00 * x0_11;
y += W[4 ] * x2_00 * x1_01 * x0_00;
y += W[5 ] * x2_00 * x1_01 * x0_01;
y += W[6 ] * x2_00 * x1_01 * x0_10;
y += W[7 ] * x2_00 * x1_01 * x0_11;
y += W[8 ] * x2_00 * x1_10 * x0_00;
y += W[9 ] * x2_00 * x1_10 * x0_01;
y += W[10] * x2_00 * x1_10 * x0_10;
y += W[11] * x2_00 * x1_10 * x0_11;
y += W[12] * x2_00 * x1_11 * x0_00;
y += W[13] * x2_00 * x1_11 * x0_01;
y += W[14] * x2_00 * x1_11 * x0_10;
y += W[15] * x2_00 * x1_11 * x0_11;
y += W[16] * x2_01 * x1_00 * x0_00;
y += W[17] * x2_01 * x1_00 * x0_01;
y += W[18] * x2_01 * x1_00 * x0_10;
y += W[19] * x2_01 * x1_00 * x0_11;
y += W[20] * x2_01 * x1_01 * x0_00;
y += W[21] * x2_01 * x1_01 * x0_01;
y += W[22] * x2_01 * x1_01 * x0_10;
y += W[23] * x2_01 * x1_01 * x0_11;
y += W[24] * x2_01 * x1_10 * x0_00;
y += W[25] * x2_01 * x1_10 * x0_01;
y += W[26] * x2_01 * x1_10 * x0_10;
y += W[27] * x2_01 * x1_10 * x0_11;
y += W[28] * x2_01 * x1_11 * x0_00;
y += W[29] * x2_01 * x1_11 * x0_01;
y += W[30] * x2_01 * x1_11 * x0_10;
y += W[31] * x2_01 * x1_11 * x0_11;
y += W[32] * x2_10 * x1_00 * x0_00;
y += W[33] * x2_10 * x1_00 * x0_01;
y += W[34] * x2_10 * x1_00 * x0_10;
y += W[35] * x2_10 * x1_00 * x0_11;
y += W[36] * x2_10 * x1_01 * x0_00;
y += W[37] * x2_10 * x1_01 * x0_01;
y += W[38] * x2_10 * x1_01 * x0_10;
y += W[39] * x2_10 * x1_01 * x0_11;
y += W[40] * x2_10 * x1_10 * x0_00;
y += W[41] * x2_10 * x1_10 * x0_01;
y += W[42] * x2_10 * x1_10 * x0_10;
y += W[43] * x2_10 * x1_10 * x0_11;
y += W[44] * x2_10 * x1_11 * x0_00;
y += W[45] * x2_10 * x1_11 * x0_01;
y += W[46] * x2_10 * x1_11 * x0_10;
y += W[47] * x2_10 * x1_11 * x0_11;
y += W[48] * x2_11 * x1_00 * x0_00;
y += W[49] * x2_11 * x1_00 * x0_01;
y += W[50] * x2_11 * x1_00 * x0_10;
y += W[51] * x2_11 * x1_00 * x0_11;
y += W[52] * x2_11 * x1_01 * x0_00;
y += W[53] * x2_11 * x1_01 * x0_01;
y += W[54] * x2_11 * x1_01 * x0_10;
y += W[55] * x2_11 * x1_01 * x0_11;
y += W[56] * x2_11 * x1_10 * x0_00;
y += W[57] * x2_11 * x1_10 * x0_01;
y += W[58] * x2_11 * x1_10 * x0_10;
y += W[59] * x2_11 * x1_10 * x0_11;
y += W[60] * x2_11 * x1_11 * x0_00;
y += W[61] * x2_11 * x1_11 * x0_01;
y += W[62] * x2_11 * x1_11 * x0_10;
y += W[63] * x2_11 * x1_11 * x0_11;
// clamp
y = max(0.0, y);
y = min(1.0, y);
y_ptr[frame] = y;
}
}
int bbcu_fp32_StochasticLut6_Forward
(
const float *dev_x_buf,
float *dev_y_buf,
int const *dev_input_index,
float const *dev_W,
int node_size,
int frame_size,
int frame_stride,
int binary_mode,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
dim3 block(512);
dim3 grid(node_size);
while ( frame_size < (int)block.x / 2 ) {
block.x /= 2;
}
kernal_fp32_StochasticLut6_Forward<<<grid, block, 0, streamId>>>(
dev_x_buf,
dev_y_buf,
dev_input_index,
dev_W,
frame_size,
frame_stride,
binary_mode
);
BB_CUDA_CHECK_LAST_ERROR();
return 0;
}
// -------------------------------------------------
// Backward
// -------------------------------------------------
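// Block-wide sum: each thread contributes v, and after a shared-memory tree reduction every
// thread returns the block total (assumes blockDim.x is a power of two, which the backward
// launcher guarantees by only ever halving a 256-thread block).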
__device__ __forceinline__ float device_fp32_LocalSum(float v, float *buf)
{
buf[threadIdx.x] = v;
__syncthreads();
// reduce the per-thread partial sums across the block
int comb = 1;
while (comb < blockDim.x) {
int next = comb * 2;
int mask = next - 1;
if ((threadIdx.x & mask) == 0) {
buf[threadIdx.x] += buf[threadIdx.x + comb];
}
comb = next;
__syncthreads();
}
float sum = buf[0];
__syncthreads();
return sum;
}
// kernel
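// Each thread accumulates its partial dW[64] over the frames it owns and writes per-input
// gradients into a 6-row staging area of dx_buf for its node; at the end the 64 partial dW
// sums are reduced across the block with device_fp32_LocalSum and thread 0 stores them in
// dW_buf. kernal_fp32_StochasticLut6_BackwardMarge later accumulates the staging rows into
// the real dx buffer indexed by input_index.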
template<int THREAD_SIZE=256>
__global__ void kernal_fp32_StochasticLut6_Backward
(
float const *x_buf,
float const *dy_buf,
float *dx_buf,
int const *input_index,
float const *W_buf,
float *dW_buf,
int frame_size,
int frame_stride,
int binary_mode
)
{
__shared__ float buf[THREAD_SIZE];
int node = blockIdx.x;
int id = threadIdx.x;
int id_step = blockDim.x;
// initialize dW
float dW[64];
for ( int i = 0; i < 64; ++i) {
dW[i] = 0;
}
// read W
__shared__ float W[64];
for ( int i = id; i < 64; i += id_step ) {
W[i] = W_buf[node * 64 + i];
if ( binary_mode ) {
W[i] = W[i] > 0.5 ? 1.0 : 0.0;
}
}
// init pointer
__shared__ float const *x_ptr[6];
for ( int i = id; i < 6; i += id_step ) {
int input_node = input_index[6*node + i];
x_ptr[i] = &x_buf[frame_stride * input_node];
}
float const *dy_ptr = &dy_buf[node*frame_stride];
__syncthreads();
for ( int frame = id; frame < frame_size; frame += id_step ) {
float xp[6], xn[6];
for ( int i = 0; i < 6; ++i) {
xp[i] = x_ptr[i][frame];
xn[i] = 1.0 - xp[i];
}
float x0_00 = xn[1] * xn[0];
float x0_01 = xn[1] * xp[0];
float x0_10 = xp[1] * xn[0];
float x0_11 = xp[1] * xp[0];
float x1_00 = xn[3] * xn[2];
float x1_01 = xn[3] * xp[2];
float x1_10 = xp[3] * xn[2];
float x1_11 = xp[3] * xp[2];
float x2_00 = xn[5] * xn[4];
float x2_01 = xn[5] * xp[4];
float x2_10 = xp[5] * xn[4];
float x2_11 = xp[5] * xp[4];
float grad = dy_ptr[frame];
dW[0] += x2_00 * x1_00 * x0_00 * grad;
dW[1] += x2_00 * x1_00 * x0_01 * grad;
dW[2] += x2_00 * x1_00 * x0_10 * grad;
dW[3] += x2_00 * x1_00 * x0_11 * grad;
dW[4] += x2_00 * x1_01 * x0_00 * grad;
dW[5] += x2_00 * x1_01 * x0_01 * grad;
dW[6] += x2_00 * x1_01 * x0_10 * grad;
dW[7] += x2_00 * x1_01 * x0_11 * grad;
dW[8] += x2_00 * x1_10 * x0_00 * grad;
dW[9] += x2_00 * x1_10 * x0_01 * grad;
dW[10] += x2_00 * x1_10 * x0_10 * grad;
dW[11] += x2_00 * x1_10 * x0_11 * grad;
dW[12] += x2_00 * x1_11 * x0_00 * grad;
dW[13] += x2_00 * x1_11 * x0_01 * grad;
dW[14] += x2_00 * x1_11 * x0_10 * grad;
dW[15] += x2_00 * x1_11 * x0_11 * grad;
dW[16] += x2_01 * x1_00 * x0_00 * grad;
dW[17] += x2_01 * x1_00 * x0_01 * grad;
dW[18] += x2_01 * x1_00 * x0_10 * grad;
dW[19] += x2_01 * x1_00 * x0_11 * grad;
dW[20] += x2_01 * x1_01 * x0_00 * grad;
dW[21] += x2_01 * x1_01 * x0_01 * grad;
dW[22] += x2_01 * x1_01 * x0_10 * grad;
dW[23] += x2_01 * x1_01 * x0_11 * grad;
dW[24] += x2_01 * x1_10 * x0_00 * grad;
dW[25] += x2_01 * x1_10 * x0_01 * grad;
dW[26] += x2_01 * x1_10 * x0_10 * grad;
dW[27] += x2_01 * x1_10 * x0_11 * grad;
dW[28] += x2_01 * x1_11 * x0_00 * grad;
dW[29] += x2_01 * x1_11 * x0_01 * grad;
dW[30] += x2_01 * x1_11 * x0_10 * grad;
dW[31] += x2_01 * x1_11 * x0_11 * grad;
dW[32] += x2_10 * x1_00 * x0_00 * grad;
dW[33] += x2_10 * x1_00 * x0_01 * grad;
dW[34] += x2_10 * x1_00 * x0_10 * grad;
dW[35] += x2_10 * x1_00 * x0_11 * grad;
dW[36] += x2_10 * x1_01 * x0_00 * grad;
dW[37] += x2_10 * x1_01 * x0_01 * grad;
dW[38] += x2_10 * x1_01 * x0_10 * grad;
dW[39] += x2_10 * x1_01 * x0_11 * grad;
dW[40] += x2_10 * x1_10 * x0_00 * grad;
dW[41] += x2_10 * x1_10 * x0_01 * grad;
dW[42] += x2_10 * x1_10 * x0_10 * grad;
dW[43] += x2_10 * x1_10 * x0_11 * grad;
dW[44] += x2_10 * x1_11 * x0_00 * grad;
dW[45] += x2_10 * x1_11 * x0_01 * grad;
dW[46] += x2_10 * x1_11 * x0_10 * grad;
dW[47] += x2_10 * x1_11 * x0_11 * grad;
dW[48] += x2_11 * x1_00 * x0_00 * grad;
dW[49] += x2_11 * x1_00 * x0_01 * grad;
dW[50] += x2_11 * x1_00 * x0_10 * grad;
dW[51] += x2_11 * x1_00 * x0_11 * grad;
dW[52] += x2_11 * x1_01 * x0_00 * grad;
dW[53] += x2_11 * x1_01 * x0_01 * grad;
dW[54] += x2_11 * x1_01 * x0_10 * grad;
dW[55] += x2_11 * x1_01 * x0_11 * grad;
dW[56] += x2_11 * x1_10 * x0_00 * grad;
dW[57] += x2_11 * x1_10 * x0_01 * grad;
dW[58] += x2_11 * x1_10 * x0_10 * grad;
dW[59] += x2_11 * x1_10 * x0_11 * grad;
dW[60] += x2_11 * x1_11 * x0_00 * grad;
dW[61] += x2_11 * x1_11 * x0_01 * grad;
dW[62] += x2_11 * x1_11 * x0_10 * grad;
dW[63] += x2_11 * x1_11 * x0_11 * grad;
float dxi;
float dx0_00 = 0;
float dx0_01 = 0;
float dx0_10 = 0;
float dx0_11 = 0;
float dx1_00 = 0;
float dx1_01 = 0;
float dx1_10 = 0;
float dx1_11 = 0;
float dx2_00 = 0;
float dx2_01 = 0;
float dx2_10 = 0;
float dx2_11 = 0;
dxi = W[ 0] * grad; dx0_00 += dxi * x2_00 * x1_00; dx1_00 += dxi * x2_00 * x0_00; dx2_00 += dxi * x1_00 * x0_00;
dxi = W[ 1] * grad; dx0_01 += dxi * x2_00 * x1_00; dx1_00 += dxi * x2_00 * x0_01; dx2_00 += dxi * x1_00 * x0_01;
dxi = W[ 2] * grad; dx0_10 += dxi * x2_00 * x1_00; dx1_00 += dxi * x2_00 * x0_10; dx2_00 += dxi * x1_00 * x0_10;
dxi = W[ 3] * grad; dx0_11 += dxi * x2_00 * x1_00; dx1_00 += dxi * x2_00 * x0_11; dx2_00 += dxi * x1_00 * x0_11;
dxi = W[ 4] * grad; dx0_00 += dxi * x2_00 * x1_01; dx1_01 += dxi * x2_00 * x0_00; dx2_00 += dxi * x1_01 * x0_00;
dxi = W[ 5] * grad; dx0_01 += dxi * x2_00 * x1_01; dx1_01 += dxi * x2_00 * x0_01; dx2_00 += dxi * x1_01 * x0_01;
dxi = W[ 6] * grad; dx0_10 += dxi * x2_00 * x1_01; dx1_01 += dxi * x2_00 * x0_10; dx2_00 += dxi * x1_01 * x0_10;
dxi = W[ 7] * grad; dx0_11 += dxi * x2_00 * x1_01; dx1_01 += dxi * x2_00 * x0_11; dx2_00 += dxi * x1_01 * x0_11;
dxi = W[ 8] * grad; dx0_00 += dxi * x2_00 * x1_10; dx1_10 += dxi * x2_00 * x0_00; dx2_00 += dxi * x1_10 * x0_00;
dxi = W[ 9] * grad; dx0_01 += dxi * x2_00 * x1_10; dx1_10 += dxi * x2_00 * x0_01; dx2_00 += dxi * x1_10 * x0_01;
dxi = W[10] * grad; dx0_10 += dxi * x2_00 * x1_10; dx1_10 += dxi * x2_00 * x0_10; dx2_00 += dxi * x1_10 * x0_10;
dxi = W[11] * grad; dx0_11 += dxi * x2_00 * x1_10; dx1_10 += dxi * x2_00 * x0_11; dx2_00 += dxi * x1_10 * x0_11;
dxi = W[12] * grad; dx0_00 += dxi * x2_00 * x1_11; dx1_11 += dxi * x2_00 * x0_00; dx2_00 += dxi * x1_11 * x0_00;
dxi = W[13] * grad; dx0_01 += dxi * x2_00 * x1_11; dx1_11 += dxi * x2_00 * x0_01; dx2_00 += dxi * x1_11 * x0_01;
dxi = W[14] * grad; dx0_10 += dxi * x2_00 * x1_11; dx1_11 += dxi * x2_00 * x0_10; dx2_00 += dxi * x1_11 * x0_10;
dxi = W[15] * grad; dx0_11 += dxi * x2_00 * x1_11; dx1_11 += dxi * x2_00 * x0_11; dx2_00 += dxi * x1_11 * x0_11;
dxi = W[16] * grad; dx0_00 += dxi * x2_01 * x1_00; dx1_00 += dxi * x2_01 * x0_00; dx2_01 += dxi * x1_00 * x0_00;
dxi = W[17] * grad; dx0_01 += dxi * x2_01 * x1_00; dx1_00 += dxi * x2_01 * x0_01; dx2_01 += dxi * x1_00 * x0_01;
dxi = W[18] * grad; dx0_10 += dxi * x2_01 * x1_00; dx1_00 += dxi * x2_01 * x0_10; dx2_01 += dxi * x1_00 * x0_10;
dxi = W[19] * grad; dx0_11 += dxi * x2_01 * x1_00; dx1_00 += dxi * x2_01 * x0_11; dx2_01 += dxi * x1_00 * x0_11;
dxi = W[20] * grad; dx0_00 += dxi * x2_01 * x1_01; dx1_01 += dxi * x2_01 * x0_00; dx2_01 += dxi * x1_01 * x0_00;
dxi = W[21] * grad; dx0_01 += dxi * x2_01 * x1_01; dx1_01 += dxi * x2_01 * x0_01; dx2_01 += dxi * x1_01 * x0_01;
dxi = W[22] * grad; dx0_10 += dxi * x2_01 * x1_01; dx1_01 += dxi * x2_01 * x0_10; dx2_01 += dxi * x1_01 * x0_10;
dxi = W[23] * grad; dx0_11 += dxi * x2_01 * x1_01; dx1_01 += dxi * x2_01 * x0_11; dx2_01 += dxi * x1_01 * x0_11;
dxi = W[24] * grad; dx0_00 += dxi * x2_01 * x1_10; dx1_10 += dxi * x2_01 * x0_00; dx2_01 += dxi * x1_10 * x0_00;
dxi = W[25] * grad; dx0_01 += dxi * x2_01 * x1_10; dx1_10 += dxi * x2_01 * x0_01; dx2_01 += dxi * x1_10 * x0_01;
dxi = W[26] * grad; dx0_10 += dxi * x2_01 * x1_10; dx1_10 += dxi * x2_01 * x0_10; dx2_01 += dxi * x1_10 * x0_10;
dxi = W[27] * grad; dx0_11 += dxi * x2_01 * x1_10; dx1_10 += dxi * x2_01 * x0_11; dx2_01 += dxi * x1_10 * x0_11;
dxi = W[28] * grad; dx0_00 += dxi * x2_01 * x1_11; dx1_11 += dxi * x2_01 * x0_00; dx2_01 += dxi * x1_11 * x0_00;
dxi = W[29] * grad; dx0_01 += dxi * x2_01 * x1_11; dx1_11 += dxi * x2_01 * x0_01; dx2_01 += dxi * x1_11 * x0_01;
dxi = W[30] * grad; dx0_10 += dxi * x2_01 * x1_11; dx1_11 += dxi * x2_01 * x0_10; dx2_01 += dxi * x1_11 * x0_10;
dxi = W[31] * grad; dx0_11 += dxi * x2_01 * x1_11; dx1_11 += dxi * x2_01 * x0_11; dx2_01 += dxi * x1_11 * x0_11;
dxi = W[32] * grad; dx0_00 += dxi * x2_10 * x1_00; dx1_00 += dxi * x2_10 * x0_00; dx2_10 += dxi * x1_00 * x0_00;
dxi = W[33] * grad; dx0_01 += dxi * x2_10 * x1_00; dx1_00 += dxi * x2_10 * x0_01; dx2_10 += dxi * x1_00 * x0_01;
dxi = W[34] * grad; dx0_10 += dxi * x2_10 * x1_00; dx1_00 += dxi * x2_10 * x0_10; dx2_10 += dxi * x1_00 * x0_10;
dxi = W[35] * grad; dx0_11 += dxi * x2_10 * x1_00; dx1_00 += dxi * x2_10 * x0_11; dx2_10 += dxi * x1_00 * x0_11;
dxi = W[36] * grad; dx0_00 += dxi * x2_10 * x1_01; dx1_01 += dxi * x2_10 * x0_00; dx2_10 += dxi * x1_01 * x0_00;
dxi = W[37] * grad; dx0_01 += dxi * x2_10 * x1_01; dx1_01 += dxi * x2_10 * x0_01; dx2_10 += dxi * x1_01 * x0_01;
dxi = W[38] * grad; dx0_10 += dxi * x2_10 * x1_01; dx1_01 += dxi * x2_10 * x0_10; dx2_10 += dxi * x1_01 * x0_10;
dxi = W[39] * grad; dx0_11 += dxi * x2_10 * x1_01; dx1_01 += dxi * x2_10 * x0_11; dx2_10 += dxi * x1_01 * x0_11;
dxi = W[40] * grad; dx0_00 += dxi * x2_10 * x1_10; dx1_10 += dxi * x2_10 * x0_00; dx2_10 += dxi * x1_10 * x0_00;
dxi = W[41] * grad; dx0_01 += dxi * x2_10 * x1_10; dx1_10 += dxi * x2_10 * x0_01; dx2_10 += dxi * x1_10 * x0_01;
dxi = W[42] * grad; dx0_10 += dxi * x2_10 * x1_10; dx1_10 += dxi * x2_10 * x0_10; dx2_10 += dxi * x1_10 * x0_10;
dxi = W[43] * grad; dx0_11 += dxi * x2_10 * x1_10; dx1_10 += dxi * x2_10 * x0_11; dx2_10 += dxi * x1_10 * x0_11;
dxi = W[44] * grad; dx0_00 += dxi * x2_10 * x1_11; dx1_11 += dxi * x2_10 * x0_00; dx2_10 += dxi * x1_11 * x0_00;
dxi = W[45] * grad; dx0_01 += dxi * x2_10 * x1_11; dx1_11 += dxi * x2_10 * x0_01; dx2_10 += dxi * x1_11 * x0_01;
dxi = W[46] * grad; dx0_10 += dxi * x2_10 * x1_11; dx1_11 += dxi * x2_10 * x0_10; dx2_10 += dxi * x1_11 * x0_10;
dxi = W[47] * grad; dx0_11 += dxi * x2_10 * x1_11; dx1_11 += dxi * x2_10 * x0_11; dx2_10 += dxi * x1_11 * x0_11;
dxi = W[48] * grad; dx0_00 += dxi * x2_11 * x1_00; dx1_00 += dxi * x2_11 * x0_00; dx2_11 += dxi * x1_00 * x0_00;
dxi = W[49] * grad; dx0_01 += dxi * x2_11 * x1_00; dx1_00 += dxi * x2_11 * x0_01; dx2_11 += dxi * x1_00 * x0_01;
dxi = W[50] * grad; dx0_10 += dxi * x2_11 * x1_00; dx1_00 += dxi * x2_11 * x0_10; dx2_11 += dxi * x1_00 * x0_10;
dxi = W[51] * grad; dx0_11 += dxi * x2_11 * x1_00; dx1_00 += dxi * x2_11 * x0_11; dx2_11 += dxi * x1_00 * x0_11;
dxi = W[52] * grad; dx0_00 += dxi * x2_11 * x1_01; dx1_01 += dxi * x2_11 * x0_00; dx2_11 += dxi * x1_01 * x0_00;
dxi = W[53] * grad; dx0_01 += dxi * x2_11 * x1_01; dx1_01 += dxi * x2_11 * x0_01; dx2_11 += dxi * x1_01 * x0_01;
dxi = W[54] * grad; dx0_10 += dxi * x2_11 * x1_01; dx1_01 += dxi * x2_11 * x0_10; dx2_11 += dxi * x1_01 * x0_10;
dxi = W[55] * grad; dx0_11 += dxi * x2_11 * x1_01; dx1_01 += dxi * x2_11 * x0_11; dx2_11 += dxi * x1_01 * x0_11;
dxi = W[56] * grad; dx0_00 += dxi * x2_11 * x1_10; dx1_10 += dxi * x2_11 * x0_00; dx2_11 += dxi * x1_10 * x0_00;
dxi = W[57] * grad; dx0_01 += dxi * x2_11 * x1_10; dx1_10 += dxi * x2_11 * x0_01; dx2_11 += dxi * x1_10 * x0_01;
dxi = W[58] * grad; dx0_10 += dxi * x2_11 * x1_10; dx1_10 += dxi * x2_11 * x0_10; dx2_11 += dxi * x1_10 * x0_10;
dxi = W[59] * grad; dx0_11 += dxi * x2_11 * x1_10; dx1_10 += dxi * x2_11 * x0_11; dx2_11 += dxi * x1_10 * x0_11;
dxi = W[60] * grad; dx0_00 += dxi * x2_11 * x1_11; dx1_11 += dxi * x2_11 * x0_00; dx2_11 += dxi * x1_11 * x0_00;
dxi = W[61] * grad; dx0_01 += dxi * x2_11 * x1_11; dx1_11 += dxi * x2_11 * x0_01; dx2_11 += dxi * x1_11 * x0_01;
dxi = W[62] * grad; dx0_10 += dxi * x2_11 * x1_11; dx1_11 += dxi * x2_11 * x0_10; dx2_11 += dxi * x1_11 * x0_10;
dxi = W[63] * grad; dx0_11 += dxi * x2_11 * x1_11; dx1_11 += dxi * x2_11 * x0_11; dx2_11 += dxi * x1_11 * x0_11;
float *dx_ptr = &dx_buf[(node*6)*frame_stride + frame];
float dxn;
float dxp;
dxn = dx0_00 * xn[1]; dxn += dx0_10 * xp[1];
dxp = dx0_01 * xn[1]; dxp += dx0_11 * xp[1];
dx_ptr[0 * frame_stride] = (dxp - dxn);
dxn = dx0_00 * xn[0];
dxn += dx0_01 * xp[0];
dxp = dx0_10 * xn[0];
dxp += dx0_11 * xp[0];
dx_ptr[1 * frame_stride] = (dxp - dxn);
dxn = dx1_00 * xn[3];
dxp = dx1_01 * xn[3];
dxn += dx1_10 * xp[3];
dxp += dx1_11 * xp[3];
dx_ptr[2 * frame_stride] = (dxp - dxn);
dxn = dx1_00 * xn[2];
dxn += dx1_01 * xp[2];
dxp = dx1_10 * xn[2];
dxp += dx1_11 * xp[2];
dx_ptr[3 * frame_stride] = (dxp - dxn);
dxn = dx2_00 * xn[5];
dxp = dx2_01 * xn[5];
dxn += dx2_10 * xp[5];
dxp += dx2_11 * xp[5];
dx_ptr[4 * frame_stride] = (dxp - dxn);
dxn = dx2_00 * xn[4];
dxn += dx2_01 * xp[4];
dxp = dx2_10 * xn[4];
dxp += dx2_11 * xp[4];
dx_ptr[5 * frame_stride] = (dxp - dxn);
}
for ( int i = 0; i < 64; ++i) {
dW[i] = device_fp32_LocalSum(dW[i], buf);
if ( threadIdx.x == 0 ) {
dW_buf[node*64 + i] = dW[i];
}
}
}
__global__ void kernal_fp32_StochasticLut6_BackwardMarge(
const float* src_buf,
float* dst_buf,
const int* input_index,
int node_size,
int frame_size,
int frame_stride
)
{
int frame = blockDim.x * blockIdx.x + threadIdx.x;
for ( int node = 0; node < node_size; ++node ) {
if ( frame < frame_size ) {
for ( int n = 0; n < 6; ++n ) {
int in_idx = input_index[node*6 + n];
float* dst_buf_ptr = &dst_buf[frame_stride * in_idx];
float prev_data = dst_buf_ptr[frame];
const float* src_buf_ptr = &src_buf[(6 * node + n) * frame_stride];
dst_buf_ptr[frame] = prev_data + src_buf_ptr[frame];
}
}
__syncthreads();
}
}
int bbcu_fp32_StochasticLut6_Backward(
float const *dev_x_buf,
float const *dev_dy_buf,
float *dev_dx_buf,
float *dev_dx_tmp,
int const *dev_input_index,
float const *dev_W,
float *dev_dW,
int input_node_size,
int output_node_size,
int frame_size,
int frame_stride,
int binary_mode,
cudaStream_t streamId
)
{
BBCU_DEBUG_ASSERT(bbcu_IsDeviceAvailable());
{
int const thread_size = 256;
dim3 block(thread_size);
dim3 grid(output_node_size);
while ( frame_size < (int)block.x / 2 ) {
block.x /= 2;
}
kernal_fp32_StochasticLut6_Backward<thread_size><<<grid, block, 0, streamId>>>(
dev_x_buf,
dev_dy_buf,
dev_dx_tmp,
dev_input_index,
dev_W,
dev_dW,
frame_size,
frame_stride,
binary_mode
);
BB_CUDA_CHECK_LAST_ERROR();
}
{
BB_CUDA_SAFE_CALL(cudaMemset(dev_dx_buf, 0, input_node_size * frame_stride * sizeof(float)));
int block_x = frame_size;
while ( block_x > 1024 ) { block_x /= 2; }
dim3 grid((frame_size + block_x - 1) /block_x, 1);
dim3 block(block_x, 1, 1);
kernal_fp32_StochasticLut6_BackwardMarge<<<grid, block>>>(
dev_dx_tmp,
dev_dx_buf,
dev_input_index,
output_node_size,
frame_size,
frame_stride
);
BB_CUDA_CHECK_LAST_ERROR();
}
return 0;
}
// end of file
|
57342520d557c8ab6622978fcaa661e92257c1f7.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
Copyright 2012 The Trustees of Indiana University. All rights reserved.
CGL MapReduce Framework on GPUs and CPUs
Code Name: Panda
File: PandaSort.cu
First Version: 2012-07-01 V0.1
Current Version: 2012-09-01 V0.3
Last Updates: 2012-09-02
Developer: Hui Li ([email protected])
This is the source code for Panda, a MapReduce runtime on GPUs and CPUs.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
//includes CUDA
#include <hip/hip_runtime.h>
#ifndef _PANDASORT_CU_
#define _PANDASORT_CU_
#include "Panda.h"
#include "compare.cu"
#define NUM_BLOCK_PER_CHUNK_BITONIC_SORT 512//b256
#define SHARED_MEM_INT2 256
#define NUM_BLOCKS_CHUNK 256 //(512)
#define NUM_THREADS_CHUNK 256 //(256)
#define CHUNK_SIZE (NUM_BLOCKS_CHUNK*NUM_THREADS_CHUNK)
#define NUM_CHUNKS_R (NUM_RECORDS_R/CHUNK_SIZE)
__device__ int getCompareValue(void *d_rawData, cmp_type_t value1, cmp_type_t value2)
{
int compareValue=0;
int v1=value1.x;
int v2=value2.x;
if((v1==-1) || (v2==-1))
{
if(v1==v2)
compareValue=0;
else
if(v1==-1)
compareValue=-1;
else
compareValue=1;
}//if
else
compareValue=compare((void*)(((char*)d_rawData)+v1), value1.y, (void*)(((char*)d_rawData)+v2), value2.y);
return compareValue;
}//__device__
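// Records whose x field is -1 are padding sentinels: getCompareValue treats them as smaller
// than any real record, so after an ascending bitonic sort over a power-of-two sized buffer
// the real records occupy the last rLen slots (which is why callers copy from the tail).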
void * s_qsRawData=NULL;
__global__ void
partBitonicSortKernel( void* d_rawData, int totalLenInBytes,cmp_type_t* d_R, unsigned int numRecords, int chunkIdx, int unitSize)
{
__shared__ cmp_type_t shared[NUM_THREADS_CHUNK];
int tx = threadIdx.x;
int bx = blockIdx.x;
//load the data
int dataIdx = chunkIdx*CHUNK_SIZE+bx*blockDim.x+tx;
int unitIdx = ((NUM_BLOCKS_CHUNK*chunkIdx + bx)/unitSize)&1;
shared[tx] = d_R[dataIdx];
__syncthreads();
int ixj=0;
int a=0;
cmp_type_t temp1;
cmp_type_t temp2;
int k = NUM_THREADS_CHUNK;
if(unitIdx == 0)
{
for (int j = (k>>1); j>0; j =(j>>1))
{
ixj = tx ^ j;
//a = (shared[tx].y - shared[ixj].y);
temp1=shared[tx];
temp2= shared[ixj];
if (ixj > tx) {
//a=temp1.y-temp2.y;
//a=compareString((void*)(((char4*)d_rawData)+temp1.x),(void*)(((char4*)d_rawData)+temp2.x));
a=getCompareValue(d_rawData, temp1, temp2);
if ((tx & k) == 0) {
if ( (a>0)) {
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
else {
if ( (a<0)) {
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
}
__syncthreads();
}
}
else
{
for (int j = (k>>1); j>0; j =(j>>1))
{
ixj = tx ^ j;
temp1=shared[tx];
temp2= shared[ixj];
if (ixj > tx) {
//a=temp1.y-temp2.y;
//a=compareString((void*)(((char4*)d_rawData)+temp1.x),(void*)(((char4*)d_rawData)+temp2.x));
a=getCompareValue(d_rawData, temp1, temp2);
if ((tx & k) == 0) {
if( (a<0))
{
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
else {
if( (a>0))
{
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
}
__syncthreads();
}
}
d_R[dataIdx] = shared[tx];
}
__global__ void
unitBitonicSortKernel(void* d_rawData, int totalLenInBytes, cmp_type_t* d_R, unsigned int numRecords, int chunkIdx )
{
__shared__ cmp_type_t shared[NUM_THREADS_CHUNK];
int tx = threadIdx.x;
int bx = blockIdx.x;
int unitIdx = (NUM_BLOCKS_CHUNK*chunkIdx + bx)&1;
//load the data
int dataIdx = chunkIdx*CHUNK_SIZE+bx*blockDim.x+tx;
shared[tx] = d_R[dataIdx];
__syncthreads();
cmp_type_t temp1;
cmp_type_t temp2;
int ixj=0;
int a=0;
if(unitIdx == 0)
{
for (int k = 2; k <= NUM_THREADS_CHUNK; (k =k<<1))
{
// bitonic merge:
for (int j = (k>>1); j>0; (j=j>>1))
{
ixj = tx ^ j;
temp1=shared[tx];
temp2= shared[ixj];
if (ixj > tx) {
//a=temp1.y-temp2.y;
//a=compareString((void*)(((char4*)d_rawData)+temp1.x),(void*)(((char4*)d_rawData)+temp2.x));
a=getCompareValue(d_rawData, temp1, temp2);
if ((tx & k) == 0) {
if ( (a>0)) {
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
else {
if ( (a<0)) {
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
}
__syncthreads();
}
}
}
else
{
for (int k = 2; k <= NUM_THREADS_CHUNK; (k =k<<1))
{
// bitonic merge:
for (int j = (k>>1); j>0; (j=j>>1))
{
ixj = tx ^ j;
temp1=shared[tx];
temp2= shared[ixj];
if (ixj > tx) {
//a=temp1.y-temp2.y;
//a=compareString((void*)(((char4*)d_rawData)+temp1.x),(void*)(((char4*)d_rawData)+temp2.x));
a=getCompareValue(d_rawData, temp1, temp2);
if ((tx & k) == 0) {
if( (a<0))
{
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
else {
if( (a>0))
{
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
}
__syncthreads();
}
}
}
d_R[dataIdx] = shared[tx];
}
__global__ void
bitonicKernel( void* d_rawData, int totalLenInBytes, cmp_type_t* d_R, unsigned int numRecords, int k, int j)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tid = threadIdx.x;
int dataIdx = by*gridDim.x*blockDim.x + bx*blockDim.x + tid;
int ixj = dataIdx^j;
if( ixj > dataIdx )
{
cmp_type_t tmpR = d_R[dataIdx];
cmp_type_t tmpIxj = d_R[ixj];
if( (dataIdx&k) == 0 )
{
//if( tmpR.y > tmpIxj.y )
//if(compareString((void*)(((char4*)d_rawData)+tmpR.x),(void*)(((char4*)d_rawData)+tmpIxj.x))==1)
if(getCompareValue(d_rawData, tmpR, tmpIxj)==1)
{
d_R[dataIdx] = tmpIxj;
d_R[ixj] = tmpR;
}
}
else
{
//if( tmpR.y < tmpIxj.y )
//if(compareString((void*)(((char4*)d_rawData)+tmpR.x),(void*)(((char4*)d_rawData)+tmpIxj.x))==-1)
if(getCompareValue(d_rawData, tmpR, tmpIxj)==-1)
{
d_R[dataIdx] = tmpIxj;
d_R[ixj] = tmpR;
}
}
}
}
__device__ inline void swap(cmp_type_t & a, cmp_type_t & b)
{
// Alternative swap doesn't use a temporary register:
// a ^= b;
// b ^= a;
// a ^= b;
cmp_type_t tmp = a;
a = b;
b = tmp;
}
__global__ void bitonicSortSingleBlock_kernel(void* d_rawData, int totalLenInBytes, cmp_type_t * d_values, int rLen, cmp_type_t* d_output)
{
__shared__ cmp_type_t bs_cmpbuf[SHARED_MEM_INT2];
//const int by = blockIdx.y;
//const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
//const int bid=bx+by*gridDim.x;
//const int numThread=blockDim.x;
//const int resultID=(bx)*numThread+tid;
if(tid<rLen)
{
bs_cmpbuf[tid] = d_values[tid];
}
else
{
bs_cmpbuf[tid].x =-1;
}
__syncthreads();
// Parallel bitonic sort.
int compareValue=0;
for (int k = 2; k <= SHARED_MEM_INT2; k *= 2)
{
// Bitonic merge:
for (int j = k / 2; j>0; j /= 2)
{
int ixj = tid ^ j;
if (ixj > tid)
{
if ((tid & k) == 0)
{
compareValue=getCompareValue(d_rawData, bs_cmpbuf[tid], bs_cmpbuf[ixj]);
//if (shared[tid] > shared[ixj])
if(compareValue>0)
{
swap(bs_cmpbuf[tid], bs_cmpbuf[ixj]);
}
}
else
{
compareValue=getCompareValue(d_rawData, bs_cmpbuf[tid], bs_cmpbuf[ixj]);
//if (shared[tid] < shared[ixj])
if(compareValue<0)
{
swap(bs_cmpbuf[tid], bs_cmpbuf[ixj]);
}
}
}
__syncthreads();
}
}
// Write result.
/*if(tid<rLen)
{
d_output[tid] = bs_cmpbuf[tid+SHARED_MEM_INT2-rLen];
}*/
int start_row_idCopy=SHARED_MEM_INT2-rLen;
if(tid>=start_row_idCopy)
{
d_output[tid-start_row_idCopy]=bs_cmpbuf[tid];
}
}
__global__ void bitonicSortMultipleBlocks_kernel(void* d_rawData, int totalLenInBytes, cmp_type_t * d_values, int* d_bound, int start_row_idBlock, int numBlock, cmp_type_t *d_output)
{
__shared__ int bs_pStart;
__shared__ int bs_pEnd;
__shared__ int bs_numElement;
__shared__ cmp_type_t bs_shared[SHARED_MEM_INT2];
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
//const int numThread=blockDim.x;
//const int resultID=(bx)*numThread+tid;
if(bid>=numBlock) return;
if(tid==0)
{
bs_pStart=d_bound[(bid+start_row_idBlock)<<1];
bs_pEnd=d_bound[((bid+start_row_idBlock)<<1)+1];
bs_numElement=bs_pEnd-bs_pStart;
}
__syncthreads();
// Copy input to shared mem.
if(tid<bs_numElement)
{
bs_shared[tid] = d_values[tid+bs_pStart];
}
else
{
bs_shared[tid].x =-1;
}
__syncthreads();
// Parallel bitonic sort.
int compareValue=0;
for (int k = 2; k <= SHARED_MEM_INT2; k *= 2)
{
// Bitonic merge:
for (int j = k / 2; j>0; j /= 2)
{
int ixj = tid ^ j;
if (ixj > tid)
{
if ((tid & k) == 0)
{
compareValue=getCompareValue(d_rawData, bs_shared[tid], bs_shared[ixj]);
//if (shared[tid] > shared[ixj])
if(compareValue>0)
{
swap(bs_shared[tid], bs_shared[ixj]);
}
}
else
{
compareValue=getCompareValue(d_rawData, bs_shared[tid], bs_shared[ixj]);
//if (shared[tid] < shared[ixj])
if(compareValue<0)
{
swap(bs_shared[tid], bs_shared[ixj]);
}
}
}
__syncthreads();
}
}
// Write result.
//if(tid<bs_numElement)
//{
// d_output[tid+bs_pStart] = bs_shared[tid+SHARED_MEM_INT2-bs_numElement];
//}
int start_row_idCopy=SHARED_MEM_INT2-bs_numElement;
if(tid>=start_row_idCopy)
{
d_output[tid-start_row_idCopy+bs_pStart]=bs_shared[tid];
}
}
__global__ void initialize_kernel(cmp_type_t* d_data, int start_row_idPos, int rLen, cmp_type_t value)
{
}
void bitonicSortMultipleBlocks(void* d_rawData, int totalLenInBytes, cmp_type_t * d_values, int* d_bound, int numBlock, cmp_type_t * d_output)
{
}
void bitonicSortSingleBlock(void* d_rawData, int totalLenInBytes, cmp_type_t * d_values, int rLen, cmp_type_t * d_output)
{
int numThreadsPerBlock_x=SHARED_MEM_INT2;
int numThreadsPerBlock_y=1;
int numBlock_x=1;
int numBlock_y=1;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
hipLaunchKernelGGL(( bitonicSortSingleBlock_kernel), dim3(grid),dim3(thread), 0, 0, d_rawData, totalLenInBytes, d_values, rLen, d_output);
hipDeviceSynchronize();
}
void initialize(cmp_type_t *d_data, int rLen, cmp_type_t value)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start_row_id=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start_row_id=i*chunkSize;
end=start_row_id+chunkSize;
if(end>rLen)
end=rLen;
hipLaunchKernelGGL(( initialize_kernel), dim3(grid),dim3(thread), 0, 0, d_data, start_row_id, rLen, value);
}
hipDeviceSynchronize();
}
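// bitonicSortGPU rounds rLen up to the next power of two, pads with x==-1 sentinels, and then
// picks a strategy by size: a single shared-memory block for rLen <= NUM_THREADS_CHUNK, a plain
// global-memory bitonic network for rLen <= 256K records, and for larger inputs a chunked scheme
// that sorts CHUNK_SIZE pieces locally (unit/part kernels) and merges them with global steps.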
void bitonicSortGPU(void* d_rawData, int totalLenInBytes, cmp_type_t* d_Rin, int rLen, void *d_Rout)
{
unsigned int numRecordsR;
unsigned int size = rLen;
unsigned int level = 0;
while( size != 1 )
{
size = size/2;
level++;
}
if( (1<<level) < rLen )
{
level++;
}
numRecordsR = (1<<level);
if(rLen<=NUM_THREADS_CHUNK)
{
bitonicSortSingleBlock((void*)d_rawData, totalLenInBytes, d_Rin, rLen, (cmp_type_t*)d_Rout);
}
else
if( rLen <= 256*1024 )
{
//unsigned int numRecordsR = rLen;
unsigned int numThreadsSort = NUM_THREADS_CHUNK;
if(numRecordsR<NUM_THREADS_CHUNK)
numRecordsR=NUM_THREADS_CHUNK;
unsigned int numBlocksXSort = numRecordsR/numThreadsSort;
unsigned int numBlocksYSort = 1;
dim3 gridSort( numBlocksXSort, numBlocksYSort );
unsigned int memSizeRecordsR = sizeof( cmp_type_t ) * numRecordsR;
//copy the <offset, length> pairs.
cmp_type_t* d_R;
//checkCudaErrors
( hipMalloc( (void**) &d_R, memSizeRecordsR) );
cmp_type_t tempValue;
tempValue.x=tempValue.y=-1;
initialize(d_R, numRecordsR, tempValue);
( hipMemcpy( d_R, d_Rin, rLen*sizeof(cmp_type_t), hipMemcpyDeviceToDevice) );
for( int k = 2; k <= numRecordsR; k *= 2 )
{
for( int j = k/2; j > 0; j /= 2 )
{
hipLaunchKernelGGL(( bitonicKernel), dim3(gridSort), dim3(numThreadsSort), 0, 0, (void*)d_rawData, totalLenInBytes, d_R, numRecordsR, k, j);
}
}
( hipMemcpy( d_Rout, d_R+(numRecordsR-rLen), sizeof(cmp_type_t)*rLen, hipMemcpyDeviceToDevice) );
hipFree( d_R );
hipDeviceSynchronize();
}
else
{
unsigned int numThreadsSort = NUM_THREADS_CHUNK;
unsigned int numBlocksYSort = 1;
unsigned int numBlocksXSort = (numRecordsR/numThreadsSort)/numBlocksYSort;
if(numBlocksXSort>=(1<<16))
{
numBlocksXSort=(1<<15);
numBlocksYSort=(numRecordsR/numThreadsSort)/numBlocksXSort;
}
unsigned int numBlocksChunk = NUM_BLOCKS_CHUNK;
unsigned int numThreadsChunk = NUM_THREADS_CHUNK;
unsigned int chunkSize = numBlocksChunk*numThreadsChunk;
unsigned int numChunksR = numRecordsR/chunkSize;
dim3 gridSort( numBlocksXSort, numBlocksYSort );
unsigned int memSizeRecordsR = sizeof( cmp_type_t ) * numRecordsR;
cmp_type_t* d_R;
( hipMalloc( (void**) &d_R, memSizeRecordsR) );
cmp_type_t tempValue;
tempValue.x=tempValue.y=-1;
initialize(d_R, numRecordsR, tempValue);
( hipMemcpy( d_R, d_Rin, rLen*sizeof(cmp_type_t), hipMemcpyDeviceToDevice) );
for( int chunkIdx = 0; chunkIdx < numChunksR; chunkIdx++ )
{
hipLaunchKernelGGL(( unitBitonicSortKernel), dim3(numBlocksChunk), dim3(numThreadsChunk), 0, 0, (void*)d_rawData, totalLenInBytes, d_R, numRecordsR, chunkIdx );
}
int j;
for( int k = numThreadsChunk*2; k <= numRecordsR; k *= 2 )
{
for( j = k/2; j > numThreadsChunk/2; j /= 2 )
{
hipLaunchKernelGGL(( bitonicKernel), dim3(gridSort), dim3(numThreadsSort), 0, 0, (void*)d_rawData, totalLenInBytes, d_R, numRecordsR, k, j);
}
for( int chunkIdx = 0; chunkIdx < numChunksR; chunkIdx++ )
{
hipLaunchKernelGGL(( partBitonicSortKernel), dim3(numBlocksChunk), dim3(numThreadsChunk), 0, 0, (void*)d_rawData, totalLenInBytes, d_R, numRecordsR, chunkIdx, k/numThreadsSort );
}
}
( hipMemcpy( d_Rout, d_R+(numRecordsR-rLen), sizeof(cmp_type_t)*rLen, hipMemcpyDeviceToDevice) );
hipFree( d_R );
hipDeviceSynchronize();
}
}
__global__ void getIntYArray_kernel(int2* d_input, int start_row_idPos, int rLen, int* d_output)
{
}
__global__ void getXYArray_kernel(cmp_type_t* d_input, int start_row_idPos, int rLen, int2* d_output)
{
}
__global__ void getZWArray_kernel(cmp_type_t* d_input, int start_row_idPos, int rLen, int2* d_output)
{
}
__global__ void setXYArray_kernel(cmp_type_t* d_input, int start_row_idPos, int rLen, int2* d_value)
{
}
__global__ void setZWArray_kernel(cmp_type_t* d_input, int start_row_idPos, int rLen, int2* d_value)
{
}
void getIntYArray(int2 *d_data, int rLen, int* d_output)
{
}
void getXYArray(cmp_type_t *d_data, int rLen, int2* d_output)
{
}
void getZWArray(cmp_type_t *d_data, int rLen, int2* d_output)
{
hipDeviceSynchronize();
}
void setXYArray(cmp_type_t *d_data, int rLen, int2* d_value)
{
hipDeviceSynchronize();
}
void setZWArray(cmp_type_t *d_data, int rLen, int2* d_value)
{
hipDeviceSynchronize();
}
__global__ void copyChunks_kernel(void *d_source, int start_row_idPos, int2* d_Rin, int rLen, int *d_sum, void *d_dest)
{
}
__global__ void getChunkBoundary_kernel(void* d_rawData, int start_row_idPos, cmp_type_t *d_Rin,
int rLen, int* d_start_row_idArray)
{
}
__global__ void setBoundaryInt2_kernel(int* d_boundary, int start_row_idPos, int numKey, int rLen,
int2* d_boundaryRange)
{
}
__global__ void writeBoundary_kernel(int start_row_idPos, int rLen, int* d_start_row_idArray,
int* d_start_row_idSumArray, int* d_bounary)
{
}
void copyChunks(void *d_source, int2* d_Rin, int rLen, void *d_dest)
{
}
//return the number of chunks.
int getChunkBoundary(void *d_source, cmp_type_t* d_Rin, int rLen, int2 ** h_outputKeyListRange)
{
return 0;
}
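// Flattens the per-map-task intermediate keyval arrays into d_intermediate_keyval_arr: each
// thread handles a strided range of map tasks and computes a task's output offset as the sum
// of arr_len over all preceding tasks (an O(n^2) scan, but done entirely on the device).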
__global__ void copyDataFromDevice2Host1(gpu_context d_g_state)
{
int num_records_per_thread = (d_g_state.num_input_record+(gridDim.x*blockDim.x)-1)/(gridDim.x*blockDim.x);
int block_start_row_idx = num_records_per_thread*blockIdx.x*blockDim.x;
int thread_start_row_idx = block_start_row_idx
+ (threadIdx.x/STRIDE)*num_records_per_thread*STRIDE
+ (threadIdx.x%STRIDE);
int thread_end_idx = thread_start_row_idx+num_records_per_thread*STRIDE;
//if (TID>=d_g_state.num_input_record)return;
if(thread_end_idx>d_g_state.num_input_record)
thread_end_idx = d_g_state.num_input_record;
for(int map_task_idx=thread_start_row_idx; map_task_idx < thread_end_idx; map_task_idx+=STRIDE){
int begin=0;
int end=0;
for (int i=0;i<map_task_idx;i++){
begin += d_g_state.d_intermediate_keyval_arr_arr_p[i]->arr_len;
}//for
end = begin + (d_g_state.d_intermediate_keyval_arr_arr_p[map_task_idx]->arr_len);
//printf("map_task_idx:%d begin:%d end:%d\n",map_task_idx, begin,end);
for(int i=begin;i<end;i++){
keyval_t * p1 = &(d_g_state.d_intermediate_keyval_arr[i]);
keyval_t * p2 = &(d_g_state.d_intermediate_keyval_arr_arr_p[map_task_idx]->arr[i-begin]);
memcpy(p1,p2,sizeof(keyval_t));
//printf("copyData1: TID:%d keySize %d valSize:%d p2->key:%s p1->key:%s\n",map_task_idx,p1->keySize,p1->valSize,p2->key,p1->key);
}//for
}//for int map_task_idx;
}
__global__ void copyDataFromDevice2Host3(gpu_context d_g_state)
{
int num_records_per_thread = (d_g_state.num_input_record+(gridDim.x*blockDim.x)-1)/(gridDim.x*blockDim.x);
int block_start_row_idx = num_records_per_thread*blockIdx.x*blockDim.x;
int thread_start_row_idx = block_start_row_idx
+ (threadIdx.x/STRIDE)*num_records_per_thread*STRIDE
+ (threadIdx.x%STRIDE);
int thread_end_idx = thread_start_row_idx+num_records_per_thread*STRIDE;
//if (TID>=d_g_state.num_input_record)return;
if(thread_end_idx>d_g_state.num_input_record)
thread_end_idx = d_g_state.num_input_record;
int begin, end, val_pos, key_pos;
char *val_p,*key_p;
for(int map_task_idx=thread_start_row_idx; map_task_idx < thread_end_idx; map_task_idx+=STRIDE){
begin=0;
end=0;
for (int i=0;i<map_task_idx;i++){
begin += (d_g_state.d_intermediate_keyval_arr_arr_p[i]->arr_len);
}//for
end = begin + (d_g_state.d_intermediate_keyval_arr_arr_p[map_task_idx]->arr_len);
//printf("copyData:%d begin:%d, end:%d\n",TID,begin,end);
for(int i=begin;i<end;i++){
//keyval_t * p1 = &(d_g_state.d_intermediate_keyval_arr[i]);
val_pos = d_g_state.d_intermediate_keyval_pos_arr[i].valPos;
key_pos = d_g_state.d_intermediate_keyval_pos_arr[i].keyPos;
/*if (key_pos>=d_g_state.totalKeySize){
printf("keyPos2:%d totalKeySize:%d begin:%d end:%d i:%d map_task_idx:%d\n",key_pos,d_g_state.totalKeySize, begin, end, i, map_task_idx);
key_pos = 0;
}
if (val_pos>=d_g_state.totalValSize){
//printf("keyPos:%d totalKeySize:%d begin:%d end:%d i:%d map_task_idx:%d\n",key_pos,d_g_state.totalKeySize, begin, end, i, map_task_idx);
val_pos = 0;
}*/
val_p = (char*)(d_g_state.d_intermediate_vals_shared_buff)+val_pos;
key_p = (char*)(d_g_state.d_intermediate_keys_shared_buff)+key_pos;
keyval_t * p2 = &(d_g_state.d_intermediate_keyval_arr_arr_p[map_task_idx]->arr[i-begin]);
memcpy(key_p,p2->key,p2->keySize);
//int totalKeySize;
//int totalValSize;
memcpy(val_p,p2->val,p2->valSize);
//added by Hui
//free(p2->key);
//free(p2->val);
//free(p2);
//printf("copyDataFromDevice2Host2: TID:%d key: %s val:%d\n",TID,p2->key,*(int *)p2->val);
}//for
//free(d_g_state.d_intermediate_keyval_arr_arr[map_task_idx].arr);
//if (index*recordsPerTask >= recordNum) return;
//free(&d_g_state.d_intermediate_keyval_arr_arr[map_task_idx])
}//for
//free(d_g_state.d_intermediate_keyval_pos_arr);
//
}//__global__
#ifdef REMOVE
__global__ void copyDataFromDevice2Host2(gpu_context d_g_state)
{
int num_records_per_thread = (d_g_state.num_input_record+(gridDim.x*blockDim.x)-1)/(gridDim.x*blockDim.x);
int block_start_row_idx = num_records_per_thread*blockIdx.x*blockDim.x;
int thread_start_row_idx = block_start_row_idx
+ (threadIdx.x/STRIDE)*num_records_per_thread*STRIDE
+ (threadIdx.x%STRIDE);
int thread_end_idx = thread_start_row_idx+num_records_per_thread*STRIDE;
//if (TID>=d_g_state.num_input_record)return;
if(thread_end_idx>d_g_state.num_input_record)
thread_end_idx = d_g_state.num_input_record;
for(int map_task_idx=thread_start_row_idx; map_task_idx < thread_end_idx; map_task_idx+=STRIDE){
int begin=0;
int end=0;
for (int i=0;i<map_task_idx;i++){
begin += (d_g_state.d_intermediate_keyval_arr_arr[i].arr_len);
}//for
end = begin + (d_g_state.d_intermediate_keyval_arr_arr[map_task_idx].arr_len);
//printf("copyData:%d begin:%d, end:%d\n",TID,begin,end);
for(int i=begin;i<end;i++){
keyval_t * p1 = &(d_g_state.d_intermediate_keyval_arr[i]);
keyval_t * p2 = &(d_g_state.d_intermediate_keyval_arr_arr[map_task_idx].arr[i-begin]);
memcpy(p1->key,p2->key,p2->keySize);
memcpy(p1->val,p2->val,p2->valSize);
//printf("copyDataFromDevice2Host2: TID:%d key: %s val:%d\n",TID,p2->key,*(int *)p2->val);
}//for
//if (index*recordsPerTask >= recordNum) return;
}//for
}//__global__
#endif
void StartCPUShuffle2(thread_info_t *thread_info){
cpu_context *d_g_state = (cpu_context*)(thread_info->d_g_state);
job_configuration *cpu_job_conf = (job_configuration*)(thread_info->job_conf);
//TODO put all jobs related object to job_conf
bool configured;
int cpu_group_id;
int num_input_record;
int num_cpus;
keyval_t * input_keyval_arr;
keyval_arr_t *intermediate_keyval_arr_arr_p = d_g_state->intermediate_keyval_arr_arr_p;
long total_count = 0;
int index = 0;
for(int i=0;i<d_g_state->num_input_record;i++){
total_count += intermediate_keyval_arr_arr_p[i].arr_len;
}//for
d_g_state->sorted_intermediate_keyvals_arr = NULL;
keyvals_t * sorted_intermediate_keyvals_arr = d_g_state->sorted_intermediate_keyvals_arr;
int sorted_key_arr_len = 0;
for(int i=0;i<d_g_state->num_input_record;i++){
int len = intermediate_keyval_arr_arr_p[i].arr_len;
for (int j=0;j<len;j++){
char *key_i = (char *)(intermediate_keyval_arr_arr_p[i].arr[j].key);
int keySize_i = (intermediate_keyval_arr_arr_p[i].arr[j].keySize);
char *val_i = (char *)(intermediate_keyval_arr_arr_p[i].arr[j].val);
int valSize_i = (intermediate_keyval_arr_arr_p[i].arr[j].valSize);
int k = 0;
for (; k<sorted_key_arr_len; k++){
char *key_k = (char *)(sorted_intermediate_keyvals_arr[k].key);
int keySize_k = sorted_intermediate_keyvals_arr[k].keySize;
if ( cpu_compare(key_i, keySize_i, key_k, keySize_k) != 0 )
continue;
//found the match
val_t *vals = sorted_intermediate_keyvals_arr[k].vals;
sorted_intermediate_keyvals_arr[k].val_arr_len++;
sorted_intermediate_keyvals_arr[k].vals = (val_t*)realloc(vals, sizeof(val_t)*(sorted_intermediate_keyvals_arr[k].val_arr_len));
int index = sorted_intermediate_keyvals_arr[k].val_arr_len - 1;
sorted_intermediate_keyvals_arr[k].vals[index].valSize = valSize_i;
sorted_intermediate_keyvals_arr[k].vals[index].val = (char *)malloc(sizeof(char)*valSize_i);
memcpy(sorted_intermediate_keyvals_arr[k].vals[index].val,val_i,valSize_i);
break;
}//for
if (k == sorted_key_arr_len){
if (sorted_key_arr_len == 0)
sorted_intermediate_keyvals_arr = NULL;
sorted_key_arr_len++;
sorted_intermediate_keyvals_arr = (keyvals_t *)realloc(sorted_intermediate_keyvals_arr, sizeof(keyvals_t)*sorted_key_arr_len);
int index = sorted_key_arr_len-1;
keyvals_t* kvals_p = (keyvals_t *)&(sorted_intermediate_keyvals_arr[index]);
kvals_p->keySize = keySize_i;
kvals_p->key = malloc(sizeof(char)*keySize_i);
memcpy(kvals_p->key, key_i, keySize_i);
kvals_p->vals = (val_t *)malloc(sizeof(val_t));
kvals_p->val_arr_len = 1;
kvals_p->vals[0].valSize = valSize_i;
kvals_p->vals[0].val = (char *)malloc(sizeof(char)*valSize_i);
memcpy(kvals_p->vals[0].val,val_i, valSize_i);
}//if
}//for j;
}//for i;
d_g_state->sorted_intermediate_keyvals_arr = sorted_intermediate_keyvals_arr;
d_g_state->sorted_keyvals_arr_len = sorted_key_arr_len;
DoLog("CPU_GROUP_ID:[%d] #Intermediate Records:%d; #Intermediate Records:%d After Shuffle",d_g_state->cpu_group_id, total_count,sorted_key_arr_len);
}
void StartCPUShuffle(cpu_context *d_g_state){
#ifdef DEV_MODE
bool configured;
int cpu_group_id;
int num_input_record;
int num_cpus;
keyval_t * input_keyval_arr;
keyval_arr_t *intermediate_keyval_arr_arr_p = d_g_state->intermediate_keyval_arr_arr_p;
long total_count = 0;
int index = 0;
for(int i=0;i<d_g_state->num_input_record;i++){
total_count += intermediate_keyval_arr_arr_p[i].arr_len;
}//for
DoLog("total intermediate record count:%d\n",total_count);
d_g_state->sorted_intermediate_keyvals_arr = NULL;
keyvals_t * sorted_intermediate_keyvals_arr = d_g_state->sorted_intermediate_keyvals_arr;
int sorted_key_arr_len = 0;
for(int i=0;i<d_g_state->num_input_record;i++){
int len = intermediate_keyval_arr_arr_p[i].arr_len;
for (int j=0;j<len;j++){
char *key_i = (char *)(intermediate_keyval_arr_arr_p[i].arr[j].key);
int keySize_i = (intermediate_keyval_arr_arr_p[i].arr[j].keySize);
char *val_i = (char *)(intermediate_keyval_arr_arr_p[i].arr[j].val);
int valSize_i = (intermediate_keyval_arr_arr_p[i].arr[j].valSize);
int k = 0;
for (; k<sorted_key_arr_len; k++){
char *key_k = (char *)(sorted_intermediate_keyvals_arr[k].key);
int keySize_k = sorted_intermediate_keyvals_arr[k].keySize;
if ( cpu_compare(key_i, keySize_i, key_k, keySize_k) != 0 )
continue;
//found the match
val_t *vals = sorted_intermediate_keyvals_arr[k].vals;
sorted_intermediate_keyvals_arr[k].val_arr_len++;
sorted_intermediate_keyvals_arr[k].vals = (val_t*)realloc(vals, sizeof(val_t)*(sorted_intermediate_keyvals_arr[k].val_arr_len));
int index = sorted_intermediate_keyvals_arr[k].val_arr_len - 1;
sorted_intermediate_keyvals_arr[k].vals[index].valSize = valSize_i;
sorted_intermediate_keyvals_arr[k].vals[index].val = (char *)malloc(sizeof(char)*valSize_i);
memcpy(sorted_intermediate_keyvals_arr[k].vals[index].val,val_i,valSize_i);
break;
}//for
if (k == sorted_key_arr_len){
if (sorted_key_arr_len == 0)
sorted_intermediate_keyvals_arr = NULL;
sorted_key_arr_len++;
sorted_intermediate_keyvals_arr = (keyvals_t *)realloc(sorted_intermediate_keyvals_arr, sizeof(keyvals_t)*sorted_key_arr_len);
int index = sorted_key_arr_len-1;
keyvals_t* kvals_p = (keyvals_t *)&(sorted_intermediate_keyvals_arr[index]);
kvals_p->keySize = keySize_i;
kvals_p->key = malloc(sizeof(char)*keySize_i);
memcpy(kvals_p->key, key_i, keySize_i);
kvals_p->vals = (val_t *)malloc(sizeof(val_t));
kvals_p->val_arr_len = 1;
kvals_p->vals[0].valSize = valSize_i;
kvals_p->vals[0].val = (char *)malloc(sizeof(char)*valSize_i);
memcpy(kvals_p->vals[0].val,val_i, valSize_i);
}//if
}//for j;
}//for i;
d_g_state->sorted_intermediate_keyvals_arr = sorted_intermediate_keyvals_arr;
d_g_state->sorted_keyvals_arr_len = sorted_key_arr_len;
DoLog("total number of different intermediate records:%d",sorted_key_arr_len);
#endif
}
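// Shuffle for one GPU: pull the flattened intermediate records and their key/value bytes back
// to the host, group records with equal keys by a linear cpu_compare scan, then upload the
// grouped keyval positions (plus a per-key end-offset array) so the reduce stage can iterate
// value lists per distinct key.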
void Shuffle4GPUOutput(gpu_context* d_g_state){
hipDeviceSynchronize();
int *count_arr = (int *)malloc(sizeof(int) * d_g_state->num_input_record);
//DoLog("begin to copy data from device to host memory num_input_record:%d",d_g_state->num_input_record);
//DoLog("allocate memory for d_intermediate_keyval_total_count size:%d\n",sizeof(int)*d_g_state->num_input_record);
checkCudaErrors(hipMemcpy(count_arr, d_g_state->d_intermediate_keyval_total_count, sizeof(int)*d_g_state->num_input_record, hipMemcpyDeviceToHost));
long total_count = 0;
int index = 0;
for(int i=0;i<d_g_state->num_input_record;i++){
//printf("arr_len[%d]=:%d\n",i,count_arr[i]);
total_count += count_arr[i];
index++;
}//for
free(count_arr);
checkCudaErrors(hipMalloc((void **)&(d_g_state->d_intermediate_keyval_arr),sizeof(keyval_t)*total_count));
int num_blocks = (d_g_state->num_mappers + (NUM_THREADS)-1)/(NUM_THREADS);
hipLaunchKernelGGL(( copyDataFromDevice2Host1), dim3(num_blocks),dim3(NUM_THREADS), 0, 0, *d_g_state);
//copyDataFromDevice2Host1<<<NUM_BLOCKS,NUM_THREADS>>>(*d_g_state);
hipDeviceSynchronize();
keyval_t * h_keyval_buff = (keyval_t *)malloc(sizeof(keyval_t)*total_count);
checkCudaErrors(hipMemcpy(h_keyval_buff, d_g_state->d_intermediate_keyval_arr, sizeof(keyval_t)*total_count, hipMemcpyDeviceToHost));
d_g_state->h_intermediate_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*total_count);
keyval_pos_t *h_intermediate_keyvals_pos_arr = d_g_state->h_intermediate_keyval_pos_arr;
int totalKeySize = 0;
int totalValSize = 0;
for (int i=0;i<total_count;i++){
h_intermediate_keyvals_pos_arr[i].valPos= totalValSize;
h_intermediate_keyvals_pos_arr[i].keyPos = totalKeySize;
h_intermediate_keyvals_pos_arr[i].keySize = h_keyval_buff[i].keySize;
h_intermediate_keyvals_pos_arr[i].valSize = h_keyval_buff[i].valSize;
totalKeySize += h_keyval_buff[i].keySize;
totalValSize += h_keyval_buff[i].valSize;
}//for
d_g_state->totalValSize = totalValSize;
d_g_state->totalKeySize = totalKeySize;
//DoLog("totalKeySize:%d totalValSize:%d ",totalKeySize,totalValSize);
checkCudaErrors(hipMalloc((void **)&d_g_state->d_intermediate_keys_shared_buff,totalKeySize));
checkCudaErrors(hipMalloc((void **)&d_g_state->d_intermediate_vals_shared_buff,totalValSize));
checkCudaErrors(hipMalloc((void **)&d_g_state->d_intermediate_keyval_pos_arr,sizeof(keyval_pos_t)*total_count));
checkCudaErrors(hipMemcpy(d_g_state->d_intermediate_keyval_pos_arr, h_intermediate_keyvals_pos_arr, sizeof(keyval_pos_t)*total_count, hipMemcpyHostToDevice));
//DoLog("copyDataFromDevice2Host3");
hipDeviceSynchronize();
hipLaunchKernelGGL(( copyDataFromDevice2Host3), dim3(num_blocks),dim3(NUM_THREADS), 0, 0, *d_g_state);
//printData<<<NUM_BLOCKS,NUM_THREADS>>>(*d_g_state);
hipDeviceSynchronize();
d_g_state->h_intermediate_keys_shared_buff = malloc(sizeof(char)*totalKeySize);
d_g_state->h_intermediate_vals_shared_buff = malloc(sizeof(char)*totalValSize);
checkCudaErrors(hipMemcpy(d_g_state->h_intermediate_keys_shared_buff,d_g_state->d_intermediate_keys_shared_buff,sizeof(char)*totalKeySize,hipMemcpyDeviceToHost));
checkCudaErrors(hipMemcpy(d_g_state->h_intermediate_vals_shared_buff,d_g_state->d_intermediate_vals_shared_buff,sizeof(char)*totalValSize,hipMemcpyDeviceToHost));
/* for(int i=0;i<total_count;i++){
printf("keySize:%d, valSize:%d key:%s val:%d\n",h_buff[i].keySize,h_buff[i].valSize,(char *)h_buff[i].key,*(int *)h_buff[i].val);
}//for */
//////////////////////////////////////////////
checkCudaErrors(hipMalloc((void **)&d_g_state->d_sorted_keys_shared_buff,totalKeySize));
checkCudaErrors(hipMalloc((void **)&d_g_state->d_sorted_vals_shared_buff,totalValSize));
checkCudaErrors(hipMalloc((void **)&d_g_state->d_keyval_pos_arr,sizeof(keyval_pos_t)*total_count));
d_g_state->h_sorted_keys_shared_buff = malloc(sizeof(char)*totalKeySize);
d_g_state->h_sorted_vals_shared_buff = malloc(sizeof(char)*totalValSize);
//d_g_state->h_sorted_keyval_pos_arr = (sorted_keyval_pos_t *)malloc(sizeof(sorted_keyval_pos_t)*total_count);
char *sorted_keys_shared_buff = (char *)d_g_state->h_sorted_keys_shared_buff;
char *sorted_vals_shared_buff = (char *)d_g_state->h_sorted_vals_shared_buff;
//sorted_keyval_pos_t * h_sorted_keyval_pos_arr = d_g_state->h_sorted_keyval_pos_arr;
char *intermediate_key_shared_buff = (char *)d_g_state->h_intermediate_keys_shared_buff;
char *intermediate_val_shared_buff = (char *)d_g_state->h_intermediate_vals_shared_buff;
memcpy(sorted_keys_shared_buff, intermediate_key_shared_buff, totalKeySize);
memcpy(sorted_vals_shared_buff, intermediate_val_shared_buff, totalValSize);
int sorted_key_arr_len = 0;
///////////////////////////////////////////////////////////////////////////////////////////////////
//transfer the d_sorted_keyval_pos_arr to h_sorted_keyval_pos_arr
//DoLog("transfer the d_sorted_keyval_pos_arr to h_sorted_keyval_pos_arr");
sorted_keyval_pos_t * h_sorted_keyval_pos_arr = NULL;
for (int i=0; i<total_count; i++){
int iKeySize = h_intermediate_keyvals_pos_arr[i].keySize;
int j = 0;
for (; j<sorted_key_arr_len; j++){
int jKeySize = h_sorted_keyval_pos_arr[j].keySize;
char *key_i = (char *)(intermediate_key_shared_buff + h_intermediate_keyvals_pos_arr[i].keyPos);
char *key_j = (char *)(sorted_keys_shared_buff + h_sorted_keyval_pos_arr[j].keyPos);
if (cpu_compare(key_i,iKeySize,key_j,jKeySize)!=0)
continue;
//found the match
int arr_len = h_sorted_keyval_pos_arr[j].val_arr_len;
h_sorted_keyval_pos_arr[j].val_pos_arr = (val_pos_t *)realloc(h_sorted_keyval_pos_arr[j].val_pos_arr, sizeof(val_pos_t)*(arr_len+1));
h_sorted_keyval_pos_arr[j].val_pos_arr[arr_len].valSize = h_intermediate_keyvals_pos_arr[i].valSize;
h_sorted_keyval_pos_arr[j].val_pos_arr[arr_len].valPos = h_intermediate_keyvals_pos_arr[i].valPos;
h_sorted_keyval_pos_arr[j].val_arr_len += 1;
break;
}//for
if(j==sorted_key_arr_len){
sorted_key_arr_len++;
//printf("d_g_state->d_sorted_keyvals_arr_len:%d\n",d_g_state->d_sorted_keyvals_arr_len);
h_sorted_keyval_pos_arr = (sorted_keyval_pos_t *)realloc(h_sorted_keyval_pos_arr,sorted_key_arr_len*sizeof(sorted_keyval_pos_t));
sorted_keyval_pos_t *p = &(h_sorted_keyval_pos_arr[sorted_key_arr_len - 1]);
p->keySize = iKeySize;
p->keyPos = h_intermediate_keyvals_pos_arr[i].keyPos;
p->val_arr_len = 1;
p->val_pos_arr = (val_pos_t*)malloc(sizeof(val_pos_t));
p->val_pos_arr[0].valSize = h_intermediate_keyvals_pos_arr[i].valSize;
p->val_pos_arr[0].valPos = h_intermediate_keyvals_pos_arr[i].valPos;
}//if
}
d_g_state->h_sorted_keyval_pos_arr = h_sorted_keyval_pos_arr;
d_g_state->d_sorted_keyvals_arr_len = sorted_key_arr_len;
keyval_pos_t *tmp_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*total_count);
DoLog("GPU_ID:[%d] #input_records:%d #intermediate_records:%lu #different_intermediate_records:%d totalKeySize:%d totalValSize:%d",
d_g_state->gpu_id, d_g_state->num_input_record, total_count, sorted_key_arr_len,totalKeySize,totalValSize);
int *pos_arr_4_pos_arr = (int*)malloc(sizeof(int)*sorted_key_arr_len);
memset(pos_arr_4_pos_arr,0,sizeof(int)*sorted_key_arr_len);
index = 0;
for (int i=0;i<sorted_key_arr_len;i++){
sorted_keyval_pos_t *p = (sorted_keyval_pos_t *)&(h_sorted_keyval_pos_arr[i]);
for (int j=0;j<p->val_arr_len;j++){
tmp_keyval_pos_arr[index].keyPos = p->keyPos;
tmp_keyval_pos_arr[index].keySize = p->keySize;
tmp_keyval_pos_arr[index].valPos = p->val_pos_arr[j].valPos;
tmp_keyval_pos_arr[index].valSize = p->val_pos_arr[j].valSize;
//printf("tmp_keyval_pos_arr[%d].keyPos:%d\n",index,p->keyPos);
index++;
}//for
pos_arr_4_pos_arr[i] = index;
}
checkCudaErrors(hipMemcpy(d_g_state->d_keyval_pos_arr,tmp_keyval_pos_arr,sizeof(keyval_pos_t)*total_count,hipMemcpyHostToDevice));
d_g_state->d_sorted_keyvals_arr_len = sorted_key_arr_len;
checkCudaErrors(hipMalloc((void**)&d_g_state->d_pos_arr_4_sorted_keyval_pos_arr,sizeof(int)*sorted_key_arr_len));
checkCudaErrors(hipMemcpy(d_g_state->d_pos_arr_4_sorted_keyval_pos_arr,pos_arr_4_pos_arr,sizeof(int)*sorted_key_arr_len,hipMemcpyHostToDevice));
/*verify the d_sorted_keyval_arr_len results
for (int i=0;i<d_g_state->d_sorted_keyvals_arr_len;i++){
keyvals_t *p = &(d_g_state->h_sorted_keyvals_arr[i]);
printf("sort CPU 3 key:%s len:%d",p->key,p->val_arr_len);
for (int j=0;j<p->val_arr_len;j++)
printf("\t%d",*(int*)p->vals[j].val);
printf("\n");
}//for */
//start_row_id sorting
//partition
}
//host function sort_CPU
//copy intermediate records from device memory to host memory and sort the intermediate records there.
//The host API cannot copy from dynamically allocated addresses on device runtime heap, only device code can access them
void sort_CPU(gpu_context* d_g_state){
#ifdef REMOVE
//start_row_id sorting
//partition
#endif
}
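// Merge one CPU group's sorted intermediate key/value lists into the global panda context:
// values of a key that already exists in the panda array are appended to it, otherwise the
// key is added as a new entry (the GPU variant below does the same using the packed
// key/val/position buffers produced by Shuffle4GPUOutput).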
void PandaShuffleMergeCPU(panda_context *d_g_state_0, cpu_context *d_g_state_1){
DoLog("PandaShuffleMergeCPU CPU_GROUP_ID:[%d]", d_g_state_1->cpu_group_id);
keyvals_t * panda_sorted_intermediate_keyvals_arr = d_g_state_0->sorted_intermediate_keyvals_arr;
keyvals_t * cpu_sorted_intermediate_keyvals_arr = d_g_state_1->sorted_intermediate_keyvals_arr;
void *key_0, *key_1;
int keySize_0, keySize_1;
bool equal;
for (int i=0; i<d_g_state_1->sorted_keyvals_arr_len; i++){
key_1 = cpu_sorted_intermediate_keyvals_arr[i].key;
keySize_1 = cpu_sorted_intermediate_keyvals_arr[i].keySize;
int j;
for (j=0; j<d_g_state_0->sorted_keyvals_arr_len; j++){
key_0 = panda_sorted_intermediate_keyvals_arr[j].key;
keySize_0 = panda_sorted_intermediate_keyvals_arr[j].keySize;
if(cpu_compare(key_0,keySize_0,key_1,keySize_1)!=0)
continue;
//copy values from cpu_contex to panda context
int val_arr_len_1 = cpu_sorted_intermediate_keyvals_arr[i].val_arr_len;
int index = panda_sorted_intermediate_keyvals_arr[j].val_arr_len;
if (panda_sorted_intermediate_keyvals_arr[j].val_arr_len ==0)
panda_sorted_intermediate_keyvals_arr[j].vals = NULL;
panda_sorted_intermediate_keyvals_arr[j].val_arr_len += val_arr_len_1;
val_t *vals = panda_sorted_intermediate_keyvals_arr[j].vals;
panda_sorted_intermediate_keyvals_arr[j].vals = (val_t*)realloc(vals, sizeof(val_t)*(panda_sorted_intermediate_keyvals_arr[j].val_arr_len));
for (int k=0;k<val_arr_len_1;k++){
char *val_0 = (char *)(cpu_sorted_intermediate_keyvals_arr[i].vals[k].val);
int valSize_0 = cpu_sorted_intermediate_keyvals_arr[i].vals[k].valSize;
panda_sorted_intermediate_keyvals_arr[j].vals[index+k].val = malloc(sizeof(char)*valSize_0);
panda_sorted_intermediate_keyvals_arr[j].vals[index+k].valSize = valSize_0;
memcpy(panda_sorted_intermediate_keyvals_arr[j].vals[index+k].val, val_0, valSize_0);
}//for
break;
}//for
if (j == d_g_state_0->sorted_keyvals_arr_len){
if (d_g_state_0->sorted_keyvals_arr_len == 0) panda_sorted_intermediate_keyvals_arr = NULL;
val_t *vals = cpu_sorted_intermediate_keyvals_arr[i].vals;
int val_arr_len = cpu_sorted_intermediate_keyvals_arr[i].val_arr_len;
d_g_state_0->sorted_keyvals_arr_len++;
panda_sorted_intermediate_keyvals_arr = (keyvals_t *)realloc(panda_sorted_intermediate_keyvals_arr,
sizeof(keyvals_t)*(d_g_state_0->sorted_keyvals_arr_len));
int index = d_g_state_0->sorted_keyvals_arr_len-1;
keyvals_t* kvals_p = (keyvals_t *)&(panda_sorted_intermediate_keyvals_arr[index]);
kvals_p->keySize = keySize_1;
kvals_p->key = malloc(sizeof(char)*keySize_1);
memcpy(kvals_p->key, key_1, keySize_1);
kvals_p->vals = (val_t *)malloc(sizeof(val_t)*val_arr_len);
kvals_p->val_arr_len = val_arr_len;
for (int k=0; k < val_arr_len; k++){
char *val_0 = (char *)(cpu_sorted_intermediate_keyvals_arr[i].vals[k].val);
int valSize_0 = cpu_sorted_intermediate_keyvals_arr[i].vals[k].valSize;
kvals_p->vals[k].valSize = valSize_0;
kvals_p->vals[k].val = (char *)malloc(sizeof(char)*valSize_0);
memcpy(kvals_p->vals[k].val,val_0, valSize_0);
}//for
}//if (j == sorted_key_arr_len){
}//for i
d_g_state_0->sorted_intermediate_keyvals_arr = panda_sorted_intermediate_keyvals_arr;
DoLog("CPU_GROUP_ID:[%d] DONE.",d_g_state_1->cpu_group_id);
}
void PandaShuffleMergeGPU(panda_context *d_g_state_1, gpu_context *d_g_state_0){
DoLog("PandaShuffleMergeGPU GPU_ID:[%d]",d_g_state_0->gpu_id);
char *sorted_keys_shared_buff_0 = (char *)d_g_state_0->h_sorted_keys_shared_buff;
char *sorted_vals_shared_buff_0 = (char *)d_g_state_0->h_sorted_vals_shared_buff;
sorted_keyval_pos_t *keyval_pos_arr_0 = d_g_state_0->h_sorted_keyval_pos_arr;
keyvals_t * sorted_intermediate_keyvals_arr = d_g_state_1->sorted_intermediate_keyvals_arr;
void *key_0, *key_1;
int keySize_0, keySize_1;
bool equal;
for (int i=0;i<d_g_state_0->d_sorted_keyvals_arr_len;i++){
//DoLog("keyPos:%d",keyval_pos_arr_0[i].keyPos);
key_0 = sorted_keys_shared_buff_0 + keyval_pos_arr_0[i].keyPos;
keySize_0 = keyval_pos_arr_0[i].keySize;
int j = 0;
for (; j<d_g_state_1->sorted_keyvals_arr_len; j++){
key_1 = sorted_intermediate_keyvals_arr[j].key;
keySize_1 = sorted_intermediate_keyvals_arr[j].keySize;
if(cpu_compare(key_0,keySize_0,key_1,keySize_1)!=0)
continue;
val_t *vals = sorted_intermediate_keyvals_arr[j].vals;
//copy values from gpu to cpu context
int val_arr_len_0 =keyval_pos_arr_0[i].val_arr_len;
val_pos_t * val_pos_arr =keyval_pos_arr_0[i].val_pos_arr;
int index = sorted_intermediate_keyvals_arr[j].val_arr_len;
sorted_intermediate_keyvals_arr[j].val_arr_len += val_arr_len_0;
sorted_intermediate_keyvals_arr[j].vals = (val_t*)realloc(vals, sizeof(val_t)*(sorted_intermediate_keyvals_arr[j].val_arr_len));
for (int k=0;k<val_arr_len_0;k++){
char *val_0 = sorted_vals_shared_buff_0 + val_pos_arr[k].valPos;
int valSize_0 = val_pos_arr[k].valSize;
sorted_intermediate_keyvals_arr[j].vals[index+k].val = malloc(sizeof(char)*valSize_0);
sorted_intermediate_keyvals_arr[j].vals[index+k].valSize = valSize_0;
memcpy(sorted_intermediate_keyvals_arr[j].vals[index+k].val, val_0, valSize_0);
}//for
break;
}//for
if (j == d_g_state_1->sorted_keyvals_arr_len){
if (d_g_state_1->sorted_keyvals_arr_len == 0) sorted_intermediate_keyvals_arr = NULL;
//val_t *vals = sorted_intermediate_keyvals_arr[j].vals;
int val_arr_len =keyval_pos_arr_0[i].val_arr_len;
val_pos_t * val_pos_arr =keyval_pos_arr_0[i].val_pos_arr;
d_g_state_1->sorted_keyvals_arr_len++;
sorted_intermediate_keyvals_arr = (keyvals_t *)realloc(sorted_intermediate_keyvals_arr, sizeof(keyvals_t)*(d_g_state_1->sorted_keyvals_arr_len));
int index = d_g_state_1->sorted_keyvals_arr_len-1;
keyvals_t* kvals_p = (keyvals_t *)&(sorted_intermediate_keyvals_arr[index]);
kvals_p->keySize = keySize_0;
kvals_p->key = malloc(sizeof(char)*keySize_0);
memcpy(kvals_p->key, key_0, keySize_0);
kvals_p->vals = (val_t *)malloc(sizeof(val_t)*val_arr_len);
kvals_p->val_arr_len = val_arr_len;
for (int k=0; k < val_arr_len; k++){
char *val_0 = sorted_vals_shared_buff_0 + val_pos_arr[k].valPos;
int valSize_0 = val_pos_arr[k].valSize;
kvals_p->vals[k].valSize = valSize_0;
kvals_p->vals[k].val = (char *)malloc(sizeof(char)*valSize_0);
memcpy(kvals_p->vals[k].val,val_0, valSize_0);
}//for
}//if (j == sorted_key_arr_len){
}//if
d_g_state_1->sorted_intermediate_keyvals_arr = sorted_intermediate_keyvals_arr;
DoLog("GPU_ID:[%d] DONE",d_g_state_0->gpu_id);
}
void Panda_Shuffle_Merge(gpu_context *d_g_state_0, gpu_context *d_g_state_1){
char *sorted_keys_shared_buff_0 = (char *)d_g_state_0->h_sorted_keys_shared_buff;
char *sorted_vals_shared_buff_0 = (char *)d_g_state_0->h_sorted_vals_shared_buff;
char *sorted_keys_shared_buff_1 = (char *)d_g_state_1->h_sorted_keys_shared_buff;
char *sorted_vals_shared_buff_1 = (char *)d_g_state_1->h_sorted_vals_shared_buff;
sorted_keyval_pos_t *keyval_pos_arr_0 = d_g_state_0->h_sorted_keyval_pos_arr;
sorted_keyval_pos_t *keyval_pos_arr_1 = d_g_state_1->h_sorted_keyval_pos_arr;
int totalValSize_1 = d_g_state_1->totalValSize;
int totalKeySize_1 = d_g_state_1->totalKeySize;
void *key_0,*key_1;
int keySize_0,keySize_1;
bool equal;
//DoLog("len1:%d len2:%d\n",d_g_state_0->d_sorted_keyvals_arr_len, d_g_state_1->d_sorted_keyvals_arr_len);
for (int i=0;i<d_g_state_0->d_sorted_keyvals_arr_len;i++){
key_0 = sorted_keys_shared_buff_0 + keyval_pos_arr_0[i].keyPos;
keySize_0 = keyval_pos_arr_0[i].keySize;
int j;
for (j=0;j<d_g_state_1->d_sorted_keyvals_arr_len;j++){
key_1 = sorted_keys_shared_buff_1 + keyval_pos_arr_1[j].keyPos;
keySize_1 = keyval_pos_arr_1[j].keySize;
if(cpu_compare(key_0,keySize_0,key_1,keySize_1)!=0)
continue;
//copy all vals in d_g_state_0->h_sorted_keyval_pos_arr[i] to d_g_state_1->h_sorted_keyval_pos_arr[j];
int incValSize = 0;
int len0 = keyval_pos_arr_0[i].val_arr_len;
int len1 = keyval_pos_arr_1[j].val_arr_len;
//DoLog("i:%d j:%d compare: key_0:%s key_1:%s true:%s len0:%d len1:%d\n", i, j, key_0,key_1,(equal ? "true":"false"),len0,len1);
keyval_pos_arr_1[j].val_pos_arr = (val_pos_t*)realloc(keyval_pos_arr_1[j].val_pos_arr,sizeof(val_pos_t)*(len0+len1));
keyval_pos_arr_1[j].val_arr_len = len0+len1;
for (int k = len1; k < len1 + len0; k++){
keyval_pos_arr_1[j].val_pos_arr[k].valSize = keyval_pos_arr_0[i].val_pos_arr[k-len1].valSize;
keyval_pos_arr_1[j].val_pos_arr[k].valPos = keyval_pos_arr_0[i].val_pos_arr[k-len1].valPos;
incValSize += keyval_pos_arr_0[i].val_pos_arr[k-len1].valSize;
}//for
sorted_vals_shared_buff_1 = (char*)realloc(sorted_vals_shared_buff_1, totalValSize_1 + incValSize);
for (int k = len1; k < len1 + len0; k++){
void *val_1 = sorted_vals_shared_buff_1 + totalValSize_1;
void *val_0 = sorted_vals_shared_buff_0+keyval_pos_arr_0[i].val_pos_arr[k-len1].valPos;
memcpy(val_1, val_0, keyval_pos_arr_0[i].val_pos_arr[k-len1].valSize);
totalValSize_1 += keyval_pos_arr_0[i].val_pos_arr[k-len1].valSize;
}//for
break;
}//for (int j = 0;
//key_0 does not exist in d_g_state_1->h_sorted_keyval_pos_arr; create a new keyval pair position there
if(j==d_g_state_1->d_sorted_keyvals_arr_len){
sorted_keys_shared_buff_1 = (char*)realloc(sorted_keys_shared_buff_1, (totalKeySize_1 + keySize_0));
//assert(keySize_0 == keyval_pos_arr_0[i].keySize);
void *key_0 = sorted_keys_shared_buff_0 + keyval_pos_arr_0[i].keyPos;
void *key_1 = sorted_keys_shared_buff_1 + totalKeySize_1;
memcpy(key_1, key_0, keySize_0);
totalKeySize_1 += keySize_0;
keyval_pos_arr_1 = (sorted_keyval_pos_t *)realloc(keyval_pos_arr_1, sizeof(sorted_keyval_pos_t)*(d_g_state_1->d_sorted_keyvals_arr_len+1));
sorted_keyval_pos_t *new_p = &(keyval_pos_arr_1[d_g_state_1->d_sorted_keyvals_arr_len]);
d_g_state_1->d_sorted_keyvals_arr_len += 1;
new_p->keySize = keySize_0;
new_p->keyPos = totalKeySize_1 - keySize_0;
int len0 = keyval_pos_arr_0[i].val_arr_len;
new_p->val_arr_len = len0;
new_p->val_pos_arr = (val_pos_t *)malloc(sizeof(val_pos_t)*len0);
int incValSize = 0;
for (int k = 0; k < len0; k++){
new_p->val_pos_arr[k].valSize = keyval_pos_arr_0[i].val_pos_arr[k].valSize;
new_p->val_pos_arr[k].valPos = keyval_pos_arr_0[i].val_pos_arr[k].valPos;
incValSize += keyval_pos_arr_0[i].val_pos_arr[k].valSize;
}//for
sorted_vals_shared_buff_1 = (char*)realloc(sorted_vals_shared_buff_1,(totalValSize_1 + incValSize));
for (int k = 0; k < len0; k++){
void *val_1 = sorted_vals_shared_buff_1 + totalValSize_1;
void *val_0 = sorted_vals_shared_buff_0 + keyval_pos_arr_0[i].val_pos_arr[k].valPos;
memcpy(val_1,val_0,keyval_pos_arr_0[i].val_pos_arr[k].valSize);
totalValSize_1 += keyval_pos_arr_0[i].val_pos_arr[k].valSize;
}//for
}//if(j==arr_len)
}//for (int i = 0;
d_g_state_1->h_sorted_keyval_pos_arr = keyval_pos_arr_1;
int total_count = 0;
for (int i=0; i<d_g_state_1->d_sorted_keyvals_arr_len; i++){
total_count += d_g_state_1->h_sorted_keyval_pos_arr[i].val_arr_len;
}//for
DoLog("total number of intermeidate records on two GPU's:%d",total_count);
keyval_pos_t *tmp_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*total_count);
DoLog("total number of different intermediate records on two GPU's:%d",d_g_state_1->d_sorted_keyvals_arr_len);
int *pos_arr_4_pos_arr = (int*)malloc(sizeof(int)*d_g_state_1->d_sorted_keyvals_arr_len);
memset(pos_arr_4_pos_arr,0,sizeof(int)*d_g_state_1->d_sorted_keyvals_arr_len);
int index = 0;
for (int i=0; i<d_g_state_1->d_sorted_keyvals_arr_len; i++){
sorted_keyval_pos_t *p = (sorted_keyval_pos_t *)&(d_g_state_1->h_sorted_keyval_pos_arr[i]);
for (int j=0;j<p->val_arr_len;j++){
tmp_keyval_pos_arr[index].keyPos = p->keyPos;
tmp_keyval_pos_arr[index].keySize = p->keySize;
tmp_keyval_pos_arr[index].valPos = p->val_pos_arr[j].valPos;
tmp_keyval_pos_arr[index].valSize = p->val_pos_arr[j].valSize;
//printf("tmp_keyval_pos_arr[%d].keyPos:%d keySize:%d valPos:%d valSize:%d\n",
//index,p->keyPos,p->keySize,p->val_pos_arr[j].valPos,p->val_pos_arr[j].valSize);
//printf("key:%s val:%d\n",(char*)(sorted_keys_shared_buff_1+p->keyPos), *(int*)(sorted_vals_shared_buff_1+p->val_pos_arr[j].valPos));
index++;
}//for
pos_arr_4_pos_arr[i] = index;
}
//printf("totalKeySize_1:%d totalValSize_1:%d\n",totalKeySize_1,totalValSize_1);
//printf("%s\n",sorted_keys_shared_buff_1);
checkCudaErrors(hipMalloc((void**)&d_g_state_1->d_keyval_pos_arr,sizeof(keyval_pos_t)*total_count));
checkCudaErrors(hipMemcpy(d_g_state_1->d_keyval_pos_arr,tmp_keyval_pos_arr,sizeof(keyval_pos_t)*total_count,hipMemcpyHostToDevice));
//d_g_state_1->d_sorted_keyvals_arr_len = d_g_state_1->d_sorted_keyvals_arr_len;
checkCudaErrors(hipMalloc((void**)&d_g_state_1->d_pos_arr_4_sorted_keyval_pos_arr,sizeof(int)*d_g_state_1->d_sorted_keyvals_arr_len));
checkCudaErrors(hipMemcpy(d_g_state_1->d_pos_arr_4_sorted_keyval_pos_arr,pos_arr_4_pos_arr,sizeof(int)*d_g_state_1->d_sorted_keyvals_arr_len,hipMemcpyHostToDevice));
//TODO release these buffers before allocating new ones
checkCudaErrors(hipMalloc((void **)&d_g_state_1->d_sorted_keys_shared_buff,totalKeySize_1));
checkCudaErrors(hipMalloc((void **)&d_g_state_1->d_sorted_vals_shared_buff,totalValSize_1));
checkCudaErrors(hipMemcpy(d_g_state_1->d_sorted_keys_shared_buff,sorted_keys_shared_buff_1,totalKeySize_1,hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_g_state_1->d_sorted_vals_shared_buff,sorted_vals_shared_buff_1,totalValSize_1,hipMemcpyHostToDevice));
//d_g_state_1->d_sorted_keys_shared_buff = sorted_keys_shared_buff_1;
//d_g_state_1->d_sorted_vals_shared_buff = sorted_vals_shared_buff_1;
d_g_state_1->totalKeySize = totalKeySize_1;
d_g_state_1->totalValSize = totalValSize_1;
}
#endif
|
57342520d557c8ab6622978fcaa661e92257c1f7.cu
|
/*
Copyright 2012 The Trustees of Indiana University. All rights reserved.
CGL MapReduce Framework on GPUs and CPUs
Code Name: Panda
File: PandaSort.cu
First Version: 2012-07-01 V0.1
Current Version: 2012-09-01 V0.3
Last Updates: 2012-09-02
Developer: Hui Li ([email protected])
This is the source code for Panda, a MapReduce runtime on GPUs and CPUs.
*/
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
//includes CUDA
#include <cuda_runtime.h>
#ifndef _PANDASORT_CU_
#define _PANDASORT_CU_
#include "Panda.h"
#include "compare.cu"
#define NUM_BLOCK_PER_CHUNK_BITONIC_SORT 512//b256
#define SHARED_MEM_INT2 256
#define NUM_BLOCKS_CHUNK 256 //(512)
#define NUM_THREADS_CHUNK 256 //(256)
#define CHUNK_SIZE (NUM_BLOCKS_CHUNK*NUM_THREADS_CHUNK)
#define NUM_CHUNKS_R (NUM_RECORDS_R/CHUNK_SIZE)
__device__ int getCompareValue(void *d_rawData, cmp_type_t value1, cmp_type_t value2)
{
int compareValue=0;
int v1=value1.x;
int v2=value2.x;
if((v1==-1) || (v2==-1))
{
if(v1==v2)
compareValue=0;
else
if(v1==-1)
compareValue=-1;
else
compareValue=1;
}//if
else
compareValue=compare((void*)(((char*)d_rawData)+v1), value1.y, (void*)(((char*)d_rawData)+v2), value2.y);
return compareValue;
}//__device__
void * s_qsRawData=NULL;
__global__ void
partBitonicSortKernel( void* d_rawData, int totalLenInBytes,cmp_type_t* d_R, unsigned int numRecords, int chunkIdx, int unitSize)
{
__shared__ cmp_type_t shared[NUM_THREADS_CHUNK];
int tx = threadIdx.x;
int bx = blockIdx.x;
//load the data
int dataIdx = chunkIdx*CHUNK_SIZE+bx*blockDim.x+tx;
int unitIdx = ((NUM_BLOCKS_CHUNK*chunkIdx + bx)/unitSize)&1;
shared[tx] = d_R[dataIdx];
__syncthreads();
int ixj=0;
int a=0;
cmp_type_t temp1;
cmp_type_t temp2;
int k = NUM_THREADS_CHUNK;
if(unitIdx == 0)
{
for (int j = (k>>1); j>0; j =(j>>1))
{
ixj = tx ^ j;
//a = (shared[tx].y - shared[ixj].y);
temp1=shared[tx];
temp2= shared[ixj];
if (ixj > tx) {
//a=temp1.y-temp2.y;
//a=compareString((void*)(((char4*)d_rawData)+temp1.x),(void*)(((char4*)d_rawData)+temp2.x));
a=getCompareValue(d_rawData, temp1, temp2);
if ((tx & k) == 0) {
if ( (a>0)) {
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
else {
if ( (a<0)) {
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
}
__syncthreads();
}
}
else
{
for (int j = (k>>1); j>0; j =(j>>1))
{
ixj = tx ^ j;
temp1=shared[tx];
temp2= shared[ixj];
if (ixj > tx) {
//a=temp1.y-temp2.y;
//a=compareString((void*)(((char4*)d_rawData)+temp1.x),(void*)(((char4*)d_rawData)+temp2.x));
a=getCompareValue(d_rawData, temp1, temp2);
if ((tx & k) == 0) {
if( (a<0))
{
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
else {
if( (a>0))
{
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
}
__syncthreads();
}
}
d_R[dataIdx] = shared[tx];
}
__global__ void
unitBitonicSortKernel(void* d_rawData, int totalLenInBytes, cmp_type_t* d_R, unsigned int numRecords, int chunkIdx )
{
__shared__ cmp_type_t shared[NUM_THREADS_CHUNK];
int tx = threadIdx.x;
int bx = blockIdx.x;
int unitIdx = (NUM_BLOCKS_CHUNK*chunkIdx + bx)&1;
//load the data
int dataIdx = chunkIdx*CHUNK_SIZE+bx*blockDim.x+tx;
shared[tx] = d_R[dataIdx];
__syncthreads();
cmp_type_t temp1;
cmp_type_t temp2;
int ixj=0;
int a=0;
if(unitIdx == 0)
{
for (int k = 2; k <= NUM_THREADS_CHUNK; (k =k<<1))
{
// bitonic merge:
for (int j = (k>>1); j>0; (j=j>>1))
{
ixj = tx ^ j;
temp1=shared[tx];
temp2= shared[ixj];
if (ixj > tx) {
//a=temp1.y-temp2.y;
//a=compareString((void*)(((char4*)d_rawData)+temp1.x),(void*)(((char4*)d_rawData)+temp2.x));
a=getCompareValue(d_rawData, temp1, temp2);
if ((tx & k) == 0) {
if ( (a>0)) {
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
else {
if ( (a<0)) {
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
}
__syncthreads();
}
}
}
else
{
for (int k = 2; k <= NUM_THREADS_CHUNK; (k =k<<1))
{
// bitonic merge:
for (int j = (k>>1); j>0; (j=j>>1))
{
ixj = tx ^ j;
temp1=shared[tx];
temp2= shared[ixj];
if (ixj > tx) {
//a=temp1.y-temp2.y;
//a=compareString((void*)(((char4*)d_rawData)+temp1.x),(void*)(((char4*)d_rawData)+temp2.x));
a=getCompareValue(d_rawData, temp1, temp2);
if ((tx & k) == 0) {
if( (a<0))
{
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
else {
if( (a>0))
{
shared[tx]=temp2;
shared[ixj]=temp1;
}
}
}
__syncthreads();
}
}
}
d_R[dataIdx] = shared[tx];
}
__global__ void
bitonicKernel( void* d_rawData, int totalLenInBytes, cmp_type_t* d_R, unsigned int numRecords, int k, int j)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tid = threadIdx.x;
int dataIdx = by*gridDim.x*blockDim.x + bx*blockDim.x + tid;
int ixj = dataIdx^j;
if( ixj > dataIdx )
{
cmp_type_t tmpR = d_R[dataIdx];
cmp_type_t tmpIxj = d_R[ixj];
if( (dataIdx&k) == 0 )
{
//if( tmpR.y > tmpIxj.y )
//if(compareString((void*)(((char4*)d_rawData)+tmpR.x),(void*)(((char4*)d_rawData)+tmpIxj.x))==1)
if(getCompareValue(d_rawData, tmpR, tmpIxj)==1)
{
d_R[dataIdx] = tmpIxj;
d_R[ixj] = tmpR;
}
}
else
{
//if( tmpR.y < tmpIxj.y )
//if(compareString((void*)(((char4*)d_rawData)+tmpR.x),(void*)(((char4*)d_rawData)+tmpIxj.x))==-1)
if(getCompareValue(d_rawData, tmpR, tmpIxj)==-1)
{
d_R[dataIdx] = tmpIxj;
d_R[ixj] = tmpR;
}
}
}
}
__device__ inline void swap(cmp_type_t & a, cmp_type_t & b)
{
// Alternative swap doesn't use a temporary register:
// a ^= b;
// b ^= a;
// a ^= b;
cmp_type_t tmp = a;
a = b;
b = tmp;
}
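//Illustrative sketch, not part of the original file: the "alternative swap" mentioned
//above, spelled out for the two int fields of cmp_type_t. It avoids the temporary
//register, must not be used when both references alias the same element (that would
//zero it), and is generally no faster on modern GPUs. The function name is hypothetical.
__device__ inline void swap_xor_example(cmp_type_t & a, cmp_type_t & b)
{
a.x ^= b.x; b.x ^= a.x; a.x ^= b.x;
a.y ^= b.y; b.y ^= a.y; a.y ^= b.y;
}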
__global__ void bitonicSortSingleBlock_kernel(void* d_rawData, int totalLenInBytes, cmp_type_t * d_values, int rLen, cmp_type_t* d_output)
{
__shared__ cmp_type_t bs_cmpbuf[SHARED_MEM_INT2];
//const int by = blockIdx.y;
//const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
//const int bid=bx+by*gridDim.x;
//const int numThread=blockDim.x;
//const int resultID=(bx)*numThread+tid;
if(tid<rLen)
{
bs_cmpbuf[tid] = d_values[tid];
}
else
{
bs_cmpbuf[tid].x =-1;
}
__syncthreads();
// Parallel bitonic sort.
int compareValue=0;
for (int k = 2; k <= SHARED_MEM_INT2; k *= 2)
{
// Bitonic merge:
for (int j = k / 2; j>0; j /= 2)
{
int ixj = tid ^ j;
if (ixj > tid)
{
if ((tid & k) == 0)
{
compareValue=getCompareValue(d_rawData, bs_cmpbuf[tid], bs_cmpbuf[ixj]);
//if (shared[tid] > shared[ixj])
if(compareValue>0)
{
swap(bs_cmpbuf[tid], bs_cmpbuf[ixj]);
}
}
else
{
compareValue=getCompareValue(d_rawData, bs_cmpbuf[tid], bs_cmpbuf[ixj]);
//if (shared[tid] < shared[ixj])
if(compareValue<0)
{
swap(bs_cmpbuf[tid], bs_cmpbuf[ixj]);
}
}
}
__syncthreads();
}
}
// Write result.
/*if(tid<rLen)
{
d_output[tid] = bs_cmpbuf[tid+SHARED_MEM_INT2-rLen];
}*/
int start_row_idCopy=SHARED_MEM_INT2-rLen;
if(tid>=start_row_idCopy)
{
d_output[tid-start_row_idCopy]=bs_cmpbuf[tid];
}
}
__global__ void bitonicSortMultipleBlocks_kernel(void* d_rawData, int totalLenInBytes, cmp_type_t * d_values, int* d_bound, int start_row_idBlock, int numBlock, cmp_type_t *d_output)
{
__shared__ int bs_pStart;
__shared__ int bs_pEnd;
__shared__ int bs_numElement;
__shared__ cmp_type_t bs_shared[SHARED_MEM_INT2];
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
//const int numThread=blockDim.x;
//const int resultID=(bx)*numThread+tid;
if(bid>=numBlock) return;
if(tid==0)
{
bs_pStart=d_bound[(bid+start_row_idBlock)<<1];
bs_pEnd=d_bound[((bid+start_row_idBlock)<<1)+1];
bs_numElement=bs_pEnd-bs_pStart;
}
__syncthreads();
// Copy input to shared mem.
if(tid<bs_numElement)
{
bs_shared[tid] = d_values[tid+bs_pStart];
}
else
{
bs_shared[tid].x =-1;
}
__syncthreads();
// Parallel bitonic sort.
int compareValue=0;
for (int k = 2; k <= SHARED_MEM_INT2; k *= 2)
{
// Bitonic merge:
for (int j = k / 2; j>0; j /= 2)
{
int ixj = tid ^ j;
if (ixj > tid)
{
if ((tid & k) == 0)
{
compareValue=getCompareValue(d_rawData, bs_shared[tid], bs_shared[ixj]);
//if (shared[tid] > shared[ixj])
if(compareValue>0)
{
swap(bs_shared[tid], bs_shared[ixj]);
}
}
else
{
compareValue=getCompareValue(d_rawData, bs_shared[tid], bs_shared[ixj]);
//if (shared[tid] < shared[ixj])
if(compareValue<0)
{
swap(bs_shared[tid], bs_shared[ixj]);
}
}
}
__syncthreads();
}
}
// Write result.
//if(tid<bs_numElement)
//{
// d_output[tid+bs_pStart] = bs_shared[tid+SHARED_MEM_INT2-bs_numElement];
//}
int start_row_idCopy=SHARED_MEM_INT2-bs_numElement;
if(tid>=start_row_idCopy)
{
d_output[bs_pStart+tid-start_row_idCopy]=bs_shared[tid];
}
}
__global__ void initialize_kernel(cmp_type_t* d_data, int start_row_idPos, int rLen, cmp_type_t value)
{
}
void bitonicSortMultipleBlocks(void* d_rawData, int totalLenInBytes, cmp_type_t * d_values, int* d_bound, int numBlock, cmp_type_t * d_output)
{
}
void bitonicSortSingleBlock(void* d_rawData, int totalLenInBytes, cmp_type_t * d_values, int rLen, cmp_type_t * d_output)
{
int numThreadsPerBlock_x=SHARED_MEM_INT2;
int numThreadsPerBlock_y=1;
int numBlock_x=1;
int numBlock_y=1;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
bitonicSortSingleBlock_kernel<<<grid,thread>>>(d_rawData, totalLenInBytes, d_values, rLen, d_output);
cudaThreadSynchronize();
}
void initialize(cmp_type_t *d_data, int rLen, cmp_type_t value)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start_row_id=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start_row_id=i*chunkSize;
end=start_row_id+chunkSize;
if(end>rLen)
end=rLen;
initialize_kernel<<<grid,thread>>>(d_data, start_row_id, rLen, value);
}
cudaThreadSynchronize();
}
void bitonicSortGPU(void* d_rawData, int totalLenInBytes, cmp_type_t* d_Rin, int rLen, void *d_Rout)
{
unsigned int numRecordsR;
unsigned int size = rLen;
unsigned int level = 0;
while( size != 1 )
{
size = size/2;
level++;
}
if( (1<<level) < rLen )
{
level++;
}
numRecordsR = (1<<level);
if(rLen<=NUM_THREADS_CHUNK)
{
bitonicSortSingleBlock((void*)d_rawData, totalLenInBytes, d_Rin, rLen, (cmp_type_t*)d_Rout);
}
else
if( rLen <= 256*1024 )
{
//unsigned int numRecordsR = rLen;
unsigned int numThreadsSort = NUM_THREADS_CHUNK;
if(numRecordsR<NUM_THREADS_CHUNK)
numRecordsR=NUM_THREADS_CHUNK;
unsigned int numBlocksXSort = numRecordsR/numThreadsSort;
unsigned int numBlocksYSort = 1;
dim3 gridSort( numBlocksXSort, numBlocksYSort );
unsigned int memSizeRecordsR = sizeof( cmp_type_t ) * numRecordsR;
//copy the <offset, length> pairs.
cmp_type_t* d_R;
//checkCudaErrors
( cudaMalloc( (void**) &d_R, memSizeRecordsR) );
cmp_type_t tempValue;
tempValue.x=tempValue.y=-1;
initialize(d_R, numRecordsR, tempValue);
( cudaMemcpy( d_R, d_Rin, rLen*sizeof(cmp_type_t), cudaMemcpyDeviceToDevice) );
for( int k = 2; k <= numRecordsR; k *= 2 )
{
for( int j = k/2; j > 0; j /= 2 )
{
bitonicKernel<<<gridSort, numThreadsSort>>>((void*)d_rawData, totalLenInBytes, d_R, numRecordsR, k, j);
}
}
( cudaMemcpy( d_Rout, d_R+(numRecordsR-rLen), sizeof(cmp_type_t)*rLen, cudaMemcpyDeviceToDevice) );
cudaFree( d_R );
cudaThreadSynchronize();
}
else
{
unsigned int numThreadsSort = NUM_THREADS_CHUNK;
unsigned int numBlocksYSort = 1;
unsigned int numBlocksXSort = (numRecordsR/numThreadsSort)/numBlocksYSort;
if(numBlocksXSort>=(1<<16))
{
numBlocksXSort=(1<<15);
numBlocksYSort=(numRecordsR/numThreadsSort)/numBlocksXSort;
}
unsigned int numBlocksChunk = NUM_BLOCKS_CHUNK;
unsigned int numThreadsChunk = NUM_THREADS_CHUNK;
unsigned int chunkSize = numBlocksChunk*numThreadsChunk;
unsigned int numChunksR = numRecordsR/chunkSize;
dim3 gridSort( numBlocksXSort, numBlocksYSort );
unsigned int memSizeRecordsR = sizeof( cmp_type_t ) * numRecordsR;
cmp_type_t* d_R;
( cudaMalloc( (void**) &d_R, memSizeRecordsR) );
cmp_type_t tempValue;
tempValue.x=tempValue.y=-1;
initialize(d_R, numRecordsR, tempValue);
( cudaMemcpy( d_R, d_Rin, rLen*sizeof(cmp_type_t), cudaMemcpyDeviceToDevice) );
for( int chunkIdx = 0; chunkIdx < numChunksR; chunkIdx++ )
{
unitBitonicSortKernel<<< numBlocksChunk, numThreadsChunk>>>( (void*)d_rawData, totalLenInBytes, d_R, numRecordsR, chunkIdx );
}
int j;
for( int k = numThreadsChunk*2; k <= numRecordsR; k *= 2 )
{
for( j = k/2; j > numThreadsChunk/2; j /= 2 )
{
bitonicKernel<<<gridSort, numThreadsSort>>>( (void*)d_rawData, totalLenInBytes, d_R, numRecordsR, k, j);
}
for( int chunkIdx = 0; chunkIdx < numChunksR; chunkIdx++ )
{
partBitonicSortKernel<<< numBlocksChunk, numThreadsChunk>>>((void*)d_rawData, totalLenInBytes, d_R, numRecordsR, chunkIdx, k/numThreadsSort );
}
}
( cudaMemcpy( d_Rout, d_R+(numRecordsR-rLen), sizeof(cmp_type_t)*rLen, cudaMemcpyDeviceToDevice) );
cudaFree( d_R );
cudaThreadSynchronize();
}
}
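//Illustrative usage sketch (an assumption, not part of the original Panda code):
//sorting rLen <offset,length> records that index into a raw key buffer already
//resident on the GPU. The names d_rawBuf, rawBytes and h_pairs are hypothetical.
void bitonicSortGPU_usage_example(void *d_rawBuf, int rawBytes, cmp_type_t *h_pairs, int rLen)
{
cmp_type_t *d_Rin = NULL, *d_Rout = NULL;
cudaMalloc((void**)&d_Rin, rLen*sizeof(cmp_type_t));
cudaMalloc((void**)&d_Rout, rLen*sizeof(cmp_type_t));
cudaMemcpy(d_Rin, h_pairs, rLen*sizeof(cmp_type_t), cudaMemcpyHostToDevice);
//each record's x field is a byte offset into d_rawBuf and y is its length;
//compare() from compare.cu decides the ordering of the referenced keys.
bitonicSortGPU(d_rawBuf, rawBytes, d_Rin, rLen, d_Rout);
cudaMemcpy(h_pairs, d_Rout, rLen*sizeof(cmp_type_t), cudaMemcpyDeviceToHost);
cudaFree(d_Rin);
cudaFree(d_Rout);
}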
__global__ void getIntYArray_kernel(int2* d_input, int start_row_idPos, int rLen, int* d_output)
{
}
__global__ void getXYArray_kernel(cmp_type_t* d_input, int start_row_idPos, int rLen, int2* d_output)
{
}
__global__ void getZWArray_kernel(cmp_type_t* d_input, int start_row_idPos, int rLen, int2* d_output)
{
}
__global__ void setXYArray_kernel(cmp_type_t* d_input, int start_row_idPos, int rLen, int2* d_value)
{
}
__global__ void setZWArray_kernel(cmp_type_t* d_input, int start_row_idPos, int rLen, int2* d_value)
{
}
void getIntYArray(int2 *d_data, int rLen, int* d_output)
{
}
void getXYArray(cmp_type_t *d_data, int rLen, int2* d_output)
{
}
void getZWArray(cmp_type_t *d_data, int rLen, int2* d_output)
{
cudaThreadSynchronize();
}
void setXYArray(cmp_type_t *d_data, int rLen, int2* d_value)
{
cudaThreadSynchronize();
}
void setZWArray(cmp_type_t *d_data, int rLen, int2* d_value)
{
cudaThreadSynchronize();
}
__global__ void copyChunks_kernel(void *d_source, int start_row_idPos, int2* d_Rin, int rLen, int *d_sum, void *d_dest)
{
}
__global__ void getChunkBoundary_kernel(void* d_rawData, int start_row_idPos, cmp_type_t *d_Rin,
int rLen, int* d_start_row_idArray)
{
}
__global__ void setBoundaryInt2_kernel(int* d_boundary, int start_row_idPos, int numKey, int rLen,
int2* d_boundaryRange)
{
}
__global__ void writeBoundary_kernel(int start_row_idPos, int rLen, int* d_start_row_idArray,
int* d_start_row_idSumArray, int* d_bounary)
{
}
void copyChunks(void *d_source, int2* d_Rin, int rLen, void *d_dest)
{
}
//return the number of chunks.
int getChunkBoundary(void *d_source, cmp_type_t* d_Rin, int rLen, int2 ** h_outputKeyListRange)
{
return 0;
}
__global__ void copyDataFromDevice2Host1(gpu_context d_g_state)
{
int num_records_per_thread = (d_g_state.num_input_record+(gridDim.x*blockDim.x)-1)/(gridDim.x*blockDim.x);
int block_start_row_idx = num_records_per_thread*blockIdx.x*blockDim.x;
int thread_start_row_idx = block_start_row_idx
+ (threadIdx.x/STRIDE)*num_records_per_thread*STRIDE
+ (threadIdx.x%STRIDE);
int thread_end_idx = thread_start_row_idx+num_records_per_thread*STRIDE;
//if (TID>=d_g_state.num_input_record)return;
if(thread_end_idx>d_g_state.num_input_record)
thread_end_idx = d_g_state.num_input_record;
for(int map_task_idx=thread_start_row_idx; map_task_idx < thread_end_idx; map_task_idx+=STRIDE){
int begin=0;
int end=0;
for (int i=0;i<map_task_idx;i++){
begin += d_g_state.d_intermediate_keyval_arr_arr_p[i]->arr_len;
}//for
end = begin + (d_g_state.d_intermediate_keyval_arr_arr_p[map_task_idx]->arr_len);
//printf("map_task_idx:%d begin:%d end:%d\n",map_task_idx, begin,end);
for(int i=begin;i<end;i++){
keyval_t * p1 = &(d_g_state.d_intermediate_keyval_arr[i]);
keyval_t * p2 = &(d_g_state.d_intermediate_keyval_arr_arr_p[map_task_idx]->arr[i-begin]);
memcpy(p1,p2,sizeof(keyval_t));
//printf("copyData1: TID:%d keySize %d valSize:%d p2->key:%s p1->key:%s\n",map_task_idx,p1->keySize,p1->valSize,p2->key,p1->key);
}//for
}//for int map_task_idx;
}
__global__ void copyDataFromDevice2Host3(gpu_context d_g_state)
{
int num_records_per_thread = (d_g_state.num_input_record+(gridDim.x*blockDim.x)-1)/(gridDim.x*blockDim.x);
int block_start_row_idx = num_records_per_thread*blockIdx.x*blockDim.x;
int thread_start_row_idx = block_start_row_idx
+ (threadIdx.x/STRIDE)*num_records_per_thread*STRIDE
+ (threadIdx.x%STRIDE);
int thread_end_idx = thread_start_row_idx+num_records_per_thread*STRIDE;
//if (TID>=d_g_state.num_input_record)return;
if(thread_end_idx>d_g_state.num_input_record)
thread_end_idx = d_g_state.num_input_record;
int begin, end, val_pos, key_pos;
char *val_p,*key_p;
for(int map_task_idx=thread_start_row_idx; map_task_idx < thread_end_idx; map_task_idx+=STRIDE){
begin=0;
end=0;
for (int i=0;i<map_task_idx;i++){
begin += (d_g_state.d_intermediate_keyval_arr_arr_p[i]->arr_len);
}//for
end = begin + (d_g_state.d_intermediate_keyval_arr_arr_p[map_task_idx]->arr_len);
//printf("copyData:%d begin:%d, end:%d\n",TID,begin,end);
for(int i=begin;i<end;i++){
//keyval_t * p1 = &(d_g_state.d_intermediate_keyval_arr[i]);
val_pos = d_g_state.d_intermediate_keyval_pos_arr[i].valPos;
key_pos = d_g_state.d_intermediate_keyval_pos_arr[i].keyPos;
/*if (key_pos>=d_g_state.totalKeySize){
printf("keyPos2:%d totalKeySize:%d begin:%d end:%d i:%d map_task_idx:%d\n",key_pos,d_g_state.totalKeySize, begin, end, i, map_task_idx);
key_pos = 0;
}
if (val_pos>=d_g_state.totalValSize){
//printf("keyPos:%d totalKeySize:%d begin:%d end:%d i:%d map_task_idx:%d\n",key_pos,d_g_state.totalKeySize, begin, end, i, map_task_idx);
val_pos = 0;
}*/
val_p = (char*)(d_g_state.d_intermediate_vals_shared_buff)+val_pos;
key_p = (char*)(d_g_state.d_intermediate_keys_shared_buff)+key_pos;
keyval_t * p2 = &(d_g_state.d_intermediate_keyval_arr_arr_p[map_task_idx]->arr[i-begin]);
memcpy(key_p,p2->key,p2->keySize);
//int totalKeySize;
//int totalValSize;
memcpy(val_p,p2->val,p2->valSize);
//added by Hui
//free(p2->key);
//free(p2->val);
//free(p2);
//printf("copyDataFromDevice2Host2: TID:%d key: %s val:%d\n",TID,p2->key,*(int *)p2->val);
}//for
//free(d_g_state.d_intermediate_keyval_arr_arr[map_task_idx].arr);
//if (index*recordsPerTask >= recordNum) return;
//free(&d_g_state.d_intermediate_keyval_arr_arr[map_task_idx])
}//for
//free(d_g_state.d_intermediate_keyval_pos_arr);
//
}//__global__
#ifdef REMOVE
__global__ void copyDataFromDevice2Host2(gpu_context d_g_state)
{
int num_records_per_thread = (d_g_state.num_input_record+(gridDim.x*blockDim.x)-1)/(gridDim.x*blockDim.x);
int block_start_row_idx = num_records_per_thread*blockIdx.x*blockDim.x;
int thread_start_row_idx = block_start_row_idx
+ (threadIdx.x/STRIDE)*num_records_per_thread*STRIDE
+ (threadIdx.x%STRIDE);
int thread_end_idx = thread_start_row_idx+num_records_per_thread*STRIDE;
//if (TID>=d_g_state.num_input_record)return;
if(thread_end_idx>d_g_state.num_input_record)
thread_end_idx = d_g_state.num_input_record;
for(int map_task_idx=thread_start_row_idx; map_task_idx < thread_end_idx; map_task_idx+=STRIDE){
int begin=0;
int end=0;
for (int i=0;i<map_task_idx;i++){
begin += (d_g_state.d_intermediate_keyval_arr_arr[i].arr_len);
}//for
end = begin + (d_g_state.d_intermediate_keyval_arr_arr[map_task_idx].arr_len);
//printf("copyData:%d begin:%d, end:%d\n",TID,begin,end);
for(int i=begin;i<end;i++){
keyval_t * p1 = &(d_g_state.d_intermediate_keyval_arr[i]);
keyval_t * p2 = &(d_g_state.d_intermediate_keyval_arr_arr[map_task_idx].arr[i-begin]);
memcpy(p1->key,p2->key,p2->keySize);
memcpy(p1->val,p2->val,p2->valSize);
//printf("copyDataFromDevice2Host2: TID:%d key: %s val:%d\n",TID,p2->key,*(int *)p2->val);
}//for
//if (index*recordsPerTask >= recordNum) return;
}//for
}//__global__
#endif
void StartCPUShuffle2(thread_info_t *thread_info){
cpu_context *d_g_state = (cpu_context*)(thread_info->d_g_state);
job_configuration *cpu_job_conf = (job_configuration*)(thread_info->job_conf);
//TODO put all jobs related object to job_conf
bool configured;
int cpu_group_id;
int num_input_record;
int num_cpus;
keyval_t * input_keyval_arr;
keyval_arr_t *intermediate_keyval_arr_arr_p = d_g_state->intermediate_keyval_arr_arr_p;
long total_count = 0;
int index = 0;
for(int i=0;i<d_g_state->num_input_record;i++){
total_count += intermediate_keyval_arr_arr_p[i].arr_len;
}//for
d_g_state->sorted_intermediate_keyvals_arr = NULL;
keyvals_t * sorted_intermediate_keyvals_arr = d_g_state->sorted_intermediate_keyvals_arr;
int sorted_key_arr_len = 0;
for(int i=0;i<d_g_state->num_input_record;i++){
int len = intermediate_keyval_arr_arr_p[i].arr_len;
for (int j=0;j<len;j++){
char *key_i = (char *)(intermediate_keyval_arr_arr_p[i].arr[j].key);
int keySize_i = (intermediate_keyval_arr_arr_p[i].arr[j].keySize);
char *val_i = (char *)(intermediate_keyval_arr_arr_p[i].arr[j].val);
int valSize_i = (intermediate_keyval_arr_arr_p[i].arr[j].valSize);
int k = 0;
for (; k<sorted_key_arr_len; k++){
char *key_k = (char *)(sorted_intermediate_keyvals_arr[k].key);
int keySize_k = sorted_intermediate_keyvals_arr[k].keySize;
if ( cpu_compare(key_i, keySize_i, key_k, keySize_k) != 0 )
continue;
//found the match
val_t *vals = sorted_intermediate_keyvals_arr[k].vals;
sorted_intermediate_keyvals_arr[k].val_arr_len++;
sorted_intermediate_keyvals_arr[k].vals = (val_t*)realloc(vals, sizeof(val_t)*(sorted_intermediate_keyvals_arr[k].val_arr_len));
int index = sorted_intermediate_keyvals_arr[k].val_arr_len - 1;
sorted_intermediate_keyvals_arr[k].vals[index].valSize = valSize_i;
sorted_intermediate_keyvals_arr[k].vals[index].val = (char *)malloc(sizeof(char)*valSize_i);
memcpy(sorted_intermediate_keyvals_arr[k].vals[index].val,val_i,valSize_i);
break;
}//for
if (k == sorted_key_arr_len){
if (sorted_key_arr_len == 0)
sorted_intermediate_keyvals_arr = NULL;
sorted_key_arr_len++;
sorted_intermediate_keyvals_arr = (keyvals_t *)realloc(sorted_intermediate_keyvals_arr, sizeof(keyvals_t)*sorted_key_arr_len);
int index = sorted_key_arr_len-1;
keyvals_t* kvals_p = (keyvals_t *)&(sorted_intermediate_keyvals_arr[index]);
kvals_p->keySize = keySize_i;
kvals_p->key = malloc(sizeof(char)*keySize_i);
memcpy(kvals_p->key, key_i, keySize_i);
kvals_p->vals = (val_t *)malloc(sizeof(val_t));
kvals_p->val_arr_len = 1;
kvals_p->vals[0].valSize = valSize_i;
kvals_p->vals[0].val = (char *)malloc(sizeof(char)*valSize_i);
memcpy(kvals_p->vals[0].val,val_i, valSize_i);
}//if
}//for j;
}//for i;
d_g_state->sorted_intermediate_keyvals_arr = sorted_intermediate_keyvals_arr;
d_g_state->sorted_keyvals_arr_len = sorted_key_arr_len;
DoLog("CPU_GROUP_ID:[%d] #Intermediate Records:%d; #Intermediate Records:%d After Shuffle",d_g_state->cpu_group_id, total_count,sorted_key_arr_len);
}
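//Illustrative sketch (an assumption, not part of the original file): after the shuffle
//above, a CPU reducer can walk the grouped records like this. The function name is
//hypothetical and it is not called anywhere in this file.
static void cpu_shuffle_output_iterate_example(cpu_context *d_g_state)
{
for (int i=0; i<d_g_state->sorted_keyvals_arr_len; i++){
keyvals_t *p = &(d_g_state->sorted_intermediate_keyvals_arr[i]);
//p->key / p->keySize identify the group; p->vals holds val_arr_len values
for (int j=0; j<p->val_arr_len; j++){
//each value is p->vals[j].val with length p->vals[j].valSize
void *val = p->vals[j].val;
int valSize = p->vals[j].valSize;
(void)val; (void)valSize;
}//for
}//for
}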
void StartCPUShuffle(cpu_context *d_g_state){
#ifdef DEV_MODE
bool configured;
int cpu_group_id;
int num_input_record;
int num_cpus;
keyval_t * input_keyval_arr;
keyval_arr_t *intermediate_keyval_arr_arr_p = d_g_state->intermediate_keyval_arr_arr_p;
long total_count = 0;
int index = 0;
for(int i=0;i<d_g_state->num_input_record;i++){
total_count += intermediate_keyval_arr_arr_p[i].arr_len;
}//for
DoLog("total intermediate record count:%d\n",total_count);
d_g_state->sorted_intermediate_keyvals_arr = NULL;
keyvals_t * sorted_intermediate_keyvals_arr = d_g_state->sorted_intermediate_keyvals_arr;
int sorted_key_arr_len = 0;
for(int i=0;i<d_g_state->num_input_record;i++){
int len = intermediate_keyval_arr_arr_p[i].arr_len;
for (int j=0;j<len;j++){
char *key_i = (char *)(intermediate_keyval_arr_arr_p[i].arr[j].key);
int keySize_i = (intermediate_keyval_arr_arr_p[i].arr[j].keySize);
char *val_i = (char *)(intermediate_keyval_arr_arr_p[i].arr[j].val);
int valSize_i = (intermediate_keyval_arr_arr_p[i].arr[j].valSize);
int k = 0;
for (; k<sorted_key_arr_len; k++){
char *key_k = (char *)(sorted_intermediate_keyvals_arr[k].key);
int keySize_k = sorted_intermediate_keyvals_arr[k].keySize;
if ( cpu_compare(key_i, keySize_i, key_k, keySize_k) != 0 )
continue;
//found the match
val_t *vals = sorted_intermediate_keyvals_arr[k].vals;
sorted_intermediate_keyvals_arr[k].val_arr_len++;
sorted_intermediate_keyvals_arr[k].vals = (val_t*)realloc(vals, sizeof(val_t)*(sorted_intermediate_keyvals_arr[k].val_arr_len));
int index = sorted_intermediate_keyvals_arr[k].val_arr_len - 1;
sorted_intermediate_keyvals_arr[k].vals[index].valSize = valSize_i;
sorted_intermediate_keyvals_arr[k].vals[index].val = (char *)malloc(sizeof(char)*valSize_i);
memcpy(sorted_intermediate_keyvals_arr[k].vals[index].val,val_i,valSize_i);
break;
}//for
if (k == sorted_key_arr_len){
if (sorted_key_arr_len == 0)
sorted_intermediate_keyvals_arr = NULL;
sorted_key_arr_len++;
sorted_intermediate_keyvals_arr = (keyvals_t *)realloc(sorted_intermediate_keyvals_arr, sizeof(keyvals_t)*sorted_key_arr_len);
int index = sorted_key_arr_len-1;
keyvals_t* kvals_p = (keyvals_t *)&(sorted_intermediate_keyvals_arr[index]);
kvals_p->keySize = keySize_i;
kvals_p->key = malloc(sizeof(char)*keySize_i);
memcpy(kvals_p->key, key_i, keySize_i);
kvals_p->vals = (val_t *)malloc(sizeof(val_t));
kvals_p->val_arr_len = 1;
kvals_p->vals[0].valSize = valSize_i;
kvals_p->vals[0].val = (char *)malloc(sizeof(char)*valSize_i);
memcpy(kvals_p->vals[0].val,val_i, valSize_i);
}//if
}//for j;
}//for i;
d_g_state->sorted_intermediate_keyvals_arr = sorted_intermediate_keyvals_arr;
d_g_state->sorted_keyvals_arr_len = sorted_key_arr_len;
DoLog("total number of different intermediate records:%d",sorted_key_arr_len);
#endif
}
void Shuffle4GPUOutput(gpu_context* d_g_state){
cudaThreadSynchronize();
int *count_arr = (int *)malloc(sizeof(int) * d_g_state->num_input_record);
//DoLog("begin to copy data from device to host memory num_input_record:%d",d_g_state->num_input_record);
//DoLog("allocate memory for d_intermediate_keyval_total_count size:%d\n",sizeof(int)*d_g_state->num_input_record);
checkCudaErrors(cudaMemcpy(count_arr, d_g_state->d_intermediate_keyval_total_count, sizeof(int)*d_g_state->num_input_record, cudaMemcpyDeviceToHost));
long total_count = 0;
int index = 0;
for(int i=0;i<d_g_state->num_input_record;i++){
//printf("arr_len[%d]=:%d\n",i,count_arr[i]);
total_count += count_arr[i];
index++;
}//for
free(count_arr);
checkCudaErrors(cudaMalloc((void **)&(d_g_state->d_intermediate_keyval_arr),sizeof(keyval_t)*total_count));
int num_blocks = (d_g_state->num_mappers + (NUM_THREADS)-1)/(NUM_THREADS);
copyDataFromDevice2Host1<<<num_blocks,NUM_THREADS>>>(*d_g_state);
//copyDataFromDevice2Host1<<<NUM_BLOCKS,NUM_THREADS>>>(*d_g_state);
cudaThreadSynchronize();
keyval_t * h_keyval_buff = (keyval_t *)malloc(sizeof(keyval_t)*total_count);
checkCudaErrors(cudaMemcpy(h_keyval_buff, d_g_state->d_intermediate_keyval_arr, sizeof(keyval_t)*total_count, cudaMemcpyDeviceToHost));
d_g_state->h_intermediate_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*total_count);
keyval_pos_t *h_intermediate_keyvals_pos_arr = d_g_state->h_intermediate_keyval_pos_arr;
int totalKeySize = 0;
int totalValSize = 0;
for (int i=0;i<total_count;i++){
h_intermediate_keyvals_pos_arr[i].valPos= totalValSize;
h_intermediate_keyvals_pos_arr[i].keyPos = totalKeySize;
h_intermediate_keyvals_pos_arr[i].keySize = h_keyval_buff[i].keySize;
h_intermediate_keyvals_pos_arr[i].valSize = h_keyval_buff[i].valSize;
totalKeySize += h_keyval_buff[i].keySize;
totalValSize += h_keyval_buff[i].valSize;
}//for
d_g_state->totalValSize = totalValSize;
d_g_state->totalKeySize = totalKeySize;
//DoLog("totalKeySize:%d totalValSize:%d ",totalKeySize,totalValSize);
checkCudaErrors(cudaMalloc((void **)&d_g_state->d_intermediate_keys_shared_buff,totalKeySize));
checkCudaErrors(cudaMalloc((void **)&d_g_state->d_intermediate_vals_shared_buff,totalValSize));
checkCudaErrors(cudaMalloc((void **)&d_g_state->d_intermediate_keyval_pos_arr,sizeof(keyval_pos_t)*total_count));
checkCudaErrors(cudaMemcpy(d_g_state->d_intermediate_keyval_pos_arr, h_intermediate_keyvals_pos_arr, sizeof(keyval_pos_t)*total_count, cudaMemcpyHostToDevice));
//DoLog("copyDataFromDevice2Host3");
cudaThreadSynchronize();
copyDataFromDevice2Host3<<<num_blocks,NUM_THREADS>>>(*d_g_state);
//printData<<<NUM_BLOCKS,NUM_THREADS>>>(*d_g_state);
cudaThreadSynchronize();
d_g_state->h_intermediate_keys_shared_buff = malloc(sizeof(char)*totalKeySize);
d_g_state->h_intermediate_vals_shared_buff = malloc(sizeof(char)*totalValSize);
checkCudaErrors(cudaMemcpy(d_g_state->h_intermediate_keys_shared_buff,d_g_state->d_intermediate_keys_shared_buff,sizeof(char)*totalKeySize,cudaMemcpyDeviceToHost));
checkCudaErrors(cudaMemcpy(d_g_state->h_intermediate_vals_shared_buff,d_g_state->d_intermediate_vals_shared_buff,sizeof(char)*totalValSize,cudaMemcpyDeviceToHost));
/* for(int i=0;i<total_count;i++){
printf("keySize:%d, valSize:%d key:%s val:%d\n",h_buff[i].keySize,h_buff[i].valSize,(char *)h_buff[i].key,*(int *)h_buff[i].val);
}//for */
//////////////////////////////////////////////
checkCudaErrors(cudaMalloc((void **)&d_g_state->d_sorted_keys_shared_buff,totalKeySize));
checkCudaErrors(cudaMalloc((void **)&d_g_state->d_sorted_vals_shared_buff,totalValSize));
checkCudaErrors(cudaMalloc((void **)&d_g_state->d_keyval_pos_arr,sizeof(keyval_pos_t)*total_count));
d_g_state->h_sorted_keys_shared_buff = malloc(sizeof(char)*totalKeySize);
d_g_state->h_sorted_vals_shared_buff = malloc(sizeof(char)*totalValSize);
//d_g_state->h_sorted_keyval_pos_arr = (sorted_keyval_pos_t *)malloc(sizeof(sorted_keyval_pos_t)*total_count);
char *sorted_keys_shared_buff = (char *)d_g_state->h_sorted_keys_shared_buff;
char *sorted_vals_shared_buff = (char *)d_g_state->h_sorted_vals_shared_buff;
//sorted_keyval_pos_t * h_sorted_keyval_pos_arr = d_g_state->h_sorted_keyval_pos_arr;
char *intermediate_key_shared_buff = (char *)d_g_state->h_intermediate_keys_shared_buff;
char *intermediate_val_shared_buff = (char *)d_g_state->h_intermediate_vals_shared_buff;
memcpy(sorted_keys_shared_buff, intermediate_key_shared_buff, totalKeySize);
memcpy(sorted_vals_shared_buff, intermediate_val_shared_buff, totalValSize);
int sorted_key_arr_len = 0;
///////////////////////////////////////////////////////////////////////////////////////////////////
//transfer the d_sorted_keyval_pos_arr to h_sorted_keyval_pos_arr
//DoLog("transfer the d_sorted_keyval_pos_arr to h_sorted_keyval_pos_arr");
sorted_keyval_pos_t * h_sorted_keyval_pos_arr = NULL;
for (int i=0; i<total_count; i++){
int iKeySize = h_intermediate_keyvals_pos_arr[i].keySize;
int j = 0;
for (; j<sorted_key_arr_len; j++){
int jKeySize = h_sorted_keyval_pos_arr[j].keySize;
char *key_i = (char *)(intermediate_key_shared_buff + h_intermediate_keyvals_pos_arr[i].keyPos);
char *key_j = (char *)(sorted_keys_shared_buff + h_sorted_keyval_pos_arr[j].keyPos);
if (cpu_compare(key_i,iKeySize,key_j,jKeySize)!=0)
continue;
//found the match
int arr_len = h_sorted_keyval_pos_arr[j].val_arr_len;
h_sorted_keyval_pos_arr[j].val_pos_arr = (val_pos_t *)realloc(h_sorted_keyval_pos_arr[j].val_pos_arr, sizeof(val_pos_t)*(arr_len+1));
h_sorted_keyval_pos_arr[j].val_pos_arr[arr_len].valSize = h_intermediate_keyvals_pos_arr[i].valSize;
h_sorted_keyval_pos_arr[j].val_pos_arr[arr_len].valPos = h_intermediate_keyvals_pos_arr[i].valPos;
h_sorted_keyval_pos_arr[j].val_arr_len += 1;
break;
}//for
if(j==sorted_key_arr_len){
sorted_key_arr_len++;
//printf("d_g_state->d_sorted_keyvals_arr_len:%d\n",d_g_state->d_sorted_keyvals_arr_len);
h_sorted_keyval_pos_arr = (sorted_keyval_pos_t *)realloc(h_sorted_keyval_pos_arr,sorted_key_arr_len*sizeof(sorted_keyval_pos_t));
sorted_keyval_pos_t *p = &(h_sorted_keyval_pos_arr[sorted_key_arr_len - 1]);
p->keySize = iKeySize;
p->keyPos = h_intermediate_keyvals_pos_arr[i].keyPos;
p->val_arr_len = 1;
p->val_pos_arr = (val_pos_t*)malloc(sizeof(val_pos_t));
p->val_pos_arr[0].valSize = h_intermediate_keyvals_pos_arr[i].valSize;
p->val_pos_arr[0].valPos = h_intermediate_keyvals_pos_arr[i].valPos;
}//if
}
d_g_state->h_sorted_keyval_pos_arr = h_sorted_keyval_pos_arr;
d_g_state->d_sorted_keyvals_arr_len = sorted_key_arr_len;
keyval_pos_t *tmp_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*total_count);
DoLog("GPU_ID:[%d] #input_records:%d #intermediate_records:%lu #different_intermediate_records:%d totalKeySize:%d totalValSize:%d",
d_g_state->gpu_id, d_g_state->num_input_record, total_count, sorted_key_arr_len,totalKeySize,totalValSize);
int *pos_arr_4_pos_arr = (int*)malloc(sizeof(int)*sorted_key_arr_len);
memset(pos_arr_4_pos_arr,0,sizeof(int)*sorted_key_arr_len);
index = 0;
for (int i=0;i<sorted_key_arr_len;i++){
sorted_keyval_pos_t *p = (sorted_keyval_pos_t *)&(h_sorted_keyval_pos_arr[i]);
for (int j=0;j<p->val_arr_len;j++){
tmp_keyval_pos_arr[index].keyPos = p->keyPos;
tmp_keyval_pos_arr[index].keySize = p->keySize;
tmp_keyval_pos_arr[index].valPos = p->val_pos_arr[j].valPos;
tmp_keyval_pos_arr[index].valSize = p->val_pos_arr[j].valSize;
//printf("tmp_keyval_pos_arr[%d].keyPos:%d\n",index,p->keyPos);
index++;
}//for
pos_arr_4_pos_arr[i] = index;
}
checkCudaErrors(cudaMemcpy(d_g_state->d_keyval_pos_arr,tmp_keyval_pos_arr,sizeof(keyval_pos_t)*total_count,cudaMemcpyHostToDevice));
d_g_state->d_sorted_keyvals_arr_len = sorted_key_arr_len;
checkCudaErrors(cudaMalloc((void**)&d_g_state->d_pos_arr_4_sorted_keyval_pos_arr,sizeof(int)*sorted_key_arr_len));
checkCudaErrors(cudaMemcpy(d_g_state->d_pos_arr_4_sorted_keyval_pos_arr,pos_arr_4_pos_arr,sizeof(int)*sorted_key_arr_len,cudaMemcpyHostToDevice));
/*verify the d_sorted_keyval_arr_len results
for (int i=0;i<d_g_state->d_sorted_keyvals_arr_len;i++){
keyvals_t *p = &(d_g_state->h_sorted_keyvals_arr[i]);
printf("sort CPU 3 key:%s len:%d",p->key,p->val_arr_len);
for (int j=0;j<p->val_arr_len;j++)
printf("\t%d",*(int*)p->vals[j].val);
printf("\n");
}//for */
//start_row_id sorting
//partition
}
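//Illustrative sketch (an assumption, not part of the original file): given the flattened
//layout built above, reducer k can locate its slice of values through the prefix array
//d_pos_arr_4_sorted_keyval_pos_arr. The helper name is hypothetical.
__device__ void gpu_reduce_range_example(gpu_context d_g_state, int k, int *start, int *end)
{
*start = (k == 0) ? 0 : d_g_state.d_pos_arr_4_sorted_keyval_pos_arr[k-1];
*end = d_g_state.d_pos_arr_4_sorted_keyval_pos_arr[k];
//every entry in d_keyval_pos_arr[*start..*end) shares the same keyPos/keySize;
//each entry's valPos/valSize index into d_sorted_vals_shared_buff.
}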
//host function sort_CPU
//Copy intermediate records from device memory to host memory and sort them there.
//The host API cannot copy from addresses that were dynamically allocated on the device runtime heap; only device code can access them.
void sort_CPU(gpu_context* d_g_state){
#ifdef REMOVE
//start_row_id sorting
//partition
#endif
}
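//Illustrative sketch (an assumption, not part of the original Panda code): data that map
//tasks allocate with malloc() on the device runtime heap cannot be read by cudaMemcpy on
//the host, so a device kernel has to stage it into a buffer created with cudaMalloc first.
//The names d_heap_ptrs, d_staging and record_size are hypothetical; copyDataFromDevice2Host1/3
//above play this role for the real intermediate key/value layout.
__global__ void stage_device_heap_records_example(char **d_heap_ptrs, char *d_staging, int num_records, int record_size)
{
int i = blockIdx.x*blockDim.x + threadIdx.x;
if (i >= num_records) return;
memcpy(d_staging + i*record_size, d_heap_ptrs[i], record_size);
}
//once this kernel has completed, cudaMemcpy(h_buf, d_staging, ..., cudaMemcpyDeviceToHost) is legal.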
void PandaShuffleMergeCPU(panda_context *d_g_state_0, cpu_context *d_g_state_1){
DoLog("PandaShuffleMergeCPU CPU_GROUP_ID:[%d]", d_g_state_1->cpu_group_id);
keyvals_t * panda_sorted_intermediate_keyvals_arr = d_g_state_0->sorted_intermediate_keyvals_arr;
keyvals_t * cpu_sorted_intermediate_keyvals_arr = d_g_state_1->sorted_intermediate_keyvals_arr;
void *key_0, *key_1;
int keySize_0, keySize_1;
bool equal;
for (int i=0; i<d_g_state_1->sorted_keyvals_arr_len; i++){
key_1 = cpu_sorted_intermediate_keyvals_arr[i].key;
keySize_1 = cpu_sorted_intermediate_keyvals_arr[i].keySize;
int j;
for (j=0; j<d_g_state_0->sorted_keyvals_arr_len; j++){
key_0 = panda_sorted_intermediate_keyvals_arr[j].key;
keySize_0 = panda_sorted_intermediate_keyvals_arr[j].keySize;
if(cpu_compare(key_0,keySize_0,key_1,keySize_1)!=0)
continue;
//copy values from the cpu_context into the panda context
int val_arr_len_1 = cpu_sorted_intermediate_keyvals_arr[i].val_arr_len;
int index = panda_sorted_intermediate_keyvals_arr[j].val_arr_len;
if (panda_sorted_intermediate_keyvals_arr[j].val_arr_len ==0)
panda_sorted_intermediate_keyvals_arr[j].vals = NULL;
panda_sorted_intermediate_keyvals_arr[j].val_arr_len += val_arr_len_1;
val_t *vals = panda_sorted_intermediate_keyvals_arr[j].vals;
panda_sorted_intermediate_keyvals_arr[j].vals = (val_t*)realloc(vals, sizeof(val_t)*(panda_sorted_intermediate_keyvals_arr[j].val_arr_len));
for (int k=0;k<val_arr_len_1;k++){
char *val_0 = (char *)(cpu_sorted_intermediate_keyvals_arr[i].vals[k].val);
int valSize_0 = cpu_sorted_intermediate_keyvals_arr[i].vals[k].valSize;
panda_sorted_intermediate_keyvals_arr[j].vals[index+k].val = malloc(sizeof(char)*valSize_0);
panda_sorted_intermediate_keyvals_arr[j].vals[index+k].valSize = valSize_0;
memcpy(panda_sorted_intermediate_keyvals_arr[j].vals[index+k].val, val_0, valSize_0);
}//for
break;
}//for
if (j == d_g_state_0->sorted_keyvals_arr_len){
if (d_g_state_0->sorted_keyvals_arr_len == 0) panda_sorted_intermediate_keyvals_arr = NULL;
val_t *vals = cpu_sorted_intermediate_keyvals_arr[i].vals;
int val_arr_len = cpu_sorted_intermediate_keyvals_arr[i].val_arr_len;
d_g_state_0->sorted_keyvals_arr_len++;
panda_sorted_intermediate_keyvals_arr = (keyvals_t *)realloc(panda_sorted_intermediate_keyvals_arr,
sizeof(keyvals_t)*(d_g_state_0->sorted_keyvals_arr_len));
int index = d_g_state_0->sorted_keyvals_arr_len-1;
keyvals_t* kvals_p = (keyvals_t *)&(panda_sorted_intermediate_keyvals_arr[index]);
kvals_p->keySize = keySize_1;
kvals_p->key = malloc(sizeof(char)*keySize_1);
memcpy(kvals_p->key, key_1, keySize_1);
kvals_p->vals = (val_t *)malloc(sizeof(val_t)*val_arr_len);
kvals_p->val_arr_len = val_arr_len;
for (int k=0; k < val_arr_len; k++){
char *val_0 = (char *)(cpu_sorted_intermediate_keyvals_arr[i].vals[k].val);
int valSize_0 = cpu_sorted_intermediate_keyvals_arr[i].vals[k].valSize;
kvals_p->vals[k].valSize = valSize_0;
kvals_p->vals[k].val = (char *)malloc(sizeof(char)*valSize_0);
memcpy(kvals_p->vals[k].val,val_0, valSize_0);
}//for
}//if (j == sorted_key_arr_len){
}//if
//store the merged (possibly realloc'ed) array back into the panda context
d_g_state_0->sorted_intermediate_keyvals_arr = panda_sorted_intermediate_keyvals_arr;
DoLog("CPU_GROUP_ID:[%d] DONE.",d_g_state_1->cpu_group_id);
}
void PandaShuffleMergeGPU(panda_context *d_g_state_1, gpu_context *d_g_state_0){
DoLog("PandaShuffleMergeGPU GPU_ID:[%d]",d_g_state_0->gpu_id);
char *sorted_keys_shared_buff_0 = (char *)d_g_state_0->h_sorted_keys_shared_buff;
char *sorted_vals_shared_buff_0 = (char *)d_g_state_0->h_sorted_vals_shared_buff;
sorted_keyval_pos_t *keyval_pos_arr_0 = d_g_state_0->h_sorted_keyval_pos_arr;
keyvals_t * sorted_intermediate_keyvals_arr = d_g_state_1->sorted_intermediate_keyvals_arr;
void *key_0, *key_1;
int keySize_0, keySize_1;
bool equal;
for (int i=0;i<d_g_state_0->d_sorted_keyvals_arr_len;i++){
//DoLog("keyPos:%d",keyval_pos_arr_0[i].keyPos);
key_0 = sorted_keys_shared_buff_0 + keyval_pos_arr_0[i].keyPos;
keySize_0 = keyval_pos_arr_0[i].keySize;
int j = 0;
for (; j<d_g_state_1->sorted_keyvals_arr_len; j++){
key_1 = sorted_intermediate_keyvals_arr[j].key;
keySize_1 = sorted_intermediate_keyvals_arr[j].keySize;
if(cpu_compare(key_0,keySize_0,key_1,keySize_1)!=0)
continue;
val_t *vals = sorted_intermediate_keyvals_arr[j].vals;
//copy values from gpu to cpu context
int val_arr_len_0 =keyval_pos_arr_0[i].val_arr_len;
val_pos_t * val_pos_arr =keyval_pos_arr_0[i].val_pos_arr;
int index = sorted_intermediate_keyvals_arr[j].val_arr_len;
sorted_intermediate_keyvals_arr[j].val_arr_len += val_arr_len_0;
sorted_intermediate_keyvals_arr[j].vals = (val_t*)realloc(vals, sizeof(val_t)*(sorted_intermediate_keyvals_arr[j].val_arr_len));
for (int k=0;k<val_arr_len_0;k++){
char *val_0 = sorted_vals_shared_buff_0 + val_pos_arr[k].valPos;
int valSize_0 = val_pos_arr[k].valSize;
sorted_intermediate_keyvals_arr[j].vals[index+k].val = malloc(sizeof(char)*valSize_0);
sorted_intermediate_keyvals_arr[j].vals[index+k].valSize = valSize_0;
memcpy(sorted_intermediate_keyvals_arr[j].vals[index+k].val, val_0, valSize_0);
}//for
break;
}//for
if (j == d_g_state_1->sorted_keyvals_arr_len){
if (d_g_state_1->sorted_keyvals_arr_len == 0) sorted_intermediate_keyvals_arr = NULL;
//val_t *vals = sorted_intermediate_keyvals_arr[j].vals;
int val_arr_len =keyval_pos_arr_0[i].val_arr_len;
val_pos_t * val_pos_arr =keyval_pos_arr_0[i].val_pos_arr;
d_g_state_1->sorted_keyvals_arr_len++;
sorted_intermediate_keyvals_arr = (keyvals_t *)realloc(sorted_intermediate_keyvals_arr, sizeof(keyvals_t)*(d_g_state_1->sorted_keyvals_arr_len));
int index = d_g_state_1->sorted_keyvals_arr_len-1;
keyvals_t* kvals_p = (keyvals_t *)&(sorted_intermediate_keyvals_arr[index]);
kvals_p->keySize = keySize_0;
kvals_p->key = malloc(sizeof(char)*keySize_0);
memcpy(kvals_p->key, key_0, keySize_0);
kvals_p->vals = (val_t *)malloc(sizeof(val_t)*val_arr_len);
kvals_p->val_arr_len = val_arr_len;
for (int k=0; k < val_arr_len; k++){
char *val_0 = sorted_vals_shared_buff_0 + val_pos_arr[k].valPos;
int valSize_0 = val_pos_arr[k].valSize;
kvals_p->vals[k].valSize = valSize_0;
kvals_p->vals[k].val = (char *)malloc(sizeof(char)*valSize_0);
memcpy(kvals_p->vals[k].val,val_0, valSize_0);
}//for
}//if (j == sorted_key_arr_len){
}//if
d_g_state_1->sorted_intermediate_keyvals_arr = sorted_intermediate_keyvals_arr;
DoLog("GPU_ID:[%d] DONE",d_g_state_0->gpu_id);
}
void Panda_Shuffle_Merge(gpu_context *d_g_state_0, gpu_context *d_g_state_1){
char *sorted_keys_shared_buff_0 = (char *)d_g_state_0->h_sorted_keys_shared_buff;
char *sorted_vals_shared_buff_0 = (char *)d_g_state_0->h_sorted_vals_shared_buff;
char *sorted_keys_shared_buff_1 = (char *)d_g_state_1->h_sorted_keys_shared_buff;
char *sorted_vals_shared_buff_1 = (char *)d_g_state_1->h_sorted_vals_shared_buff;
sorted_keyval_pos_t *keyval_pos_arr_0 = d_g_state_0->h_sorted_keyval_pos_arr;
sorted_keyval_pos_t *keyval_pos_arr_1 = d_g_state_1->h_sorted_keyval_pos_arr;
int totalValSize_1 = d_g_state_1->totalValSize;
int totalKeySize_1 = d_g_state_1->totalKeySize;
void *key_0,*key_1;
int keySize_0,keySize_1;
bool equal;
//DoLog("len1:%d len2:%d\n",d_g_state_0->d_sorted_keyvals_arr_len, d_g_state_1->d_sorted_keyvals_arr_len);
for (int i=0;i<d_g_state_0->d_sorted_keyvals_arr_len;i++){
key_0 = sorted_keys_shared_buff_0 + keyval_pos_arr_0[i].keyPos;
keySize_0 = keyval_pos_arr_0[i].keySize;
int j;
for (j=0;j<d_g_state_1->d_sorted_keyvals_arr_len;j++){
key_1 = sorted_keys_shared_buff_1 + keyval_pos_arr_1[j].keyPos;
keySize_1 = keyval_pos_arr_1[j].keySize;
if(cpu_compare(key_0,keySize_0,key_1,keySize_1)!=0)
continue;
//copy all vals in d_g_state_0->h_sorted_keyval_pos_arr[i] to d_g_state_1->h_sorted_keyval_pos_arr[j];
int incValSize = 0;
int len0 = keyval_pos_arr_0[i].val_arr_len;
int len1 = keyval_pos_arr_1[j].val_arr_len;
//DoLog("i:%d j:%d compare: key_0:%s key_1:%s true:%s len0:%d len1:%d\n", i, j, key_0,key_1,(equal ? "true":"false"),len0,len1);
keyval_pos_arr_1[j].val_pos_arr = (val_pos_t*)realloc(keyval_pos_arr_1[j].val_pos_arr,sizeof(val_pos_t)*(len0+len1));
keyval_pos_arr_1[j].val_arr_len = len0+len1;
for (int k = len1; k < len1 + len0; k++){
keyval_pos_arr_1[j].val_pos_arr[k].valSize = keyval_pos_arr_0[i].val_pos_arr[k-len1].valSize;
keyval_pos_arr_1[j].val_pos_arr[k].valPos = keyval_pos_arr_0[i].val_pos_arr[k-len1].valPos;
incValSize += keyval_pos_arr_0[i].val_pos_arr[k-len1].valSize;
}//for
sorted_vals_shared_buff_1 = (char*)realloc(sorted_vals_shared_buff_1, totalValSize_1 + incValSize);
for (int k = len1; k < len1 + len0; k++){
void *val_1 = sorted_vals_shared_buff_1 + totalValSize_1;
void *val_0 = sorted_vals_shared_buff_0+keyval_pos_arr_0[i].val_pos_arr[k-len1].valPos;
memcpy(val_1, val_0, keyval_pos_arr_0[i].val_pos_arr[k-len1].valSize);
totalValSize_1 += keyval_pos_arr_0[i].val_pos_arr[k-len1].valSize;
}//for
break;
}//for (int j = 0;
//key_0 does not exist in d_g_state_1->h_sorted_keyval_pos_arr; create a new keyval pair position there
if(j==d_g_state_1->d_sorted_keyvals_arr_len){
sorted_keys_shared_buff_1 = (char*)realloc(sorted_keys_shared_buff_1, (totalKeySize_1 + keySize_0));
//assert(keySize_0 == keyval_pos_arr_0[i].keySize);
void *key_0 = sorted_keys_shared_buff_0 + keyval_pos_arr_0[i].keyPos;
void *key_1 = sorted_keys_shared_buff_1 + totalKeySize_1;
memcpy(key_1, key_0, keySize_0);
totalKeySize_1 += keySize_0;
keyval_pos_arr_1 = (sorted_keyval_pos_t *)realloc(keyval_pos_arr_1, sizeof(sorted_keyval_pos_t)*(d_g_state_1->d_sorted_keyvals_arr_len+1));
sorted_keyval_pos_t *new_p = &(keyval_pos_arr_1[d_g_state_1->d_sorted_keyvals_arr_len]);
d_g_state_1->d_sorted_keyvals_arr_len += 1;
new_p->keySize = keySize_0;
new_p->keyPos = totalKeySize_1 - keySize_0;
int len0 = keyval_pos_arr_0[i].val_arr_len;
new_p->val_arr_len = len0;
new_p->val_pos_arr = (val_pos_t *)malloc(sizeof(val_pos_t)*len0);
int incValSize = 0;
for (int k = 0; k < len0; k++){
new_p->val_pos_arr[k].valSize = keyval_pos_arr_0[i].val_pos_arr[k].valSize;
new_p->val_pos_arr[k].valPos = keyval_pos_arr_0[i].val_pos_arr[k].valPos;
incValSize += keyval_pos_arr_0[i].val_pos_arr[k].valSize;
}//for
sorted_vals_shared_buff_1 = (char*)realloc(sorted_vals_shared_buff_1,(totalValSize_1 + incValSize));
for (int k = 0; k < len0; k++){
void *val_1 = sorted_vals_shared_buff_1 + totalValSize_1;
void *val_0 = sorted_vals_shared_buff_0 + keyval_pos_arr_0[i].val_pos_arr[k].valPos;
memcpy(val_1,val_0,keyval_pos_arr_0[i].val_pos_arr[k].valSize);
totalValSize_1 += keyval_pos_arr_0[i].val_pos_arr[k].valSize;
}//for
}//if(j==arr_len)
}//for (int i = 0;
d_g_state_1->h_sorted_keyval_pos_arr = keyval_pos_arr_1;
int total_count = 0;
for (int i=0; i<d_g_state_1->d_sorted_keyvals_arr_len; i++){
total_count += d_g_state_1->h_sorted_keyval_pos_arr[i].val_arr_len;
}//for
DoLog("total number of intermeidate records on two GPU's:%d",total_count);
keyval_pos_t *tmp_keyval_pos_arr = (keyval_pos_t *)malloc(sizeof(keyval_pos_t)*total_count);
DoLog("total number of different intermediate records on two GPU's:%d",d_g_state_1->d_sorted_keyvals_arr_len);
int *pos_arr_4_pos_arr = (int*)malloc(sizeof(int)*d_g_state_1->d_sorted_keyvals_arr_len);
memset(pos_arr_4_pos_arr,0,sizeof(int)*d_g_state_1->d_sorted_keyvals_arr_len);
int index = 0;
for (int i=0; i<d_g_state_1->d_sorted_keyvals_arr_len; i++){
sorted_keyval_pos_t *p = (sorted_keyval_pos_t *)&(d_g_state_1->h_sorted_keyval_pos_arr[i]);
for (int j=0;j<p->val_arr_len;j++){
tmp_keyval_pos_arr[index].keyPos = p->keyPos;
tmp_keyval_pos_arr[index].keySize = p->keySize;
tmp_keyval_pos_arr[index].valPos = p->val_pos_arr[j].valPos;
tmp_keyval_pos_arr[index].valSize = p->val_pos_arr[j].valSize;
//printf("tmp_keyval_pos_arr[%d].keyPos:%d keySize:%d valPos:%d valSize:%d\n",
//index,p->keyPos,p->keySize,p->val_pos_arr[j].valPos,p->val_pos_arr[j].valSize);
//printf("key:%s val:%d\n",(char*)(sorted_keys_shared_buff_1+p->keyPos), *(int*)(sorted_vals_shared_buff_1+p->val_pos_arr[j].valPos));
index++;
}//for
pos_arr_4_pos_arr[i] = index;
}
//printf("totalKeySize_1:%d totalValSize_1:%d\n",totalKeySize_1,totalValSize_1);
//printf("%s\n",sorted_keys_shared_buff_1);
checkCudaErrors(cudaMalloc((void**)&d_g_state_1->d_keyval_pos_arr,sizeof(keyval_pos_t)*total_count));
checkCudaErrors(cudaMemcpy(d_g_state_1->d_keyval_pos_arr,tmp_keyval_pos_arr,sizeof(keyval_pos_t)*total_count,cudaMemcpyHostToDevice));
//d_g_state_1->d_sorted_keyvals_arr_len = d_g_state_1->d_sorted_keyvals_arr_len;
checkCudaErrors(cudaMalloc((void**)&d_g_state_1->d_pos_arr_4_sorted_keyval_pos_arr,sizeof(int)*d_g_state_1->d_sorted_keyvals_arr_len));
checkCudaErrors(cudaMemcpy(d_g_state_1->d_pos_arr_4_sorted_keyval_pos_arr,pos_arr_4_pos_arr,sizeof(int)*d_g_state_1->d_sorted_keyvals_arr_len,cudaMemcpyHostToDevice));
//TODO release these buffers before allocating new ones
checkCudaErrors(cudaMalloc((void **)&d_g_state_1->d_sorted_keys_shared_buff,totalKeySize_1));
checkCudaErrors(cudaMalloc((void **)&d_g_state_1->d_sorted_vals_shared_buff,totalValSize_1));
checkCudaErrors(cudaMemcpy(d_g_state_1->d_sorted_keys_shared_buff,sorted_keys_shared_buff_1,totalKeySize_1,cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_g_state_1->d_sorted_vals_shared_buff,sorted_vals_shared_buff_1,totalValSize_1,cudaMemcpyHostToDevice));
//d_g_state_1->d_sorted_keys_shared_buff = sorted_keys_shared_buff_1;
//d_g_state_1->d_sorted_vals_shared_buff = sorted_vals_shared_buff_1;
d_g_state_1->totalKeySize = totalKeySize_1;
d_g_state_1->totalValSize = totalValSize_1;
}
#endif
|
728d3b03d178291e913eb769063b102b1ee31c6a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/hip/Loops.cuh>
#include <ATen/native/hip/Math.cuh>
#include <ATen/NumericUtils.h>
#include <c10/hip/HIPMathCompat.h>
#include <ATen/NumericUtils.h>
#include <c10/util/complex.h>
namespace at {
namespace native {
void exp2_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16,
iter.common_dtype(), "exp2_cuda",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::exp2(a);
});
});
}
void i0_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "i0_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_i0(a);
});
});
}
void i0e_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "i0e_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_i0e(a);
});
});
}
void sigmoid_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "sigmoid_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return static_cast<scalar_t>(1) / (static_cast<scalar_t>(1) + ::exp(-a));
});
});
}
void sinc_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16,
iter.common_dtype(), "sinc_cuda",
[&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
if (a == scalar_t(0)) {
return scalar_t(1);
} else {
// NVCC says constexpr var is not accessible from device
scalar_t product = c10::detail::pi<scalar_t>() * a;
return std::sin(product) / product;
}
});
});
}
void logit_kernel_cuda(TensorIteratorBase& iter, const Scalar& eps_scalar) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.common_dtype(),
"logit_cuda",
[&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC eps = eps_scalar.to<T_ACC>();
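// logit(x) = log(x / (1 - x)); a negative eps disables clamping, otherwise x is first
// clamped to [eps, 1 - eps] so the log stays finite at the endpoints 0 and 1.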
if (eps < T_ACC(0)) {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t {
const T_ACC x_acc = static_cast<T_ACC>(x);
return c10::hip::compat::log(x_acc / (T_ACC(1) - x_acc));
});
} else {
const T_ACC lo = eps;
const T_ACC hi = T_ACC(1) - eps;
gpu_kernel(
iter, [lo, hi] GPU_LAMBDA(scalar_t x) -> scalar_t {
const T_ACC x_acc = static_cast<T_ACC>(x);
T_ACC z = x_acc < lo ? lo : (x_acc > hi ? hi : x_acc);
return c10::hip::compat::log(z / (T_ACC(1) - z));
});
}
});
}
void erf_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "erf_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erf(a);
});
});
}
void erfc_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16,
iter.common_dtype(), "erfc_cuda",
[&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erfc(a);
});
});
}
void erfinv_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "erfinv_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erfinv(a);
});
});
}
void kaiser_window_kernel_cuda(TensorIteratorBase& iter, int64_t window_length, double beta_){
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "kaiser_window_cuda", [&](){
using T_ACC = acc_type<scalar_t, true>;
const T_ACC inv_alpha = static_cast<T_ACC>(2.0 / (window_length - 1));
const T_ACC beta = static_cast<T_ACC>(beta_);
const T_ACC inv_i0_beta = 1.0 / calc_i0(beta);
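// Kaiser window: w[n] = I0(beta * sqrt(1 - (2n/(N-1) - 1)^2)) / I0(beta); below,
// x = 2n/(N-1) - 1 and y = max(0, 1 - x^2), with n taken from the input value a.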
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t a) -> scalar_t {
T_ACC x = static_cast<T_ACC>(a) * inv_alpha - 1;
T_ACC y = std::max<T_ACC>(0, 1 - x * x);
return calc_i0(beta * ::sqrt(y)) * inv_i0_beta;
});
});
}
void entr_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
iter.common_dtype(),
"entr_cuda",
[&]() {
gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t x) -> scalar_t {
if (at::_isnan(x)) {
return x;
} else if (x > 0) {
return -x * ::log(x);
} else if (x == 0) {
return 0;
}
return static_cast<scalar_t>(-INFINITY);
});
});
}
REGISTER_DISPATCH(exp2_stub, &exp2_kernel_cuda);
REGISTER_DISPATCH(i0_stub, &i0_kernel_cuda);
REGISTER_DISPATCH(special_i0e_stub, &i0e_kernel_cuda);
REGISTER_DISPATCH(sigmoid_stub, &sigmoid_kernel_cuda);
REGISTER_DISPATCH(sinc_stub, &sinc_kernel_cuda);
REGISTER_DISPATCH(logit_stub, &logit_kernel_cuda);
REGISTER_DISPATCH(erf_stub, &erf_kernel_cuda);
REGISTER_DISPATCH(erfc_stub, &erfc_kernel_cuda);
REGISTER_DISPATCH(erfinv_stub, &erfinv_kernel_cuda);
REGISTER_DISPATCH(kaiser_window_stub, &kaiser_window_kernel_cuda);
REGISTER_DISPATCH(special_entr_stub, &entr_kernel_cuda);
} // namespace native
} // namespace at
|
728d3b03d178291e913eb769063b102b1ee31c6a.cu
|
#include <ATen/native/UnaryOps.h>
#include <limits>
#include <ATen/AccumulateType.h>
#include <ATen/Context.h>
#include <ATen/Dispatch.h>
#include <ATen/native/DispatchStub.h>
#include <ATen/native/Math.h>
#include <ATen/native/TensorFactories.h>
#include <ATen/native/TensorIterator.h>
#include <ATen/native/cuda/Loops.cuh>
#include <ATen/native/cuda/Math.cuh>
#include <ATen/NumericUtils.h>
#include <c10/cuda/CUDAMathCompat.h>
#include <ATen/NumericUtils.h>
#include <c10/util/complex.h>
namespace at {
namespace native {
void exp2_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16,
iter.common_dtype(), "exp2_cuda",
[&]() {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::exp2(a);
});
});
}
void i0_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "i0_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_i0(a);
});
});
}
void i0e_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.common_dtype(), "i0e_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return calc_i0e(a);
});
});
}
void sigmoid_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "sigmoid_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return static_cast<scalar_t>(1) / (static_cast<scalar_t>(1) + std::exp(-a));
});
});
}
void sinc_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_AND_COMPLEX_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16,
iter.common_dtype(), "sinc_cuda",
[&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
if (a == scalar_t(0)) {
return scalar_t(1);
} else {
// NVCC says constexpr var is not accessible from device
scalar_t product = c10::detail::pi<scalar_t>() * a;
return std::sin(product) / product;
}
});
});
}
void logit_kernel_cuda(TensorIteratorBase& iter, const Scalar& eps_scalar) {
AT_DISPATCH_FLOATING_TYPES_AND2(
at::ScalarType::Half,
at::ScalarType::BFloat16,
iter.common_dtype(),
"logit_cuda",
[&]() {
using T_ACC = acc_type<scalar_t, true>;
const T_ACC eps = eps_scalar.to<T_ACC>();
if (eps < T_ACC(0)) {
gpu_kernel(iter, [] GPU_LAMBDA(scalar_t x) -> scalar_t {
const T_ACC x_acc = static_cast<T_ACC>(x);
return c10::cuda::compat::log(x_acc / (T_ACC(1) - x_acc));
});
} else {
const T_ACC lo = eps;
const T_ACC hi = T_ACC(1) - eps;
gpu_kernel(
iter, [lo, hi] GPU_LAMBDA(scalar_t x) -> scalar_t {
const T_ACC x_acc = static_cast<T_ACC>(x);
T_ACC z = x_acc < lo ? lo : (x_acc > hi ? hi : x_acc);
return c10::cuda::compat::log(z / (T_ACC(1) - z));
});
}
});
}
void erf_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, iter.common_dtype(), "erf_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erf(a);
});
});
}
void erfc_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half, ScalarType::BFloat16,
iter.common_dtype(), "erfc_cuda",
[&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erfc(a);
});
});
}
void erfinv_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND_HALF(iter.common_dtype(), "erfinv_cuda", [&]() {
gpu_kernel(iter, []GPU_LAMBDA(scalar_t a) -> scalar_t {
return ::erfinv(a);
});
});
}
void kaiser_window_kernel_cuda(TensorIteratorBase& iter, int64_t window_length, double beta_){
AT_DISPATCH_FLOATING_TYPES_AND2(ScalarType::Half, ScalarType::BFloat16, iter.dtype(), "kaiser_window_cuda", [&](){
using T_ACC = acc_type<scalar_t, true>;
const T_ACC inv_alpha = static_cast<T_ACC>(2.0 / (window_length - 1));
const T_ACC beta = static_cast<T_ACC>(beta_);
const T_ACC inv_i0_beta = 1.0 / calc_i0(beta);
gpu_kernel(iter, [=]GPU_LAMBDA(scalar_t a) -> scalar_t {
T_ACC x = static_cast<T_ACC>(a) * inv_alpha - 1;
T_ACC y = std::max<T_ACC>(0, 1 - x * x);
return calc_i0(beta * ::sqrt(y)) * inv_i0_beta;
});
});
}
void entr_kernel_cuda(TensorIteratorBase& iter) {
AT_DISPATCH_FLOATING_TYPES_AND2(
ScalarType::Half,
ScalarType::BFloat16,
iter.common_dtype(),
"entr_cuda",
[&]() {
gpu_kernel(iter, [=] GPU_LAMBDA(scalar_t x) -> scalar_t {
if (at::_isnan(x)) {
return x;
} else if (x > 0) {
return -x * std::log(x);
} else if (x == 0) {
return 0;
}
return static_cast<scalar_t>(-INFINITY);
});
});
}
REGISTER_DISPATCH(exp2_stub, &exp2_kernel_cuda);
REGISTER_DISPATCH(i0_stub, &i0_kernel_cuda);
REGISTER_DISPATCH(special_i0e_stub, &i0e_kernel_cuda);
REGISTER_DISPATCH(sigmoid_stub, &sigmoid_kernel_cuda);
REGISTER_DISPATCH(sinc_stub, &sinc_kernel_cuda);
REGISTER_DISPATCH(logit_stub, &logit_kernel_cuda);
REGISTER_DISPATCH(erf_stub, &erf_kernel_cuda);
REGISTER_DISPATCH(erfc_stub, &erfc_kernel_cuda);
REGISTER_DISPATCH(erfinv_stub, &erfinv_kernel_cuda);
REGISTER_DISPATCH(kaiser_window_stub, &kaiser_window_kernel_cuda);
REGISTER_DISPATCH(special_entr_stub, &entr_kernel_cuda);
} // namespace native
} // namespace at
|
9e8e9aee88324a33b51643c5b4336e62d86bc04b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "addMoreThreads.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
float *x = NULL;
hipMalloc(&x, XSIZE*YSIZE*sizeof(float));
float *y = NULL;
hipMalloc(&y, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((addMoreThreads), dim3(gridBlock), dim3(threadBlock), 0, 0, n, x, y);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((addMoreThreads), dim3(gridBlock), dim3(threadBlock), 0, 0, n, x, y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((addMoreThreads), dim3(gridBlock), dim3(threadBlock), 0, 0, n, x, y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
9e8e9aee88324a33b51643c5b4336e62d86bc04b.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "addMoreThreads.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int n = XSIZE*YSIZE;
float *x = NULL;
cudaMalloc(&x, XSIZE*YSIZE*sizeof(float));
float *y = NULL;
cudaMalloc(&y, XSIZE*YSIZE*sizeof(float));
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
addMoreThreads<<<gridBlock,threadBlock>>>(n,x,y);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
addMoreThreads<<<gridBlock,threadBlock>>>(n,x,y);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
addMoreThreads<<<gridBlock,threadBlock>>>(n,x,y);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
67a8f8a9d4d20aea62a1a634c1718cc127aa8391.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "organized_point_cloud.h"
#include <iostream>
#include <stdio.h>
#include "helper_math.h"
namespace dart {
// -=-=-=-=-=-=-=-=-=- kernels -=-=-=-=-=-=-=-=-=-
template <typename DepthType>
__global__ void gpu_depthToVertices(const DepthType * depthIn,
float4 * vertOut,
const int width,
const int height,
const float2 pp,
const float2 fl,
const float2 range) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int index = u + v*width;
if (u >= width || v >= height)
return;
float depth = depthIn[index];
if (depth >= range.x && depth <= range.y) {
vertOut[index] = make_float4( (u - pp.x)*(depth/fl.x),
(v - pp.y)*(depth/fl.y),
depth,
1.0f);
}
else {
vertOut[index].w = 0;
}
}
template <typename DepthType>
__global__ void gpu_depthToVertices(const DepthType * depthIn,
float4 * vertOut,
const int width,
const int height,
const float2 pp,
const float2 fl,
const float2 range,
const float scale) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int index = u + v*width;
if (u >= width || v >= height)
return;
float depth = scale*depthIn[index];
if (depth >= range.x && depth <= range.y) {
vertOut[index] = make_float4( (u - pp.x)*(depth/fl.x),
(v - pp.y)*(depth/fl.y),
depth,
1.0f);
}
else {
vertOut[index].w = 0;
}
}
template <typename DepthType>
__global__ void gpu_depthToVertices(const DepthType * depthIn,
float4 * vertOut,
const int width,
const int height,
const float4 * Kinv,
const float2 range) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int index = u + v*width;
if (u >= width || v >= height)
return;
float depth = depthIn[index];
if (depth >= range.x && depth <= range.y) {
const float4 p = make_float4( u, v, depth, 1);
float4 vert = make_float4( dot(Kinv[0],p),
dot(Kinv[1],p),
dot(Kinv[2],p),
dot(Kinv[3],p));
vert /= vert.w;
vert.w = 1;
vert.z = -vert.z;
vertOut[index] = vert;
}
else {
vertOut[index].w = 0;
}
}
template <typename DepthType>
__global__ void gpu_depthToVertices(const DepthType * depthIn,
float4 * vertOut,
const int width,
const int height,
const float4 * Kinv,
const float2 range,
const float scale) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int index = u + v*width;
if (u >= width || v >= height)
return;
float depth = scale*depthIn[index];
if (depth >= range.x && depth <= range.y) {
const float4 p = make_float4( u, v, depth, 1);
float4 vert = make_float4( dot(Kinv[0],p),
dot(Kinv[1],p),
dot(Kinv[2],p),
dot(Kinv[3],p));
vert /= vert.w;
vert.w = 1;
vert.z = -vert.z;
vertOut[index] = vert;
}
else {
vertOut[index].w = 0;
}
}
template <typename DepthType, int iters>
__global__ void gpu_depthToVertices(const DepthType * depthIn,
float4 * vertOut,
const int width,
const int height,
const float * cameraParams,
const float2 range) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int index = u + v*width;
if (u >= width || v >= height)
return;
float depth = depthIn[index];
if (depth >= range.x && depth <= range.y) {
// http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html
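// The fixed-count loop below inverts that distortion model: starting from the distorted
// normalized coordinates (xpp, ypp), it repeatedly evaluates
// xp = (xpp - dxp) / (1 + k1*r^2 + k2*r^4 + k3*r^6) (and likewise for yp) so that
// (xp, yp) approach the undistorted coordinates used to back-project the depth sample.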
const float &fx = cameraParams[0];
const float &fy = cameraParams[1];
const float &cx = cameraParams[2];
const float &cy = cameraParams[3];
const float &k1 = cameraParams[4];
const float &k2 = cameraParams[5];
const float &p1 = cameraParams[6];
const float &p2 = cameraParams[7];
const float &k3 = cameraParams[8];
float xp, yp, xpp, ypp;
xpp = xp = (u - cx) / fx;
ypp = yp = (v - cy) / fy;
#pragma unroll
for (int i=0; i<iters; ++i) {
float r2 = xp*xp + yp*yp;
float r4 = r2*r2;
float r6 = r4*r2;
float denom = 1 + k1*r2 + k2*r4 + k3*r6;
float dxp = 2*p1*xp*yp + p2*(r2 + 2*xp*xp);
float dyp = p1*(r2 + 2*yp*yp) + 2*p2*xp*yp;
xp = (xpp - dxp)/denom;
yp = (ypp - dyp)/denom;
}
vertOut[index] = make_float4(xp*depth,yp*depth,depth,1.0f);
}
else {
vertOut[index].w = 0;
}
}
template <typename DepthType, int iters>
__global__ void gpu_depthToVertices(const DepthType * depthIn,
float4 * vertOut,
const int width,
const int height,
const float * cameraParams,
const float2 range,
const float scale) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int index = u + v*width;
if (u >= width || v >= height)
return;
float depth = scale*depthIn[index];
if (depth >= range.x && depth <= range.y) {
// http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html
const float& fx = cameraParams[0];
const float& fy = cameraParams[1];
const float& cx = cameraParams[2];
const float& cy = cameraParams[3];
const float& k1 = cameraParams[4];
const float& k2 = cameraParams[5];
const float& p1 = cameraParams[6];
const float& p2 = cameraParams[7];
const float& k3 = cameraParams[8];
float xp, yp, xpp, ypp;
xpp = xp = (u - cx) / fx;
ypp = yp = (v - cy) / fy;
#pragma unroll
for (int i=0; i<iters; ++i) {
float r2 = xp*xp + yp*yp;
float r4 = r2*r2;
float r6 = r4*r2;
float denom = 1 + k1*r2 + k2*r4 + k3*r6;
float dxp = 2*p1*xp*yp + p2*(r2 + 2*xp*xp);
float dyp = p1*(r2 + 2*yp*yp) + 2*p2*xp*yp;
xp = (xpp - dxp)/denom;
yp = (ypp - dyp)/denom;
}
vertOut[index] = make_float4(xp*depth,yp*depth,depth,1.0f);
}
else {
vertOut[index].w = 0;
}
}
__global__ void gpu_verticesToNormals(const float4 * vertIn,
float4 * normOut,
const int width,
const int height) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
const int index = x + y*width;
const float4 & v = vertIn[index];
// // don't process invalid vertices
if ( v.w == 0) {
normOut[index] = make_float4(0);
return;
}
const float4 & vLeft = vertIn[ x == 0 ? index : index-1];
const float4 & vRight = vertIn[ x == width-1 ? index : index+1];
const float4 & vUp = vertIn[ y == 0 ? index : index-width];
const float4 & vDown = vertIn[ y == height-1 ? index : index+width];
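// Central differences across the image grid: vX and vY approximate the surface tangents
// (falling back to the center vertex where a neighbor is invalid), and their normalized
// cross product is the normal stored for this pixel.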
const float3 vX = make_float3( (vRight.w == 0 ? v : vRight) - (vLeft.w == 0 ? v : vLeft) );
const float3 vY = make_float3( (vDown.w == 0 ? v : vDown) - (vUp.w == 0 ? v : vUp) );
const float3 n = cross(vY,vX);
const float len2 = dot(n,n);
if (len2 > 0) {
const float invLen = 1.0f / (float)sqrtf(len2);
normOut[index] = make_float4(n.x*invLen,n.y*invLen,n.z*invLen,1);
}
else {
normOut[index] = make_float4(0);
}
}
__global__ void gpu_eliminatePlane(float4 * verts, const float4 * norms, const int width, const int height, const float3 planeNormal, const float planeD, const float epsDist, const float epsNorm) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
const int index = x + y*width;
// check vertex validity
float4& v = verts[index];
if ( v.w == 0) {
return;
}
// check normal threshold
const float4& n = norms[index];
if (dot(make_float3(n),planeNormal) < epsNorm) {
return;
}
// check distance threshold
if (abs(dot(make_float3(v),planeNormal) - planeD) < epsDist ) {
v.w = -1;
}
}
__global__ void gpu_cropBox(float4 * verts, const int width, const int height, const float3 boxMin, const float3 boxMax) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
const int index = x + y*width;
// check vertex validity
float4& v = verts[index];
if ( v.w == 0) {
return;
}
if (v.x < boxMin.x || v.x > boxMax.x ||
v.y < boxMin.y || v.y > boxMax.y ||
v.z < boxMin.z || v.z > boxMax.z) {
v.w = -1;
}
}
__global__ void gpu_maskPointCloud(float4* verts, const int width, const int height, const int* mask) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
const int index = x + y*width;
int m = mask[index];
if (m == 0) {
verts[index].w = -1;
}
}
// -=-=-=-=-=-=-=-=-=- interface -=-=-=-=-=-=-=-=-=-
template <typename DepthType>
void depthToVertices(const DepthType * depthIn, float4 * vertOut, const int width, const int height, const float2 pp, const float2 fl, const float2 range) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
hipLaunchKernelGGL(( gpu_depthToVertices), dim3(grid),dim3(block), 0, 0, depthIn, vertOut, width, height, pp, fl, range);
}
template <typename DepthType>
void depthToVertices(const DepthType * depthIn, float4 * vertOut, const int width, const int height, const float2 pp, const float2 fl, const float2 range, const float scale) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
hipLaunchKernelGGL(( gpu_depthToVertices), dim3(grid),dim3(block), 0, 0, depthIn, vertOut, width, height, pp, fl, range, scale);
}
template <typename DepthType>
void depthToVertices(const DepthType * depthIn, float4 * vertOut, const int width, const int height, const float * calibrationParams, const float2 range) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
hipLaunchKernelGGL(( gpu_depthToVertices<DepthType,5>), dim3(grid),dim3(block), 0, 0, depthIn, vertOut, width, height, calibrationParams, range);
}
template <typename DepthType>
void depthToVertices(const DepthType * depthIn, float4 * vertOut, const int width, const int height, const float * calibrationParams, const float2 range, const float scale) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
hipLaunchKernelGGL(( gpu_depthToVertices<DepthType,5>), dim3(grid),dim3(block), 0, 0, depthIn, vertOut, width, height, calibrationParams, range, scale);
}
void verticesToNormals(const float4 * vertIn, float4 * normOut, const int width, const int height) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
hipLaunchKernelGGL(( gpu_verticesToNormals), dim3(grid),dim3(block), 0, 0, vertIn,normOut,width,height);
}
void eliminatePlane(float4 * verts, const float4 * norms, const int width, const int height, const float3 planeNormal, const float planeD, const float epsDist, const float epsNorm) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
hipLaunchKernelGGL(( gpu_eliminatePlane), dim3(grid),dim3(block), 0, 0, verts,norms,width,height,planeNormal,planeD,epsDist,epsNorm);
}
void cropBox(float4 * verts, const int width, const int height, const float3 & boxMin, const float3 & boxMax) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
hipLaunchKernelGGL(( gpu_cropBox), dim3(grid),dim3(block), 0, 0, verts,width,height,boxMin,boxMax);
}
void maskPointCloud(float4 * verts, const int width, const int height, const int * mask) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
hipLaunchKernelGGL(( gpu_maskPointCloud), dim3(grid),dim3(block), 0, 0, verts,width,height,mask);
}
#define COMPILE_DEPTH_TYPE(type) \
template void depthToVertices<type>(const type * depthIn, float4 * vertOut, const int width, const int height, const float2 pp, const float2 fl, const float2 range); \
template void depthToVertices<type>(const type * depthIn, float4 * vertOut, const int width, const int height, const float2 pp, const float2 fl, const float2 range, const float scale); \
template void depthToVertices<type>(const type * depthIn, float4 * vertOut, const int width, const int height, const float * calibrationparams, const float2 range); \
template void depthToVertices<type>(const type * depthIn, float4 * vertOut, const int width, const int height, const float * calibrationparams, const float2 range, const float scale);
COMPILE_DEPTH_TYPE(float)
COMPILE_DEPTH_TYPE(ushort)
}
|
67a8f8a9d4d20aea62a1a634c1718cc127aa8391.cu
|
#include "organized_point_cloud.h"
#include <iostream>
#include <stdio.h>
#include "helper_math.h"
namespace dart {
// -=-=-=-=-=-=-=-=-=- kernels -=-=-=-=-=-=-=-=-=-
template <typename DepthType>
__global__ void gpu_depthToVertices(const DepthType * depthIn,
float4 * vertOut,
const int width,
const int height,
const float2 pp,
const float2 fl,
const float2 range) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int index = u + v*width;
if (u >= width || v >= height)
return;
float depth = depthIn[index];
if (depth >= range.x && depth <= range.y) {
vertOut[index] = make_float4( (u - pp.x)*(depth/fl.x),
(v - pp.y)*(depth/fl.y),
depth,
1.0f);
}
else {
vertOut[index].w = 0;
}
}
template <typename DepthType>
__global__ void gpu_depthToVertices(const DepthType * depthIn,
float4 * vertOut,
const int width,
const int height,
const float2 pp,
const float2 fl,
const float2 range,
const float scale) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int index = u + v*width;
if (u >= width || v >= height)
return;
float depth = scale*depthIn[index];
if (depth >= range.x && depth <= range.y) {
vertOut[index] = make_float4( (u - pp.x)*(depth/fl.x),
(v - pp.y)*(depth/fl.y),
depth,
1.0f);
}
else {
vertOut[index].w = 0;
}
}
template <typename DepthType>
__global__ void gpu_depthToVertices(const DepthType * depthIn,
float4 * vertOut,
const int width,
const int height,
const float4 * Kinv,
const float2 range) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int index = u + v*width;
if (u >= width || v >= height)
return;
float depth = depthIn[index];
if (depth >= range.x && depth <= range.y) {
const float4 p = make_float4( u, v, depth, 1);
float4 vert = make_float4( dot(Kinv[0],p),
dot(Kinv[1],p),
dot(Kinv[2],p),
dot(Kinv[3],p));
vert /= vert.w;
vert.w = 1;
vert.z = -vert.z;
vertOut[index] = vert;
}
else {
vertOut[index].w = 0;
}
}
template <typename DepthType>
__global__ void gpu_depthToVertices(const DepthType * depthIn,
float4 * vertOut,
const int width,
const int height,
const float4 * Kinv,
const float2 range,
const float scale) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int index = u + v*width;
if (u >= width || v >= height)
return;
float depth = scale*depthIn[index];
if (depth >= range.x && depth <= range.y) {
const float4 p = make_float4( u, v, depth, 1);
float4 vert = make_float4( dot(Kinv[0],p),
dot(Kinv[1],p),
dot(Kinv[2],p),
dot(Kinv[3],p));
vert /= vert.w;
vert.w = 1;
vert.z = -vert.z;
vertOut[index] = vert;
}
else {
vertOut[index].w = 0;
}
}
template <typename DepthType, int iters>
__global__ void gpu_depthToVertices(const DepthType * depthIn,
float4 * vertOut,
const int width,
const int height,
const float * cameraParams,
const float2 range) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int index = u + v*width;
if (u >= width || v >= height)
return;
float depth = depthIn[index];
if (depth >= range.x && depth <= range.y) {
// http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html
const float &fx = cameraParams[0];
const float &fy = cameraParams[1];
const float &cx = cameraParams[2];
const float &cy = cameraParams[3];
const float &k1 = cameraParams[4];
const float &k2 = cameraParams[5];
const float &p1 = cameraParams[6];
const float &p2 = cameraParams[7];
const float &k3 = cameraParams[8];
float xp, yp, xpp, ypp;
xpp = xp = (u - cx) / fx;
ypp = yp = (v - cy) / fy;
#pragma unroll
for (int i=0; i<iters; ++i) {
float r2 = xp*xp + yp*yp;
float r4 = r2*r2;
float r6 = r4*r2;
float denom = 1 + k1*r2 + k2*r4 + k3*r6;
float dxp = 2*p1*xp*yp + p2*(r2 + 2*xp*xp);
float dyp = p1*(r2 + 2*yp*yp) + 2*p2*xp*yp;
xp = (xpp - dxp)/denom;
yp = (ypp - dyp)/denom;
}
vertOut[index] = make_float4(xp*depth,yp*depth,depth,1.0f);
}
else {
vertOut[index].w = 0;
}
}
template <typename DepthType, int iters>
__global__ void gpu_depthToVertices(const DepthType * depthIn,
float4 * vertOut,
const int width,
const int height,
const float * cameraParams,
const float2 range,
const float scale) {
const int u = blockIdx.x*blockDim.x + threadIdx.x;
const int v = blockIdx.y*blockDim.y + threadIdx.y;
const int index = u + v*width;
if (u >= width || v >= height)
return;
float depth = scale*depthIn[index];
if (depth >= range.x && depth <= range.y) {
// http://docs.opencv.org/modules/calib3d/doc/camera_calibration_and_3d_reconstruction.html
const float& fx = cameraParams[0];
const float& fy = cameraParams[1];
const float& cx = cameraParams[2];
const float& cy = cameraParams[3];
const float& k1 = cameraParams[4];
const float& k2 = cameraParams[5];
const float& p1 = cameraParams[6];
const float& p2 = cameraParams[7];
const float& k3 = cameraParams[8];
float xp, yp, xpp, ypp;
xpp = xp = (u - cx) / fx;
ypp = yp = (v - cy) / fy;
#pragma unroll
for (int i=0; i<iters; ++i) {
float r2 = xp*xp + yp*yp;
float r4 = r2*r2;
float r6 = r4*r2;
float denom = 1 + k1*r2 + k2*r4 + k3*r6;
float dxp = 2*p1*xp*yp + p2*(r2 + 2*xp*xp);
float dyp = p1*(r2 + 2*yp*yp) + 2*p2*xp*yp;
xp = (xpp - dxp)/denom;
yp = (ypp - dyp)/denom;
}
vertOut[index] = make_float4(xp*depth,yp*depth,depth,1.0f);
}
else {
vertOut[index].w = 0;
}
}
__global__ void gpu_verticesToNormals(const float4 * vertIn,
float4 * normOut,
const int width,
const int height) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
const int index = x + y*width;
const float4 & v = vertIn[index];
// // don't process invalid vertices
if ( v.w == 0) {
normOut[index] = make_float4(0);
return;
}
const float4 & vLeft = vertIn[ x == 0 ? index : index-1];
const float4 & vRight = vertIn[ x == width-1 ? index : index+1];
const float4 & vUp = vertIn[ y == 0 ? index : index-width];
const float4 & vDown = vertIn[ y == height-1 ? index : index+width];
const float3 vX = make_float3( (vRight.w == 0 ? v : vRight) - (vLeft.w == 0 ? v : vLeft) );
const float3 vY = make_float3( (vDown.w == 0 ? v : vDown) - (vUp.w == 0 ? v : vUp) );
const float3 n = cross(vY,vX);
const float len2 = dot(n,n);
if (len2 > 0) {
const float invLen = 1.0f / (float)sqrtf(len2);
normOut[index] = make_float4(n.x*invLen,n.y*invLen,n.z*invLen,1);
}
else {
normOut[index] = make_float4(0);
}
}
__global__ void gpu_eliminatePlane(float4 * verts, const float4 * norms, const int width, const int height, const float3 planeNormal, const float planeD, const float epsDist, const float epsNorm) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
const int index = x + y*width;
// check vertex validity
float4& v = verts[index];
if ( v.w == 0) {
return;
}
// check normal threshold
const float4& n = norms[index];
if (dot(make_float3(n),planeNormal) < epsNorm) {
return;
}
// check distance threshold
if (abs(dot(make_float3(v),planeNormal) - planeD) < epsDist ) {
v.w = -1;
}
}
__global__ void gpu_cropBox(float4 * verts, const int width, const int height, const float3 boxMin, const float3 boxMax) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
const int index = x + y*width;
// check vertex validity
float4& v = verts[index];
if ( v.w == 0) {
return;
}
if (v.x < boxMin.x || v.x > boxMax.x ||
v.y < boxMin.y || v.y > boxMax.y ||
v.z < boxMin.z || v.z > boxMax.z) {
v.w = -1;
}
}
__global__ void gpu_maskPointCloud(float4* verts, const int width, const int height, const int* mask) {
const int x = blockIdx.x*blockDim.x + threadIdx.x;
const int y = blockIdx.y*blockDim.y + threadIdx.y;
if (x >= width || y >= height)
return;
const int index = x + y*width;
int m = mask[index];
if (m == 0) {
verts[index].w = -1;
}
}
// -=-=-=-=-=-=-=-=-=- interface -=-=-=-=-=-=-=-=-=-
template <typename DepthType>
void depthToVertices(const DepthType * depthIn, float4 * vertOut, const int width, const int height, const float2 pp, const float2 fl, const float2 range) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
gpu_depthToVertices<<<grid,block>>>(depthIn, vertOut, width, height, pp, fl, range);
}
template <typename DepthType>
void depthToVertices(const DepthType * depthIn, float4 * vertOut, const int width, const int height, const float2 pp, const float2 fl, const float2 range, const float scale) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
gpu_depthToVertices<<<grid,block>>>(depthIn, vertOut, width, height, pp, fl, range, scale);
}
template <typename DepthType>
void depthToVertices(const DepthType * depthIn, float4 * vertOut, const int width, const int height, const float * calibrationParams, const float2 range) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
gpu_depthToVertices<DepthType,5><<<grid,block>>>(depthIn, vertOut, width, height, calibrationParams, range);
}
template <typename DepthType>
void depthToVertices(const DepthType * depthIn, float4 * vertOut, const int width, const int height, const float * calibrationParams, const float2 range, const float scale) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
gpu_depthToVertices<DepthType,5><<<grid,block>>>(depthIn, vertOut, width, height, calibrationParams, range, scale);
}
void verticesToNormals(const float4 * vertIn, float4 * normOut, const int width, const int height) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
gpu_verticesToNormals<<<grid,block>>>(vertIn,normOut,width,height);
}
void eliminatePlane(float4 * verts, const float4 * norms, const int width, const int height, const float3 planeNormal, const float planeD, const float epsDist, const float epsNorm) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
gpu_eliminatePlane<<<grid,block>>>(verts,norms,width,height,planeNormal,planeD,epsDist,epsNorm);
}
void cropBox(float4 * verts, const int width, const int height, const float3 & boxMin, const float3 & boxMax) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
gpu_cropBox<<<grid,block>>>(verts,width,height,boxMin,boxMax);
}
void maskPointCloud(float4 * verts, const int width, const int height, const int * mask) {
dim3 block(16,8,1);
dim3 grid( ceil( width / (float)block.x), ceil( height / (float)block.y ));
gpu_maskPointCloud<<<grid,block>>>(verts,width,height,mask);
}
#define COMPILE_DEPTH_TYPE(type) \
template void depthToVertices<type>(const type * depthIn, float4 * vertOut, const int width, const int height, const float2 pp, const float2 fl, const float2 range); \
template void depthToVertices<type>(const type * depthIn, float4 * vertOut, const int width, const int height, const float2 pp, const float2 fl, const float2 range, const float scale); \
template void depthToVertices<type>(const type * depthIn, float4 * vertOut, const int width, const int height, const float * calibrationparams, const float2 range); \
template void depthToVertices<type>(const type * depthIn, float4 * vertOut, const int width, const int height, const float * calibrationparams, const float2 range, const float scale);
COMPILE_DEPTH_TYPE(float)
COMPILE_DEPTH_TYPE(ushort)
}
|
99fe488d92e89577712d91a655469b5603f496dd.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <openssl/md5.h>
#include "d_cracker.h"
#include "CHECK.h"
#include "config.h"
#include "wrappers.h"
//prototype for the kernel
__global__ void d_crack_kernel(unsigned char * hash, int hashLen,
int length, unsigned char * d_result,
int d_result_size);
__device__ int d_powerOf(int val, int size);
//constant array containing all the possible characters in the password
__constant__ char VALID_CHARS[NUMCHARS];
/*malloccmp
* a compare like function that compares two strings of length. It simply
* compares the elements at each location.
*
* @params:
* str1 - an unsigned char pointer to the first character in string 1
* str2 - an unsigned char pointer to the first character in string 2
* length - the length of str1 and str2, the number of items compared.
*/
int malloccmp(unsigned char * str1, unsigned char * str2, int length) {
for (int i = 0; i < length; i++) {
if (str1[i] != str2[i]) {
return 0;
}
}
return 1;
}
/*printHash
* prints len items starting from hash as hexadecimal. Used to print hashes as
* hex.
*
* @params:
* hash - a pointer to the start of the hash to print
* len - the number of items to print from hash.
*/
void printHash(unsigned char * hash, int len) {
for (int k = 0; k < len; k++) {
printf("%x", hash[k]);
if (k == len - 1) {
printf("\n");
}
}
}
/*printPassword
* prints len characters of a string starting at pass. Used to print the result
* password.
*
* @params:
* pass - an unsigned char pointer to the first element in the string to print
* len - the number of items to print.
*/
void printPassword(unsigned char * pass, int len) {
for (int k = 0; k < len; k++) {
printf("%s", (unsigned char *) &pass[k]);
if (k == len - 1) {
printf("\n");
}
}
}
/*d_crack
*
* Sets up and calls the kernel to brute-force a password hash.
*
* @params
* hash - the password hash to brute-force
* hashLen - the length of the hash
* outpass - the result password to return
*/
float d_crack(unsigned char * hash, int hashLen, unsigned char * outpass) {
hipEvent_t start_cpu, stop_cpu;
float cpuMsecTime = -1;
//Use cuda functions to do the timing
//create event objects
CHECK(hipEventCreate(&start_cpu));
CHECK(hipEventCreate(&stop_cpu));
//record the starting time
CHECK(hipEventRecord(start_cpu));
int passLength = 3;
int size = hashLen * sizeof(char);
int outsize = MAX_PASSWORD_LENGTH * sizeof(char);
int passoutsize = pow(NUMCHARS, passLength) * (passLength + 1);
unsigned char * d_hash;
CHECK(hipMalloc((void**)&d_hash, size));
unsigned char * d_passwords;
CHECK(hipMalloc((void**)&d_passwords, passoutsize));
unsigned char * d_result;
CHECK(hipMalloc((void**)&d_result, outsize));
//build the const array of all lowercase characters
char VALID_CHARS_CPU[NUMCHARS];
for (int i = 0; i < NUMCHARS; i++) {
VALID_CHARS_CPU[i] = (char)(i + 97);
}
CHECK(hipMemcpyToSymbol(VALID_CHARS, VALID_CHARS_CPU, NUMCHARS * sizeof(char)));
CHECK(hipMemcpy(d_hash, hash, size, hipMemcpyHostToDevice));
dim3 block(NUMCHARS, 1, 1);
dim3 grid(ceil(pow(NUMCHARS, passLength)/(float)(NUMCHARS)), 1);
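// Launch layout: NUMCHARS threads per block and NUMCHARS^passLength / NUMCHARS blocks,
// i.e. one thread per generated candidate password of length passLength.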
hipLaunchKernelGGL(( d_crack_kernel), dim3(grid), dim3(block), 0, 0, d_hash, hashLen, passLength, d_passwords, passoutsize);
CHECK(hipDeviceSynchronize());
unsigned char * passwords = (unsigned char *) Malloc(passoutsize);
CHECK(hipMemcpy(passwords, d_passwords, passoutsize, hipMemcpyDeviceToHost));
unsigned char * hashes = (unsigned char *) Malloc(pow(NUMCHARS, passLength) * hashLen);
int j = 0;
for (int i = 0; i < passoutsize; i+=(passLength + 1)) { //+ 1 accounts for the null terminator
//if (i < 17000)
// printf("i: %d, s: %s\n", i, (unsigned char *) &passwords[i]); // print out generated passwords for debugging
// printf("%lu", (unsigned long) passLength);
MD5_CTX md5;
MD5_Init(&md5);
MD5_Update(&md5, &(passwords[i]), (unsigned long) passLength);
MD5_Final(&hashes[j], &md5);
if (malloccmp(&passwords[i], (unsigned char *) "pas", passLength)) {
printHash(&hashes[j], hashLen);
}
j += hashLen;
}
//NOTE: Hashes are forming correctly and we are getting the correct hash for input = 2
unsigned char * ourHash = (unsigned char *) Malloc(hashLen);
int numHashes = pow(NUMCHARS, passLength) * hashLen;
int z = 0;
for (int i = 0; i < numHashes; i+=hashLen) {
// printHash(hash, hashLen);
for (int j = 0; j < hashLen; j++) {
ourHash[j] = hashes[i + j];
}
// printHash(&hashes[i], hashLen);
// printf("%d\n", malloccmp(ourHash, hash, hashLen));
if (malloccmp(ourHash, hash, hashLen)) {
//TODO: Break here, we found the password
printf("Password: ");
printPassword(&passwords[z], 1);
}
for (int k = 0; k < hashLen; k++) {
ourHash[k] = '\0';
}
z +=(passLength + 1);
}
// for (int i = 0; i < pow(NUMCHARS, passLength) * hashLen; i++) {
// ourHash[i % hashLen] = hashes[i];
// if (((i % hashLen) == 0) && malloccmp(ourHash, hashes, hashLen)) {
// printf("Hello");
// for (int j = 0; j < hashLen; j++) {
// printf("%x", ourHash[j]);
// if (j == hashLen - 1) {
// printf("\n");
// }
// }
// for (int k = 0; k < hashLen; k++) {
// ourHash[k] = '\0';
// }
// }
// }
CHECK(hipMemcpy(outpass, d_result, outsize, hipMemcpyDeviceToHost));
CHECK(hipFree(d_hash));
CHECK(hipFree(d_result));
free(ourHash);
free(passwords);
free(hashes);
//record the ending time and wait for event to complete
CHECK(hipEventRecord(stop_cpu));
CHECK(hipEventSynchronize(stop_cpu));
//calculate the elapsed time between the two events
CHECK(hipEventElapsedTime(&cpuMsecTime, start_cpu, stop_cpu));
return cpuMsecTime;
}
/*d_crack_kernel
* Kernel code executed by each thread on its own data when the kernel is
* launched. Constant memory is used for the set of all possible characters,
* in this case, lowercase.
* Threads cooperate to help build a possible password built from the Constant
* character array.
* @params:
* hash - array filled with characters to crack.
* hashLen - length of the given hash
* length - the length of the passwords to generate
* d_result - array of possible passwords.
*/
__global__ void d_crack_kernel(unsigned char * hash, int hashLen, int length,
unsigned char * d_result, int d_result_size) {
// printf("blockIdx: %d, blockDim: %d, threadIdx: %d, blockDim mod length: %d\n", blockIdx.x, blockDim.x, threadIdx.x, blockDim.x % length);
int index = (blockIdx.x * blockDim.x + threadIdx.x) * (length + 1);
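// Each candidate occupies (length + 1) bytes of d_result: length password characters
// plus a terminating '\0', so index is this thread's byte offset into d_result.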
int t = blockIdx.x * blockDim.x + threadIdx.x;
int inner_index = gridDim.x;
// if (index == 0 || index == 26 ||index == 52) {
// printf("inner index: %d\n", inner_index);
// }
int powerSize = 0;
for (int i = (length - 1); i >= 0; i--) {
if ( i <= (length - 1) - 2) {
// printf("power of: %d", d_powerOf(NUMCHARS, powerSize));
d_result[index] = VALID_CHARS[blockIdx.x / d_powerOf(NUMCHARS, powerSize)];
powerSize++;
} else if ( i == (length - 1) - 1) {
d_result[index + i] = VALID_CHARS[blockIdx.x % NUMCHARS];
} else {
d_result[index + i] = VALID_CHARS[threadIdx.x];
}
// d_result[index + i] = VALID_CHARS[((blockIdx.x * (length - 1)) + (t % NUMCHARS)) % NUMCHARS];
// inner_index /= NUMCHARS;
}
// 4 characters
//d_result[index] = VALID_CHARS[blockIdx.x / (NUMCHARS * NUMCHARS)];
// d_result[index] = VALID_CHARS[blockIdx.x / NUMCHARS];
// d_result[index + 1] = VALID_CHARS[blockIdx.x % NUMCHARS];
// d_result[index + (length - 1)] = VALID_CHARS[threadIdx.x];
d_result[index + (length)] = '\0';
}
__device__ int d_powerOf(int val, int size) {
for (int i = 0; i < size; i++) {
val *= val;
}
return val;
}
|
99fe488d92e89577712d91a655469b5603f496dd.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include <cuda_runtime.h>
#include <openssl/md5.h>
#include "d_cracker.h"
#include "CHECK.h"
#include "config.h"
#include "wrappers.h"
//prototype for the kernel
__global__ void d_crack_kernel(unsigned char * hash, int hashLen,
int length, unsigned char * d_result,
int d_result_size);
__device__ int d_powerOf(int val, int size);
//constant array containing all the possible characters in the password
__constant__ char VALID_CHARS[NUMCHARS];
/*malloccmp
* a compare like function that compares two strings of length. It simply
* compares the elements at each location.
*
* @params:
* str1 - an unsigned char pointer to the first character in string 1
* str2 - an unsigned char pointer to the first character in string 2
* length - the length of str1 and str2, the number of items compared.
*/
int malloccmp(unsigned char * str1, unsigned char * str2, int length) {
for (int i = 0; i < length; i++) {
if (str1[i] != str2[i]) {
return 0;
}
}
return 1;
}
/*printHash
* prints len items starting from hash as hexadecimal. Used to print hashes as
* hex.
*
* @params:
* hash - a pointer to the start of the hash to print
* len - the number of items to print from hash.
*/
void printHash(unsigned char * hash, int len) {
for (int k = 0; k < len; k++) {
printf("%x", hash[k]);
if (k == len - 1) {
printf("\n");
}
}
}
/*printPassword
* prints len characters of a string starting at pass. Used to print the result
* password.
*
* @params:
* pass - an unsigned char pointer to the first element in the string to print
* len - the number of items to print.
*/
void printPassword(unsigned char * pass, int len) {
for (int k = 0; k < len; k++) {
printf("%s", (unsigned char *) &pass[k]);
if (k == len - 1) {
printf("\n");
}
}
}
/*d_crack
*
* Sets up and calls the kernel to brute-force a password hash.
*
* @params
* hash - the password hash to brute-force
* hashLen - the length of the hash
* outpass - the result password to return
*/
float d_crack(unsigned char * hash, int hashLen, unsigned char * outpass) {
cudaEvent_t start_cpu, stop_cpu;
float cpuMsecTime = -1;
//Use cuda functions to do the timing
//create event objects
CHECK(cudaEventCreate(&start_cpu));
CHECK(cudaEventCreate(&stop_cpu));
//record the starting time
CHECK(cudaEventRecord(start_cpu));
int passLength = 3;
int size = hashLen * sizeof(char);
int outsize = MAX_PASSWORD_LENGTH * sizeof(char);
int passoutsize = pow(NUMCHARS, passLength) * (passLength + 1);
unsigned char * d_hash;
CHECK(cudaMalloc((void**)&d_hash, size));
unsigned char * d_passwords;
CHECK(cudaMalloc((void**)&d_passwords, passoutsize));
unsigned char * d_result;
CHECK(cudaMalloc((void**)&d_result, outsize));
//build the const array of all lowercase characters
char VALID_CHARS_CPU[NUMCHARS];
for (int i = 0; i < NUMCHARS; i++) {
VALID_CHARS_CPU[i] = (char)(i + 97);
}
CHECK(cudaMemcpyToSymbol(VALID_CHARS, VALID_CHARS_CPU, NUMCHARS * sizeof(char)));
CHECK(cudaMemcpy(d_hash, hash, size, cudaMemcpyHostToDevice));
dim3 block(NUMCHARS, 1, 1);
dim3 grid(ceil(pow(NUMCHARS, passLength)/(float)(NUMCHARS)), 1);
d_crack_kernel<<<grid, block>>>(d_hash, hashLen, passLength, d_passwords, passoutsize);
CHECK(cudaDeviceSynchronize());
unsigned char * passwords = (unsigned char *) Malloc(passoutsize);
CHECK(cudaMemcpy(passwords, d_passwords, passoutsize, cudaMemcpyDeviceToHost));
unsigned char * hashes = (unsigned char *) Malloc(pow(NUMCHARS, passLength) * hashLen);
int j = 0;
for (int i = 0; i < passoutsize; i+=(passLength + 1)) { //+ 1 accounts for the null terminator
//if (i < 17000)
// printf("i: %d, s: %s\n", i, (unsigned char *) &passwords[i]); // print out generated passwords for debugging
// printf("%lu", (unsigned long) passLength);
MD5_CTX md5;
MD5_Init(&md5);
MD5_Update(&md5, &(passwords[i]), (unsigned long) passLength);
MD5_Final(&hashes[j], &md5);
if (malloccmp(&passwords[i], (unsigned char *) "pas", passLength)) {
printHash(&hashes[j], hashLen);
}
j += hashLen;
}
//NOTE: Hashes are forming correctly and we are getting the correct hash for input = 2
unsigned char * ourHash = (unsigned char *) Malloc(hashLen);
int numHashes = pow(NUMCHARS, passLength) * hashLen;
int z = 0;
for (int i = 0; i < numHashes; i+=hashLen) {
// printHash(hash, hashLen);
for (int j = 0; j < hashLen; j++) {
ourHash[j] = hashes[i + j];
}
// printHash(&hashes[i], hashLen);
// printf("%d\n", malloccmp(ourHash, hash, hashLen));
if (malloccmp(ourHash, hash, hashLen)) {
//TODO: Break here, we found the password
printf("Password: ");
printPassword(&passwords[z], 1);
}
for (int k = 0; k < hashLen; k++) {
ourHash[k] = '\0';
}
z +=(passLength + 1);
}
// for (int i = 0; i < pow(NUMCHARS, passLength) * hashLen; i++) {
// ourHash[i % hashLen] = hashes[i];
// if (((i % hashLen) == 0) && malloccmp(ourHash, hashes, hashLen)) {
// printf("Hello");
// for (int j = 0; j < hashLen; j++) {
// printf("%x", ourHash[j]);
// if (j == hashLen - 1) {
// printf("\n");
// }
// }
// for (int k = 0; k < hashLen; k++) {
// ourHash[k] = '\0';
// }
// }
// }
CHECK(cudaMemcpy(outpass, d_result, outsize, cudaMemcpyDeviceToHost));
CHECK(cudaFree(d_hash));
CHECK(cudaFree(d_result));
free(ourHash);
free(passwords);
free(hashes);
//record the ending time and wait for event to complete
CHECK(cudaEventRecord(stop_cpu));
CHECK(cudaEventSynchronize(stop_cpu));
//calculate the elapsed time between the two events
CHECK(cudaEventElapsedTime(&cpuMsecTime, start_cpu, stop_cpu));
return cpuMsecTime;
}
/*d_crack_kernel
* Kernel code executed by each thread on its own data when the kernel is
* launched. Constant memory is used for the set of all possible characters,
* in this case, lowercase.
* Threads cooperate to help build a possible password built from the Constant
* character array.
* @params:
* hash - array filled with characters to crack.
* hashLen - length of the given hash
* length - the length of the passwords to generate
* d_result - array of possible passwords.
*/
__global__ void d_crack_kernel(unsigned char * hash, int hashLen, int length,
unsigned char * d_result, int d_result_size) {
// printf("blockIdx: %d, blockDim: %d, threadIdx: %d, blockDim mod length: %d\n", blockIdx.x, blockDim.x, threadIdx.x, blockDim.x % length);
int index = (blockIdx.x * blockDim.x + threadIdx.x) * (length + 1);
int t = blockIdx.x * blockDim.x + threadIdx.x;
int inner_index = gridDim.x;
// if (index == 0 || index == 26 ||index == 52) {
// printf("inner index: %d\n", inner_index);
// }
int powerSize = 0;
for (int i = (length - 1); i >= 0; i--) {
if ( i <= (length - 1) - 2) {
// printf("power of: %d", d_powerOf(NUMCHARS, powerSize));
d_result[index] = VALID_CHARS[blockIdx.x / d_powerOf(NUMCHARS, powerSize)];
powerSize++;
} else if ( i == (length - 1) - 1) {
d_result[index + i] = VALID_CHARS[blockIdx.x % NUMCHARS];
} else {
d_result[index + i] = VALID_CHARS[threadIdx.x];
}
// d_result[index + i] = VALID_CHARS[((blockIdx.x * (length - 1)) + (t % NUMCHARS)) % NUMCHARS];
// inner_index /= NUMCHARS;
}
// 4 characters
//d_result[index] = VALID_CHARS[blockIdx.x / (NUMCHARS * NUMCHARS)];
// d_result[index] = VALID_CHARS[blockIdx.x / NUMCHARS];
// d_result[index + 1] = VALID_CHARS[blockIdx.x % NUMCHARS];
// d_result[index + (length - 1)] = VALID_CHARS[threadIdx.x];
d_result[index + (length)] = '\0';
}
__device__ int d_powerOf(int val, int size) {
for (int i = 0; i < size; i++) {
val *= val;
}
return val;
}
|
8fb28ec4a44afc961b72dd1ca7bcb21d565c4557.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <iostream>
#include <time.h>
#include <boost/lexical_cast.hpp>
#include <boost/program_options.hpp>
#include "GPUNet.h"
#include "GPUNetSettings.h"
#include "Net.h"
#include "NetData.h"
#include "NetTrainer.h"
#include "Profiler.h"
namespace boost_po = boost::program_options;
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN(value) { \
hipError_t _m_cudaStat = value; \
if (_m_cudaStat != hipSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
hipGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} \
}
/**
* Profile network
*/
void profile(GPUNet &gnet, Net &net, NetData &d) {
NetTrainer nt(&net);
Profiler p(&gnet, &net, &nt);
p.set_iterations(100);
p.profile_feed_forward_v1_2(d);
p.profile_feed_forward_v2(d);
p.profile_cpu_feedforward(&d.get_training_dataset()->training_set[0]);
p.profile_backprop_v2(d);
p.profile_backprop_v3(d);
p.profile_cpu_backprop(&d.get_training_dataset()->training_set[d.get_training_dataset()->n_input+1]);
}
/**
* Test network
*/
void test(GPUNet &gnet, Net &net, NetData &d) {
gnet.init_from_net(net, d);
gnet.test_feed_forward(net, d);
gnet.test_backprop(net, d);
}
int main(int argc, char **argv) {
srand(time(NULL));
time_t start, stop;
float l_rate, momentum, t_set_pct, hidden_pct;
int max_epochs, save_freq;
std::string dset, netf, fbase;
boost_po::options_description desc("Allowed options");
desc.add_options()
("help,h", "produce help message")
("dataset,d", boost_po::value<std::string>(&dset), "data set file")
("loadnet,n", boost_po::value<std::string>(&netf), "load net file")
("profile,p", boost_po::bool_switch(), "profile GPU functions")
("validate,v", boost_po::bool_switch(), "validate GPU functions")
("test,t", boost_po::bool_switch(), "run test set, this will take a different random sampling to be the test set on every initialization")
("f_base,f", boost_po::value<std::string>(&fbase)->default_value("itr"), "base name of net file when writing, default = [itr]_#.txt")
("l_rate,r", boost_po::value<float>(&l_rate)->default_value(0.7), "learning rate, default = 0.7")
("hidden_pct,h", boost_po::value<float>(&hidden_pct)->default_value(2.0/3.0), "number of hidden nodes as percentage input nodes, default = 2.0/3.0")
("t_pct,c", boost_po::value<float>(&t_set_pct)->default_value(0.8), "percentage of dataset used for training, default = 0.8")
("momentum,m", boost_po::value<float>(&momentum)->default_value(0.9), "momentum, default = 0.9")
("batch,b", boost_po::bool_switch()->default_value(false), "batch update, default = 0 (false), will ignore momentum")
("max_epochs,e", boost_po::value<int>(&max_epochs)->default_value(1000), "max epochs, default = 1000")
("save_freq,s", boost_po::value<int>(&save_freq)->default_value(100), "save data every n epochs, default = 100")
("cpu", boost_po::bool_switch(), "run on CPU instead of GPU")
("reset", boost_po::bool_switch(), "reset all CUDA capable GPUs")
("parallel", boost_po::bool_switch(), "Run networks in parallel on CPU and GPU to compare")
;
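// Example invocation (dataset path and parameter values are illustrative only):
//   ./cuda_ann data/train.csv --l_rate 0.5 --momentum 0.8 --max_epochs 500 --f_base run1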
boost_po::positional_options_description p;
p.add("dataset", -1);
boost_po::variables_map vm;
boost_po::store(boost_po::command_line_parser(argc, argv).options(desc).positional(p).run(), vm);
boost_po::notify(vm);
if (!vm.size()) {
std::cout << "Try: cuda_ann --help\n\n";
return 1;
}
if (vm.count("help")) {
std::cout << desc << std::endl;;
return 1;
}
if (vm["reset"].as<bool>()) {
int count;
CUDA_CHECK_RETURN(hipGetDeviceCount(&count));
for (int i = 0; i < count; i++) {
std::cout << "Resetting gpu: " << i << std::endl;
CUDA_CHECK_RETURN(hipSetDevice(i));
CUDA_CHECK_RETURN(hipDeviceReset());
}
return 1;
}
if (!vm.count("dataset")) {
std::cerr << "Must have dataset parameter" << std::endl;
return 1;
}
NetData d(t_set_pct);
if (!d.load_file(dset))
return 1; //if file did not load
//d.print_loaded_patterns_flatted();
bool net_loaded = false;
GPUNet gnet;
Net net;
if (vm.count("loadnet")) {
std::cout << "Using netfile to initialize" << std::endl;
net_loaded = gnet.load_netfile(netf);
} else {
//init normally
std::cout << "Using " << hidden_pct << " * " << "n_input for hidden nodes" << std::endl;
net.init(d.num_inputs(), d.num_targets(), hidden_pct);
gnet.init(d.num_inputs(), d.num_targets(), hidden_pct, GPUNetSettings::STANDARD);
gnet.alloc_host_mem();
gnet.alloc_dev_mem();
gnet.init_from_net(net, d);
}
// FeatureVector **dv;
// thrust::host_vector<FeatureVector*> hv;
// for (int i = 0; i < d.data.size(); ++i) {
// hv.push_back(d.data[i]);
// }
// gnet.copy_to_device_host_array_ptrs_biased(hv,&dv);
//
// float*d_set;
// gnet.copy_to_device(d.get_training_dataset()->training_set, d.get_training_dataset()->n_training, d.get_training_dataset()->fpp, &d_set);
//
// return 0;
bool train = true;
if (vm["validate"].as<bool>()) {
test(gnet, net, d);
train = false;
}
if (vm["profile"].as<bool>()) {
profile(gnet, net, d);
train = false;
}
if (vm["parallel"].as<bool>()) {
gnet.run_parallel(net, d);
train = false;
}
if (train) {
bool batching = vm["batch"].as<bool>();
if (batching)
std::cout << "Using batch learning mode" << std::endl;
else
std::cout << "Using stochastic learning mode" << std::endl;
if (vm["cpu"].as<bool>()) {
std::cout << "CPU flag set" << std::endl;
NetTrainer nt = NetTrainer(&net);
nt.set_stopping_conds(max_epochs, 97.5);
nt.set_training_params(l_rate, momentum, batching);
start = clock();
nt.train_net(d.get_training_dataset());
stop = clock();
std::cout << "CPU time: " << ((float)stop - start) / CLOCKS_PER_SEC << std::endl;
} else {
if (vm["test"].as<bool>()) {
gnet.run_test_set(d.get_training_dataset());
} else {
gnet.set_base_file_name(fbase);
gnet.set_save_frequency(save_freq);
gnet.set_training_params(l_rate, momentum, batching);
gnet.set_stopping_conds(max_epochs, 95.0);
start = clock();
gnet.train_net_sectioned(d.get_training_dataset());
stop = clock();
std::cout << "GPU time: " << ((float)stop - start) / CLOCKS_PER_SEC << std::endl;
}
}
}
// CUDA_CHECK_RETURN(hipDeviceReset());
// std::cout << "Device reset" << std::endl;
return 0;
}
|
8fb28ec4a44afc961b72dd1ca7bcb21d565c4557.cu
|
#include <cstdio>
#include <iostream>
#include <time.h>
#include <boost/lexical_cast.hpp>
#include <boost/program_options.hpp>
#include "GPUNet.h"
#include "GPUNetSettings.h"
#include "Net.h"
#include "NetData.h"
#include "NetTrainer.h"
#include "Profiler.h"
namespace boost_po = boost::program_options;
/**
* This macro checks return value of the CUDA runtime call and exits
* the application if the call failed.
*/
#define CUDA_CHECK_RETURN(value) { \
cudaError_t _m_cudaStat = value; \
if (_m_cudaStat != cudaSuccess) { \
fprintf(stderr, "Error %s at line %d in file %s\n", \
cudaGetErrorString(_m_cudaStat), __LINE__, __FILE__); \
exit(1); \
} \
}
/**
* Profile network
*/
void profile(GPUNet &gnet, Net &net, NetData &d) {
NetTrainer nt(&net);
Profiler p(&gnet, &net, &nt);
p.set_iterations(100);
p.profile_feed_forward_v1_2(d);
p.profile_feed_forward_v2(d);
p.profile_cpu_feedforward(&d.get_training_dataset()->training_set[0]);
p.profile_backprop_v2(d);
p.profile_backprop_v3(d);
p.profile_cpu_backprop(&d.get_training_dataset()->training_set[d.get_training_dataset()->n_input+1]);
}
/**
* Test network
*/
void test(GPUNet &gnet, Net &net, NetData &d) {
gnet.init_from_net(net, d);
gnet.test_feed_forward(net, d);
gnet.test_backprop(net, d);
}
int main(int argc, char **argv) {
srand(time(NULL));
time_t start, stop;
float l_rate, momentum, t_set_pct, hidden_pct;
int max_epochs, save_freq;
std::string dset, netf, fbase;
boost_po::options_description desc("Allowed options");
desc.add_options()
("help,h", "produce help message")
("dataset,d", boost_po::value<std::string>(&dset), "data set file")
("loadnet,n", boost_po::value<std::string>(&netf), "load net file")
("profile,p", boost_po::bool_switch(), "profile GPU functions")
("validate,v", boost_po::bool_switch(), "validate GPU functions")
("test,t", boost_po::bool_switch(), "run test set, this will take a different random sampling to be the test set on every initialization")
("f_base,f", boost_po::value<std::string>(&fbase)->default_value("itr"), "base name of net file when writing, default = [itr]_#.txt")
("l_rate,r", boost_po::value<float>(&l_rate)->default_value(0.7), "learning rate, default = 0.7")
("hidden_pct,h", boost_po::value<float>(&hidden_pct)->default_value(2.0/3.0), "number of hidden nodes as percentage input nodes, default = 2.0/3.0")
("t_pct,c", boost_po::value<float>(&t_set_pct)->default_value(0.8), "percentage of dataset used for training, default = 0.8")
("momentum,m", boost_po::value<float>(&momentum)->default_value(0.9), "momentum, default = 0.9")
("batch,b", boost_po::bool_switch()->default_value(false), "batch update, default = 0 (false), will ignore momentum")
("max_epochs,e", boost_po::value<int>(&max_epochs)->default_value(1000), "max epochs, default = 1000")
("save_freq,s", boost_po::value<int>(&save_freq)->default_value(100), "save data every n epochs, default = 100")
("cpu", boost_po::bool_switch(), "run on CPU instead of GPU")
("reset", boost_po::bool_switch(), "reset all CUDA capable GPUs")
("parallel", boost_po::bool_switch(), "Run networks in parallel on CPU and GPU to compare")
;
boost_po::positional_options_description p;
p.add("dataset", -1);
boost_po::variables_map vm;
boost_po::store(boost_po::command_line_parser(argc, argv).options(desc).positional(p).run(), vm);
boost_po::notify(vm);
if (!vm.size()) {
std::cout << "Try: cuda_ann --help\n\n";
return 1;
}
if (vm.count("help")) {
std::cout << desc << std::endl;
return 1;
}
if (vm["reset"].as<bool>()) {
int count;
CUDA_CHECK_RETURN(cudaGetDeviceCount(&count));
for (int i = 0; i < count; i++) {
std::cout << "Resetting gpu: " << i << std::endl;
CUDA_CHECK_RETURN(cudaSetDevice(i));
CUDA_CHECK_RETURN(cudaDeviceReset());
}
return 1;
}
if (!vm.count("dataset")) {
std::cerr << "Must have dataset parameter" << std::endl;
return 1;
}
NetData d(t_set_pct);
if (!d.load_file(dset))
return 1; //if file did not load
//d.print_loaded_patterns_flatted();
bool net_loaded = false;
GPUNet gnet;
Net net;
if (vm.count("loadnet")) {
std::cout << "Using netfile to initialize" << std::endl;
net_loaded = gnet.load_netfile(netf);
} else {
//init normally
std::cout << "Using " << hidden_pct << " * " << "n_input for hidden nodes" << std::endl;
net.init(d.num_inputs(), d.num_targets(), hidden_pct);
gnet.init(d.num_inputs(), d.num_targets(), hidden_pct, GPUNetSettings::STANDARD);
gnet.alloc_host_mem();
gnet.alloc_dev_mem();
gnet.init_from_net(net, d);
}
// FeatureVector **dv;
// thrust::host_vector<FeatureVector*> hv;
// for (int i = 0; i < d.data.size(); ++i) {
// hv.push_back(d.data[i]);
// }
// gnet.copy_to_device_host_array_ptrs_biased(hv,&dv);
//
// float*d_set;
// gnet.copy_to_device(d.get_training_dataset()->training_set, d.get_training_dataset()->n_training, d.get_training_dataset()->fpp, &d_set);
//
// return 0;
bool train = true;
if (vm["validate"].as<bool>()) {
test(gnet, net, d);
train = false;
}
if (vm["profile"].as<bool>()) {
profile(gnet, net, d);
train = false;
}
if (vm["parallel"].as<bool>()) {
gnet.run_parallel(net, d);
train = false;
}
if (train) {
bool batching = vm["batch"].as<bool>();
if (batching)
std::cout << "Using batch learning mode" << std::endl;
else
std::cout << "Using stochastic learning mode" << std::endl;
if (vm["cpu"].as<bool>()) {
std::cout << "CPU flag set" << std::endl;
NetTrainer nt = NetTrainer(&net);
nt.set_stopping_conds(max_epochs, 97.5);
nt.set_training_params(l_rate, momentum, batching);
start = clock();
nt.train_net(d.get_training_dataset());
stop = clock();
std::cout << "CPU time: " << ((float)stop - start) / CLOCKS_PER_SEC << std::endl;
} else {
if (vm["test"].as<bool>()) {
gnet.run_test_set(d.get_training_dataset());
} else {
gnet.set_base_file_name(fbase);
gnet.set_save_frequency(save_freq);
gnet.set_training_params(l_rate, momentum, batching);
gnet.set_stopping_conds(max_epochs, 95.0);
start = clock();
gnet.train_net_sectioned(d.get_training_dataset());
stop = clock();
std::cout << "GPU time: " << ((float)stop - start) / CLOCKS_PER_SEC << std::endl;
}
}
}
// CUDA_CHECK_RETURN(cudaDeviceReset());
// std::cout << "Device reset" << std::endl;
return 0;
}
|
63c9d2bfcb8d14e330052f0e37f83f6757455af7.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S3_7.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(hipMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
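    // pitched layout: NEQ rows (one per state variable), each row holding num_volumes reals; pitch_h is the padded row width in bytes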
check_cuda_error(hipMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
hipLaunchKernelGGL(( kernel_set_model_inital_conditions) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, *sv, num_volumes);
check_cuda_error( hipPeekAtLastError() );
hipDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(hipMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(hipMemcpy(stims_currents_device, stim_currents, stim_currents_size, hipMemcpyHostToDevice));
//the array cells_to_solve is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(hipMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(hipMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, hipMemcpyHostToDevice));
}
hipLaunchKernelGGL(( solve_gpu) , dim3(GRID), dim3(BLOCK_SIZE), 0, 0, dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( hipPeekAtLastError() );
check_cuda_error(hipFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(hipFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
/* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
*((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
*((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
*((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
*((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
*((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
*((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
*((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
*((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
*((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
*((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.3490205600339,0.00135187120264140,0.774277559740803,0.774162210916370,0.000180262376049146,0.482808906426101,0.00298656246258262,0.999998274173002,2.00542936224808e-08,1.94715845546440e-05,0.999770529718570,1.00670632095612,0.999986171358002,5.42217137753544e-05,0.708754032619395,10.1027225272363,139.361072406886};
for (uint32_t i = 0; i < NEQ; i++)
*((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
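    // sv uses the pitched layout set up in set_model_initial_conditions_gpu: state variable i of this cell lives at ((real*)((char*)sv + pitch * i))[threadID_]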
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
//real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
///#ifdef EPI
real Gto=0.294;
///#endif
///#ifdef ENDO
/// real Gto=0.073;
///#endif
///#ifdef MCELL
/// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Setting Elnaz's parameters
real parameters []={14.3398987053447,0.000381918179728744,0.000158723395768871,0.000548013804320374,0.267898492682970,0.127095160679920,0.215952777870270,5.02243260663008,0.0155665987836551,1.88126320054018,1096.74278442967,0.000566505435257165,0.362196376485733,0.0197187196315984,0.00390176834238508,4.78913871828123e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
/// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
63c9d2bfcb8d14e330052f0e37f83f6757455af7.cu
|
#include <stddef.h>
#include <stdint.h>
#include "model_gpu_utils.h"
#include "ten_tusscher_2004_epi_S3_7.h"
extern "C" SET_ODE_INITIAL_CONDITIONS_GPU(set_model_initial_conditions_gpu) {
print_to_stdout_and_file("Using ten Tusscher 2004 epi GPU model\n");
// execution configuration
const int GRID = (num_volumes + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t size = num_volumes*sizeof(real);
check_cuda_error(cudaMallocPitch((void **) &(*sv), &pitch_h, size, (size_t )NEQ));
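    // pitched layout: NEQ rows (one per state variable), each row holding num_volumes reals; pitch_h is the padded row width in bytes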
check_cuda_error(cudaMemcpyToSymbol(pitch, &pitch_h, sizeof(size_t)));
kernel_set_model_inital_conditions <<<GRID, BLOCK_SIZE>>>(*sv, num_volumes);
check_cuda_error( cudaPeekAtLastError() );
cudaDeviceSynchronize();
return pitch_h;
}
extern "C" SOLVE_MODEL_ODES_GPU(solve_model_odes_gpu) {
// execution configuration
const int GRID = ((int)num_cells_to_solve + BLOCK_SIZE - 1)/BLOCK_SIZE;
size_t stim_currents_size = sizeof(real)*num_cells_to_solve;
size_t cells_to_solve_size = sizeof(uint32_t)*num_cells_to_solve;
real *stims_currents_device;
check_cuda_error(cudaMalloc((void **) &stims_currents_device, stim_currents_size));
check_cuda_error(cudaMemcpy(stims_currents_device, stim_currents, stim_currents_size, cudaMemcpyHostToDevice));
//the array cells_to_solve is passed when we are using an adaptive mesh
uint32_t *cells_to_solve_device = NULL;
if(cells_to_solve != NULL) {
check_cuda_error(cudaMalloc((void **) &cells_to_solve_device, cells_to_solve_size));
check_cuda_error(cudaMemcpy(cells_to_solve_device, cells_to_solve, cells_to_solve_size, cudaMemcpyHostToDevice));
}
solve_gpu <<<GRID, BLOCK_SIZE>>>(dt, sv, stims_currents_device, cells_to_solve_device, num_cells_to_solve, num_steps);
check_cuda_error( cudaPeekAtLastError() );
check_cuda_error(cudaFree(stims_currents_device));
if(cells_to_solve_device) check_cuda_error(cudaFree(cells_to_solve_device));
}
__global__ void kernel_set_model_inital_conditions(real *sv, int num_volumes)
{
// Thread ID
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
if(threadID < num_volumes) {
/* *((real*)((char*)sv + pitch * 0) + threadID) = INITIAL_V; // V; millivolt
*((real*)((char*)sv + pitch * 1) + threadID) = 0.f; //M
*((real*)((char*)sv + pitch * 2) + threadID) = 0.75; //H
*((real*)((char*)sv + pitch * 3) + threadID) = 0.75f; //J
*((real*)((char*)sv + pitch * 4) + threadID) = 0.f; //Xr1
*((real*)((char*)sv + pitch * 5) + threadID) = 1.f; //Xr2
*((real*)((char*)sv + pitch * 6) + threadID) = 0.f; //Xs
*((real*)((char*)sv + pitch * 7) + threadID) = 1.f; //S
*((real*)((char*)sv + pitch * 8) + threadID) = 0.f; //R
*((real*)((char*)sv + pitch * 9) + threadID) = 0.f; //D
*((real*)((char*)sv + pitch * 10) + threadID) = 1.f; //F
*((real*)((char*)sv + pitch * 11) + threadID) = 1.f; //FCa
*((real*)((char*)sv + pitch * 12) + threadID) = 1.f; //G
*((real*)((char*)sv + pitch * 13) + threadID) = 0.0002; //Cai
*((real*)((char*)sv + pitch * 14) + threadID) = 0.2f; //CaSR
*((real*)((char*)sv + pitch * 15) + threadID) = 11.6f; //Nai
*((real*)((char*)sv + pitch * 16) + threadID) = 138.3f; //Ki
*/
// Elnaz's steady-state initial conditions
real sv_sst[]={-86.3490205600339,0.00135187120264140,0.774277559740803,0.774162210916370,0.000180262376049146,0.482808906426101,0.00298656246258262,0.999998274173002,2.00542936224808e-08,1.94715845546440e-05,0.999770529718570,1.00670632095612,0.999986171358002,5.42217137753544e-05,0.708754032619395,10.1027225272363,139.361072406886};
for (uint32_t i = 0; i < NEQ; i++)
*((real*)((char*)sv + pitch * i) + threadID) = sv_sst[i];
}
}
// Solving the model for each cell in the tissue matrix ni x nj
__global__ void solve_gpu(real dt, real *sv, real* stim_currents,
uint32_t *cells_to_solve, uint32_t num_cells_to_solve,
int num_steps)
{
int threadID = blockDim.x * blockIdx.x + threadIdx.x;
int sv_id;
// Each thread solves one cell model
if(threadID < num_cells_to_solve) {
if(cells_to_solve)
sv_id = cells_to_solve[threadID];
else
sv_id = threadID;
real rDY[NEQ];
for (int n = 0; n < num_steps; ++n) {
RHS_gpu(sv, rDY, stim_currents[threadID], sv_id, dt);
*((real*)((char*)sv) + sv_id) = dt*rDY[0] + *((real*)((char*)sv) + sv_id);
for(int i = 0; i < NEQ; i++) {
*((real*)((char*)sv + pitch * i) + sv_id) = rDY[i];
}
}
}
}
inline __device__ void RHS_gpu(real *sv, real *rDY_, real stim_current, int threadID_, real dt) {
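    // sv uses the pitched layout set up in set_model_initial_conditions_gpu: state variable i of this cell lives at ((real*)((char*)sv + pitch * i))[threadID_]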
// State variables
real svolt = *((real*)((char*)sv + pitch * 0) + threadID_);
real sm = *((real*)((char*)sv + pitch * 1) + threadID_);
real sh = *((real*)((char*)sv + pitch * 2) + threadID_);
real sj = *((real*)((char*)sv + pitch * 3) + threadID_);
real sxr1 = *((real*)((char*)sv + pitch * 4) + threadID_);
real sxr2 = *((real*)((char*)sv + pitch * 5) + threadID_);
real sxs = *((real*)((char*)sv + pitch * 6) + threadID_);
real ss = *((real*)((char*)sv + pitch * 7) + threadID_);
real sr = *((real*)((char*)sv + pitch * 8) + threadID_);
real sd = *((real*)((char*)sv + pitch * 9) + threadID_);
real sf = *((real*)((char*)sv + pitch * 10) + threadID_);
real sfca = *((real*)((char*)sv + pitch * 11) + threadID_);
real sg = *((real*)((char*)sv + pitch * 12) + threadID_);
real Cai = *((real*)((char*)sv + pitch * 13) + threadID_);
real CaSR = *((real*)((char*)sv + pitch * 14) + threadID_);
real Nai = *((real*)((char*)sv + pitch * 15) + threadID_);
real Ki = *((real*)((char*)sv + pitch * 16) + threadID_);
//External concentrations
real Ko=5.4;
real Cao=2.0;
real Nao=140.0;
//Intracellular volumes
real Vc=0.016404;
real Vsr=0.001094;
//Calcium dynamics
real Bufc=0.15f;
real Kbufc=0.001f;
real Bufsr=10.f;
real Kbufsr=0.3f;
real taufca=2.f;
real taug=2.f;
real Vmaxup=0.000425f;
real Kup=0.00025f;
//Constants
const real R = 8314.472f;
const real F = 96485.3415f;
const real T =310.0f;
real RTONF =(R*T)/F;
//Cellular capacitance
real CAPACITANCE=0.185;
//Parameters for currents
//Parameters for IKr
real Gkr=0.096;
//Parameters for Iks
real pKNa=0.03;
///#ifdef EPI
real Gks=0.245;
///#endif
///#ifdef ENDO
/// real Gks=0.245;
///#endif
///#ifdef MCELL
//real Gks=0.062;
///#endif
//Parameters for Ik1
real GK1=5.405;
//Parameters for Ito
///#ifdef EPI
real Gto=0.294;
///#endif
///#ifdef ENDO
/// real Gto=0.073;
///#endif
///#ifdef MCELL
/// real Gto=0.294;
///#endif
//Parameters for INa
real GNa=14.838;
//Parameters for IbNa
real GbNa=0.00029;
//Parameters for INaK
real KmK=1.0;
real KmNa=40.0;
real knak=1.362;
//Parameters for ICaL
real GCaL=0.000175;
//Parameters for IbCa
real GbCa=0.000592;
//Parameters for INaCa
real knaca=1000;
real KmNai=87.5;
real KmCa=1.38;
real ksat=0.1;
real n=0.35;
//Parameters for IpCa
real GpCa=0.825;
real KpCa=0.0005;
//Parameters for IpK;
real GpK=0.0146;
// Setting Elnaz's parameters
real parameters []={14.3398987053447,0.000381918179728744,0.000158723395768871,0.000548013804320374,0.267898492682970,0.127095160679920,0.215952777870270,5.02243260663008,0.0155665987836551,1.88126320054018,1096.74278442967,0.000566505435257165,0.362196376485733,0.0197187196315984,0.00390176834238508,4.78913871828123e-05};
GNa=parameters[0];
GbNa=parameters[1];
GCaL=parameters[2];
GbCa=parameters[3];
Gto=parameters[4];
Gkr=parameters[5];
Gks=parameters[6];
GK1=parameters[7];
GpK=parameters[8];
knak=parameters[9];
knaca=parameters[10];
Vmaxup=parameters[11];
GpCa=parameters[12];
real arel=parameters[13];
real crel=parameters[14];
real Vleak=parameters[15];
real IKr;
real IKs;
real IK1;
real Ito;
real INa;
real IbNa;
real ICaL;
real IbCa;
real INaCa;
real IpCa;
real IpK;
real INaK;
real Irel;
real Ileak;
real dNai;
real dKi;
real dCai;
real dCaSR;
real A;
// real BufferFactorc;
// real BufferFactorsr;
real SERCA;
real Caisquare;
real CaSRsquare;
real CaCurrent;
real CaSRCurrent;
real fcaold;
real gold;
real Ek;
real Ena;
real Eks;
real Eca;
real CaCSQN;
real bjsr;
real cjsr;
real CaBuf;
real bc;
real cc;
real Ak1;
real Bk1;
real rec_iK1;
real rec_ipK;
real rec_iNaK;
real AM;
real BM;
real AH_1;
real BH_1;
real AH_2;
real BH_2;
real AJ_1;
real BJ_1;
real AJ_2;
real BJ_2;
real M_INF;
real H_INF;
real J_INF;
real TAU_M;
real TAU_H;
real TAU_J;
real axr1;
real bxr1;
real axr2;
real bxr2;
real Xr1_INF;
real Xr2_INF;
real TAU_Xr1;
real TAU_Xr2;
real Axs;
real Bxs;
real Xs_INF;
real TAU_Xs;
real R_INF;
real TAU_R;
real S_INF;
real TAU_S;
real Ad;
real Bd;
real Cd;
real TAU_D;
real D_INF;
real TAU_F;
real F_INF;
real FCa_INF;
real G_INF;
real inverseVcF2=1/(2*Vc*F);
real inverseVcF=1./(Vc*F);
real Kupsquare=Kup*Kup;
// real BufcKbufc=Bufc*Kbufc;
// real Kbufcsquare=Kbufc*Kbufc;
// real Kbufc2=2*Kbufc;
// real BufsrKbufsr=Bufsr*Kbufsr;
// const real Kbufsrsquare=Kbufsr*Kbufsr;
// const real Kbufsr2=2*Kbufsr;
const real exptaufca=exp(-dt/taufca);
const real exptaug=exp(-dt/taug);
real sItot;
//Needed to compute currents
Ek=RTONF*(log((Ko/Ki)));
Ena=RTONF*(log((Nao/Nai)));
Eks=RTONF*(log((Ko+pKNa*Nao)/(Ki+pKNa*Nai)));
Eca=0.5*RTONF*(log((Cao/Cai)));
Ak1=0.1/(1.+exp(0.06*(svolt-Ek-200)));
Bk1=(3.*exp(0.0002*(svolt-Ek+100))+
exp(0.1*(svolt-Ek-10)))/(1.+exp(-0.5*(svolt-Ek)));
rec_iK1=Ak1/(Ak1+Bk1);
rec_iNaK=(1./(1.+0.1245*exp(-0.1*svolt*F/(R*T))+0.0353*exp(-svolt*F/(R*T))));
rec_ipK=1./(1.+exp((25-svolt)/5.98));
//Compute currents
INa=GNa*sm*sm*sm*sh*sj*(svolt-Ena);
ICaL=GCaL*sd*sf*sfca*4*svolt*(F*F/(R*T))*
(exp(2*svolt*F/(R*T))*Cai-0.341*Cao)/(exp(2*svolt*F/(R*T))-1.);
Ito=Gto*sr*ss*(svolt-Ek);
IKr=Gkr*sqrt(Ko/5.4)*sxr1*sxr2*(svolt-Ek);
IKs=Gks*sxs*sxs*(svolt-Eks);
IK1=GK1*rec_iK1*(svolt-Ek);
INaCa=knaca*(1./(KmNai*KmNai*KmNai+Nao*Nao*Nao))*(1./(KmCa+Cao))*
(1./(1+ksat*exp((n-1)*svolt*F/(R*T))))*
(exp(n*svolt*F/(R*T))*Nai*Nai*Nai*Cao-
exp((n-1)*svolt*F/(R*T))*Nao*Nao*Nao*Cai*2.5);
INaK=knak*(Ko/(Ko+KmK))*(Nai/(Nai+KmNa))*rec_iNaK;
IpCa=GpCa*Cai/(KpCa+Cai);
IpK=GpK*rec_ipK*(svolt-Ek);
IbNa=GbNa*(svolt-Ena);
IbCa=GbCa*(svolt-Eca);
//Determine total current
(sItot) = IKr +
IKs +
IK1 +
Ito +
INa +
IbNa +
ICaL +
IbCa +
INaK +
INaCa +
IpCa +
IpK +
stim_current;
//update concentrations
Caisquare=Cai*Cai;
CaSRsquare=CaSR*CaSR;
CaCurrent=-(ICaL+IbCa+IpCa-2.0f*INaCa)*inverseVcF2*CAPACITANCE;
/// A=0.016464f*CaSRsquare/(0.0625f+CaSRsquare)+0.008232f;
A=arel*CaSRsquare/(0.0625f+CaSRsquare)+crel;
Irel=A*sd*sg;
///Ileak=0.00008f*(CaSR-Cai);
Ileak=Vleak*(CaSR-Cai);
SERCA=Vmaxup/(1.f+(Kupsquare/Caisquare));
CaSRCurrent=SERCA-Irel-Ileak;
CaCSQN=Bufsr*CaSR/(CaSR+Kbufsr);
dCaSR=dt*(Vc/Vsr)*CaSRCurrent;
bjsr=Bufsr-CaCSQN-dCaSR-CaSR+Kbufsr;
cjsr=Kbufsr*(CaCSQN+dCaSR+CaSR);
CaSR=(sqrt(bjsr*bjsr+4.*cjsr)-bjsr)/2.;
CaBuf=Bufc*Cai/(Cai+Kbufc);
dCai=dt*(CaCurrent-CaSRCurrent);
bc=Bufc-CaBuf-dCai-Cai+Kbufc;
cc=Kbufc*(CaBuf+dCai+Cai);
Cai=(sqrt(bc*bc+4*cc)-bc)/2;
dNai=-(INa+IbNa+3*INaK+3*INaCa)*inverseVcF*CAPACITANCE;
Nai+=dt*dNai;
dKi=-(stim_current+IK1+Ito+IKr+IKs-2*INaK+IpK)*inverseVcF*CAPACITANCE;
Ki+=dt*dKi;
//compute steady state values and time constants
AM=1./(1.+exp((-60.-svolt)/5.));
BM=0.1/(1.+exp((svolt+35.)/5.))+0.10/(1.+exp((svolt-50.)/200.));
TAU_M=AM*BM;
M_INF=1./((1.+exp((-56.86-svolt)/9.03))*(1.+exp((-56.86-svolt)/9.03)));
if (svolt>=-40.)
{
AH_1=0.;
BH_1=(0.77/(0.13*(1.+exp(-(svolt+10.66)/11.1))));
TAU_H= 1.0/(AH_1+BH_1);
}
else
{
AH_2=(0.057*exp(-(svolt+80.)/6.8));
BH_2=(2.7*exp(0.079*svolt)+(3.1e5)*exp(0.3485*svolt));
TAU_H=1.0/(AH_2+BH_2);
}
H_INF=1./((1.+exp((svolt+71.55)/7.43))*(1.+exp((svolt+71.55)/7.43)));
if(svolt>=-40.)
{
AJ_1=0.;
BJ_1=(0.6*exp((0.057)*svolt)/(1.+exp(-0.1*(svolt+32.))));
TAU_J= 1.0/(AJ_1+BJ_1);
}
else
{
AJ_2=(((-2.5428e4)*exp(0.2444*svolt)-(6.948e-6)*
exp(-0.04391*svolt))*(svolt+37.78)/
(1.+exp(0.311*(svolt+79.23))));
BJ_2=(0.02424*exp(-0.01052*svolt)/(1.+exp(-0.1378*(svolt+40.14))));
TAU_J= 1.0/(AJ_2+BJ_2);
}
J_INF=H_INF;
Xr1_INF=1./(1.+exp((-26.-svolt)/7.));
axr1=450./(1.+exp((-45.-svolt)/10.));
bxr1=6./(1.+exp((svolt-(-30.))/11.5));
TAU_Xr1=axr1*bxr1;
Xr2_INF=1./(1.+exp((svolt-(-88.))/24.));
axr2=3./(1.+exp((-60.-svolt)/20.));
bxr2=1.12/(1.+exp((svolt-60.)/20.));
TAU_Xr2=axr2*bxr2;
Xs_INF=1./(1.+exp((-5.-svolt)/14.));
Axs=1100./(sqrt(1.+exp((-10.-svolt)/6)));
Bxs=1./(1.+exp((svolt-60.)/20.));
TAU_Xs=Axs*Bxs;
#ifdef EPI
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
#ifdef ENDO
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+28)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=1000.*exp(-(svolt+67)*(svolt+67)/1000.)+8.;
#endif
#ifdef MCELL
R_INF=1./(1.+exp((20-svolt)/6.));
S_INF=1./(1.+exp((svolt+20)/5.));
TAU_R=9.5*exp(-(svolt+40.)*(svolt+40.)/1800.)+0.8;
TAU_S=85.*exp(-(svolt+45.)*(svolt+45.)/320.)+5./(1.+exp((svolt-20.)/5.))+3.;
#endif
D_INF=1./(1.+exp((-5-svolt)/7.5));
Ad=1.4/(1.+exp((-35-svolt)/13))+0.25;
Bd=1.4/(1.+exp((svolt+5)/5));
Cd=1./(1.+exp((50-svolt)/20));
TAU_D=Ad*Bd+Cd;
F_INF=1./(1.+exp((svolt+20)/7));
TAU_F=1125*exp(-(svolt+27)*(svolt+27)/240)+80+165/(1.+exp((25-svolt)/10));
FCa_INF=(1./(1.+pow((Cai/0.000325),8))+
0.1/(1.+exp((Cai-0.0005)/0.0001))+
0.20/(1.+exp((Cai-0.00075)/0.0008))+
0.23 )/1.46;
if(Cai<0.00035)
G_INF=1./(1.+pow((Cai/0.00035),6));
else
G_INF=1./(1.+pow((Cai/0.00035),16));
//Update gates
rDY_[1] = M_INF-(M_INF-sm)*exp(-dt/TAU_M);
rDY_[2] = H_INF-(H_INF-sh)*exp(-dt/TAU_H);
rDY_[3] = J_INF-(J_INF-sj)*exp(-dt/TAU_J);
rDY_[4] = Xr1_INF-(Xr1_INF-sxr1)*exp(-dt/TAU_Xr1);
rDY_[5] = Xr2_INF-(Xr2_INF-sxr2)*exp(-dt/TAU_Xr2);
rDY_[6] = Xs_INF-(Xs_INF-sxs)*exp(-dt/TAU_Xs);
rDY_[7] = S_INF-(S_INF-ss)*exp(-dt/TAU_S);
rDY_[8] = R_INF-(R_INF-sr)*exp(-dt/TAU_R);
rDY_[9] = D_INF-(D_INF-sd)*exp(-dt/TAU_D);
rDY_[10] = F_INF-(F_INF-sf)*exp(-dt/TAU_F);
fcaold= sfca;
sfca = FCa_INF-(FCa_INF-sfca)*exptaufca;
if(sfca>fcaold && (svolt)>-37)
sfca = fcaold;
gold = sg;
sg = G_INF-(G_INF-sg)*exptaug;
if(sg>gold && (svolt)>-37)
sg=gold;
//update voltage
rDY_[0] = svolt + dt*(-sItot);
rDY_[11] = sfca;
rDY_[12] = sg;
rDY_[13] = Cai;
rDY_[14] = CaSR;
rDY_[15] = Nai;
rDY_[16] = Ki;
}
|
ef3bc23f9bde40c86d53b894aac39fa99f86497f.hip
|
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 32, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwish<
float, 1, int32_t, float, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, true,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
|
ef3bc23f9bde40c86d53b894aac39fa99f86497f.cu
|
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<64, 32, 32>;
using WarpShape = cutlass::gemm::GemmShape<64, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwish<
float, 1, int32_t, float, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, true,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
74bfdc2c06b73af008d99e0b4bb156a7b36dac6d.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
template <int BLOCK_SIZE> __global__ void
matrixMul(float* C, float* A, float* B, int wA, int wB)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int aBegin = wA * BLOCK_SIZE * by;
int aEnd = aBegin + wA - 1;
int aStep = BLOCK_SIZE;
int bBegin = BLOCK_SIZE * bx;
int bStep = BLOCK_SIZE * wB;
float Csub = 0;
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
__syncthreads();
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub += As[ty][k] * Bs[k][tx];
}
__syncthreads();
}
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
int main(void)
{
dim3 grid(300, 300);
dim3 threads(32, 32);
dim3 dimsA(300*32, 300*32, 1);
dim3 dimsB(300*32, 300*32, 1);
float* A, *B, *C;
hipLaunchKernelGGL(( matrixMul<32>), dim3(grid), dim3(threads), 0, 0, C, A, B, 0, 0);
}
|
74bfdc2c06b73af008d99e0b4bb156a7b36dac6d.cu
|
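// Tiled matrix multiply: each block computes one BLOCK_SIZE x BLOCK_SIZE tile of C, staging the matching tiles of A and B in shared memory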
template <int BLOCK_SIZE> __global__ void
matrixMul(float* C, float* A, float* B, int wA, int wB)
{
int bx = blockIdx.x;
int by = blockIdx.y;
int tx = threadIdx.x;
int ty = threadIdx.y;
int aBegin = wA * BLOCK_SIZE * by;
int aEnd = aBegin + wA - 1;
int aStep = BLOCK_SIZE;
int bBegin = BLOCK_SIZE * bx;
int bStep = BLOCK_SIZE * wB;
float Csub = 0;
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep)
{
__shared__ float As[BLOCK_SIZE][BLOCK_SIZE];
__shared__ float Bs[BLOCK_SIZE][BLOCK_SIZE];
As[ty][tx] = A[a + wA * ty + tx];
Bs[ty][tx] = B[b + wB * ty + tx];
__syncthreads();
for (int k = 0; k < BLOCK_SIZE; ++k)
{
Csub += As[ty][k] * Bs[k][tx];
}
__syncthreads();
}
int c = wB * BLOCK_SIZE * by + BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
}
int main(void)
{
dim3 grid(300, 300);
dim3 threads(32, 32);
dim3 dimsA(300*32, 300*32, 1);
dim3 dimsB(300*32, 300*32, 1);
float* A, *B, *C;
matrixMul<32><<<grid, threads>>>(C, A, B, 0, 0);
}
|
53360759b5429983e8b4fce535fe716bbaf21eb0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <device_launch_parameters.h>
#include <chrono>
#include <cstdlib>
#include <iostream>
void displayMatrix(int* A, size_t M, size_t N);
__global__ void transposeKernel(int* A, int* B, int M, int N) {
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    int i_A = N * row + col;
    // each thread copies one element of A into its transposed position in B
    if (row < M && col < N)
        B[col * M + row] = A[i_A];
}
hipError_t transposeHost(int* h_A, int* h_B, int M, int N) {
hipError_t status = hipSuccess;
hipEvent_t start, finish;
hipEventCreate(&start);
hipEventCreate(&finish);
size_t size = M * N * sizeof(int);
size_t pitch;
float msecs = 0;
int* d_A;
int* d_B;
    const int BLOCK_SIZE = 32;  // 32x32 = 1024 threads, the per-block maximum
    dim3 Dim3Blocks(BLOCK_SIZE, BLOCK_SIZE);
    dim3 Dim3Grids((N + BLOCK_SIZE - 1) / BLOCK_SIZE, (M + BLOCK_SIZE - 1) / BLOCK_SIZE);
int i = 0, k = 0;
auto begin = std::chrono::high_resolution_clock::now();
while (i < M * N) {
for (int j = k; j < M * N; j += N) {
h_B[i++] = h_A[j];
}
k++;
}
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double, std::milli> cputime = end - begin;
std::cout << "CPU Elapsed Time: " << cputime.count() << " ms" << std::endl;
std::cout << "\n******* CPU *********\n";
displayMatrix(h_A, M, N);
displayMatrix(h_B, N, M);
std::cout << "\n******* CPU *********\n\n";
status = hipMalloc((void**)&d_A, size);
if (status != hipSuccess) {
std::cerr << "hipMalloc failed for d_A!\n";
goto Error;
}
status = hipMalloc((void**)&d_B, size);
if (status != hipSuccess) {
std::cerr << "hipMalloc failed for d_B!\n";
goto Error;
}
status = hipMemcpy(d_A, h_A, size, hipMemcpyHostToDevice);
if (status != hipSuccess) {
std::cerr << "hipMemcpy failed for h_A to d_A.\n";
goto Error;
}
/*
for (int i = 0; i < M * N; i++) {
h_B[i] = -1;
}
*/
hipEventRecord(start);
hipLaunchKernelGGL(( transposeKernel), dim3(Dim3Grids), dim3(Dim3Blocks), 0, 0, d_A, d_B, M, N);
hipDeviceSynchronize();
hipEventRecord(finish);
hipMemcpy(h_B, d_B, size, hipMemcpyDeviceToHost);
hipEventElapsedTime(&msecs, start, finish);
std::cout << "GPU(CUDA) Elapsed Time: " << msecs << "ms\n";
displayMatrix(h_B, N, M);
Error:
hipFree(d_A);
hipFree(d_B);
return status;
}
void displayMatrix(int* A, size_t M, size_t N) {
for (size_t i = 0; i < M * N; i++) {
if (i % N == 0)
std::cout << "\n";
std::cout << A[i] << " ";
}
std::cout << "\n";
}
int main(int argc, char** argv) {
if (argc == 3) {
int M = atoi(argv[1]);
int N = atoi(argv[2]);
std::cout << "M = " << M << ", N = " << N << "\n";
size_t size = M * N * sizeof(int);
int* h_A = (int*)malloc(size);
if (h_A == NULL) {
std::cerr << "Failed allocating memory for h_A!";
return 1;
}
int* h_B = (int*)malloc(size);
if (h_B == NULL) {
std::cerr << "Failed allocating memory for h_B!";
return 3;
}
for (int i = 0; i < M * N; i++) {
// h_A[i] = rand() % 100;
h_A[i] = i + 1;
}
hipError_t status = transposeHost(h_A, h_B, M, N);
if (status != hipSuccess) {
std::cerr << "transposeHost failed!\n";
return 1;
}
free(h_A);
free(h_B);
return 0;
}
}
|
53360759b5429983e8b4fce535fe716bbaf21eb0.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <device_launch_parameters.h>
#include <chrono>
#include <cstdlib>
#include <iostream>
void displayMatrix(int* A, size_t M, size_t N);
__global__ void transposeKernel(int* A, int* B, int M, int N) {
    int row = blockDim.y * blockIdx.y + threadIdx.y;
    int col = blockDim.x * blockIdx.x + threadIdx.x;
    int i_A = N * row + col;
    // each thread copies one element of A into its transposed position in B
    if (row < M && col < N)
        B[col * M + row] = A[i_A];
}
cudaError_t transposeHost(int* h_A, int* h_B, int M, int N) {
cudaError_t status = cudaSuccess;
cudaEvent_t start, finish;
cudaEventCreate(&start);
cudaEventCreate(&finish);
size_t size = M * N * sizeof(int);
size_t pitch;
float msecs = 0;
int* d_A;
int* d_B;
    const int BLOCK_SIZE = 32;  // 32x32 = 1024 threads, the per-block maximum
    dim3 Dim3Blocks(BLOCK_SIZE, BLOCK_SIZE);
    dim3 Dim3Grids((N + BLOCK_SIZE - 1) / BLOCK_SIZE, (M + BLOCK_SIZE - 1) / BLOCK_SIZE);
int i = 0, k = 0;
auto begin = std::chrono::high_resolution_clock::now();
while (i < M * N) {
for (int j = k; j < M * N; j += N) {
h_B[i++] = h_A[j];
}
k++;
}
auto end = std::chrono::high_resolution_clock::now();
std::chrono::duration<double, std::milli> cputime = end - begin;
std::cout << "CPU Elapsed Time: " << cputime.count() << " ms" << std::endl;
std::cout << "\n******* CPU *********\n";
displayMatrix(h_A, M, N);
displayMatrix(h_B, N, M);
std::cout << "\n******* CPU *********\n\n";
status = cudaMalloc((void**)&d_A, size);
if (status != cudaSuccess) {
std::cerr << "cudaMalloc failed for d_A!\n";
goto Error;
}
status = cudaMalloc((void**)&d_B, size);
if (status != cudaSuccess) {
std::cerr << "cudaMalloc failed for d_B!\n";
goto Error;
}
status = cudaMemcpy(d_A, h_A, size, cudaMemcpyHostToDevice);
if (status != cudaSuccess) {
std::cerr << "cudaMemcpy failed for h_A to d_A.\n";
goto Error;
}
/*
for (int i = 0; i < M * N; i++) {
h_B[i] = -1;
}
*/
cudaEventRecord(start);
transposeKernel<<<Dim3Grids, Dim3Blocks>>>(d_A, d_B, M, N);
cudaDeviceSynchronize();
cudaEventRecord(finish);
cudaMemcpy(h_B, d_B, size, cudaMemcpyDeviceToHost);
cudaEventElapsedTime(&msecs, start, finish);
std::cout << "GPU(CUDA) Elapsed Time: " << msecs << "ms\n";
displayMatrix(h_B, N, M);
Error:
cudaFree(d_A);
cudaFree(d_B);
return status;
}
void displayMatrix(int* A, size_t M, size_t N) {
for (size_t i = 0; i < M * N; i++) {
if (i % N == 0)
std::cout << "\n";
std::cout << A[i] << " ";
}
std::cout << "\n";
}
int main(int argc, char** argv) {
if (argc == 3) {
int M = atoi(argv[1]);
int N = atoi(argv[2]);
std::cout << "M = " << M << ", N = " << N << "\n";
size_t size = M * N * sizeof(int);
int* h_A = (int*)malloc(size);
if (h_A == NULL) {
std::cerr << "Failed allocating memory for h_A!";
return 1;
}
int* h_B = (int*)malloc(size);
if (h_B == NULL) {
std::cerr << "Failed allocating memory for h_B!";
return 3;
}
for (int i = 0; i < M * N; i++) {
// h_A[i] = rand() % 100;
h_A[i] = i + 1;
}
cudaError_t status = transposeHost(h_A, h_B, M, N);
if (status != cudaSuccess) {
std::cerr << "transposeHost failed!\n";
return 1;
}
free(h_A);
free(h_B);
return 0;
}
}
|
bce37b8c3f1d912433bf7346fc15419a984a6ba9.hip
|
// !!! This is a file automatically generated by hipify!!!
//xfail:NOT_ALL_VERIFIED
//--blockDim=16 --gridDim=1 --no-inline
//
#include <hip/hip_runtime.h>
__global__ void foo()
{
__shared__ int A[16];
A[0] = threadIdx.x;
}
|
bce37b8c3f1d912433bf7346fc15419a984a6ba9.cu
|
//xfail:NOT_ALL_VERIFIED
//--blockDim=16 --gridDim=1 --no-inline
//
#include <cuda.h>
__global__ void foo()
{
__shared__ int A[16];
A[0] = threadIdx.x;
}
|
5c3862a0ad40a5bbe849013b3f988a0494a01887.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// TriggerSelection.cpp
// HiggsAnalysis_new
//
// Created by Joona Havukainen on 5/31/19.
// Copyright © 2019 Joona Havukainen. All rights reserved.
//
__device__
bool L1METTrigger(float L1MET_x, float L1MET_y, float L1MET_cut)
{
float L1MET = sqrtf(powf(L1MET_x, 2.f)+powf(L1MET_y, 2.f));
return L1MET>L1MET_cut;
}
__global__
void triggerSelection(float *inputArray, bool *passedArray, bool *passed, float L1MetCut, int variablesPerEvent, int nEvents, int triggerIndex)
{
int processIndex = blockIdx.x * blockDim.x + threadIdx.x;
int localIndex = processIndex * variablesPerEvent;
if(processIndex<nEvents)
{
passedArray[processIndex]=((bool)inputArray[localIndex+triggerIndex+2] && L1METTrigger(inputArray[localIndex+triggerIndex+0], inputArray[localIndex+triggerIndex+1], L1MetCut));
passed[processIndex] = passed[processIndex] && passedArray[processIndex];
}
}
|
5c3862a0ad40a5bbe849013b3f988a0494a01887.cu
|
//
// TriggerSelection.cpp
// HiggsAnalysis_new
//
// Created by Joona Havukainen on 5/31/19.
// Copyright © 2019 Joona Havukainen. All rights reserved.
//
__device__
bool L1METTrigger(float L1MET_x, float L1MET_y, float L1MET_cut)
{
float L1MET = sqrtf(powf(L1MET_x, 2.f)+powf(L1MET_y, 2.f));
return L1MET>L1MET_cut;
}
__global__
void triggerSelection(float *inputArray, bool *passedArray, bool *passed, float L1MetCut, int variablesPerEvent, int nEvents, int triggerIndex)
{
int processIndex = blockIdx.x * blockDim.x + threadIdx.x;
int localIndex = processIndex * variablesPerEvent;
if(processIndex<nEvents)
{
passedArray[processIndex]=((bool)inputArray[localIndex+triggerIndex+2] && L1METTrigger(inputArray[localIndex+triggerIndex+0], inputArray[localIndex+triggerIndex+1], L1MetCut));
passed[processIndex] = passed[processIndex] && passedArray[processIndex];
}
}
|
644252350b5ffb7fbefdb4b7cc619bc90442658b.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string>
#include <iostream>
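// element-wise vector addition: one thread per output element, guarded against overrun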
__global__ void CUDAadd(int *a, int *b, int *c, unsigned int SizeOfArray)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < SizeOfArray)
{
c[i] = a[i] + b[i];
}
}
int main()
{
const unsigned int SizeOfArray = 5;
int a[SizeOfArray];
int b[SizeOfArray];
int c[SizeOfArray] = { 0 };
for (unsigned int i = 0; i < SizeOfArray; i++)
{
a[i] = i;
b[i] = i*10;
}
int *dev_a;
int *dev_b;
int *dev_c;
hipMalloc((void**)&dev_a, SizeOfArray * sizeof(int));
hipMalloc((void**)&dev_b, SizeOfArray * sizeof(int));
hipMalloc((void**)&dev_c, SizeOfArray * sizeof(int));
hipMemcpy(dev_a, a, SizeOfArray * sizeof(int), hipMemcpyHostToDevice);
hipMemcpy(dev_b, b, SizeOfArray * sizeof(int), hipMemcpyHostToDevice);
//This is creating the error:int numberOfBlocks = ceil(col / MaxThreadsPerBlock); // ceil is there just to be save
//this solves the problem as it dynamic changes size based on the size of the number of points.
int threadsPerBlock = 256;
int blocksPerGrid = ((SizeOfArray)+threadsPerBlock - 1) / threadsPerBlock;
//Block size may not exceed ~ 65000
for (; blocksPerGrid > 65000;)
{
threadsPerBlock *= 2;
blocksPerGrid = ((SizeOfArray)+threadsPerBlock - 1) / threadsPerBlock;
}
//blocksPerGrid, threadsPerBlock
CUDAadd << < blocksPerGrid, threadsPerBlock >> >(dev_a, dev_b, dev_c, SizeOfArray);
hipDeviceSynchronize();
hipMemcpy(c, dev_c, SizeOfArray * sizeof(int), hipMemcpyDeviceToHost);
for (unsigned int i = 0; i < SizeOfArray; i++)
{
std::cout << "c: " << c[i] << std::endl;;
}
hipFree(dev_a);
hipFree(dev_b);
hipFree(dev_c);
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
|
644252350b5ffb7fbefdb4b7cc619bc90442658b.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <string>
#include <iostream>
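// element-wise vector addition: one thread per output element, guarded against overrun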
__global__ void CUDAadd(int *a, int *b, int *c, unsigned int SizeOfArray)
{
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < SizeOfArray)
{
c[i] = a[i] + b[i];
}
}
int main()
{
const unsigned int SizeOfArray = 5;
int a[SizeOfArray];
int b[SizeOfArray];
int c[SizeOfArray] = { 0 };
for (unsigned int i = 0; i < SizeOfArray; i++)
{
a[i] = i;
b[i] = i*10;
}
int *dev_a;
int *dev_b;
int *dev_c;
cudaMalloc((void**)&dev_a, SizeOfArray * sizeof(int));
cudaMalloc((void**)&dev_b, SizeOfArray * sizeof(int));
cudaMalloc((void**)&dev_c, SizeOfArray * sizeof(int));
cudaMemcpy(dev_a, a, SizeOfArray * sizeof(int), cudaMemcpyHostToDevice);
cudaMemcpy(dev_b, b, SizeOfArray * sizeof(int), cudaMemcpyHostToDevice);
    //This was creating the error: int numberOfBlocks = ceil(col / MaxThreadsPerBlock); // ceil is there just to be safe
    //this solves the problem as it dynamically changes the grid size based on the number of points.
int threadsPerBlock = 256;
int blocksPerGrid = ((SizeOfArray)+threadsPerBlock - 1) / threadsPerBlock;
//Block size may not exceed ~ 65000
for (; blocksPerGrid > 65000;)
{
threadsPerBlock *= 2;
blocksPerGrid = ((SizeOfArray)+threadsPerBlock - 1) / threadsPerBlock;
}
//blocksPerGrid, threadsPerBlock
CUDAadd << < blocksPerGrid, threadsPerBlock >> >(dev_a, dev_b, dev_c, SizeOfArray);
cudaDeviceSynchronize();
cudaMemcpy(c, dev_c, SizeOfArray * sizeof(int), cudaMemcpyDeviceToHost);
for (unsigned int i = 0; i < SizeOfArray; i++)
{
std::cout << "c: " << c[i] << std::endl;;
}
cudaFree(dev_a);
cudaFree(dev_b);
cudaFree(dev_c);
return 0;
}
// Helper function for using CUDA to add vectors in parallel.
|
1eb4759cb72fa412ac2a80232c332cedb6ed57ab.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from magmablas/zlarft_kernels.cu normal z -> s, Tue Feb 9 16:05:34 2016
@author Azzam Haidar
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define sgemv_bs 32
#define BLOCK_SIZE 512
#define use_gemm_larft
extern __shared__ float shared_data[];
//===================================================================================================
static __device__
void slarft_gemvcolwise_device( int m, float *v, float *tau,
float *c, int ldc, float *T, int ldt, int step )
{
const int thblk = blockIdx.x;
if (thblk > step)
return;
/* if blockIdx.x < step step performs the z = V(tx:n,tx)' * V(tx:n,1:tx-1) used for computing T:*/
if ( !MAGMA_S_EQUAL(*tau, MAGMA_S_ZERO) ) {
if (thblk < step) {
const int tx = threadIdx.x;
float *dc = c + blockIdx.x * ldc;
__shared__ float sum[ BLOCK_SIZE ];
float tmp;
/* perform {T_i}^H := V(:,i)' * V(:,1:i-1) */
if (tx == 0)
tmp = dc[0]; //since V[0] should be one
else
tmp = MAGMA_S_ZERO;
for( int j = tx+1; j < m; j += BLOCK_SIZE ) {
tmp += MAGMA_S_CONJ( v[j] ) * dc[j];
}
sum[tx] = tmp;
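            // block-wide reduction of the per-thread partial sums; the result is left in sum[0]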
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
#if defined (use_gemm_larft)
*(T+thblk) = MAGMA_S_CONJ(sum[0]);
#else
tmp = - MAGMA_S_CONJ(*tau) * sum[0];
*(T+thblk) = MAGMA_S_CONJ(tmp); // T = - tau(tx) * V(tx:n,1:tx-1)' * V(tx:n,tx) = tmp'
//*(T+thblk) = - MAGMA_S_CONJ(sum[0]) * (*tau); // T = - tau(tx) * V(tx:n,1:tx-1)' * V(tx:n,tx) = tmp'
#endif
}
else {
#if defined (use_gemm_larft)
*(T+thblk) = MAGMA_S_ONE;
#else
*(T+thblk) = *tau;
#endif
}
}// in case tau is zero put the corresponding column of T to zero
else
{
*(T+thblk) = MAGMA_S_ZERO;
}
}
//===================================================================================================
__global__
void slarft_gemvcolwise_kernel( int m, float *v, int ldv, float *tau,
float *T, int ldt, int step )
{
slarft_gemvcolwise_device(m, v+step+step*ldv, tau+step, v+step, ldv, T+step*ldt, ldt, step);
}
//===================================================================================================
__global__
void slarft_gemvcolwise_kernel_batched( int m, float **v_array, int ldv, float **tau_array,
float **T_array, int ldt, int step )
{
int batchid = blockIdx.z;
slarft_gemvcolwise_device(m, v_array[batchid]+step+step*ldv, tau_array[batchid]+step, v_array[batchid]+step, ldv, T_array[batchid]+step*ldt, ldt, step);
}
//===================================================================================================
extern "C"
void magmablas_slarft_gemvcolwise(
magma_int_t m, magma_int_t step,
float *v, magma_int_t ldv,
float *T, magma_int_t ldt,
float *tau,
magma_queue_t queue )
{
dim3 grid( step+1, 1, 1 );
dim3 threads( BLOCK_SIZE );
hipLaunchKernelGGL(( slarft_gemvcolwise_kernel)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, v, ldv, tau, T, ldt, step);
}
//===================================================================================================
extern "C"
void magmablas_slarft_gemvcolwise_batched(
magma_int_t m, magma_int_t step,
float **v_array, magma_int_t ldv,
float **T_array, magma_int_t ldt,
float **tau_array, magma_int_t batchCount, magma_queue_t queue )
{
dim3 grid( step+1, 1, batchCount );
dim3 threads( BLOCK_SIZE );
hipLaunchKernelGGL(( slarft_gemvcolwise_kernel_batched)
, dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
m, v_array, ldv, tau_array, T_array, ldt, step);
}
//===================================================================================================
//===================================================================================================
// sgemv(y=alpha*A*x) interface: T/W=tau*v*x,
static __device__ void
slarft_gemvrowwise_device(
int m, int i,
float *tau,
float *v_ptr, int ldv,
float *x_ptr, int incx,
float *T_ptr, int ldt,
float *W, float* sdata)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
if (tx == 0 && ty == 0)
{
T_ptr[0] = *tau;
}
if (i <= 0) return;
float res = MAGMA_S_ZERO;
v_ptr += ldv * ty;
if (tx < sgemv_bs)
{
for (int s=tx; s < m; s += sgemv_bs)
{
res += MAGMA_S_CONJ (v_ptr[s]) * x_ptr[s*incx];
}
sdata[ty * sgemv_bs + tx] = res;
}
__syncthreads();
magma_sum_reduce<sgemv_bs>(tx, &(sdata[ty*sgemv_bs+0]));
#if defined (use_gemm_larft)
if (tx == 0)
{
W[ty] = -sdata[ty * sgemv_bs + 0];
}
#else
if (tx == 0)
{
W[ty] = -sdata[ty * sgemv_bs + 0] * (*tau);
}
#endif
}
//T(1:i-1,i) := - tau(i) * V(i:n,1:i-1)' * V(i:n,i)
//T(i,i) = tau(i)
//===================================================================================================
__global__ void
slarft_gemvrowwise_kernel(
int m, int i,
float *tau,
float *v, int ldv,
float *T, int ldt)
{
float *W = T +i*ldt;
float *sdata = (float*)shared_data;
slarft_gemvrowwise_device(m, i, tau+i, v+i, ldv, v+i+i*ldv, 1,
T+i+i*ldt, ldt, W, sdata);
}
//===================================================================================================
__global__ void
slarft_gemvrowwise_kernel_batched(
int m, int i,
float **tau_array,
float **v_array, int ldv,
float **T_array, int ldt)
{
int batchid = blockIdx.z;
float *W = T_array[batchid] +i*ldt;
float *sdata = (float*)shared_data;
slarft_gemvrowwise_device(m, i, tau_array[batchid]+i, v_array[batchid]+i, ldv, v_array[batchid]+i+i*ldv, 1,
T_array[batchid] +i+i*ldt, ldt, W, sdata);
}
//===================================================================================================
extern "C"
void magmablas_slarft_gemvrowwise(
magma_int_t m, magma_int_t i,
float *tau,
float *v, magma_int_t ldv,
float *T, magma_int_t ldt,
float *W,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(sgemv_bs, max(i,1), 1);
size_t shmem = sizeof(float)*sgemv_bs*(i+1);
hipLaunchKernelGGL(( slarft_gemvrowwise_kernel)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, i, tau, v, ldv, T, ldt);
}
//===================================================================================================
extern "C"
void magmablas_slarft_gemvrowwise_batched(
magma_int_t m, magma_int_t i,
float **tau_array,
float **v_array, magma_int_t ldv,
float **T_array, magma_int_t ldt,
magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(sgemv_bs, max(i,1), 1);
size_t shmem = sizeof(float)*sgemv_bs*(i+1);
/* sgemvrowwise uses a bigger shared memory, has more data reuse, and performs better
*/
hipLaunchKernelGGL(( slarft_gemvrowwise_kernel_batched)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, i, tau_array, v_array, ldv, T_array, ldt);
}
//===================================================================================================
//===================================================================================================
/*
loop_inside
*/
static __device__ void
slarft_gemv_loop_inside_device(
int n, int k,
float *tau,
float *v, int ldv,
float *T, int ldt)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int incx = 1;
float *sdata = (float*)shared_data;
float res;
// write the first element
if (tx == 0 && ty == 0)
{
T[0] = tau[0];
}
for (int i=1; i < k; i++)
{
int m = n-i;
float *v_ptr = v;
v_ptr += i;
float *x_ptr = v_ptr + i * ldv;
res = MAGMA_S_ZERO;
if (tx < sgemv_bs && ty < i)
{
v_ptr += ldv * ty;
for (int s=tx; s < m; s += sgemv_bs)
{
res += MAGMA_S_CONJ (v_ptr[s]) * x_ptr[s*incx];
}
sdata[ty * sgemv_bs + tx] = res;
}
__syncthreads();
magma_sum_reduce<sgemv_bs>(tx, &(sdata[ty*sgemv_bs+0]));
__syncthreads();
#if defined (use_gemm_larft)
if (tx < i && ty == 0)
{
T[i* ldt + tx] = sdata[tx * sgemv_bs + 0];
}
// not needed since it is overwritten in trmv
/*
if (tx == i && ty == 0)
{
T[i * ldt + i] = tau[i];
}
*/
#else
if (tx < i && ty == 0)
{
T[i* ldt + tx] = -sdata[tx * sgemv_bs + 0] * (tau[i]);
}
if (tx == i && ty == 0)
{
T[i * ldt + i] = tau[i];
}
#endif
v_ptr -= i;
} // end of loop k
}
//===================================================================================================
__global__ void
slarft_gemv_loop_inside_kernel(
int n, int k,
float *tau,
float *v, int ldv,
float *T, int ldt)
{
slarft_gemv_loop_inside_device(n, k, tau, v, ldv, T, ldt);
}
//===================================================================================================
__global__ void
slarft_gemv_loop_inside_kernel_batched(
int n, int k,
float **tau_array,
float **v_array, int ldv,
float **T_array, int ldt)
{
int batchid = blockIdx.z;
slarft_gemv_loop_inside_device(n, k, tau_array[batchid], v_array[batchid], ldv, T_array[batchid], ldt);
}
//===================================================================================================
extern "C"
void magmablas_slarft_gemv_loop_inside(
magma_int_t n, magma_int_t k,
float *tau,
float *v, magma_int_t ldv,
float *T, magma_int_t ldt,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(sgemv_bs, max(k,1), 1);
size_t shmem = sizeof(float) * (sgemv_bs*(k+1));
hipLaunchKernelGGL(( slarft_gemv_loop_inside_kernel)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
n, k, tau, v, ldv, T, ldt);
}
//===================================================================================================
extern "C"
void magmablas_slarft_gemv_loop_inside_batched(
magma_int_t n, magma_int_t k,
float **tau_array,
float **v_array, magma_int_t ldv,
float **T_array, magma_int_t ldt, magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(sgemv_bs, max(k,1), 1);
size_t shmem = sizeof(float) * (sgemv_bs*(k+1));
hipLaunchKernelGGL(( slarft_gemv_loop_inside_kernel_batched)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
n, k, tau_array, v_array, ldv, T_array, ldt);
}
//===================================================================================================
//===================================================================================================
static __device__ void
slarft_strmv_sm32x32_device(
int n, int k, float *tau,
float *Tin, int ldtin, float *Tout, int ldtout )
{
int tx = threadIdx.x;
float *sdata = (float*)shared_data;
float res;
// this routine applies a sequence of trmv to update the last k columns of the
// triangular T (columns n-k to n), where T is of size n by n and where the first n-k
// columns of T are assumed to have been updated previously.
// So the routine loads all of the n x n T into shared memory
// and applies the sequence of trmv.
// To update a certain column i, threads proceed row-wise:
// every thread reads one row and performs a gemv (dot product) to generate
// one element of the column of T, then moves to the next column.
// read T into shared
for (int s=0; s < n-k; s++)
{
sdata[tx + s*n] = Tin[tx + s * ldtin];
}
#if defined(use_gemm_larft)
for (int s=n-k; s < n; s++)
{
if (tx == s)
sdata[tx + s*n] = tau[s];
else
sdata[tx + s*n] = -tau[s] * Tin[tx + s * ldtin];
}
#else
for (int s=n-k; s < n; s++)
{
sdata[tx + s*n] = Tin[tx + s * ldtin];
}
#endif
// perform trmv
for (int i=n-k; i < n; i++)
{
__syncthreads();
res = MAGMA_S_ZERO;
if (tx < i)
{
for (int j=tx; j < i; j++)
{
res += sdata[tx + j * n] * sdata[j+ i * n];
}
}
__syncthreads();
if (tx < i)
{
sdata[tx + i * n] = res;
}
}
__syncthreads();
// write back the updated block of k columns of T
for (int s=n-k; s < n; s++)
{
Tout[tx + s * ldtout] = sdata[tx + s*n];
}
}
//===================================================================================================
__global__ void
slarft_strmv_sm32x32_kernel(
int n, int k, float *tau,
float *Tin, int ldtin, float *Tout, int ldtout )
{
slarft_strmv_sm32x32_device( n, k, tau, Tin, ldtin, Tout, ldtout);
}
//===================================================================================================
__global__ void
slarft_strmv_sm32x32_kernel_batched(
int n, int k, float **tau_array,
float **Tin_array, int ldtin, float **Tout_array, int ldtout )
{
int batchId = blockIdx.z;
slarft_strmv_sm32x32_device( n, k, tau_array[batchId], Tin_array[batchId], ldtin, Tout_array[batchId], ldtout);
}
//===================================================================================================
//===================================================================================================
extern "C"
void magmablas_slarft_strmv_sm32x32(
magma_int_t m, magma_int_t n,
float *tau,
float *Tin, magma_int_t ldtin,
float *Tout, magma_int_t ldtout,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(float)*(m*m);
hipLaunchKernelGGL(( slarft_strmv_sm32x32_kernel)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, n, tau, Tin, ldtin, Tout, ldtout);
}
//===================================================================================================
extern "C"
void magmablas_slarft_strmv_sm32x32_batched(
magma_int_t m, magma_int_t n,
float **tau_array,
float **Tin_array, magma_int_t ldtin,
float **Tout_array, magma_int_t ldtout,
magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(float)*(m*m);
hipLaunchKernelGGL(( slarft_strmv_sm32x32_kernel_batched)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, n, tau_array, Tin_array, ldtin, Tout_array, ldtout);
}
//===================================================================================================
//===================================================================================================
//===================================================================================================
static __device__ void
slarft_recstrmv_sm32x32_device(
int m, int n, float *tau,
float *Trec, int ldtrec, float *Ttri, int ldttri)
{
int tx = threadIdx.x;
float *sdata = (float*)shared_data;
float res;
// To update a certain column i, threads proceed row-wise:
// every thread reads one row and performs a gemv (dot product) to generate
// one element of the column of T, then moves to the next column.
// read T into shared
for (int s=0; s < n; s++)
{
sdata[tx + s*n] = Trec[tx + s * ldtrec];
}
__syncthreads();
// perform sequence of n-1 gemv
for (int i=0; i < n; i++)
{
res = MAGMA_S_ZERO;
for (int j=0; j < i; j++)
{
res += sdata[tx + j * n] * Ttri[j+ i * ldttri];
}
__syncthreads(); // to remove
sdata[tx + i * n] = -tau[i] * (sdata[tx + i * n] + res);
__syncthreads();
}
// write back the updated block of k columns of T, multiplied by -tau
for (int s=0; s < n; s++)
{
Trec[tx + s * ldtrec] = sdata[tx + s*n];
}
}
//===================================================================================================
__global__ void
slarft_recstrmv_sm32x32_kernel(
int m, int n, float *tau,
float *Trec, int ldtrec, float *Ttri, int ldttri)
{
slarft_recstrmv_sm32x32_device(m, n, tau, Trec, ldtrec, Ttri, ldttri);
}
//===================================================================================================
__global__ void
slarft_recstrmv_sm32x32_kernel_batched(
int m, int n, float **tau_array,
float **Trec_array, int ldtrec, float **Ttri_array, int ldttri)
{
int batchId = blockIdx.z;
slarft_recstrmv_sm32x32_device(m, n, tau_array[batchId], Trec_array[batchId], ldtrec, Ttri_array[batchId], ldttri);
}
//===================================================================================================
extern "C"
void magmablas_slarft_recstrmv_sm32x32(
magma_int_t m, magma_int_t n,
float *tau,
float *Trec, magma_int_t ldtrec,
float *Ttri, magma_int_t ldttri,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(float)*(m*n);
hipLaunchKernelGGL(( slarft_recstrmv_sm32x32_kernel)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, n, tau, Trec, ldtrec, Ttri, ldttri);
}
//===================================================================================================
extern "C"
void magmablas_slarft_recstrmv_sm32x32_batched(
magma_int_t m, magma_int_t n,
float **tau_array,
float **Trec_array, magma_int_t ldtrec,
float **Ttri_array, magma_int_t ldttri,
magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(float)*(m*n);
hipLaunchKernelGGL(( slarft_recstrmv_sm32x32_kernel_batched)
, dim3(grid), dim3(threads), shmem, queue->cuda_stream() ,
m, n, tau_array, Trec_array, ldtrec, Ttri_array, ldttri);
}
//===================================================================================================
|
1eb4759cb72fa412ac2a80232c332cedb6ed57ab.cu
|
/*
-- MAGMA (version 2.0.0) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
@date February 2016
@generated from magmablas/zlarft_kernels.cu normal z -> s, Tue Feb 9 16:05:34 2016
@author Azzam Haidar
*/
#include "magma_internal.h"
#include "magma_templates.h"
#define sgemv_bs 32
#define BLOCK_SIZE 512
#define use_gemm_larft
extern __shared__ float shared_data[];
//===================================================================================================
static __device__
void slarft_gemvcolwise_device( int m, float *v, float *tau,
float *c, int ldc, float *T, int ldt, int step )
{
const int thblk = blockIdx.x;
if (thblk > step)
return;
/* if blockIdx.x < step step performs the z = V(tx:n,tx)' * V(tx:n,1:tx-1) used for computing T:*/
if ( !MAGMA_S_EQUAL(*tau, MAGMA_S_ZERO) ) {
if (thblk < step) {
const int tx = threadIdx.x;
float *dc = c + blockIdx.x * ldc;
__shared__ float sum[ BLOCK_SIZE ];
float tmp;
/* perform {T_i}^H := V(:,i)' * V(:,1:i-1) */
if (tx == 0)
tmp = dc[0]; //since V[0] should be one
else
tmp = MAGMA_S_ZERO;
for( int j = tx+1; j < m; j += BLOCK_SIZE ) {
tmp += MAGMA_S_CONJ( v[j] ) * dc[j];
}
sum[tx] = tmp;
magma_sum_reduce< BLOCK_SIZE >( tx, sum );
#if defined (use_gemm_larft)
*(T+thblk) = MAGMA_S_CONJ(sum[0]);
#else
tmp = - MAGMA_S_CONJ(*tau) * sum[0];
*(T+thblk) = MAGMA_S_CONJ(tmp); // T = - tau(tx) * V(tx:n,1:tx-1)' * V(tx:n,tx) = tmp'
//*(T+thblk) = - MAGMA_S_CONJ(sum[0]) * (*tau); // T = - tau(tx) * V(tx:n,1:tx-1)' * V(tx:n,tx) = tmp'
#endif
}
else {
#if defined (use_gemm_larft)
*(T+thblk) = MAGMA_S_ONE;
#else
*(T+thblk) = *tau;
#endif
}
}// in case tau is zero put the corresponding column of T to zero
else
{
*(T+thblk) = MAGMA_S_ZERO;
}
}
//===================================================================================================
__global__
void slarft_gemvcolwise_kernel( int m, float *v, int ldv, float *tau,
float *T, int ldt, int step )
{
slarft_gemvcolwise_device(m, v+step+step*ldv, tau+step, v+step, ldv, T+step*ldt, ldt, step);
}
//===================================================================================================
__global__
void slarft_gemvcolwise_kernel_batched( int m, float **v_array, int ldv, float **tau_array,
float **T_array, int ldt, int step )
{
int batchid = blockIdx.z;
slarft_gemvcolwise_device(m, v_array[batchid]+step+step*ldv, tau_array[batchid]+step, v_array[batchid]+step, ldv, T_array[batchid]+step*ldt, ldt, step);
}
//===================================================================================================
extern "C"
void magmablas_slarft_gemvcolwise(
magma_int_t m, magma_int_t step,
float *v, magma_int_t ldv,
float *T, magma_int_t ldt,
float *tau,
magma_queue_t queue )
{
dim3 grid( step+1, 1, 1 );
dim3 threads( BLOCK_SIZE );
slarft_gemvcolwise_kernel
<<< grid, threads, 0, queue->cuda_stream() >>>
( m, v, ldv, tau, T, ldt, step);
}
//===================================================================================================
extern "C"
void magmablas_slarft_gemvcolwise_batched(
magma_int_t m, magma_int_t step,
float **v_array, magma_int_t ldv,
float **T_array, magma_int_t ldt,
float **tau_array, magma_int_t batchCount, magma_queue_t queue )
{
dim3 grid( step+1, 1, batchCount );
dim3 threads( BLOCK_SIZE );
slarft_gemvcolwise_kernel_batched
<<< grid, threads, 0, queue->cuda_stream() >>>
( m, v_array, ldv, tau_array, T_array, ldt, step);
}
//===================================================================================================
//===================================================================================================
// sgemv(y=alpha*A*x) interface: T/W=tau*v*x,
static __device__ void
slarft_gemvrowwise_device(
int m, int i,
float *tau,
float *v_ptr, int ldv,
float *x_ptr, int incx,
float *T_ptr, int ldt,
float *W, float* sdata)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
if (tx == 0 && ty == 0)
{
T_ptr[0] = *tau;
}
if (i <= 0) return;
float res = MAGMA_S_ZERO;
v_ptr += ldv * ty;
if (tx < sgemv_bs)
{
for (int s=tx; s < m; s += sgemv_bs)
{
res += MAGMA_S_CONJ (v_ptr[s]) * x_ptr[s*incx];
}
sdata[ty * sgemv_bs + tx] = res;
}
__syncthreads();
magma_sum_reduce<sgemv_bs>(tx, &(sdata[ty*sgemv_bs+0]));
#if defined (use_gemm_larft)
if (tx == 0)
{
W[ty] = -sdata[ty * sgemv_bs + 0];
}
#else
if (tx == 0)
{
W[ty] = -sdata[ty * sgemv_bs + 0] * (*tau);
}
#endif
}
//T(1:i-1,i) := - tau(i) * V(i:n,1:i-1)' * V(i:n,i)
//T(i,i) = tau(i)
//===================================================================================================
__global__ void
slarft_gemvrowwise_kernel(
int m, int i,
float *tau,
float *v, int ldv,
float *T, int ldt)
{
float *W = T +i*ldt;
float *sdata = (float*)shared_data;
slarft_gemvrowwise_device(m, i, tau+i, v+i, ldv, v+i+i*ldv, 1,
T+i+i*ldt, ldt, W, sdata);
}
//===================================================================================================
__global__ void
slarft_gemvrowwise_kernel_batched(
int m, int i,
float **tau_array,
float **v_array, int ldv,
float **T_array, int ldt)
{
int batchid = blockIdx.z;
float *W = T_array[batchid] +i*ldt;
float *sdata = (float*)shared_data;
slarft_gemvrowwise_device(m, i, tau_array[batchid]+i, v_array[batchid]+i, ldv, v_array[batchid]+i+i*ldv, 1,
T_array[batchid] +i+i*ldt, ldt, W, sdata);
}
//===================================================================================================
extern "C"
void magmablas_slarft_gemvrowwise(
magma_int_t m, magma_int_t i,
float *tau,
float *v, magma_int_t ldv,
float *T, magma_int_t ldt,
float *W,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(sgemv_bs, max(i,1), 1);
size_t shmem = sizeof(float)*sgemv_bs*(i+1);
slarft_gemvrowwise_kernel
<<< grid, threads, shmem, queue->cuda_stream() >>>
(m, i, tau, v, ldv, T, ldt);
}
//===================================================================================================
extern "C"
void magmablas_slarft_gemvrowwise_batched(
magma_int_t m, magma_int_t i,
float **tau_array,
float **v_array, magma_int_t ldv,
float **T_array, magma_int_t ldt,
magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(sgemv_bs, max(i,1), 1);
size_t shmem = sizeof(float)*sgemv_bs*(i+1);
/* sgemvrowwise uses a bigger shared memory, has more data reuse, and performs better
*/
slarft_gemvrowwise_kernel_batched
<<< grid, threads, shmem, queue->cuda_stream() >>>
(m, i, tau_array, v_array, ldv, T_array, ldt);
}
//===================================================================================================
//===================================================================================================
/*
loop_inside
*/
static __device__ void
slarft_gemv_loop_inside_device(
int n, int k,
float *tau,
float *v, int ldv,
float *T, int ldt)
{
int tx = threadIdx.x;
int ty = threadIdx.y;
int incx = 1;
float *sdata = (float*)shared_data;
float res;
// write the first element
if (tx == 0 && ty == 0)
{
T[0] = tau[0];
}
for (int i=1; i < k; i++)
{
int m = n-i;
float *v_ptr = v;
v_ptr += i;
float *x_ptr = v_ptr + i * ldv;
res = MAGMA_S_ZERO;
if (tx < sgemv_bs && ty < i)
{
v_ptr += ldv * ty;
for (int s=tx; s < m; s += sgemv_bs)
{
res += MAGMA_S_CONJ (v_ptr[s]) * x_ptr[s*incx];
}
sdata[ty * sgemv_bs + tx] = res;
}
__syncthreads();
magma_sum_reduce<sgemv_bs>(tx, &(sdata[ty*sgemv_bs+0]));
__syncthreads();
#if defined (use_gemm_larft)
if (tx < i && ty == 0)
{
T[i* ldt + tx] = sdata[tx * sgemv_bs + 0];
}
// not needed since it is overwritten in trmv
/*
if (tx == i && ty == 0)
{
T[i * ldt + i] = tau[i];
}
*/
#else
if (tx < i && ty == 0)
{
T[i* ldt + tx] = -sdata[tx * sgemv_bs + 0] * (tau[i]);
}
if (tx == i && ty == 0)
{
T[i * ldt + i] = tau[i];
}
#endif
v_ptr -= i;
} // end of loop k
}
//===================================================================================================
__global__ void
slarft_gemv_loop_inside_kernel(
int n, int k,
float *tau,
float *v, int ldv,
float *T, int ldt)
{
slarft_gemv_loop_inside_device(n, k, tau, v, ldv, T, ldt);
}
//===================================================================================================
__global__ void
slarft_gemv_loop_inside_kernel_batched(
int n, int k,
float **tau_array,
float **v_array, int ldv,
float **T_array, int ldt)
{
int batchid = blockIdx.z;
slarft_gemv_loop_inside_device(n, k, tau_array[batchid], v_array[batchid], ldv, T_array[batchid], ldt);
}
//===================================================================================================
extern "C"
void magmablas_slarft_gemv_loop_inside(
magma_int_t n, magma_int_t k,
float *tau,
float *v, magma_int_t ldv,
float *T, magma_int_t ldt,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(sgemv_bs, max(k,1), 1);
size_t shmem = sizeof(float) * (sgemv_bs*(k+1));
slarft_gemv_loop_inside_kernel
<<< grid, threads, shmem, queue->cuda_stream() >>>
(n, k, tau, v, ldv, T, ldt);
}
//===================================================================================================
extern "C"
void magmablas_slarft_gemv_loop_inside_batched(
magma_int_t n, magma_int_t k,
float **tau_array,
float **v_array, magma_int_t ldv,
float **T_array, magma_int_t ldt, magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(sgemv_bs, max(k,1), 1);
size_t shmem = sizeof(float) * (sgemv_bs*(k+1));
slarft_gemv_loop_inside_kernel_batched
<<< grid, threads, shmem, queue->cuda_stream() >>>
(n, k, tau_array, v_array, ldv, T_array, ldt);
}
//===================================================================================================
//===================================================================================================
static __device__ void
slarft_strmv_sm32x32_device(
int n, int k, float *tau,
float *Tin, int ldtin, float *Tout, int ldtout )
{
int tx = threadIdx.x;
float *sdata = (float*)shared_data;
float res;
// this routine applies a sequence of trmv to update the last k columns of the
// triangular T (columns n-k to n), where T is of size n by n and where the first n-k
// columns of T are assumed to have been updated previously.
// So the routine loads all of the n x n T into shared memory
// and applies the sequence of trmv.
// To update a certain column i, threads proceed row-wise:
// every thread reads one row and performs a gemv (dot product) to generate
// one element of the column of T, then moves to the next column.
// read T into shared
for (int s=0; s < n-k; s++)
{
sdata[tx + s*n] = Tin[tx + s * ldtin];
}
#if defined(use_gemm_larft)
for (int s=n-k; s < n; s++)
{
if (tx == s)
sdata[tx + s*n] = tau[s];
else
sdata[tx + s*n] = -tau[s] * Tin[tx + s * ldtin];
}
#else
for (int s=n-k; s < n; s++)
{
sdata[tx + s*n] = Tin[tx + s * ldtin];
}
#endif
// perform trmv
for (int i=n-k; i < n; i++)
{
__syncthreads();
res = MAGMA_S_ZERO;
if (tx < i)
{
for (int j=tx; j < i; j++)
{
res += sdata[tx + j * n] * sdata[j+ i * n];
}
}
__syncthreads();
if (tx < i)
{
sdata[tx + i * n] = res;
}
}
__syncthreads();
// write back the updated block of k columns of T
for (int s=n-k; s < n; s++)
{
Tout[tx + s * ldtout] = sdata[tx + s*n];
}
}
//===================================================================================================
__global__ void
slarft_strmv_sm32x32_kernel(
int n, int k, float *tau,
float *Tin, int ldtin, float *Tout, int ldtout )
{
slarft_strmv_sm32x32_device( n, k, tau, Tin, ldtin, Tout, ldtout);
}
//===================================================================================================
__global__ void
slarft_strmv_sm32x32_kernel_batched(
int n, int k, float **tau_array,
float **Tin_array, int ldtin, float **Tout_array, int ldtout )
{
int batchId = blockIdx.z;
slarft_strmv_sm32x32_device( n, k, tau_array[batchId], Tin_array[batchId], ldtin, Tout_array[batchId], ldtout);
}
//===================================================================================================
//===================================================================================================
extern "C"
void magmablas_slarft_strmv_sm32x32(
magma_int_t m, magma_int_t n,
float *tau,
float *Tin, magma_int_t ldtin,
float *Tout, magma_int_t ldtout,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(float)*(m*m);
slarft_strmv_sm32x32_kernel
<<< grid, threads, shmem, queue->cuda_stream() >>>
(m, n, tau, Tin, ldtin, Tout, ldtout);
}
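/*
Sizing note derived from the launch configuration above (not taken from MAGMA documentation):
the kernel runs a single block of m threads and keeps the whole m x m tile of T in shared
memory, so shmem = m*m*sizeof(float). The _sm32x32 name reflects the intended m <= 32,
which caps the shared memory at 32*32*4 = 4096 bytes.
*/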
//===================================================================================================
extern "C"
void magmablas_slarft_strmv_sm32x32_batched(
magma_int_t m, magma_int_t n,
float **tau_array,
float **Tin_array, magma_int_t ldtin,
float **Tout_array, magma_int_t ldtout,
magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(float)*(m*m);
slarft_strmv_sm32x32_kernel_batched
<<< grid, threads, shmem, queue->cuda_stream() >>>
(m, n, tau_array, Tin_array, ldtin, Tout_array, ldtout);
}
//===================================================================================================
//===================================================================================================
//===================================================================================================
static __device__ void
slarft_recstrmv_sm32x32_device(
int m, int n, float *tau,
float *Trec, int ldtrec, float *Ttri, int ldttri)
{
int tx = threadIdx.x;
float *sdata = (float*)shared_data;
float res;
// To update a certain column i, threads proceed row-wise:
// every thread reads one row and performs a gemv (dot product) to generate
// one element of the column of T, then moves to the next column.
// read T into shared
for (int s=0; s < n; s++)
{
sdata[tx + s*n] = Trec[tx + s * ldtrec];
}
__syncthreads();
// perform sequence of n-1 gemv
for (int i=0; i < n; i++)
{
res = MAGMA_S_ZERO;
for (int j=0; j < i; j++)
{
res += sdata[tx + j * n] * Ttri[j+ i * ldttri];
}
__syncthreads(); // to remove
sdata[tx + i * n] = -tau[i] * (sdata[tx + i * n] + res);
__syncthreads();
}
// write back the updated block of k columns of T, multiplied by -tau
for (int s=0; s < n; s++)
{
Trec[tx + s * ldtrec] = sdata[tx + s*n];
}
}
//===================================================================================================
__global__ void
slarft_recstrmv_sm32x32_kernel(
int m, int n, float *tau,
float *Trec, int ldtrec, float *Ttri, int ldttri)
{
slarft_recstrmv_sm32x32_device(m, n, tau, Trec, ldtrec, Ttri, ldttri);
}
//===================================================================================================
__global__ void
slarft_recstrmv_sm32x32_kernel_batched(
int m, int n, float **tau_array,
float **Trec_array, int ldtrec, float **Ttri_array, int ldttri)
{
int batchId = blockIdx.z;
slarft_recstrmv_sm32x32_device(m, n, tau_array[batchId], Trec_array[batchId], ldtrec, Ttri_array[batchId], ldttri);
}
//===================================================================================================
extern "C"
void magmablas_slarft_recstrmv_sm32x32(
magma_int_t m, magma_int_t n,
float *tau,
float *Trec, magma_int_t ldtrec,
float *Ttri, magma_int_t ldttri,
magma_queue_t queue )
{
dim3 grid(1);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(float)*(m*n);
slarft_recstrmv_sm32x32_kernel
<<< grid, threads, shmem, queue->cuda_stream() >>>
(m, n, tau, Trec, ldtrec, Ttri, ldttri);
}
//===================================================================================================
extern "C"
void magmablas_slarft_recstrmv_sm32x32_batched(
magma_int_t m, magma_int_t n,
float **tau_array,
float **Trec_array, magma_int_t ldtrec,
float **Ttri_array, magma_int_t ldttri,
magma_int_t batchCount, magma_queue_t queue)
{
dim3 grid(1, 1, batchCount);
dim3 threads(max(m,1), 1, 1);
size_t shmem = sizeof(float)*(m*n);
slarft_recstrmv_sm32x32_kernel_batched
<<< grid, threads, shmem, queue->cuda_stream() >>>
(m, n, tau_array, Trec_array, ldtrec, Ttri_array, ldttri);
}
//===================================================================================================
|
0a2502b4e2d9b8adad415485050aaa89bb64b986.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
thrust::host_vector<int> host_in = thrust::host_vector<int>(idata, idata + n);
thrust::host_vector<int> host_out = thrust::host_vector<int>(odata, odata + n);
thrust::device_vector<int> dev_in = thrust::device_vector<int>(host_in);
thrust::device_vector<int> dev_out = thrust::device_vector<int>(host_out);
// TODO
timer().startGpuTimer();
thrust::exclusive_scan(dev_in.begin(), dev_in.end(), dev_out.begin());
timer().endGpuTimer();
thrust::copy(dev_out.begin(), dev_out.end(), odata);
}
}
}
|
0a2502b4e2d9b8adad415485050aaa89bb64b986.cu
|
#include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
thrust::host_vector<int> host_in = thrust::host_vector<int>(idata, idata + n);
thrust::host_vector<int> host_out = thrust::host_vector<int>(odata, odata + n);
thrust::device_vector<int> dev_in = thrust::device_vector<int>(host_in);
thrust::device_vector<int> dev_out = thrust::device_vector<int>(host_out);
// TODO
timer().startGpuTimer();
thrust::exclusive_scan(dev_in.begin(), dev_in.end(), dev_out.begin());
timer().endGpuTimer();
thrust::copy(dev_out.begin(), dev_out.end(), odata);
}
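/*
A worked example of the exclusive scan performed above (the values follow from the
definition of exclusive_scan rather than from a run of this project):
idata = {3, 1, 7, 0, 4}  ->  odata = {0, 3, 4, 11, 11}
Each output element is the sum of all input elements strictly before it.
*/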
}
}
|
bb3663db5b6a3c7c4a6780c8e953bafb49cc7a09.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
// customDllFunctions.cu
//////////////////////////
// Template to write .dlls
//////////////////////////
/* Include the following directories for the program to run appropriately:
///////////////////////
in the VC++ directories:
$(VC_IncludePath);
$(WindowsSDK_IncludePath);
C:\ProgramData\NVIDIA Corporation\CUDA Samples\v9.0\common\inc;
$(CUDA_INC_PATH)
C:\Program Files\National Instruments\LabVIEW 2015\cintools
////////////////////////
CUDA/C/C++ directories:
./
../../common/inc
$(CudaToolkitDir)/include
////////////////////////////////
Linker/General include libraries:
cudart.lib
//changed the target machine platform from 32 to 64 bit
*/
////////////////////////////////////////////////////////////////////////////////
// Complex operations,
////////////////////////////////////////////////////////////////////////////////
__device__ static __inline__ float cmagf(float x, float y)
{
float a, b, v, w, t;
a = fabsf(x);
b = fabsf(y);
if (a > b) {
v = a;
w = b;
}
else {
v = b;
w = a;
}
t = w / v;
t = 1.0f + t * t;
t = v * sqrtf(t);
if ((v == 0.0f) || (v > 3.402823466e38f) || (w > 3.402823466e38f)) {
t = v + w;
}
return t;
}
__global__ void ConvertCmplx2Polar(float* inRe, float* inIm, float* mag, float* phase, int size) {
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = threadID; i < size; i += numThreads)
{
phase[i] = atan2f(inIm[i], inRe[i]);
mag[i] = cmagf(inIm[i], inRe[i]);
}
}
|
bb3663db5b6a3c7c4a6780c8e953bafb49cc7a09.cu
|
#include "includes.h"
// customDllFunctions.cu
//////////////////////////
// Template to write .dlls
//////////////////////////
/* Include the following directories for the program to run appropriately:
///////////////////////
in the VC++ directories:
$(VC_IncludePath);
$(WindowsSDK_IncludePath);
C:\ProgramData\NVIDIA Corporation\CUDA Samples\v9.0\common\inc;
$(CUDA_INC_PATH)
C:\Program Files\National Instruments\LabVIEW 2015\cintools
////////////////////////
CUDA/C/C++ directories:
./
../../common/inc
$(CudaToolkitDir)/include
////////////////////////////////
Linker/General include libraries:
cudart.lib
//changed the target machine platform from 32 to 64 bit
*/
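// A minimal command-line build sketch (an assumption for illustration; the original project
// is built from Visual Studio with the include/library settings listed above):
// nvcc --shared -o customDllFunctions.dll customDllFunctions.cu ^
//     -I"C:\ProgramData\NVIDIA Corporation\CUDA Samples\v9.0\common\inc" ^
//     -I"C:\Program Files\National Instruments\LabVIEW 2015\cintools" ^
//     -lcudart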
////////////////////////////////////////////////////////////////////////////////
// Complex operations,
////////////////////////////////////////////////////////////////////////////////
__device__ static __inline__ float cmagf(float x, float y)
{
float a, b, v, w, t;
a = fabsf(x);
b = fabsf(y);
if (a > b) {
v = a;
w = b;
}
else {
v = b;
w = a;
}
t = w / v;
t = 1.0f + t * t;
t = v * sqrtf(t);
if ((v == 0.0f) || (v > 3.402823466e38f) || (w > 3.402823466e38f)) {
t = v + w;
}
return t;
}
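// A worked example of cmagf (hand arithmetic, not program output): cmagf(3.0f, 4.0f) gives
// a = 3, b = 4, hence v = 4, w = 3, t = 0.75, then t = 1 + 0.5625 = 1.5625 and
// t = 4 * sqrtf(1.5625) = 4 * 1.25 = 5, the expected magnitude of the complex value (3, 4).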
__global__ void ConvertCmplx2Polar(float* inRe, float* inIm, float* mag, float* phase, int size) {
const int numThreads = blockDim.x * gridDim.x;
const int threadID = blockIdx.x * blockDim.x + threadIdx.x;
for (int i = threadID; i < size; i += numThreads)
{
phase[i] = atan2f(inIm[i], inRe[i]);
mag[i] = cmagf(inIm[i], inRe[i]);
}
}
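// A hedged host-side usage sketch (a hypothetical helper, not one of the original exports;
// buffer names and the 256-thread launch shape are assumptions for illustration):
void ConvertCmplx2Polar_example(const float* hostRe, const float* hostIm,
float* hostMag, float* hostPhase, int size)
{
float *dRe, *dIm, *dMag, *dPhase;
size_t bytes = size * sizeof(float);
cudaMalloc((void**)&dRe, bytes);
cudaMalloc((void**)&dIm, bytes);
cudaMalloc((void**)&dMag, bytes);
cudaMalloc((void**)&dPhase, bytes);
cudaMemcpy(dRe, hostRe, bytes, cudaMemcpyHostToDevice);
cudaMemcpy(dIm, hostIm, bytes, cudaMemcpyHostToDevice);
int threads = 256;
int blocks = (size + threads - 1) / threads; // the grid-stride loop tolerates any block count
ConvertCmplx2Polar<<<blocks, threads>>>(dRe, dIm, dMag, dPhase, size);
cudaDeviceSynchronize();
cudaMemcpy(hostMag, dMag, bytes, cudaMemcpyDeviceToHost);
cudaMemcpy(hostPhase, dPhase, bytes, cudaMemcpyDeviceToHost);
cudaFree(dRe); cudaFree(dIm); cudaFree(dMag); cudaFree(dPhase);
}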
|
4441982a2df91be9abcefe2559a48461919d4385.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2016 University of Cordoba and University of Illinois
* All rights reserved.
*
* Developed by: IMPACT Research Group
* University of Cordoba and University of Illinois
* http://impact.crhc.illinois.edu/
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* with the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* > Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
* > Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimers in the
* documentation and/or other materials provided with the distribution.
* > Neither the names of IMPACT Research Group, University of Cordoba,
* University of Illinois nor the names of its contributors may be used
* to endorse or promote products derived from this Software without
* specific prior written permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH
* THE SOFTWARE.
*
*/
#define _CUDA_COMPILER_
#include "support/common.h"
// CUDA kernel ------------------------------------------------------------------------------------------
__global__ void TaskQueue_gpu(task_t *queues, int *n_task_in_queue, int *n_written_tasks, int *n_consumed_tasks,
int *data, int gpuQueueSize, int iterations) {
extern __shared__ int l_mem[];
int* last_queue = l_mem;
task_t* t = (task_t*)&last_queue[1];
const int tid = threadIdx.x;
const int tile_size = blockDim.x;
while(true) {
// Fetch task
if(tid == 0) {
int idx_queue = *last_queue;
int j, jj;
bool not_done = true;
do {
if(atomicAdd_system(n_consumed_tasks + idx_queue, 0) == atomicAdd_system(n_written_tasks + idx_queue, 0)) {
idx_queue = (idx_queue + 1) % NUM_TASK_QUEUES;
} else {
if(atomicAdd_system(n_task_in_queue + idx_queue, 0) > 0) {
j = atomicAdd_system(n_task_in_queue + idx_queue, -1) - 1;
if(j >= 0) {
t->id = (queues + idx_queue * gpuQueueSize + j)->id;
t->op = (queues + idx_queue * gpuQueueSize + j)->op;
jj = atomicAdd_system(n_consumed_tasks + idx_queue, 1) + 1;
not_done = false;
if(jj == atomicAdd_system(n_written_tasks + idx_queue, 0)) {
idx_queue = (idx_queue + 1) % NUM_TASK_QUEUES;
}
*last_queue = idx_queue;
} else {
idx_queue = (idx_queue + 1) % NUM_TASK_QUEUES;
}
} else {
idx_queue = (idx_queue + 1) % NUM_TASK_QUEUES;
}
}
} while(not_done);
}
__syncthreads();
// Compute task
if(t->op == SIGNAL_STOP_KERNEL) {
break;
} else {
if(t->op == SIGNAL_WORK_KERNEL) {
for(int i = 0; i < iterations; i++) {
data[t->id * tile_size + tid] += tile_size;
}
data[t->id * tile_size + tid] += t->id;
}
if(t->op == SIGNAL_NOTWORK_KERNEL) {
for(int i = 0; i < 1; i++) {
data[t->id * tile_size + tid] += tile_size;
}
data[t->id * tile_size + tid] += t->id;
}
}
}
}
hipError_t call_TaskQueue_gpu(int blocks, int threads, task_t *queues, int *n_task_in_queue,
int *n_written_tasks, int *n_consumed_tasks, int *data, int gpuQueueSize, int iterations,
int l_mem_size){
dim3 dimGrid(blocks);
dim3 dimBlock(threads);
hipLaunchKernelGGL(( TaskQueue_gpu), dim3(dimGrid), dim3(dimBlock), l_mem_size, 0, queues, n_task_in_queue, n_written_tasks,
n_consumed_tasks, data, gpuQueueSize, iterations);
hipError_t err = hipGetLastError();
return err;
}
|
4441982a2df91be9abcefe2559a48461919d4385.cu
|
/*
* Copyright (c) 2016 University of Cordoba and University of Illinois
* All rights reserved.
*
* Developed by: IMPACT Research Group
* University of Cordoba and University of Illinois
* http://impact.crhc.illinois.edu/
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* with the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* > Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimers.
* > Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimers in the
* documentation and/or other materials provided with the distribution.
* > Neither the names of IMPACT Research Group, University of Cordoba,
* University of Illinois nor the names of its contributors may be used
* to endorse or promote products derived from this Software without
* specific prior written permission.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS WITH
* THE SOFTWARE.
*
*/
#define _CUDA_COMPILER_
#include "support/common.h"
// CUDA kernel ------------------------------------------------------------------------------------------
__global__ void TaskQueue_gpu(task_t *queues, int *n_task_in_queue, int *n_written_tasks, int *n_consumed_tasks,
int *data, int gpuQueueSize, int iterations) {
extern __shared__ int l_mem[];
int* last_queue = l_mem;
task_t* t = (task_t*)&last_queue[1];
const int tid = threadIdx.x;
const int tile_size = blockDim.x;
while(true) {
// Fetch task
if(tid == 0) {
int idx_queue = *last_queue;
int j, jj;
bool not_done = true;
do {
if(atomicAdd_system(n_consumed_tasks + idx_queue, 0) == atomicAdd_system(n_written_tasks + idx_queue, 0)) {
idx_queue = (idx_queue + 1) % NUM_TASK_QUEUES;
} else {
if(atomicAdd_system(n_task_in_queue + idx_queue, 0) > 0) {
j = atomicAdd_system(n_task_in_queue + idx_queue, -1) - 1;
if(j >= 0) {
t->id = (queues + idx_queue * gpuQueueSize + j)->id;
t->op = (queues + idx_queue * gpuQueueSize + j)->op;
jj = atomicAdd_system(n_consumed_tasks + idx_queue, 1) + 1;
not_done = false;
if(jj == atomicAdd_system(n_written_tasks + idx_queue, 0)) {
idx_queue = (idx_queue + 1) % NUM_TASK_QUEUES;
}
*last_queue = idx_queue;
} else {
idx_queue = (idx_queue + 1) % NUM_TASK_QUEUES;
}
} else {
idx_queue = (idx_queue + 1) % NUM_TASK_QUEUES;
}
}
} while(not_done);
}
__syncthreads();
// Compute task
if(t->op == SIGNAL_STOP_KERNEL) {
break;
} else {
if(t->op == SIGNAL_WORK_KERNEL) {
for(int i = 0; i < iterations; i++) {
data[t->id * tile_size + tid] += tile_size;
}
data[t->id * tile_size + tid] += t->id;
}
if(t->op == SIGNAL_NOTWORK_KERNEL) {
for(int i = 0; i < 1; i++) {
data[t->id * tile_size + tid] += tile_size;
}
data[t->id * tile_size + tid] += t->id;
}
}
}
}
cudaError_t call_TaskQueue_gpu(int blocks, int threads, task_t *queues, int *n_task_in_queue,
int *n_written_tasks, int *n_consumed_tasks, int *data, int gpuQueueSize, int iterations,
int l_mem_size){
dim3 dimGrid(blocks);
dim3 dimBlock(threads);
TaskQueue_gpu<<<dimGrid, dimBlock, l_mem_size>>>(queues, n_task_in_queue, n_written_tasks,
n_consumed_tasks, data, gpuQueueSize, iterations);
cudaError_t err = cudaGetLastError();
return err;
}
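/*
A hedged sizing note for callers (an assumption read off the l_mem layout in TaskQueue_gpu
above, not a value taken from the project's host code): the dynamic shared memory block must
hold one int (last_queue) followed by one task_t, so a natural lower bound is
int l_mem_size = sizeof(int) + sizeof(task_t); // plus any alignment padding for task_t
*/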
|
07eefc19364f8de49c33357423f9edeff78bfb40.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void cu_dsigmoid_a(double* src, double* dst, int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
float tmp = __fsub_rd(1.0, src[tid]);
dst[tid] = __fmul_rd(tmp, src[tid]);
tid += stride;
}
}
|
07eefc19364f8de49c33357423f9edeff78bfb40.cu
|
#include "includes.h"
__global__ void cu_dsigmoid_a(double* src, double* dst, int n){
int tid = threadIdx.x + blockIdx.x * blockDim.x;
int stride = blockDim.x * gridDim.x;
while(tid < n){
float tmp = __fsub_rd(1.0, src[tid]);
dst[tid] = __fmul_rd(tmp, src[tid]);
tid += stride;
}
}
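// Background for the formula above: for the sigmoid s(x) = 1 / (1 + exp(-x)) the derivative is
// s'(x) = s(x) * (1 - s(x)), so the kernel assumes src already holds sigmoid outputs;
// for example src[tid] = 0.25 yields dst[tid] = 0.75 * 0.25 = 0.1875.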
|
07ba9ccd048f56eb67e2c96fd4c4f751a86283f5.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef STRING_BITONIC_PROC_H
#define STRING_BITONIC_PROC_H
#include "string_bitonicProc_kernel.cu"
//#define NUM_BLOCK_PER_CHUNK_BITONIC_SORT 512//b256
/*
@totalLenInBytes is not used.
*/
void string_bitonicSortMultipleBlocks(void* d_rawData, int totalLenInBytes, cmp_type_t * d_values, int* d_bound, int numBlock, cmp_type_t * d_output)
{
int numThreadsPerBlock_x=SHARED_MEM_INT2;
int numThreadsPerBlock_y=1;
int numBlock_x=NUM_BLOCK_PER_CHUNK_BITONIC_SORT;
int numBlock_y=1;
int numChunk=numBlock/numBlock_x;
if(numBlock%numBlock_x!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*numBlock_x;
end=start+numBlock_x;
if(end>numBlock)
end=numBlock;
//printf("bitonicSortMultipleBlocks_kernel: %d, range, %d, %d\n", i, start, end);
hipLaunchKernelGGL(( string_bitonicSortMultipleBlocks_kernel), dim3(grid),dim3(thread), 0, 0, d_rawData, totalLenInBytes, d_values, d_bound, start, end-start, d_output);
hipDeviceSynchronize();
}
// hipDeviceSynchronize();
}
void initialize(cmp_type_t *d_data, int rLen, cmp_type_t value)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
hipLaunchKernelGGL(( initialize_kernel), dim3(grid),dim3(thread), 0, 0, d_data, start, rLen, value);
}
hipDeviceSynchronize();
}
void int4toint2(int4 *d_data, int rLen, Record* d_output)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
hipLaunchKernelGGL(( int4toint2_kernel), dim3(grid),dim3(thread), 0, 0, d_data, start, rLen, d_output);
}
hipDeviceSynchronize();
}
void getIntYArray(Record *d_data, int rLen, int* d_output)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
hipLaunchKernelGGL(( getIntYArray_kernel), dim3(grid),dim3(thread), 0, 0, d_data, start, rLen, d_output);
}
hipDeviceSynchronize();
}
void getXYArray(cmp_type_t *d_data, int rLen, Record* d_output)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
hipLaunchKernelGGL(( getXYArray_kernel), dim3(grid),dim3(thread), 0, 0, d_data, start, rLen, d_output);
}
hipDeviceSynchronize();
}
void getZWArray(cmp_type_t *d_data, int rLen, Record* d_output)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
hipLaunchKernelGGL(( getZWArray_kernel), dim3(grid),dim3(thread), 0, 0, d_data, start, rLen, d_output);
}
hipDeviceSynchronize();
}
void setXYArray(cmp_type_t *d_data, int rLen, Record* d_value)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
hipLaunchKernelGGL(( setXYArray_kernel), dim3(grid),dim3(thread), 0, 0, d_data, start, rLen, d_value);
}
hipDeviceSynchronize();
}
void setZWArray(cmp_type_t *d_data, int rLen, Record* d_value)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
hipLaunchKernelGGL(( setZWArray_kernel), dim3(grid),dim3(thread), 0, 0, d_data, start, rLen, d_value);
}
hipDeviceSynchronize();
}
#endif
|
07ba9ccd048f56eb67e2c96fd4c4f751a86283f5.cu
|
#ifndef STRING_BITONIC_PROC_H
#define STRING_BITONIC_PROC_H
#include "string_bitonicProc_kernel.cu"
//#define NUM_BLOCK_PER_CHUNK_BITONIC_SORT 512//b256
/*
@totalLenInBytes is not used.
*/
void string_bitonicSortMultipleBlocks(void* d_rawData, int totalLenInBytes, cmp_type_t * d_values, int* d_bound, int numBlock, cmp_type_t * d_output)
{
int numThreadsPerBlock_x=SHARED_MEM_INT2;
int numThreadsPerBlock_y=1;
int numBlock_x=NUM_BLOCK_PER_CHUNK_BITONIC_SORT;
int numBlock_y=1;
int numChunk=numBlock/numBlock_x;
if(numBlock%numBlock_x!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*numBlock_x;
end=start+numBlock_x;
if(end>numBlock)
end=numBlock;
//printf("bitonicSortMultipleBlocks_kernel: %d, range, %d, %d\n", i, start, end);
string_bitonicSortMultipleBlocks_kernel<<<grid,thread>>>(d_rawData, totalLenInBytes, d_values, d_bound, start, end-start, d_output);
cudaThreadSynchronize();
}
// cudaThreadSynchronize();
}
void initialize(cmp_type_t *d_data, int rLen, cmp_type_t value)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
initialize_kernel<<<grid,thread>>>(d_data, start, rLen, value);
}
cudaThreadSynchronize();
}
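/*
Worked sizing example for the chunked launches used throughout this file (arithmetic only):
chunkSize = 512 * 512 = 262144 elements per pass, so rLen = 1000000 gives
rLen / chunkSize = 3 with a remainder, hence numChunk = 4 passes.
*/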
void int4toint2(int4 *d_data, int rLen, Record* d_output)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
int4toint2_kernel<<<grid,thread>>>(d_data, start, rLen, d_output);
}
cudaThreadSynchronize();
}
void getIntYArray(Record *d_data, int rLen, int* d_output)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
getIntYArray_kernel<<<grid,thread>>>(d_data, start, rLen, d_output);
}
cudaThreadSynchronize();
}
void getXYArray(cmp_type_t *d_data, int rLen, Record* d_output)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
getXYArray_kernel<<<grid,thread>>>(d_data, start, rLen, d_output);
}
cudaThreadSynchronize();
}
void getZWArray(cmp_type_t *d_data, int rLen, Record* d_output)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
getZWArray_kernel<<<grid,thread>>>(d_data, start, rLen, d_output);
}
cudaThreadSynchronize();
}
void setXYArray(cmp_type_t *d_data, int rLen, Record* d_value)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
setXYArray_kernel<<<grid,thread>>>(d_data, start, rLen, d_value);
}
cudaThreadSynchronize();
}
void setZWArray(cmp_type_t *d_data, int rLen, Record* d_value)
{
int numThreadsPerBlock_x=512;
int numThreadsPerBlock_y=1;
int numBlock_x=512;
int numBlock_y=1;
int chunkSize=numBlock_x*numThreadsPerBlock_x;
int numChunk=rLen/chunkSize;
if(rLen%chunkSize!=0)
numChunk++;
dim3 thread( numThreadsPerBlock_x, numThreadsPerBlock_y, 1);
dim3 grid( numBlock_x, numBlock_y , 1);
int i=0;
int start=0;
int end=0;
for(i=0;i<numChunk;i++)
{
start=i*chunkSize;
end=start+chunkSize;
if(end>rLen)
end=rLen;
setZWArray_kernel<<<grid,thread>>>(d_data, start, rLen, d_value);
}
cudaThreadSynchronize();
}
#endif
|
369dc8023256dfab10fdfce71947063bbca5a015.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
//we are obviously using CUDA :-D
#define CUDA
//#define DebugValues // if debug values print values!
#include <hip/hip_runtime.h>
#include "md5_kernel.h"
#include "md5.h"
#include <stdio.h>
#include "cuPrintf.hip"
//__constant__
//__device__ __constant__ unsigned int charset_c[256];// = {0xb182b498, 0xf4d2ac41, 0x1f636569, 0xaf4caf00};
//__device__ __constant__ unsigned int target_hash[4];// = {0xb182b498, 0xf4d2ac41, 0x1f636569, 0xaf4caf00};
__shared__ unsigned int charset_c[256];
__shared__ unsigned int target_hash[4];
void CCU2()
{
hipError_t result = hipGetLastError();
if(result != hipSuccess)
{
char msg[512];msg[0]=0;
strcpy_s(msg, 512, "CUDA kernel error: ");
strcat_s(msg, 512, hipGetErrorString(result));
printf("%s", msg);
return;
}
}
extern "C" void cuda_get_mem(gpu_data_xyz* data)
{
//get memory for data transfers
// Allocate only once
//hipMalloc( (void**)&charset_c, 256*sizeof(unsigned int));
//hipMalloc( (void**)&target_hash, 4*sizeof(unsigned int));
//CCU2();
// hipMalloc ((void **) &target_hash , sizeof(int)*4);
//CCU2();
hipMalloc ((void **) &data->data_d , sizeof(int)*4*data->thread_n*data->grid_n);
CCU2();
hipHostMalloc((void **) &data->data_h, sizeof(int)*4*data->thread_n*data->grid_n);
CCU2();
hipMalloc ((void **) &data->result_d , sizeof(int)*data->thread_n*data->grid_n);
CCU2();
hipHostMalloc((void **) &data->result, sizeof(int)*data->thread_n*data->grid_n);
CCU2();
}
__global__ void initShared(int* hash_i, unsigned char *charset, int charset_len)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
// copy the character set and the target hash into the block-scoped shared arrays
for(int i=0;i<charset_len;i++)
charset_c[threadIdx.x+i] = charset[tid+i];
for(int i=0;i<4;i++)
target_hash[threadIdx.x+i] = hash_i[tid+i];
__syncthreads();
// The character set and target hash are now available in the block-scoped shared arrays
}
extern "C" void init_md5_cuda(gpu_data_xyz* data, int* hash_i, unsigned char *charset, int charset_len)
{
hipLaunchKernelGGL(( initShared), dim3(data->grid_n), dim3(1), 0, 0, hash_i, charset, charset_len);
//hipMemcpy( cuda_x, &x, sizeof( float ), hipMemcpyHostToDevice );
//int charset_int[256];
//memset(charset_int, 0, sizeof(charset_int));
//for(int i=0;i<charset_len;i++)
// charset_int[i] = charset[i];
/*
for(int i=0;i<4;i++)
charset_int[100+i] = hash_i[i];*/
memset(data->result, 0, sizeof(int)*data->thread_n*data->grid_n);
hipMemcpy(data->result_d, data->result, sizeof(int)*data->thread_n*data->grid_n, hipMemcpyHostToDevice);
CCU2();
//hipMemcpy( charset_c, &charset_int, sizeof(unsigned int)*charset_len, hipMemcpyHostToDevice );
//hipMemcpyToSymbol(charset_c, &charset_int, sizeof(unsigned int)*charset_len);
//hipMemcpyToSymbol("charset_c", charset_int, sizeof(int)*charset_len);//
//CCU2();
//hipMemcpyToSymbol(target_hash, &hash_i, sizeof(int)*4);
//hipMemcpy(target_hash, hash_i, sizeof(unsigned int)*4, hipMemcpyHostToDevice);
//CCU2();
// hipMemcpyToSymbol(md5_const, md5_const_host, sizeof(md5_const));
}
extern "C" void cuda_free_mem(gpu_data_xyz* data)
{
//hipFree(target_hash);
hipFree(data->data_d);
hipFree(data->result_d);
hipHostFree(data->data_h);
hipHostFree(data->result);
}
__global__ void md5_gpu_bruteforce_thread(unsigned int *data_d, unsigned int *result_d, unsigned int pwd_len, int charset_len)
{
const int ix = blockDim.x * blockIdx.x + threadIdx.x;
//unsigned int data[4];
unsigned int data0, data1, data2, data3;
int a,b,c,d;
const int len = pwd_len*8;//code_len[0];//
int ta,tb,tc,td;
data0=data_d[ix*4+0];
data1=data_d[ix*4+1];
data2=data_d[ix*4+2];
data3=data_d[ix*4+3];
//for(int i=0;i<4;i++)data[i]=data_d[ix*4+i];//
// if(s3_limit==26)data[0]=(data[0]&0xff00ffff)+(('a'+s3)<<16);//only if 3 symbols to brute force
int res = KEY_NOT_FOUND;
ta = target_hash[0];
tb = target_hash[1];
tc = target_hash[2];
td = target_hash[3];
// Unroll known steps:
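// The unrollII calls below run MD5 steps 63 down to 50 in reverse on the target
// digest, so the candidate loop only has to compute steps 1..49: a mismatch on d
// right after step 46 rejects a candidate early (d is not modified again before
// step 50), and the final check compares (a, b, c) against the partially-reversed
// target state (ta, tb, tc) instead of finishing all 64 rounds.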
unrollII (tc, td, ta, tb, data2, S43, 0x2ad7d2bb); /* 63 */
unrollII (td, ta, tb, tc, 0, S42, 0xbd3af235); /* 62 */
unrollII (ta, tb, tc, td, 0, S41, 0xf7537e82); /* 61 */
unrollII (tb, tc, td, ta, 0, S44, 0x4e0811a1); /* 60 */
unrollII (tc, td, ta, tb, 0, S43, 0xa3014314); /* 59 */
unrollII (td, ta, tb, tc, 0, S42, 0xfe2ce6e0); /* 58 */
unrollII (ta, tb, tc, td, 0, S41, 0x6fa87e4f); /* 57 */
unrollII (tb, tc, td, ta, data1, S44, 0x85845dd1); /* 56 */
unrollII (tc, td, ta, tb, 0, S43, 0xffeff47d); /* 55 */
unrollII (td, ta, tb, tc, data3, S42, 0x8f0ccc92); /* 54 */
unrollII (ta, tb, tc, td, 0, S41, 0x655b59c3); /* 53 */
unrollII (tb, tc, td, ta, 0, S44, 0xfc93a039); /* 52 */
unrollII (tc, td, ta, tb, len, S43, 0xab9423a7); /* 51 */
unrollII (td, ta, tb, tc, 0, S42, 0x432aff97); /* 50 */
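// The two innermost candidate characters are injected into the low two bytes of the
// first message word: s2 sets byte 1 and s1 sets byte 0 of data0; the remaining
// message words (and the upper bytes of data0) arrive pre-packed from the host in data_d.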
for(int s2=0;s2<charset_len;s2++)
{
data0=(data0&0xffff00ff)+((charset_c[s2])<<8);
for(int s1=0;s1<charset_len;s1++)
{
data0=(data0&0xffffff00)+(charset_c[s1]);
a=0x67452301;b=0xefcdab89;c=0x98badcfe;d=0x10325476;
/* Round 1 */
FF2_first (a, b, c, d, data0, S11, 0xd76aa478,0,0); /* 1 */
FF2 (d, a, b, c, data1, S12, 0xe8c7b756,1,1); /* 2 */
FF2 (c, d, a, b, data2, S13, 0x242070db,2,2); /* 3 */
FF2 (b, c, d, a, data3, S14, 0xc1bdceee,3,3); /* 4 */
FF2_NoData (a, b, c, d, 0, S11, 0xf57c0faf,4,15); /* 5 */ // message words from the 5th onward are zero
FF2_NoData (d, a, b, c, 0, S12, 0x4787c62a,5,15); /* 6 */
FF2_NoData (c, d, a, b, 0, S13, 0xa8304613,6,15); /* 7 */
FF2_NoData (b, c, d, a, 0, S14, 0xfd469501,7,15); /* 8 */
FF2_NoData (a, b, c, d, 0, S11, 0x698098d8,8,15); /* 9 */
FF2_NoData (d, a, b, c, 0, S12, 0x8b44f7af,9,15); /* 10 */
FF2_NoData (c, d, a, b, 0, S13, 0xffff5bb1,10,15); /* 11 */
FF2_NoData (b, c, d, a, 0, S14, 0x895cd7be,11,15); /* 12 */
FF2_NoData (a, b, c, d, 0, S11, 0x6b901122,12,15); /* 13 */
FF2_NoData (d, a, b, c, 0, S12, 0xfd987193,13,15); /* 14 */
FF2 (c, d, a, b, len, S13, 0xa679438e,14,14); /* 15 */
FF2_NoData (b, c, d, a, 0, S14, 0x49b40821,15,15); /* 16 */
/* Round 2 */
GG2 (a, b, c, d, data1, S21, 0xf61e2562,16,1); /* 17 */
GG2_NoData (d, a, b, c, 0, S22, 0xc040b340,17,15); /* 18 */
GG2_NoData (c, d, a, b, 0, S23, 0x265e5a51,18,15); /* 19 */
GG2 (b, c, d, a, data0, S24, 0xe9b6c7aa,19,0); /* 20 */
GG2_NoData (a, b, c, d, 0, S21, 0xd62f105d,20,15); /* 21 */
GG2_NoData (d, a, b, c, 0, S22, 0x2441453,21,15); /* 22 */
GG2_NoData (c, d, a, b, 0, S23, 0xd8a1e681,22,15); /* 23 */
GG2_NoData (b, c, d, a, 0, S24, 0xe7d3fbc8,23,15); /* 24 */
GG2_NoData (a, b, c, d, 0, S21, 0x21e1cde6,24,15); /* 25 */
GG2 (d, a, b, c, len, S22, 0xc33707d6,25,14); /* 26 */
GG2 (c, d, a, b, data3, S23, 0xf4d50d87,26,3); /* 27 */
GG2_NoData (b, c, d, a, 0, S24, 0x455a14ed,27,15); /* 28 */
GG2_NoData (a, b, c, d, 0, S21, 0xa9e3e905,28,15); /* 29 */
GG2 (d, a, b, c, data2, S22, 0xfcefa3f8,29,2); /* 30 */
GG2_NoData (c, d, a, b, 0, S23, 0x676f02d9,30,15); /* 31 */
GG2_NoData (b, c, d, a, 0, S24, 0x8d2a4c8a,31,15); /* 32 */
/* Round 3 */
HH2_NoData (a, b, c, d, 0, S31, 0xfffa3942,32,15); /* 33 */
HH2_NoData (d, a, b, c, 0, S32, 0x8771f681,33,15); /* 34 */
HH2_NoData (c, d, a, b, 0, S33, 0x6d9d6122,34,15); /* 35 */
HH2 (b, c, d, a, len, S34, 0xfde5380c,35,14); /* 36 */
HH2 (a, b, c, d, data1, S31, 0xa4beea44,36,1); /* 37 */
HH2_NoData (d, a, b, c, 0, S32, 0x4bdecfa9,37,15); /* 38 */
HH2_NoData (c, d, a, b, 0, S33, 0xf6bb4b60,38,15); /* 39 */
HH2_NoData (b, c, d, a, 0, S34, 0xbebfbc70,39,15); /* 40 */
HH2_NoData (a, b, c, d, 0, S31, 0x289b7ec6,40,15); /* 41 */
HH2 (d, a, b, c, data0, S32, 0xeaa127fa,41,0); /* 42 */
HH2 (c, d, a, b, data3, S33, 0xd4ef3085,42,3); /* 43 */
HH2_NoData (b, c, d, a, 0, S34, 0x4881d05,43,15); /* 44 */
HH2_NoData (a, b, c, d, 0, S31, 0xd9d4d039,44,15); /* 45 */
HH2_NoData (d, a, b, c, 0, S32, 0xe6db99e5,45,15); /* 46 */
if(d!=td)continue;
HH2_NoData (c, d, a, b, 0, S33, 0x1fa27cf8,46,15); /* 47 */
HH2 (b, c, d, a, data2, S34, 0xc4ac5665,47,2); /* 48 */
/* Round 4 */
II2 (a, b, c, d, data0, S41, 0xf4292244,48,0); /* 49 */
if(a==ta && b==tb && c==tc)
{
res = KEY_FOUND;
#ifdef DebugValues // finally print values
//cuPrintf("a = %X, b = %X, c = %X, d = %X\n", a, b, c, d);
//cuPrintf("ta = %X, tb = %X, tc = %X, td = %X\n", ta, tb, tc, td);
//cuPrintf("th0 = %X, th1 = %X, th2 = %X, th3 = %X\n", target_hash[0], target_hash[1], target_hash[2], target_hash[3]);
// For value unrolling:
cuPrintf("ta = %X, tb = %X, tc = %X, td = %X\n", ta_old, tb_old, tc_old, td_old);
#endif
};
}
}
result_d[ix] = res;
}
// Host-side launcher for the MD5 brute-force kernel
// 0: OK
// 1: error during the calculation
extern "C" int do_123(gpu_data_xyz* data, int pwd_len, int charset_len)
{
dim3 threads(data->thread_n);
dim3 grid(data->grid_n);
//copy combinations to device
/* hipEvent_t stop;
CUDA_SAFE_CALL( hipEventCreate(&stop) );
CCU2();*/
// hipMemcpyAsync(data_d, data->data_h, sizeof(int)*4*data->thread_n*data->grid_n, hipMemcpyHostToDevice,0);
hipMemcpy(data->data_d, data->data_h, sizeof(int)*4*data->thread_n*data->grid_n, hipMemcpyHostToDevice);
CCU2();
#ifdef DebugValues
cudaPrintfInit(); // init print - first step of cuPrintf
#endif
hipLaunchKernelGGL(( md5_gpu_bruteforce_thread), dim3(grid), dim3(threads), 0, 0, data->data_d, data->result_d, pwd_len, charset_len);
CCU2();
// hipMemcpyAsync(data->result, result_d, sizeof(int)*data->thread_n*data->grid_n, hipMemcpyDeviceToHost, NULL);
hipMemcpy(data->result, data->result_d, sizeof(int)*data->thread_n*data->grid_n, hipMemcpyDeviceToHost);
CCU2();
#ifdef DebugValues
cudaPrintfDisplay(stdout, true); // part two of cuPrintf
cudaPrintfEnd();
#endif
hipError_t result = hipGetLastError();
if(result != hipSuccess)
{
char msg[512];msg[0]=0;
strcpy_s(msg, 512, "CUDA kernel error: ");
strcat_s(msg, 512, hipGetErrorString(result));
printf(msg);
return 1;
}
/* hipEventRecord(stop, 0);
while( hipEventQuery(stop) == hipErrorNotReady )
{
Sleep(3);
}
CUDA_SAFE_CALL( hipEventDestroy(stop));*/
return 0;
}
|
369dc8023256dfab10fdfce71947063bbca5a015.cu
|
#include <stdio.h>
// we are obviously using CUDA :-D
#define CUDA
//#define DebugValues // if defined, print debug values
#include <cuda.h>
#include "md5_kernel.h"
#include "md5.h"
#include <stdio.h>
#include "cuPrintf.cu"
//__constant__
//__device__ __constant__ unsigned int charset_c[256];// = {0xb182b498, 0xf4d2ac41, 0x1f636569, 0xaf4caf00};
//__device__ __constant__ unsigned int target_hash[4];// = {0xb182b498, 0xf4d2ac41, 0x1f636569, 0xaf4caf00};
__shared__ unsigned int charset_c[256];
__shared__ unsigned int target_hash[4];
void CCU2()
{
cudaError_t result = cudaGetLastError();
if(result != cudaSuccess)
{
char msg[512];msg[0]=0;
strcpy_s(msg, 512, "CUDA kernel error: ");
strcat_s(msg, 512, cudaGetErrorString(result));
printf(msg);
return;
}
}
extern "C" void cuda_get_mem(gpu_data_xyz* data)
{
//get memory for data transfers
// Allocate only once
//cudaMalloc( (void**)&charset_c, 256*sizeof(unsigned int));
//cudaMalloc( (void**)&target_hash, 4*sizeof(unsigned int));
//CCU2();
// cudaMalloc ((void **) &target_hash , sizeof(int)*4);
//CCU2();
cudaMalloc ((void **) &data->data_d , sizeof(int)*4*data->thread_n*data->grid_n);
CCU2();
cudaMallocHost((void **) &data->data_h, sizeof(int)*4*data->thread_n*data->grid_n);
CCU2();
cudaMalloc ((void **) &data->result_d , sizeof(int)*data->thread_n*data->grid_n);
CCU2();
cudaMallocHost((void **) &data->result, sizeof(int)*data->thread_n*data->grid_n);
CCU2();
}
__global__ void initShared(int* hash_i, unsigned char *charset, int charset_len)
{
int tid = blockDim.x * blockIdx.x + threadIdx.x;
// copy the charset and the target hash into the shared arrays
for(int i=0;i<charset_len;i++)
charset_c[threadIdx.x+i] = charset[tid+i];
for(int i=0;i<4;i++)
target_hash[threadIdx.x+i] = hash_i[tid+i];
__syncthreads();
// charset_c and target_hash are now initialised for this block
}
extern "C" void init_md5_cuda(gpu_data_xyz* data, int* hash_i, unsigned char *charset, int charset_len)
{
initShared<<<data->grid_n, 1>>>(hash_i, charset, charset_len);
//cudaMemcpy( cuda_x, &x, sizeof( float ), cudaMemcpyHostToDevice );
//int charset_int[256];
//memset(charset_int, 0, sizeof(charset_int));
//for(int i=0;i<charset_len;i++)
// charset_int[i] = charset[i];
/*
for(int i=0;i<4;i++)
charset_int[100+i] = hash_i[i];*/
memset(data->result, 0, sizeof(int)*data->thread_n*data->grid_n);
cudaMemcpy(data->result_d, data->result, sizeof(int)*data->thread_n*data->grid_n, cudaMemcpyHostToDevice);
CCU2();
//cudaMemcpy( charset_c, &charset_int, sizeof(unsigned int)*charset_len, cudaMemcpyHostToDevice );
//cudaMemcpyToSymbol(charset_c, &charset_int, sizeof(unsigned int)*charset_len);
//cudaMemcpyToSymbol("charset_c", charset_int, sizeof(int)*charset_len);//
//CCU2();
//cudaMemcpyToSymbol(target_hash, &hash_i, sizeof(int)*4);
//cudaMemcpy(target_hash, hash_i, sizeof(unsigned int)*4, cudaMemcpyHostToDevice);
//CCU2();
// cudaMemcpyToSymbol(md5_const, md5_const_host, sizeof(md5_const));
}
extern "C" void cuda_free_mem(gpu_data_xyz* data)
{
//cudaFree(target_hash);
cudaFree(data->data_d);
cudaFree(data->result_d);
cudaFreeHost(data->data_h);
cudaFreeHost(data->result);
}
__global__ void md5_gpu_bruteforce_thread(unsigned int *data_d, unsigned int *result_d, unsigned int pwd_len, int charset_len)
{
const int ix = blockDim.x * blockIdx.x + threadIdx.x;
//unsigned int data[4];
unsigned int data0, data1, data2, data3;
int a,b,c,d;
const int len = pwd_len*8;//code_len[0];//
int ta,tb,tc,td;
data0=data_d[ix*4+0];
data1=data_d[ix*4+1];
data2=data_d[ix*4+2];
data3=data_d[ix*4+3];
//for(int i=0;i<4;i++)data[i]=data_d[ix*4+i];//
// if(s3_limit==26)data[0]=(data[0]&0xff00ffff)+(('a'+s3)<<16);//only if 3 symbols to brute force
int res = KEY_NOT_FOUND;
ta = target_hash[0];
tb = target_hash[1];
tc = target_hash[2];
td = target_hash[3];
// Unroll known steps:
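// The unrollII calls below run MD5 steps 63 down to 50 in reverse on the target
// digest, so the candidate loop only has to compute steps 1..49: a mismatch on d
// right after step 46 rejects a candidate early (d is not modified again before
// step 50), and the final check compares (a, b, c) against the partially-reversed
// target state (ta, tb, tc) instead of finishing all 64 rounds.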
unrollII (tc, td, ta, tb, data2, S43, 0x2ad7d2bb); /* 63 */
unrollII (td, ta, tb, tc, 0, S42, 0xbd3af235); /* 62 */
unrollII (ta, tb, tc, td, 0, S41, 0xf7537e82); /* 61 */
unrollII (tb, tc, td, ta, 0, S44, 0x4e0811a1); /* 60 */
unrollII (tc, td, ta, tb, 0, S43, 0xa3014314); /* 59 */
unrollII (td, ta, tb, tc, 0, S42, 0xfe2ce6e0); /* 58 */
unrollII (ta, tb, tc, td, 0, S41, 0x6fa87e4f); /* 57 */
unrollII (tb, tc, td, ta, data1, S44, 0x85845dd1); /* 56 */
unrollII (tc, td, ta, tb, 0, S43, 0xffeff47d); /* 55 */
unrollII (td, ta, tb, tc, data3, S42, 0x8f0ccc92); /* 54 */
unrollII (ta, tb, tc, td, 0, S41, 0x655b59c3); /* 53 */
unrollII (tb, tc, td, ta, 0, S44, 0xfc93a039); /* 52 */
unrollII (tc, td, ta, tb, len, S43, 0xab9423a7); /* 51 */
unrollII (td, ta, tb, tc, 0, S42, 0x432aff97); /* 50 */
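// The two innermost candidate characters are injected into the low two bytes of the
// first message word: s2 sets byte 1 and s1 sets byte 0 of data0; the remaining
// message words (and the upper bytes of data0) arrive pre-packed from the host in data_d.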
for(int s2=0;s2<charset_len;s2++)
{
data0=(data0&0xffff00ff)+((charset_c[s2])<<8);
for(int s1=0;s1<charset_len;s1++)
{
data0=(data0&0xffffff00)+(charset_c[s1]);
a=0x67452301;b=0xefcdab89;c=0x98badcfe;d=0x10325476;
/* Round 1 */
FF2_first (a, b, c, d, data0, S11, 0xd76aa478,0,0); /* 1 */
FF2 (d, a, b, c, data1, S12, 0xe8c7b756,1,1); /* 2 */
FF2 (c, d, a, b, data2, S13, 0x242070db,2,2); /* 3 */
FF2 (b, c, d, a, data3, S14, 0xc1bdceee,3,3); /* 4 */
FF2_NoData (a, b, c, d, 0, S11, 0xf57c0faf,4,15); /* 5 */ // message words from the 5th onward are zero
FF2_NoData (d, a, b, c, 0, S12, 0x4787c62a,5,15); /* 6 */
FF2_NoData (c, d, a, b, 0, S13, 0xa8304613,6,15); /* 7 */
FF2_NoData (b, c, d, a, 0, S14, 0xfd469501,7,15); /* 8 */
FF2_NoData (a, b, c, d, 0, S11, 0x698098d8,8,15); /* 9 */
FF2_NoData (d, a, b, c, 0, S12, 0x8b44f7af,9,15); /* 10 */
FF2_NoData (c, d, a, b, 0, S13, 0xffff5bb1,10,15); /* 11 */
FF2_NoData (b, c, d, a, 0, S14, 0x895cd7be,11,15); /* 12 */
FF2_NoData (a, b, c, d, 0, S11, 0x6b901122,12,15); /* 13 */
FF2_NoData (d, a, b, c, 0, S12, 0xfd987193,13,15); /* 14 */
FF2 (c, d, a, b, len, S13, 0xa679438e,14,14); /* 15 */
FF2_NoData (b, c, d, a, 0, S14, 0x49b40821,15,15); /* 16 */
/* Round 2 */
GG2 (a, b, c, d, data1, S21, 0xf61e2562,16,1); /* 17 */
GG2_NoData (d, a, b, c, 0, S22, 0xc040b340,17,15); /* 18 */
GG2_NoData (c, d, a, b, 0, S23, 0x265e5a51,18,15); /* 19 */
GG2 (b, c, d, a, data0, S24, 0xe9b6c7aa,19,0); /* 20 */
GG2_NoData (a, b, c, d, 0, S21, 0xd62f105d,20,15); /* 21 */
GG2_NoData (d, a, b, c, 0, S22, 0x2441453,21,15); /* 22 */
GG2_NoData (c, d, a, b, 0, S23, 0xd8a1e681,22,15); /* 23 */
GG2_NoData (b, c, d, a, 0, S24, 0xe7d3fbc8,23,15); /* 24 */
GG2_NoData (a, b, c, d, 0, S21, 0x21e1cde6,24,15); /* 25 */
GG2 (d, a, b, c, len, S22, 0xc33707d6,25,14); /* 26 */
GG2 (c, d, a, b, data3, S23, 0xf4d50d87,26,3); /* 27 */
GG2_NoData (b, c, d, a, 0, S24, 0x455a14ed,27,15); /* 28 */
GG2_NoData (a, b, c, d, 0, S21, 0xa9e3e905,28,15); /* 29 */
GG2 (d, a, b, c, data2, S22, 0xfcefa3f8,29,2); /* 30 */
GG2_NoData (c, d, a, b, 0, S23, 0x676f02d9,30,15); /* 31 */
GG2_NoData (b, c, d, a, 0, S24, 0x8d2a4c8a,31,15); /* 32 */
/* Round 3 */
HH2_NoData (a, b, c, d, 0, S31, 0xfffa3942,32,15); /* 33 */
HH2_NoData (d, a, b, c, 0, S32, 0x8771f681,33,15); /* 34 */
HH2_NoData (c, d, a, b, 0, S33, 0x6d9d6122,34,15); /* 35 */
HH2 (b, c, d, a, len, S34, 0xfde5380c,35,14); /* 36 */
HH2 (a, b, c, d, data1, S31, 0xa4beea44,36,1); /* 37 */
HH2_NoData (d, a, b, c, 0, S32, 0x4bdecfa9,37,15); /* 38 */
HH2_NoData (c, d, a, b, 0, S33, 0xf6bb4b60,38,15); /* 39 */
HH2_NoData (b, c, d, a, 0, S34, 0xbebfbc70,39,15); /* 40 */
HH2_NoData (a, b, c, d, 0, S31, 0x289b7ec6,40,15); /* 41 */
HH2 (d, a, b, c, data0, S32, 0xeaa127fa,41,0); /* 42 */
HH2 (c, d, a, b, data3, S33, 0xd4ef3085,42,3); /* 43 */
HH2_NoData (b, c, d, a, 0, S34, 0x4881d05,43,15); /* 44 */
HH2_NoData (a, b, c, d, 0, S31, 0xd9d4d039,44,15); /* 45 */
HH2_NoData (d, a, b, c, 0, S32, 0xe6db99e5,45,15); /* 46 */
if(d!=td)continue;
HH2_NoData (c, d, a, b, 0, S33, 0x1fa27cf8,46,15); /* 47 */
HH2 (b, c, d, a, data2, S34, 0xc4ac5665,47,2); /* 48 */
/* Round 4 */
II2 (a, b, c, d, data0, S41, 0xf4292244,48,0); /* 49 */
if(a==ta && b==tb && c==tc)
{
res = KEY_FOUND;
#ifdef DebugValues // finally print values
//cuPrintf("a = %X, b = %X, c = %X, d = %X\n", a, b, c, d);
//cuPrintf("ta = %X, tb = %X, tc = %X, td = %X\n", ta, tb, tc, td);
//cuPrintf("th0 = %X, th1 = %X, th2 = %X, th3 = %X\n", target_hash[0], target_hash[1], target_hash[2], target_hash[3]);
// For value unrolling:
cuPrintf("ta = %X, tb = %X, tc = %X, td = %X\n", ta_old, tb_old, tc_old, td_old);
#endif
};
}
}
result_d[ix] = res;
}
// Host-side launcher for the MD5 brute-force kernel
// 0: OK
// 1: error during the calculation
extern "C" int do_123(gpu_data_xyz* data, int pwd_len, int charset_len)
{
dim3 threads(data->thread_n);
dim3 grid(data->grid_n);
//copy combinations to device
/* cudaEvent_t stop;
CUDA_SAFE_CALL( cudaEventCreate(&stop) );
CCU2();*/
// cudaMemcpyAsync(data_d, data->data_h, sizeof(int)*4*data->thread_n*data->grid_n, cudaMemcpyHostToDevice,0);
cudaMemcpy(data->data_d, data->data_h, sizeof(int)*4*data->thread_n*data->grid_n, cudaMemcpyHostToDevice);
CCU2();
#ifdef DebugValues
cudaPrintfInit(); // init print - first step of cuPrintf
#endif
md5_gpu_bruteforce_thread<<<grid, threads>>>(data->data_d, data->result_d, pwd_len, charset_len);
CCU2();
// cudaMemcpyAsync(data->result, result_d, sizeof(int)*data->thread_n*data->grid_n, cudaMemcpyDeviceToHost, NULL);
cudaMemcpy(data->result, data->result_d, sizeof(int)*data->thread_n*data->grid_n, cudaMemcpyDeviceToHost);
CCU2();
#ifdef DebugValues
cudaPrintfDisplay(stdout, true); // part two of cuPrintf
cudaPrintfEnd();
#endif
cudaError_t result = cudaGetLastError();
if(result != cudaSuccess)
{
char msg[512];msg[0]=0;
strcpy_s(msg, 512, "CUDA kernel error: ");
strcat_s(msg, 512, cudaGetErrorString(result));
printf(msg);
return 1;
}
/* cudaEventRecord(stop, 0);
while( cudaEventQuery(stop) == cudaErrorNotReady )
{
Sleep(3);
}
CUDA_SAFE_CALL( cudaEventDestroy(stop));*/
return 0;
}
|
50224970c68de4423d1c84d127419ebee2875db0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Program to add two integers using the GPU instead of the CPU.
This program uses the unified memory concept introduced in CUDA version 6 and above.
Terrible example to start with, as the CPU can execute the operation ~100x faster than the GPU.
Benchmarking timings to compare speeds of execution.
*/
/*
Note that the ratio of CPU to GPU execution times depends considerably on the
hardware used to run the program.
*/
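// Typical build command (file name assumed): hipcc gpu_add.cpp -o gpu_add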
// Importing the required headers
#include<stdio.h>
#include<hip/hip_runtime.h>
#include<time.h>
// Returns the duration from start to end times in sec
double time_elapsed(struct timespec *start, struct timespec *end)
{
double t;
t = (end->tv_sec - start->tv_sec); // diff in seconds
t += (end->tv_nsec - start->tv_nsec) * 0.000000001; //diff in nanoseconds
return t;
}
// GPU Kernel to add two numbers
__global__ void GPU_ADD(int *a, int *b)
{
a[0] += b[0]; //add the numbers and store the result in 'a'.
}
// CPU function to add two numbers
int CPU_ADD(int a, int b)
{
return a+b; //return result
}
// Code execution begins here
int main()
{
struct timespec start1, end1; //variables to store time for GPU
struct timespec start2, end2; //variables to store time for CPU
int *a1, a2;
int *b1, b2;
hipMallocManaged(&a1, sizeof(int));
hipMallocManaged(&b1, sizeof(int));
printf("Enter the value of a: "); //get value of a
scanf("%d", &a2);
printf("Enter the value of b: "); //get value of b
scanf("%d", &b2);
a1[0] = a2;
b1[0] = b2;
clock_gettime(CLOCK_REALTIME, &start1); //start timestamp
hipLaunchKernelGGL(( GPU_ADD), dim3(1),dim3(1), 0, 0, a1, b1);
hipDeviceSynchronize(); //wait for synchronization and to avoid bus error(core dumped)
int sum1 = a1[0];
clock_gettime(CLOCK_REALTIME, &end1); //end timestamp
clock_gettime(CLOCK_REALTIME, &start2); //start timestamp
int sum2 = CPU_ADD(a2, b2);
clock_gettime(CLOCK_REALTIME, &end2); //end timestamp
printf("\nThe sum of the two numbers using GPU is: %d\n", sum1);
printf("Time taken by GPU is: %E\n\n", time_elapsed(&start1, &end1)); //print result for GPU
printf("The sum of the two numbers using CPU is: %d\n", sum2);
printf("Time taken by CPU is: %E\n", time_elapsed(&start2, &end2)); //print result for CPU
hipFree(a1);
hipFree(b1);
return 0;
}
|
50224970c68de4423d1c84d127419ebee2875db0.cu
|
/*
Program to add two integers using the GPU instead of the CPU.
This program uses the unified memory concept introduced in CUDA version 6 and above.
Terrible example to start with, as the CPU can execute the operation ~100x faster than the GPU.
Benchmarking timings to compare speeds of execution.
*/
/*
Note that the ratio of CPU to GPU execution times depends considerably on the
hardware used to run the program.
*/
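// Typical build command (file name assumed): nvcc gpu_add.cu -o gpu_add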
// Importing the required headers
#include<stdio.h>
#include<cuda.h>
#include<time.h>
// Returns the duration from start to end times in sec
double time_elapsed(struct timespec *start, struct timespec *end)
{
double t;
t = (end->tv_sec - start->tv_sec); // diff in seconds
t += (end->tv_nsec - start->tv_nsec) * 0.000000001; //diff in nanoseconds
return t;
}
// GPU Kernel to add two numbers
__global__ void GPU_ADD(int *a, int *b)
{
a[0] += b[0]; //add the numbers and store the result in 'a'.
}
// CPU function to add two numbers
int CPU_ADD(int a, int b)
{
return a+b; //return result
}
// Code execution begins here
int main()
{
struct timespec start1, end1; //variables to store time for GPU
struct timespec start2, end2; //variables to store time for CPU
int *a1, a2;
int *b1, b2;
cudaMallocManaged(&a1, sizeof(int));
cudaMallocManaged(&b1, sizeof(int));
printf("Enter the value of a: "); //get value of a
scanf("%d", &a2);
printf("Enter the value of b: "); //get value of b
scanf("%d", &b2);
a1[0] = a2;
b1[0] = b2;
clock_gettime(CLOCK_REALTIME, &start1); //start timestamp
GPU_ADD<<<1,1>>>(a1, b1);
cudaDeviceSynchronize(); //wait for synchronization and to avoid bus error(core dumped)
int sum1 = a1[0];
clock_gettime(CLOCK_REALTIME, &end1); //end timestamp
clock_gettime(CLOCK_REALTIME, &start2); //start timestamp
int sum2 = CPU_ADD(a2, b2);
clock_gettime(CLOCK_REALTIME, &end2); //end timestamp
printf("\nThe sum of the two numbers using GPU is: %d\n", sum1);
printf("Time taken by GPU is: %E\n\n", time_elapsed(&start1, &end1)); //print result for GPU
printf("The sum of the two numbers using CPU is: %d\n", sum2);
printf("Time taken by CPU is: %E\n", time_elapsed(&start2, &end2)); //print result for CPU
cudaFree(a1);
cudaFree(b1);
return 0;
}
|
c84a3d0d1043e5eb608ed586a0fbd8bf14ef91a3.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <vector>
#define BLOCK_DIM 16
/*
template <typename scalar_t>
__global__ void bmm2x2_cuda_forward_kernel(
const scalar_t* __restrict__ mat_1,
const scalar_t* __restrict__ mat_2,
scalar_t* __restrict__ mat_3,
size_t b)
{
// Each thread computes one batch of 2x2 matmul.
size_t i4 = blockIdx.x * blockDim.x + threadIdx.x;
if (i4 >= b){
return;
}
i4 = i4*4 ;
mat_3[i4] = mat_1[i4] * mat_2[i4] + mat_1[i4+1]*mat_2[i4+2];
mat_3[i4+1] = mat_1[i4] * mat_2[i4+1] + mat_1[i4+1]*mat_2[i4+3];
mat_3[i4+2] = mat_1[i4+2] * mat_2[i4] + mat_1[i4+3]*mat_2[i4+2];
mat_3[i4+3] = mat_1[i4+2] * mat_2[i4+1] + mat_1[i4+3]*mat_2[i4+3];
return;
}
template <typename scalar_t> // mat1 is X, mat2 is W -> Y = X.W
__global__ void bmm2x2_cuda_backward_kernel(
const scalar_t* __restrict__ mat_1,
const scalar_t* __restrict__ mat_2,
const scalar_t* __restrict__ d_out,
scalar_t* __restrict__ d_mat1,
scalar_t* __restrict__ d_mat2,
size_t b)
{
// dmat1 is dX, dmat2 is dW, d_out is dY
// Each thread computes one batch of 2x2 matmul.
size_t i4 = blockIdx.x * blockDim.x + threadIdx.x;
if (i4 >= b){
return;
}
i4 = i4*4 ;
/// computing dX = dY.(W^t)
d_mat1[i4] = d_out[i4] * mat_2[i4] + d_out[i4+1]*mat_2[i4+1];
d_mat1[i4+1] = d_out[i4] * mat_2[i4+2] + d_out[i4+1]*mat_2[i4+3];
d_mat1[i4+2] = d_out[i4+2] * mat_2[i4] + d_out[i4+3]*mat_2[i4+1];
d_mat1[i4+3] = d_out[i4+2] * mat_2[i4+2] + d_out[i4+3]*mat_2[i4+3];
/// computing dW = dX^t.dY
d_mat2[i4] = mat_1[i4] * d_out[i4] + mat_1[i4+2]*d_out[i4+2];
d_mat2[i4+1] = mat_1[i4] * d_out[i4+1] + mat_1[i4+2]*d_out[i4+3];
d_mat2[i4+2] = mat_1[i4+1] * d_out[i4] + mat_1[i4+3]*d_out[i4+2];
d_mat2[i4+3] = mat_1[i4+1] * d_out[i4+1] + mat_1[i4+3]*d_out[i4+3];
return;
}
*/
/* this one is working perfectly
template <typename scalar_t>
__global__ void bmm2x2_cuda_forward_kernel(
const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> input,
const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> weight,
torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> output,
size_t s0, size_t s1)
{
// Each thread computes one batch of 2x2 matmul.
size_t i0 = blockIdx.x * blockDim.x + threadIdx.x; // input_dim//2
size_t i1 = blockIdx.y * blockDim.y + threadIdx.y; // batch_size
if ((i0 >= s0) || (i1 >= s1)){
return;
}
output[i0][i1][0] = input[i0][i1][0] * weight[i0][0][0] +
input[i0][i1][1] * weight[i0][1][0];
output[i0][i1][1] = input[i0][i1][0] * weight[i0][0][1] +
input[i0][i1][1] * weight[i0][1][1];
return;
}
template <typename scalar_t> // mat1 is X, mat2 is W -> Y = X.W
__global__ void bmm2x2_cuda_backward_kernel(
const scalar_t* __restrict__ mat_1,
const scalar_t* __restrict__ mat_2,
const scalar_t* __restrict__ d_out,
scalar_t* __restrict__ d_mat1,
scalar_t* __restrict__ d_mat2,
size_t b)
{
// dmat1 is dX, dmat2 is dW, d_out is dY
// Each thread computes one batch of 2x2 matmul.
size_t i4 = blockIdx.x * blockDim.x + threadIdx.x;
if (i4 >= b){
return;
}
i4 = i4*4 ;
/// computing dX = dY.(W^t)
d_mat1[i4] = d_out[i4] * mat_2[i4] + d_out[i4+1]*mat_2[i4+1];
d_mat1[i4+1] = d_out[i4] * mat_2[i4+2] + d_out[i4+1]*mat_2[i4+3];
d_mat1[i4+2] = d_out[i4+2] * mat_2[i4] + d_out[i4+3]*mat_2[i4+1];
d_mat1[i4+3] = d_out[i4+2] * mat_2[i4+2] + d_out[i4+3]*mat_2[i4+3];
/// computing dW = dX^t.dY
d_mat2[i4] = mat_1[i4] * d_out[i4] + mat_1[i4+2]*d_out[i4+2];
d_mat2[i4+1] = mat_1[i4] * d_out[i4+1] + mat_1[i4+2]*d_out[i4+3];
d_mat2[i4+2] = mat_1[i4+1] * d_out[i4] + mat_1[i4+3]*d_out[i4+2];
d_mat2[i4+3] = mat_1[i4+1] * d_out[i4+1] + mat_1[i4+3]*d_out[i4+3];
return;
}
//////////////////////////////////////////////////
std::vector<torch::Tensor> bmm2x2_cuda_forward(
torch::Tensor input,
torch::Tensor weights) {
const auto s0 = input.size(0);
const auto s1 = input.size(1);
// std::cout<<"Batch Size "<<batch_size<<" Input Size "<<input.size(1)<<","<<input.size(2)<<std::endl;
dim3 threads_per_block(BLOCK_DIM, BLOCK_DIM);
// spreading batch across multiple blocks and thread
dim3 blocks_per_grid(1, 1);
blocks_per_grid.x = ::ceil(static_cast<double>(s0) /
static_cast<double>(threads_per_block.x));
blocks_per_grid.y = ::ceil(static_cast<double>(s1) /
static_cast<double>(threads_per_block.y));
// size_t threads_per_block = BLOCK_DIM*BLOCK_DIM;
// size_t blocks_per_grid = ::ceil(static_cast<double>(batch_size) /
// static_cast<double>(threads_per_block));
// const int threads_per_block = 1024; // default is 1024
// const dim3 blocks_per_grid((batch_size + threads - 1) / threads, batch_size);
auto output = torch::zeros_like(input);
AT_DISPATCH_FLOATING_TYPES(input.type(), "bmm2x2_forward_cuda", ([&] {
hipLaunchKernelGGL(( bmm2x2_cuda_forward_kernel<scalar_t>), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0,
input.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
weights.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
output.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
s0, s1);
}));
return {output};
}
std::vector<torch::Tensor> bmm2x2_cuda_backward(
torch::Tensor input,
torch::Tensor weights,
torch::Tensor grad_output) {
const auto batch_size = input.size(0);
std::cout<<"Batch Size"<<batch_size<<std::endl;
// dim3 threads_per_block(BLOCK_DIM, BLOCK_DIM);
// // spreading batch across multiple blocks and thread
// dim3 blocks_per_grid(1, 1);
// blocks_per_grid.x = ::ceil(static_cast<double>(p) /
// static_cast<double>(threads_per_block.x));
// blocks_per_grid.y = ::ceil(static_cast<double>(m) /
// static_cast<double>(threads_per_block.y));
size_t threads_per_block = BLOCK_DIM*BLOCK_DIM;
size_t blocks_per_grid = ::ceil(static_cast<double>(batch_size) /
static_cast<double>(threads_per_block));
// const int threads = 1024;
// const dim3 blocks((state_size + threads - 1) / threads, batch_size);
auto del_input = torch::zeros_like(input);
auto del_weights = torch::zeros_like(input);
AT_DISPATCH_FLOATING_TYPES(input.type(), "bmm2x2_backward_cuda", ([&] {
hipLaunchKernelGGL(( bmm2x2_cuda_backward_kernel<scalar_t>), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0,
input.data<scalar_t>(),
weights.data<scalar_t>(),
grad_output.data<scalar_t>(),
del_input.data<scalar_t>(),
del_weights.data<scalar_t>(),
batch_size);
}));
return {del_input, del_weights};
}
*/
template <typename scalar_t>
__global__ void bmm2x2_cuda_forward_kernel(
const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> input,
const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> weight,
torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> output,
size_t s0, size_t s1)
{
// Each thread computes one batch of 2x2 matmul.
size_t i0 = blockIdx.x * blockDim.x + threadIdx.x; // batch_size
size_t i1 = blockIdx.y * blockDim.y + threadIdx.y; // input_dim//2
if ((i0 >= s0) || (i1 >= s1)){
return;
}
output[i0][i1][0] = input[i0][i1][0] * weight[i1][0][0] +
input[i0][i1][1] * weight[i1][1][0];
output[i0][i1][1] = input[i0][i1][0] * weight[i1][0][1] +
input[i0][i1][1] * weight[i1][1][1];
return;
}
/// here, we expect the tensor not to be transposed, but it performs the same bmm
std::vector<torch::Tensor> bmm2x2_cuda_forward(
torch::Tensor input,
torch::Tensor weights) {
const auto s0 = input.size(0);
const auto s1 = input.size(1);
// std::cout<<"Batch Size "<<batch_size<<" Input Size "<<input.size(1)<<","<<input.size(2)<<std::endl;
dim3 threads_per_block(BLOCK_DIM, BLOCK_DIM);
// spreading batch across multiple blocks and thread
dim3 blocks_per_grid(1, 1);
blocks_per_grid.x = ::ceil(static_cast<double>(s0) /
static_cast<double>(threads_per_block.x));
blocks_per_grid.y = ::ceil(static_cast<double>(s1) /
static_cast<double>(threads_per_block.y));
// size_t threads_per_block = BLOCK_DIM*BLOCK_DIM;
// size_t blocks_per_grid = ::ceil(static_cast<double>(batch_size) /
// static_cast<double>(threads_per_block));
// const int threads_per_block = 1024; // default is 1024
// const dim3 blocks_per_grid((batch_size + threads - 1) / threads, batch_size);
auto output = torch::zeros_like(input);
AT_DISPATCH_FLOATING_TYPES(input.type(), "bmm2x2_forward_cuda", ([&] {
hipLaunchKernelGGL(( bmm2x2_cuda_forward_kernel<scalar_t>), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0,
input.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
weights.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
output.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
s0, s1);
}));
return {output};
}
template <typename scalar_t>
__global__ void bmm2x2_cuda_forward_inference_kernel(
torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> input,
const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> weight,
size_t s0, size_t s1)
{
// Each thread computes one batch of 2x2 matmul.
size_t i0 = blockIdx.x * blockDim.x + threadIdx.x; // batch_size
size_t i1 = blockIdx.y * blockDim.y + threadIdx.y; // input_dim//2
if ((i0 >= s0) || (i1 >= s1)){
return;
}
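// In-place variant: tmp holds the new [0] component so that the old value of
// input[i0][i1][0] is still available when the new [1] component is computed.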
scalar_t tmp = input[i0][i1][0] * weight[i1][0][0] +
input[i0][i1][1] * weight[i1][1][0];
input[i0][i1][1] = input[i0][i1][0] * weight[i1][0][1] +
input[i0][i1][1] * weight[i1][1][1];
input[i0][i1][0] = tmp;
return;
}
std::vector<torch::Tensor> bmm2x2_cuda_forward_inference(
torch::Tensor input,
torch::Tensor weights) {
const auto s0 = input.size(0);
const auto s1 = input.size(1);
dim3 threads_per_block(BLOCK_DIM, BLOCK_DIM);
dim3 blocks_per_grid(1, 1);
blocks_per_grid.x = ::ceil(static_cast<double>(s0) /
static_cast<double>(threads_per_block.x));
blocks_per_grid.y = ::ceil(static_cast<double>(s1) /
static_cast<double>(threads_per_block.y));
AT_DISPATCH_FLOATING_TYPES(input.type(), "bmm2x2_forward_cuda", ([&] {
hipLaunchKernelGGL(( bmm2x2_cuda_forward_inference_kernel<scalar_t>), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0,
input.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
weights.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
s0, s1);
}));
return {input};
}
template <typename scalar_t> // mat1 is X, mat2 is W -> Y = X.W
__global__ void bmm2x2_cuda_backward_kernel(
const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> input,
const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> weight,
const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> del_output,
torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> del_input,
torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> del_weight,
size_t s0, size_t s1)
{
// Each thread computes one batch of 2x2 matmul.
size_t i0 = blockIdx.x * blockDim.x + threadIdx.x; // batch_size
size_t i1 = blockIdx.y * blockDim.y + threadIdx.y; // input_dim//2
if ((i0 >= s0) || (i1 >= s1)){
return;
}
/// computing dX = dY.(W^t)
del_input[i0][i1][0] = del_output[i0][i1][0] * weight[i1][0][0] +
del_output[i0][i1][1] * weight[i1][0][1];
del_input[i0][i1][1] = del_output[i0][i1][0] * weight[i1][1][0] +
del_output[i0][i1][1] * weight[i1][1][1];
// d_mat1[i4] = d_out[i4] * mat_2[i4] + d_out[i4+1]*mat_2[i4+1];
// d_mat1[i4+1] = d_out[i4] * mat_2[i4+2] + d_out[i4+1]*mat_2[i4+3];
// d_mat1[i4+2] = d_out[i4+2] * mat_2[i4] + d_out[i4+3]*mat_2[i4+1];
// d_mat1[i4+3] = d_out[i4+2] * mat_2[i4+2] + d_out[i4+3]*mat_2[i4+3];
/// computing dW = X^t.dY
del_weight[i0][i1][0][0] = del_output[i0][i1][0]*input[i0][i1][0];
del_weight[i0][i1][0][1] = del_output[i0][i1][1]*input[i0][i1][0];
del_weight[i0][i1][1][0] = del_output[i0][i1][0]*input[i0][i1][1];
del_weight[i0][i1][1][1] = del_output[i0][i1][1]*input[i0][i1][1];
// d_mat2[i4] = mat_1[i4] * d_out[i4] + mat_1[i4+2]*d_out[i4+2];
// d_mat2[i4+1] = mat_1[i4] * d_out[i4+1] + mat_1[i4+2]*d_out[i4+3];
// d_mat2[i4+2] = mat_1[i4+1] * d_out[i4] + mat_1[i4+3]*d_out[i4+2];
// d_mat2[i4+3] = mat_1[i4+1] * d_out[i4+1] + mat_1[i4+3]*d_out[i4+3];
return;
}
std::vector<torch::Tensor> bmm2x2_cuda_backward(
torch::Tensor input,
torch::Tensor weights,
torch::Tensor grad_output) {
const auto s0 = input.size(0);
const auto s1 = input.size(1);
dim3 threads_per_block(BLOCK_DIM, BLOCK_DIM);
// spreading batch across multiple blocks and thread
dim3 blocks_per_grid(1, 1);
blocks_per_grid.x = ::ceil(static_cast<double>(s0) /
static_cast<double>(threads_per_block.x));
blocks_per_grid.y = ::ceil(static_cast<double>(s1) /
static_cast<double>(threads_per_block.y));
auto del_input = torch::zeros_like(input);
auto options = torch::TensorOptions().dtype(input.dtype()).device(input.device());
// auto del_weights = torch::empty({s0, s1, 2, 2}, input.device());
auto del_weights = torch::empty({s0, s1, 2, 2}, options);
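// del_weights holds one 2x2 gradient per (sample, pair); it is reduced over the
// batch dimension with torch::sum(del_weights, 0) before being returned below.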
AT_DISPATCH_FLOATING_TYPES(input.type(), "bmm2x2_backward_cuda", ([&] {
hipLaunchKernelGGL(( bmm2x2_cuda_backward_kernel<scalar_t>), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0,
input.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
weights.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
grad_output.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
del_input.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
del_weights.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
s0, s1);
}));
return {del_input, torch::sum(del_weights, 0)};
}
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
/// this is for computing del_input and del_weight separately
template <typename scalar_t> // mat1 is X, mat2 is W -> Y = X.W
__global__ void bmm2x2_cuda_backward_delinput_kernel(
const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> weight,
const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> del_output,
torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> del_input,
size_t s0, size_t s1)
{
// Each thread computes one batch of 2x2 matmul.
size_t i0 = blockIdx.x * blockDim.x + threadIdx.x; // batch_size
size_t i1 = blockIdx.y * blockDim.y + threadIdx.y; // input_dim//2
if ((i0 >= s0) || (i1 >= s1)){
return;
}
/// computing dX = dY.(W^t)
del_input[i0][i1][0] = del_output[i0][i1][0] * weight[i1][0][0] +
del_output[i0][i1][1] * weight[i1][0][1];
del_input[i0][i1][1] = del_output[i0][i1][0] * weight[i1][1][0] +
del_output[i0][i1][1] * weight[i1][1][1];
return;
}
template <typename scalar_t> // mat1 is X, mat2 is W -> Y = X.W
__global__ void bmm2x2_cuda_backward_delweight_kernel(
const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> input,
const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> del_output,
torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> del_weight,
size_t s0, size_t s1)
{
// Each thread computes one batch of 2x2 matmul.
size_t i1 = blockIdx.x * blockDim.x + threadIdx.x; // input_dim//2
if (i1 >= s1){
return;
}
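// One thread per pair index i1: accumulate dW[i1] = sum_k x_k^T . dY_k over the whole
// batch in registers, then write the four entries of the 2x2 gradient once at the end.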
scalar_t t0=0, t1=0, t2=0, t3=0;
for (size_t k{0}; k < s0; ++k){
t0 += del_output[k][i1][0] * input[k][i1][0];
t1 += del_output[k][i1][1] * input[k][i1][0];
t2 += del_output[k][i1][0] * input[k][i1][1];
t3 += del_output[k][i1][1] * input[k][i1][1];
}
// scalar_t _s0 = static_cast<scalar_t>(s0);
del_weight[i1][0][0] = t0;
del_weight[i1][0][1] = t1;
del_weight[i1][1][0] = t2;
del_weight[i1][1][1] = t3;
return;
}
std::vector<torch::Tensor> bmm2x2_cuda_backward_v2(
torch::Tensor input,
torch::Tensor weights,
torch::Tensor grad_output) {
const auto s0 = input.size(0);
const auto s1 = input.size(1);
dim3 threads_per_block(BLOCK_DIM, BLOCK_DIM);
// spreading batch across multiple blocks and thread
dim3 blocks_per_grid(1, 1);
blocks_per_grid.x = ::ceil(static_cast<double>(s0) /
static_cast<double>(threads_per_block.x));
blocks_per_grid.y = ::ceil(static_cast<double>(s1) /
static_cast<double>(threads_per_block.y));
auto del_input = torch::zeros_like(input);
auto del_weights = torch::empty_like(weights);
AT_DISPATCH_FLOATING_TYPES(input.type(), "bmm2x2_backward_input_cuda", ([&] {
hipLaunchKernelGGL(( bmm2x2_cuda_backward_delinput_kernel<scalar_t>), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0,
weights.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
grad_output.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
del_input.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
s0, s1);
}));
int threads = BLOCK_DIM*BLOCK_DIM;
int blocks = ::ceil(static_cast<double>(s1) /
static_cast<double>(threads));
AT_DISPATCH_FLOATING_TYPES(input.type(), "bmm2x2_backward_weight_cuda", ([&] {
// scalar_t _s0 = static_cast<scalar_t>(s0);
hipLaunchKernelGGL(( bmm2x2_cuda_backward_delweight_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0,
input.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
grad_output.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
del_weights.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
s0, s1);
}));
return {del_input, del_weights};
}
//////////////////////////////////////////////////////
//////////////////////////////////////////////////////
//////////////////////////////////////////////////////
//////////////////////////////////////////////////////
//////////////////////////////////////////////////////
///////////// THIS PORTION CONTAINS CODE FOR BMM 2X1 (HALVER) FOR 2X2 PORTION
template <typename scalar_t>
__global__ void bmm2x1_cuda_forward_kernel(
const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> input,
const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> weight,
torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> output,
size_t s0, size_t s1)
{
// Each thread computes one batch of 2x1 matmul.
size_t i0 = blockIdx.x * blockDim.x + threadIdx.x; // batch_size
size_t i1 = blockIdx.y * blockDim.y + threadIdx.y; // input_dim//2
if ((i0 >= s0) || (i1 >= s1)){
return;
}
output[i0][i1] = input[i0][i1][0] * weight[i1][0] +
input[i0][i1][1] * weight[i1][1];
return;
}
/// here, we expect the tensor not to be transposed, but it performs the same bmm
std::vector<torch::Tensor> bmm2x1_cuda_forward(
torch::Tensor input,
torch::Tensor weights) {
/// input has shape -> batch size, n_grids, 2
/// weight has shape -> n_grids, 2 (conceptually n_grids, 2, 1: it halves the number of inputs)
const auto s0 = input.size(0);
const auto s1 = input.size(1);
// std::cout<<"Batch Size "<<batch_size<<" Input Size "<<input.size(1)<<","<<input.size(2)<<std::endl;
dim3 threads_per_block(BLOCK_DIM, BLOCK_DIM);
// spreading batch across multiple blocks and thread
dim3 blocks_per_grid(1, 1);
blocks_per_grid.x = ::ceil(static_cast<double>(s0) /
static_cast<double>(threads_per_block.x));
blocks_per_grid.y = ::ceil(static_cast<double>(s1) /
static_cast<double>(threads_per_block.y));
// size_t threads_per_block = BLOCK_DIM*BLOCK_DIM;
// size_t blocks_per_grid = ::ceil(static_cast<double>(batch_size) /
// static_cast<double>(threads_per_block));
// const int threads_per_block = 1024; // default is 1024
// const dim3 blocks_per_grid((batch_size + threads - 1) / threads, batch_size);
auto output = torch::zeros({s0, s1}, input.options()); // match input dtype and device so the accessor below is valid
/// output has shape Batch, n_group
AT_DISPATCH_FLOATING_TYPES(input.type(), "bmm2x1_forward_cuda", ([&] {
hipLaunchKernelGGL(( bmm2x1_cuda_forward_kernel<scalar_t>), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0,
input.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
weights.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
output.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
s0, s1);
}));
return {output};
}
template <typename scalar_t> // mat1 is X, mat2 is W -> Y = X.W
__global__ void bmm2x1_cuda_backward_kernel(
const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> input,
const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> weight,
const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> del_output,
torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> del_input,
torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> del_weight,
size_t s0, size_t s1)
{
// Each thread computes one batch of 2x2 matmul.
size_t i0 = blockIdx.x * blockDim.x + threadIdx.x; // batch_size
size_t i1 = blockIdx.y * blockDim.y + threadIdx.y; // input_dim//2
if ((i0 >= s0) || (i1 >= s1)){
return;
}
/// computing dX = dY.(W^t)
del_input[i0][i1][0] = del_output[i0][i1] * weight[i1][0];
del_input[i0][i1][1] = del_output[i0][i1] * weight[i1][1];
/// computing dW = X^t.dY
del_weight[i0][i1][0] = del_output[i0][i1]*input[i0][i1][0];
del_weight[i0][i1][1] = del_output[i0][i1]*input[i0][i1][1];
return;
}
std::vector<torch::Tensor> bmm2x1_cuda_backward(
torch::Tensor input,
torch::Tensor weights,
torch::Tensor grad_output) {
/// input has shape -> batch size, n_grids, 2
/// weight has shape -> n_grids, 2
const auto s0 = input.size(0);
const auto s1 = input.size(1);
dim3 threads_per_block(BLOCK_DIM, BLOCK_DIM);
// spreading batch across multiple blocks and thread
dim3 blocks_per_grid(1, 1);
blocks_per_grid.x = ::ceil(static_cast<double>(s0) /
static_cast<double>(threads_per_block.x));
blocks_per_grid.y = ::ceil(static_cast<double>(s1) /
static_cast<double>(threads_per_block.y));
auto del_input = torch::zeros_like(input);
auto del_weights = torch::empty({s0, s1, 2}, input.options()); // match input dtype and device
AT_DISPATCH_FLOATING_TYPES(input.type(), "bmm2x1_backward_cuda", ([&] {
hipLaunchKernelGGL(( bmm2x1_cuda_backward_kernel<scalar_t>), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0,
input.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
weights.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
grad_output.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
del_input.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
del_weights.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
s0, s1);
}));
return {del_input, torch::sum(del_weights, 0)};
}
|
c84a3d0d1043e5eb608ed586a0fbd8bf14ef91a3.cu
|
#include <torch/extension.h>
#include <cuda.h>
#include <cuda_runtime.h>
#include <vector>
#define BLOCK_DIM 16
/*
template <typename scalar_t>
__global__ void bmm2x2_cuda_forward_kernel(
const scalar_t* __restrict__ mat_1,
const scalar_t* __restrict__ mat_2,
scalar_t* __restrict__ mat_3,
size_t b)
{
// Each thread computes one batch of 2x2 matmul.
size_t i4 = blockIdx.x * blockDim.x + threadIdx.x;
if (i4 >= b){
return;
}
i4 = i4*4 ;
mat_3[i4] = mat_1[i4] * mat_2[i4] + mat_1[i4+1]*mat_2[i4+2];
mat_3[i4+1] = mat_1[i4] * mat_2[i4+1] + mat_1[i4+1]*mat_2[i4+3];
mat_3[i4+2] = mat_1[i4+2] * mat_2[i4] + mat_1[i4+3]*mat_2[i4+2];
mat_3[i4+3] = mat_1[i4+2] * mat_2[i4+1] + mat_1[i4+3]*mat_2[i4+3];
return;
}
template <typename scalar_t> // mat1 is X, mat2 is W -> Y = X.W
__global__ void bmm2x2_cuda_backward_kernel(
const scalar_t* __restrict__ mat_1,
const scalar_t* __restrict__ mat_2,
const scalar_t* __restrict__ d_out,
scalar_t* __restrict__ d_mat1,
scalar_t* __restrict__ d_mat2,
size_t b)
{
// dmat1 is dX, dmat2 is dW, d_out is dY
// Each thread computes one batch of 2x2 matmul.
size_t i4 = blockIdx.x * blockDim.x + threadIdx.x;
if (i4 >= b){
return;
}
i4 = i4*4 ;
/// computing dX = dY.(W^t)
d_mat1[i4] = d_out[i4] * mat_2[i4] + d_out[i4+1]*mat_2[i4+1];
d_mat1[i4+1] = d_out[i4] * mat_2[i4+2] + d_out[i4+1]*mat_2[i4+3];
d_mat1[i4+2] = d_out[i4+2] * mat_2[i4] + d_out[i4+3]*mat_2[i4+1];
d_mat1[i4+3] = d_out[i4+2] * mat_2[i4+2] + d_out[i4+3]*mat_2[i4+3];
/// computing dW = dX^t.dY
d_mat2[i4] = mat_1[i4] * d_out[i4] + mat_1[i4+2]*d_out[i4+2];
d_mat2[i4+1] = mat_1[i4] * d_out[i4+1] + mat_1[i4+2]*d_out[i4+3];
d_mat2[i4+2] = mat_1[i4+1] * d_out[i4] + mat_1[i4+3]*d_out[i4+2];
d_mat2[i4+3] = mat_1[i4+1] * d_out[i4+1] + mat_1[i4+3]*d_out[i4+3];
return;
}
*/
/* this one is working perfectly
template <typename scalar_t>
__global__ void bmm2x2_cuda_forward_kernel(
const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> input,
const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> weight,
torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> output,
size_t s0, size_t s1)
{
// Each thread computes one batch of 2x2 matmul.
size_t i0 = blockIdx.x * blockDim.x + threadIdx.x; // input_dim//2
size_t i1 = blockIdx.y * blockDim.y + threadIdx.y; // batch_size
if ((i0 >= s0) || (i1 >= s1)){
return;
}
output[i0][i1][0] = input[i0][i1][0] * weight[i0][0][0] +
input[i0][i1][1] * weight[i0][1][0];
output[i0][i1][1] = input[i0][i1][0] * weight[i0][0][1] +
input[i0][i1][1] * weight[i0][1][1];
return;
}
template <typename scalar_t> // mat1 is X, mat2 is W -> Y = X.W
__global__ void bmm2x2_cuda_backward_kernel(
const scalar_t* __restrict__ mat_1,
const scalar_t* __restrict__ mat_2,
const scalar_t* __restrict__ d_out,
scalar_t* __restrict__ d_mat1,
scalar_t* __restrict__ d_mat2,
size_t b)
{
// dmat1 is dX, dmat2 is dW, d_out is dY
// Each thread computes one batch of 2x2 matmul.
size_t i4 = blockIdx.x * blockDim.x + threadIdx.x;
if (i4 >= b){
return;
}
i4 = i4*4 ;
/// computing dX = dY.(W^t)
d_mat1[i4] = d_out[i4] * mat_2[i4] + d_out[i4+1]*mat_2[i4+1];
d_mat1[i4+1] = d_out[i4] * mat_2[i4+2] + d_out[i4+1]*mat_2[i4+3];
d_mat1[i4+2] = d_out[i4+2] * mat_2[i4] + d_out[i4+3]*mat_2[i4+1];
d_mat1[i4+3] = d_out[i4+2] * mat_2[i4+2] + d_out[i4+3]*mat_2[i4+3];
/// computing dW = dX^t.dY
d_mat2[i4] = mat_1[i4] * d_out[i4] + mat_1[i4+2]*d_out[i4+2];
d_mat2[i4+1] = mat_1[i4] * d_out[i4+1] + mat_1[i4+2]*d_out[i4+3];
d_mat2[i4+2] = mat_1[i4+1] * d_out[i4] + mat_1[i4+3]*d_out[i4+2];
d_mat2[i4+3] = mat_1[i4+1] * d_out[i4+1] + mat_1[i4+3]*d_out[i4+3];
return;
}
//////////////////////////////////////////////////
std::vector<torch::Tensor> bmm2x2_cuda_forward(
torch::Tensor input,
torch::Tensor weights) {
const auto s0 = input.size(0);
const auto s1 = input.size(1);
// std::cout<<"Batch Size "<<batch_size<<" Input Size "<<input.size(1)<<","<<input.size(2)<<std::endl;
dim3 threads_per_block(BLOCK_DIM, BLOCK_DIM);
// spreading batch across multiple blocks and thread
dim3 blocks_per_grid(1, 1);
blocks_per_grid.x = std::ceil(static_cast<double>(s0) /
static_cast<double>(threads_per_block.x));
blocks_per_grid.y = std::ceil(static_cast<double>(s1) /
static_cast<double>(threads_per_block.y));
// size_t threads_per_block = BLOCK_DIM*BLOCK_DIM;
// size_t blocks_per_grid = std::ceil(static_cast<double>(batch_size) /
// static_cast<double>(threads_per_block));
// const int threads_per_block = 1024; // default is 1024
// const dim3 blocks_per_grid((batch_size + threads - 1) / threads, batch_size);
auto output = torch::zeros_like(input);
AT_DISPATCH_FLOATING_TYPES(input.type(), "bmm2x2_forward_cuda", ([&] {
bmm2x2_cuda_forward_kernel<scalar_t><<<blocks_per_grid, threads_per_block>>>(
input.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
weights.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
output.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
s0, s1);
}));
return {output};
}
std::vector<torch::Tensor> bmm2x2_cuda_backward(
torch::Tensor input,
torch::Tensor weights,
torch::Tensor grad_output) {
const auto batch_size = input.size(0);
std::cout<<"Batch Size"<<batch_size<<std::endl;
// dim3 threads_per_block(BLOCK_DIM, BLOCK_DIM);
// // spreading batch across multiple blocks and thread
// dim3 blocks_per_grid(1, 1);
// blocks_per_grid.x = std::ceil(static_cast<double>(p) /
// static_cast<double>(threads_per_block.x));
// blocks_per_grid.y = std::ceil(static_cast<double>(m) /
// static_cast<double>(threads_per_block.y));
size_t threads_per_block = BLOCK_DIM*BLOCK_DIM;
size_t blocks_per_grid = std::ceil(static_cast<double>(batch_size) /
static_cast<double>(threads_per_block));
// const int threads = 1024;
// const dim3 blocks((state_size + threads - 1) / threads, batch_size);
auto del_input = torch::zeros_like(input);
auto del_weights = torch::zeros_like(input);
AT_DISPATCH_FLOATING_TYPES(input.type(), "bmm2x2_backward_cuda", ([&] {
bmm2x2_cuda_backward_kernel<scalar_t><<<blocks_per_grid, threads_per_block>>>(
input.data<scalar_t>(),
weights.data<scalar_t>(),
grad_output.data<scalar_t>(),
del_input.data<scalar_t>(),
del_weights.data<scalar_t>(),
batch_size);
}));
return {del_input, del_weights};
}
*/
template <typename scalar_t>
__global__ void bmm2x2_cuda_forward_kernel(
const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> input,
const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> weight,
torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> output,
size_t s0, size_t s1)
{
// Each thread computes one batch of 2x2 matmul.
size_t i0 = blockIdx.x * blockDim.x + threadIdx.x; // batch_size
size_t i1 = blockIdx.y * blockDim.y + threadIdx.y; // input_dim//2
if ((i0 >= s0) || (i1 >= s1)){
return;
}
output[i0][i1][0] = input[i0][i1][0] * weight[i1][0][0] +
input[i0][i1][1] * weight[i1][1][0];
output[i0][i1][1] = input[i0][i1][0] * weight[i1][0][1] +
input[i0][i1][1] * weight[i1][1][1];
return;
}
/// here, we expect the tensor not to be transposed, but it performs the same bmm
std::vector<torch::Tensor> bmm2x2_cuda_forward(
torch::Tensor input,
torch::Tensor weights) {
const auto s0 = input.size(0);
const auto s1 = input.size(1);
// std::cout<<"Batch Size "<<batch_size<<" Input Size "<<input.size(1)<<","<<input.size(2)<<std::endl;
dim3 threads_per_block(BLOCK_DIM, BLOCK_DIM);
// spreading batch across multiple blocks and thread
dim3 blocks_per_grid(1, 1);
blocks_per_grid.x = std::ceil(static_cast<double>(s0) /
static_cast<double>(threads_per_block.x));
blocks_per_grid.y = std::ceil(static_cast<double>(s1) /
static_cast<double>(threads_per_block.y));
// size_t threads_per_block = BLOCK_DIM*BLOCK_DIM;
// size_t blocks_per_grid = std::ceil(static_cast<double>(batch_size) /
// static_cast<double>(threads_per_block));
// const int threads_per_block = 1024; // default is 1024
// const dim3 blocks_per_grid((batch_size + threads - 1) / threads, batch_size);
auto output = torch::zeros_like(input);
AT_DISPATCH_FLOATING_TYPES(input.type(), "bmm2x2_forward_cuda", ([&] {
bmm2x2_cuda_forward_kernel<scalar_t><<<blocks_per_grid, threads_per_block>>>(
input.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
weights.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
output.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
s0, s1);
}));
return {output};
}
template <typename scalar_t>
__global__ void bmm2x2_cuda_forward_inference_kernel(
torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> input,
const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> weight,
size_t s0, size_t s1)
{
// Each thread computes one batch of 2x2 matmul.
size_t i0 = blockIdx.x * blockDim.x + threadIdx.x; // batch_size
size_t i1 = blockIdx.y * blockDim.y + threadIdx.y; // input_dim//2
if ((i0 >= s0) || (i1 >= s1)){
return;
}
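// In-place variant: tmp holds the new [0] component so that the old value of
// input[i0][i1][0] is still available when the new [1] component is computed.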
scalar_t tmp = input[i0][i1][0] * weight[i1][0][0] +
input[i0][i1][1] * weight[i1][1][0];
input[i0][i1][1] = input[i0][i1][0] * weight[i1][0][1] +
input[i0][i1][1] * weight[i1][1][1];
input[i0][i1][0] = tmp;
return;
}
std::vector<torch::Tensor> bmm2x2_cuda_forward_inference(
torch::Tensor input,
torch::Tensor weights) {
const auto s0 = input.size(0);
const auto s1 = input.size(1);
dim3 threads_per_block(BLOCK_DIM, BLOCK_DIM);
dim3 blocks_per_grid(1, 1);
blocks_per_grid.x = std::ceil(static_cast<double>(s0) /
static_cast<double>(threads_per_block.x));
blocks_per_grid.y = std::ceil(static_cast<double>(s1) /
static_cast<double>(threads_per_block.y));
AT_DISPATCH_FLOATING_TYPES(input.type(), "bmm2x2_forward_cuda", ([&] {
bmm2x2_cuda_forward_inference_kernel<scalar_t><<<blocks_per_grid, threads_per_block>>>(
input.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
weights.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
s0, s1);
}));
return {input};
}
template <typename scalar_t> // mat1 is X, mat2 is W -> Y = X.W
__global__ void bmm2x2_cuda_backward_kernel(
const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> input,
const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> weight,
const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> del_output,
torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> del_input,
torch::PackedTensorAccessor32<scalar_t,4,torch::RestrictPtrTraits> del_weight,
size_t s0, size_t s1)
{
// Each thread computes one batch of 2x2 matmul.
size_t i0 = blockIdx.x * blockDim.x + threadIdx.x; // batch_size
size_t i1 = blockIdx.y * blockDim.y + threadIdx.y; // input_dim//2
if ((i0 >= s0) || (i1 >= s1)){
return;
}
/// computing dX = dY.(W^t)
del_input[i0][i1][0] = del_output[i0][i1][0] * weight[i1][0][0] +
del_output[i0][i1][1] * weight[i1][0][1];
del_input[i0][i1][1] = del_output[i0][i1][0] * weight[i1][1][0] +
del_output[i0][i1][1] * weight[i1][1][1];
// d_mat1[i4] = d_out[i4] * mat_2[i4] + d_out[i4+1]*mat_2[i4+1];
// d_mat1[i4+1] = d_out[i4] * mat_2[i4+2] + d_out[i4+1]*mat_2[i4+3];
// d_mat1[i4+2] = d_out[i4+2] * mat_2[i4] + d_out[i4+3]*mat_2[i4+1];
// d_mat1[i4+3] = d_out[i4+2] * mat_2[i4+2] + d_out[i4+3]*mat_2[i4+3];
/// computing dW = X^t.dY
del_weight[i0][i1][0][0] = del_output[i0][i1][0]*input[i0][i1][0];
del_weight[i0][i1][0][1] = del_output[i0][i1][1]*input[i0][i1][0];
del_weight[i0][i1][1][0] = del_output[i0][i1][0]*input[i0][i1][1];
del_weight[i0][i1][1][1] = del_output[i0][i1][1]*input[i0][i1][1];
// d_mat2[i4] = mat_1[i4] * d_out[i4] + mat_1[i4+2]*d_out[i4+2];
// d_mat2[i4+1] = mat_1[i4] * d_out[i4+1] + mat_1[i4+2]*d_out[i4+3];
// d_mat2[i4+2] = mat_1[i4+1] * d_out[i4] + mat_1[i4+3]*d_out[i4+2];
// d_mat2[i4+3] = mat_1[i4+1] * d_out[i4+1] + mat_1[i4+3]*d_out[i4+3];
return;
}
std::vector<torch::Tensor> bmm2x2_cuda_backward(
torch::Tensor input,
torch::Tensor weights,
torch::Tensor grad_output) {
const auto s0 = input.size(0);
const auto s1 = input.size(1);
dim3 threads_per_block(BLOCK_DIM, BLOCK_DIM);
// spreading batch across multiple blocks and thread
dim3 blocks_per_grid(1, 1);
blocks_per_grid.x = std::ceil(static_cast<double>(s0) /
static_cast<double>(threads_per_block.x));
blocks_per_grid.y = std::ceil(static_cast<double>(s1) /
static_cast<double>(threads_per_block.y));
auto del_input = torch::zeros_like(input);
auto options = torch::TensorOptions().dtype(input.dtype()).device(input.device());
// auto del_weights = torch::empty({s0, s1, 2, 2}, input.device());
auto del_weights = torch::empty({s0, s1, 2, 2}, options);
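// del_weights holds one 2x2 gradient per (sample, pair); it is reduced over the
// batch dimension with torch::sum(del_weights, 0) before being returned below.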
AT_DISPATCH_FLOATING_TYPES(input.type(), "bmm2x2_backward_cuda", ([&] {
bmm2x2_cuda_backward_kernel<scalar_t><<<blocks_per_grid, threads_per_block>>>(
input.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
weights.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
grad_output.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
del_input.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
del_weights.packed_accessor32<scalar_t,4,torch::RestrictPtrTraits>(),
s0, s1);
}));
return {del_input, torch::sum(del_weights, 0)};
}
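/* How these CUDA entry points are typically exposed to Python: a separate binding
   translation unit registers them through the PyTorch extension machinery.  A minimal
   sketch (the module/function names and docstrings are illustrative assumptions, not
   taken from this file):

   #include <torch/extension.h>
   std::vector<torch::Tensor> bmm2x2_cuda_backward(torch::Tensor, torch::Tensor, torch::Tensor);
   PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
     m.def("bmm2x2_backward", &bmm2x2_cuda_backward, "blockwise 2x2 matmul backward (CUDA)");
   }
*/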
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////
/// this is for computing del_input and del_weight separately
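/// Design note: unlike bmm2x2_cuda_backward above, which materialises a per-sample weight
/// gradient of shape (s0, s1, 2, 2) and reduces it with torch::sum on the host, the v2 path
/// lets the delweight kernel accumulate over the batch inside the kernel, so del_weights is
/// allocated with the same shape as weights.  The trade-off is a serial loop over all s0
/// samples per thread.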
template <typename scalar_t> // mat1 is X, mat2 is W -> Y = X.W
__global__ void bmm2x2_cuda_backward_delinput_kernel(
const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> weight,
const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> del_output,
torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> del_input,
size_t s0, size_t s1)
{
// Each thread computes one batch of 2x2 matmul.
size_t i0 = blockIdx.x * blockDim.x + threadIdx.x; // batch_size
size_t i1 = blockIdx.y * blockDim.y + threadIdx.y; // input_dim//2
if ((i0 >= s0) || (i1 >= s1)){
return;
}
/// computing dX = dY.(W^t)
del_input[i0][i1][0] = del_output[i0][i1][0] * weight[i1][0][0] +
del_output[i0][i1][1] * weight[i1][0][1];
del_input[i0][i1][1] = del_output[i0][i1][0] * weight[i1][1][0] +
del_output[i0][i1][1] * weight[i1][1][1];
return;
}
template <typename scalar_t> // mat1 is X, mat2 is W -> Y = X.W
__global__ void bmm2x2_cuda_backward_delweight_kernel(
const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> input,
const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> del_output,
torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> del_weight,
size_t s0, size_t s1)
{
// Each thread accumulates the weight gradient of one 2x2 block over the whole batch.
size_t i1 = blockIdx.x * blockDim.x + threadIdx.x; // input_dim//2
if (i1 >= s1){
return;
}
scalar_t t0=0, t1=0, t2=0, t3=0;
for (size_t k{0}; k < s0; ++k){
t0 += del_output[k][i1][0] * input[k][i1][0];
t1 += del_output[k][i1][1] * input[k][i1][0];
t2 += del_output[k][i1][0] * input[k][i1][1];
t3 += del_output[k][i1][1] * input[k][i1][1];
}
// scalar_t _s0 = static_cast<scalar_t>(s0);
del_weight[i1][0][0] = t0;
del_weight[i1][0][1] = t1;
del_weight[i1][1][0] = t2;
del_weight[i1][1][1] = t3;
return;
}
std::vector<torch::Tensor> bmm2x2_cuda_backward_v2(
torch::Tensor input,
torch::Tensor weights,
torch::Tensor grad_output) {
const auto s0 = input.size(0);
const auto s1 = input.size(1);
dim3 threads_per_block(BLOCK_DIM, BLOCK_DIM);
// spreading the batch across multiple blocks and threads
dim3 blocks_per_grid(1, 1);
blocks_per_grid.x = std::ceil(static_cast<double>(s0) /
static_cast<double>(threads_per_block.x));
blocks_per_grid.y = std::ceil(static_cast<double>(s1) /
static_cast<double>(threads_per_block.y));
auto del_input = torch::zeros_like(input);
auto del_weights = torch::empty_like(weights);
AT_DISPATCH_FLOATING_TYPES(input.type(), "bmm2x2_backward_input_cuda", ([&] {
bmm2x2_cuda_backward_delinput_kernel<scalar_t><<<blocks_per_grid, threads_per_block>>>(
weights.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
grad_output.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
del_input.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
s0, s1);
}));
int threads = BLOCK_DIM*BLOCK_DIM;
int blocks = std::ceil(static_cast<double>(s1) /
static_cast<double>(threads));
AT_DISPATCH_FLOATING_TYPES(input.type(), "bmm2x2_backward_weight_cuda", ([&] {
// scalar_t _s0 = static_cast<scalar_t>(s0);
bmm2x2_cuda_backward_delweight_kernel<scalar_t><<<blocks, threads>>>(
input.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
grad_output.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
del_weights.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
s0, s1);
}));
return {del_input, del_weights};
}
//////////////////////////////////////////////////////
//////////////////////////////////////////////////////
//////////////////////////////////////////////////////
//////////////////////////////////////////////////////
//////////////////////////////////////////////////////
///////////// THIS PORTION CONTAINS CODE FOR BMM 2X1 (HALVER) FOR 2X2 PORTION
template <typename scalar_t>
__global__ void bmm2x1_cuda_forward_kernel(
const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> input,
const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> weight,
torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> output,
size_t s0, size_t s1)
{
// Each thread computes one batch of 2x1 matmul.
size_t i0 = blockIdx.x * blockDim.x + threadIdx.x; // batch_size
size_t i1 = blockIdx.y * blockDim.y + threadIdx.y; // input_dim//2
if ((i0 >= s0) || (i1 >= s1)){
return;
}
output[i0][i1] = input[i0][i1][0] * weight[i1][0] +
input[i0][i1][1] * weight[i1][1];
return;
}
/// here, we expect the tensor not to be transposed; the kernel still performs the same batched matmul
std::vector<torch::Tensor> bmm2x1_cuda_forward(
torch::Tensor input,
torch::Tensor weights) {
/// input has shape -> batch size, n_grids, 2
/// weight has shape -> n_grids, 2 (conceptually n_grids, 2, 1: each 2x1 block halves the feature count)
const auto s0 = input.size(0);
const auto s1 = input.size(1);
// std::cout<<"Batch Size "<<batch_size<<" Input Size "<<input.size(1)<<","<<input.size(2)<<std::endl;
dim3 threads_per_block(BLOCK_DIM, BLOCK_DIM);
// spreading the batch across multiple blocks and threads
dim3 blocks_per_grid(1, 1);
blocks_per_grid.x = std::ceil(static_cast<double>(s0) /
static_cast<double>(threads_per_block.x));
blocks_per_grid.y = std::ceil(static_cast<double>(s1) /
static_cast<double>(threads_per_block.y));
// size_t threads_per_block = BLOCK_DIM*BLOCK_DIM;
// size_t blocks_per_grid = std::ceil(static_cast<double>(batch_size) /
// static_cast<double>(threads_per_block));
// const int threads_per_block = 1024; // default is 1024
// const dim3 blocks_per_grid((batch_size + threads - 1) / threads, batch_size);
auto output = torch::zeros({s0, s1}, input.options());
/// output has shape Batch, n_group
AT_DISPATCH_FLOATING_TYPES(input.type(), "bmm2x1_forward_cuda", ([&] {
bmm2x1_cuda_forward_kernel<scalar_t><<<blocks_per_grid, threads_per_block>>>(
input.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
weights.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
output.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
s0, s1);
}));
return {output};
}
template <typename scalar_t> // mat1 is X, mat2 is W -> Y = X.W
__global__ void bmm2x1_cuda_backward_kernel(
const torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> input,
const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> weight,
const torch::PackedTensorAccessor32<scalar_t,2,torch::RestrictPtrTraits> del_output,
torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> del_input,
torch::PackedTensorAccessor32<scalar_t,3,torch::RestrictPtrTraits> del_weight,
size_t s0, size_t s1)
{
// Each thread computes the gradients for one 2x1 block of one batch element.
size_t i0 = blockIdx.x * blockDim.x + threadIdx.x; // batch_size
size_t i1 = blockIdx.y * blockDim.y + threadIdx.y; // input_dim//2
if ((i0 >= s0) || (i1 >= s1)){
return;
}
/// computing dX = dY.(W^t)
del_input[i0][i1][0] = del_output[i0][i1] * weight[i1][0];
del_input[i0][i1][1] = del_output[i0][i1] * weight[i1][1];
/// computing dW = X^t.dY
del_weight[i0][i1][0] = del_output[i0][i1]*input[i0][i1][0];
del_weight[i0][i1][1] = del_output[i0][i1]*input[i0][i1][1];
return;
}
std::vector<torch::Tensor> bmm2x1_cuda_backward(
torch::Tensor input,
torch::Tensor weights,
torch::Tensor grad_output) {
/// input has shape -> batch size, n_grids, 2
/// weight has shape -> n_grids, 2
const auto s0 = input.size(0);
const auto s1 = input.size(1);
dim3 threads_per_block(BLOCK_DIM, BLOCK_DIM);
// spreading the batch across multiple blocks and threads
dim3 blocks_per_grid(1, 1);
blocks_per_grid.x = std::ceil(static_cast<double>(s0) /
static_cast<double>(threads_per_block.x));
blocks_per_grid.y = std::ceil(static_cast<double>(s1) /
static_cast<double>(threads_per_block.y));
auto del_input = torch::zeros_like(input);
auto del_weights = torch::empty({s0, s1, 2}, input.options());
AT_DISPATCH_FLOATING_TYPES(input.type(), "bmm2x1_backward_cuda", ([&] {
bmm2x1_cuda_backward_kernel<scalar_t><<<blocks_per_grid, threads_per_block>>>(
input.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
weights.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
grad_output.packed_accessor32<scalar_t,2,torch::RestrictPtrTraits>(),
del_input.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
del_weights.packed_accessor32<scalar_t,3,torch::RestrictPtrTraits>(),
s0, s1);
}));
return {del_input, torch::sum(del_weights, 0)};
}
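/* A quick host-side sanity check of bmm2x1 against native ATen ops (a sketch, assuming
   `input` is (B, n_grids, 2) and `weights` is (n_grids, 2) as documented above; the
   tolerances are illustrative):

   auto out = bmm2x1_cuda_forward(input, weights)[0];
   auto ref = (input * weights.unsqueeze(0)).sum(-1);   // same per-group dot product
   TORCH_CHECK(torch::allclose(out, ref, 1e-4, 1e-6), "bmm2x1 forward mismatch");
*/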
|
8829b2f17f47d1970b45957e7897c2bea92f1d14.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Basic test to see if our CuRAND wrapper
* actually generates random numbers.
*/
#include <stdio.h>
#include <memory>
#include <hiprand/hiprand.h>
#include "CuDNN/CuDNN.hpp"
#include "CuRAND/CuRAND.hpp"
void print_buffer(double *buf, int size) {
for (int i = 0; i < size; i++) {
printf("%5.3f ", buf[i]);
if ((i + 1) % 10 == 0) {
printf("\n");
}
}
}
void print_tensor(double* tensor, int size) {
auto output = (double *) malloc(size * sizeof(double));
hipMemcpy(output, tensor, size * sizeof(double), hipMemcpyDeviceToHost);
print_buffer(output, size);
free(output);
}
int main(int argc, char *argv[]) {
auto w = 10;
auto h = 10;
auto size = w * h;
auto input = CuDNN::Tensor<double>::createNCHW(1, 1, h, w);
printf("\n");
printf("---------------------------------\n");
printf(" BEFORE RANDOM GENERATOR\n");
printf("---------------------------------\n");
print_tensor(input, size);
CuRAND::PseudoGenerator gen = CuRAND::PseudoGenerator::create(HIPRAND_RNG_PSEUDO_XORWOW, 42ULL);
auto runs = 3;
for(int i = 0; i < runs; i++) {
gen.generateUniform(input, size);
printf("\n\n");
printf("---------------------------------\n");
printf(" AFTER RANDOM GENERATOR\n");
printf("---------------------------------\n");
printf("Run: %d\n", i+1);
print_tensor(input, size);
}
return 0;
}
|
8829b2f17f47d1970b45957e7897c2bea92f1d14.cu
|
/*
* Basic test to see if our CuRAND wrapper
* actually generates random numbers.
*/
#include <stdio.h>
#include <memory>
#include <curand.h>
#include "CuDNN/CuDNN.hpp"
#include "CuRAND/CuRAND.hpp"
void print_buffer(double *buf, int size) {
for (int i = 0; i < size; i++) {
printf("%5.3f ", buf[i]);
if ((i + 1) % 10 == 0) {
printf("\n");
}
}
}
void print_tensor(double* tensor, int size) {
auto output = (double *) malloc(size * sizeof(double));
cudaMemcpy(output, tensor, size * sizeof(double), cudaMemcpyDeviceToHost);
print_buffer(output, size);
free(output);
}
int main(int argc, char *argv[]) {
auto w = 10;
auto h = 10;
auto size = w * h;
auto input = CuDNN::Tensor<double>::createNCHW(1, 1, h, w);
printf("\n");
printf("---------------------------------\n");
printf(" BEFORE RANDOM GENERATOR\n");
printf("---------------------------------\n");
print_tensor(input, size);
CuRAND::PseudoGenerator gen = CuRAND::PseudoGenerator::create(CURAND_RNG_PSEUDO_XORWOW, 42ULL);
auto runs = 3;
for(int i = 0; i < runs; i++) {
gen.generateUniform(input, size);
printf("\n\n");
printf("---------------------------------\n");
printf(" AFTER RANDOM GENERATOR\n");
printf("---------------------------------\n");
printf("Run: %d\n", i+1);
print_tensor(input, size);
}
return 0;
}
|
9b38e06a89e602301718e41e3a26092127e9e483.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <math.h>
#include <hdf5.h>
#define TPB 256 // Number of threads per block
__host__ void updateTimer(time_t t0, int tstep, char str[]) {
int elapsedTime=(int)(time(0)-t0);
sprintf(str, "%02d:%02d:%02d", elapsedTime/3600, elapsedTime%3600/60, elapsedTime%60);
}
__host__ void exec(char *format, ...) {
char str[1024];
va_list ap;
va_start(ap, format);
vsprintf(str, format, ap);
system(str);
}
__host__ void dumpToH5(int Ni, int Nj, int Nk, int is, int js, int ks, int ie, int je, int ke, float ***f, char *format, ...) {
char filename[1024];
va_list ap;
va_start(ap, format);
vsprintf(filename, format, ap);
hid_t file, dataset, filespace, memspace;
hsize_t dimsm[3] = { Ni, Nj, Nk };
hsize_t start[3] = { is, js, ks };
hsize_t count[3] = { 1-is+ie, 1-js+je, 1-ks+ke };
memspace = H5Screate_simple(3, dimsm, 0);
filespace = H5Screate_simple(3, count, 0);
file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
dataset = H5Dcreate(file, "Data", H5T_NATIVE_FLOAT, filespace, H5P_DEFAULT);
H5Sselect_hyperslab(memspace, H5S_SELECT_SET, start, 0, count, 0);
H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace, filespace, H5P_DEFAULT, f[0][0]);
H5Dclose(dataset);
H5Sclose(filespace);
H5Sclose(memspace);
H5Fclose(file);
}
__host__ void print_array(int Nx, int Ny, int Nz, float ***a) {
int j,k;
for (j=0; j<Ny; j++) {
for (k=0; k<Nz; k++) {
printf("%1.4f\t", a[Nx/2][j][k]);
}
printf("\n");
}
printf("\n");
}
__host__ float ***makeArray(int Nx, int Ny, int Nz) {
float ***f;
f = (float ***) calloc (Nx, sizeof(float **));
f[0] = (float **) calloc (Ny*Nx, sizeof(float *));
f[0][0] = (float *) calloc (Nz*Ny*Nx, sizeof(float));
for (int i=0; i<Nx; i++) f[i] = f[0] + i*Ny;
for (int i=0; i<Ny*Nx; i++) f[0][i] = f[0][0] + i*Nz;
return f;
}
__host__ void set_geometry(int Nx, int Ny, int Nz,
float ***CEx, float ***CEy, float ***CEz) {
int i,j,k;
for (i=0; i<Nx; i++) {
for (j=0; j<Ny; j++) {
for (k=0; k<Nz; k++) {
CEx[i][j][k] = 0.5;
CEy[i][j][k] = 0.5;
CEz[i][j][k] = 0.5;
}
}
}
}
__global__ void initArrays(int Nx, int Ny, int Nzpit,
float *Ex, float *Ey, float *Ez,
float *Hx, float *Hy, float *Hz) {
int idx;
idx = blockIdx.x*blockDim.x + threadIdx.x;
//printf("gridDim.x=%d\n",gridDim.x);
//printf("blockIdx.x=%d, blockDim.x=%d, threadIdx.x=%d\n", blockIdx.x, blockDim.x, threadIdx.x);
if ( idx < Nx*Ny*Nzpit ) {
Ex[idx] = 0;
Ey[idx] = 0;
Ez[idx] = 0;
Hx[idx] = 0;
Hy[idx] = 0;
Hz[idx] = 0;
}
}
__global__ void updateE(int Nx, int Ny, int Nz, int Nzpit,
float *Ex, float *Ey, float *Ez,
float *Hx, float *Hy, float *Hz,
float *CEx, float *CEy, float *CEz) {
int tk, idx;
tk = threadIdx.x;
idx = blockIdx.x*TPB + tk;
//printf("gridDim.x=%d\n",gridDim.x);
//printf("blockIdx.x=%d, blockDim.x=%d, threadIdx.x=%d\n", blockIdx.x, blockDim.x, threadIdx.x);
if ( idx < Nx*Ny*Nzpit ) {
int i,j,k;
int Nyz = Ny*Nzpit;
i = idx/Nyz;
j = ( idx - i*Nyz )/Nzpit;
k = idx - i*Nyz - j*Nzpit;
//printf("[%d](%d,%d,%d)\n",idx,i,j,k);
__shared__ float hx[TPB+1], hy[TPB+1], hz[TPB];
hx[tk] = Hx[idx];
hy[tk] = Hy[idx];
hz[tk] = Hz[idx];
if ( tk==TPB-1 && k<Nz-1 ) {
hx[tk+1] = Hx[idx+1];
hy[tk+1] = Hy[idx+1];
}
__syncthreads();
if ( k < Nz ) {
if ( j<Ny-1 && k<Nz-1 ) Ex[idx] += CEx[idx]*( Hz[idx+Nzpit] - hz[tk] - hy[tk+1] + hy[tk] );
if ( i<Nx-1 && k<Nz-1 ) Ey[idx] += CEy[idx]*( hx[tk+1] - hx[tk] - Hz[idx+Nyz] + hz[tk] );
if ( i<Nx-1 && j<Ny-1 ) Ez[idx] += CEz[idx]*( Hy[idx+Nyz] - hy[tk] - Hx[idx+Nzpit] + hx[tk] );
}
}
}
__global__ void updateSrc(int Nx, int Ny, int Nz, int Nzpit,
float *Ex, int tstep) {
int idx, ijk;
idx = blockIdx.x*blockDim.x + threadIdx.x;
ijk = idx*(Ny)*(Nzpit) + (Ny/2)*(Nzpit) + (Nz/2);
//printf("idx=%d, ijk=%d\n", idx, ijk);
//Ex[ijk] += __sinf(0.1*tstep);
if ( idx < Nx ) {
Ex[ijk] += sin(0.1*tstep);
}
}
__global__ void updateH(int Nx, int Ny, int Nz, int Nzpit,
float *Ex, float *Ey, float *Ez,
float *Hx, float *Hy, float *Hz) {
int tk, idx;
tk = threadIdx.x;
idx = blockIdx.x*TPB + tk;
if ( idx < Nx*Ny*Nzpit ) {
int i,j,k;
int Nyz = Ny*Nzpit;
i = idx/Nyz;
j = ( idx - i*Nyz )/Nzpit;
k = idx - i*Nyz - j*Nzpit;
__shared__ float ex[TPB+1], ey[TPB+1], ez[TPB];
ex[tk+1] = Ex[idx];
ey[tk+1] = Ey[idx];
ez[tk] = Ez[idx];
if ( tk==0 && k>0 ) {
ex[0] = Ex[idx-1];
ey[0] = Ey[idx-1];
}
__syncthreads();
if ( k < Nz ) {
if ( j>0 && k>0 ) Hx[idx] -= 0.5*( ez[tk] - Ez[idx-Nzpit] - ey[tk+1] + ey[tk] );
if ( i>0 && k>0 ) Hy[idx] -= 0.5*( ex[tk+1] - ex[tk] - ez[tk] + Ez[idx-Nyz] );
if ( i>0 && j>0 ) Hz[idx] -= 0.5*( ey[tk+1] - Ey[idx-Nyz] - ex[tk+1] + Ex[idx-Nzpit] );
}
}
}
int main() {
int tstep;
char time_str[32];
time_t t0;
// Set the parameters
int Nx, Ny, Nz, TMAX;
Nx = 100;
Ny = 200; //16;
Nz = 500; //20;
TMAX = 1000;
// Allocate host memory
//float ***Ex;
float ***CEx, ***CEy, ***CEz;
//Ex = makeArray(Nx, Ny, Nz);
CEx = makeArray(Nx, Ny, Nz);
CEy = makeArray(Nx, Ny, Nz);
CEz = makeArray(Nx, Ny, Nz);
// Geometry
set_geometry(Nx, Ny, Nz, CEx, CEy, CEz);
// Allocate device memory
float *devEx, *devEy, *devEz;
float *devHx, *devHy, *devHz;
float *devCEx, *devCEy, *devCEz;
int z_size = Nz*sizeof(float);
size_t pitch;
hipMallocPitch ( (void**) &devEx, &pitch, z_size, Nx*Ny );
hipMallocPitch ( (void**) &devEy, &pitch, z_size, Nx*Ny );
hipMallocPitch ( (void**) &devEz, &pitch, z_size, Nx*Ny );
hipMallocPitch ( (void**) &devCEx, &pitch, z_size, Nx*Ny );
hipMallocPitch ( (void**) &devCEy, &pitch, z_size, Nx*Ny );
hipMallocPitch ( (void**) &devCEz, &pitch, z_size, Nx*Ny );
hipMallocPitch ( (void**) &devHx, &pitch, z_size, Nx*Ny );
hipMallocPitch ( (void**) &devHy, &pitch, z_size, Nx*Ny );
hipMallocPitch ( (void**) &devHz, &pitch, z_size, Nx*Ny );
// Copy arrays from host to device
hipMemcpy2D ( devCEx, pitch, CEx[0][0], z_size, z_size, Nx*Ny, hipMemcpyHostToDevice );
hipMemcpy2D ( devCEy, pitch, CEy[0][0], z_size, z_size, Nx*Ny, hipMemcpyHostToDevice );
hipMemcpy2D ( devCEz, pitch, CEz[0][0], z_size, z_size, Nx*Ny, hipMemcpyHostToDevice );
int Nz_pitch = pitch/4;
printf("pitch= %u, Nz_pitch= %d\n", pitch, Nz_pitch);
// Number of thread blocks in the grid
int N = Nx*Ny*Nz_pitch;
int BPG = N%TPB == 0 ? N/TPB : N/TPB + 1;
printf("TPB=%d, BPG=%d\n", TPB, BPG);
dim3 gridDim(BPG);
// Number of threads per block
dim3 blockDim(TPB);
//int BPGsrc = Nx%TPB == 0 ? Nx/TPB : Nx/TPB + 1;
int BPGsrc = 1;
dim3 gridDimsrc(BPGsrc);
dim3 blockDimsrc(Nx);
// Initialize the device arrays
hipLaunchKernelGGL(( initArrays) , dim3(gridDim),dim3(blockDim), 0, 0, Nx, Ny, Nz_pitch, devEx, devEy, devEz, devHx, devHy, devHz );
// Main time loop
t0 = time(0);
//for ( tstep=1; tstep<=TMAX; tstep++) {
for ( tstep=1; tstep<=10; tstep++) {
// Update on the GPU
hipLaunchKernelGGL(( updateE) , dim3(gridDim),dim3(blockDim), 0, 0, Nx, Ny, Nz, Nz_pitch, devEx, devEy, devEz, devHx, devHy, devHz, devCEx, devCEy, devCEz );
//updateSrc <<<gridDimsrc,blockDimsrc>>> ( Nx, Ny, Nz, Nz_pitch, devEx, tstep );
//updateH <<<gridDim,blockDim>>> ( Nx, Ny, Nz, Nz_pitch, devEx, devEy, devEz, devHx, devHy, devHz );
/*
//if ( tstep/10*10 == tstep ) {
// Copy arrays from device to host
hipMemcpy2D( Ex[0][0], z_size, devEx, pitch, z_size, Nx*Ny, hipMemcpyDeviceToHost );
//print_array(Nx, Ny, Nz, Ex);
dumpToH5(Nx, Ny, Nz, Nx/2, 0, 0, Nx/2, Ny-1, Nz-1, Ex, "gpu_png/Ex-%05d.h5", tstep);
exec("h5topng -ZM0.1 -x0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ex-%05d.h5", tstep);
updateTimer(t0, tstep, time_str);
printf("tstep=%d\t%s\n", tstep, time_str);
//}
*/
}
updateTimer(t0, tstep, time_str);
printf("tstep=%d\t%s\n", tstep, time_str);
}
|
9b38e06a89e602301718e41e3a26092127e9e483.cu
|
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <math.h>
#include <hdf5.h>
#define TPB 256 // Number of threads per block
__host__ void updateTimer(time_t t0, int tstep, char str[]) {
int elapsedTime=(int)(time(0)-t0);
sprintf(str, "%02d:%02d:%02d", elapsedTime/3600, elapsedTime%3600/60, elapsedTime%60);
}
__host__ void exec(char *format, ...) {
char str[1024];
va_list ap;
va_start(ap, format);
vsprintf(str, format, ap);
system(str);
}
__host__ void dumpToH5(int Ni, int Nj, int Nk, int is, int js, int ks, int ie, int je, int ke, float ***f, char *format, ...) {
char filename[1024];
va_list ap;
va_start(ap, format);
vsprintf(filename, format, ap);
hid_t file, dataset, filespace, memspace;
hsize_t dimsm[3] = { Ni, Nj, Nk };
hsize_t start[3] = { is, js, ks };
hsize_t count[3] = { 1-is+ie, 1-js+je, 1-ks+ke };
memspace = H5Screate_simple(3, dimsm, 0);
filespace = H5Screate_simple(3, count, 0);
file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
dataset = H5Dcreate(file, "Data", H5T_NATIVE_FLOAT, filespace, H5P_DEFAULT);
H5Sselect_hyperslab(memspace, H5S_SELECT_SET, start, 0, count, 0);
H5Dwrite(dataset, H5T_NATIVE_FLOAT, memspace, filespace, H5P_DEFAULT, f[0][0]);
H5Dclose(dataset);
H5Sclose(filespace);
H5Sclose(memspace);
H5Fclose(file);
}
__host__ void print_array(int Nx, int Ny, int Nz, float ***a) {
int j,k;
for (j=0; j<Ny; j++) {
for (k=0; k<Nz; k++) {
printf("%1.4f\t", a[Nx/2][j][k]);
}
printf("\n");
}
printf("\n");
}
__host__ float ***makeArray(int Nx, int Ny, int Nz) {
float ***f;
f = (float ***) calloc (Nx, sizeof(float **));
f[0] = (float **) calloc (Ny*Nx, sizeof(float *));
f[0][0] = (float *) calloc (Nz*Ny*Nx, sizeof(float));
for (int i=0; i<Nx; i++) f[i] = f[0] + i*Ny;
for (int i=0; i<Ny*Nx; i++) f[0][i] = f[0][0] + i*Nz;
return f;
}
__host__ void set_geometry(int Nx, int Ny, int Nz,
float ***CEx, float ***CEy, float ***CEz) {
int i,j,k;
for (i=0; i<Nx; i++) {
for (j=0; j<Ny; j++) {
for (k=0; k<Nz; k++) {
CEx[i][j][k] = 0.5;
CEy[i][j][k] = 0.5;
CEz[i][j][k] = 0.5;
}
}
}
}
__global__ void initArrays(int Nx, int Ny, int Nzpit,
float *Ex, float *Ey, float *Ez,
float *Hx, float *Hy, float *Hz) {
int idx;
idx = blockIdx.x*blockDim.x + threadIdx.x;
//printf("gridDim.x=%d\n",gridDim.x);
//printf("blockIdx.x=%d, blockDim.x=%d, threadIdx.x=%d\n", blockIdx.x, blockDim.x, threadIdx.x);
if ( idx < Nx*Ny*Nzpit ) {
Ex[idx] = 0;
Ey[idx] = 0;
Ez[idx] = 0;
Hx[idx] = 0;
Hy[idx] = 0;
Hz[idx] = 0;
}
}
__global__ void updateE(int Nx, int Ny, int Nz, int Nzpit,
float *Ex, float *Ey, float *Ez,
float *Hx, float *Hy, float *Hz,
float *CEx, float *CEy, float *CEz) {
int tk, idx;
tk = threadIdx.x;
idx = blockIdx.x*TPB + tk;
//printf("gridDim.x=%d\n",gridDim.x);
//printf("blockIdx.x=%d, blockDim.x=%d, threadIdx.x=%d\n", blockIdx.x, blockDim.x, threadIdx.x);
if ( idx < Nx*Ny*Nzpit ) {
int i,j,k;
int Nyz = Ny*Nzpit;
i = idx/Nyz;
j = ( idx - i*Nyz )/Nzpit;
k = idx - i*Nyz - j*Nzpit;
//printf("[%d](%d,%d,%d)\n",idx,i,j,k);
__shared__ float hx[TPB+1], hy[TPB+1], hz[TPB];
hx[tk] = Hx[idx];
hy[tk] = Hy[idx];
hz[tk] = Hz[idx];
if ( tk==TPB-1 && k<Nz-1 ) {
hx[tk+1] = Hx[idx+1];
hy[tk+1] = Hy[idx+1];
}
__syncthreads();
if ( k < Nz ) {
if ( j<Ny-1 && k<Nz-1 ) Ex[idx] += CEx[idx]*( Hz[idx+Nzpit] - hz[tk] - hy[tk+1] + hy[tk] );
if ( i<Nx-1 && k<Nz-1 ) Ey[idx] += CEy[idx]*( hx[tk+1] - hx[tk] - Hz[idx+Nyz] + hz[tk] );
if ( i<Nx-1 && j<Ny-1 ) Ez[idx] += CEz[idx]*( Hy[idx+Nyz] - hy[tk] - Hx[idx+Nzpit] + hx[tk] );
}
}
}
__global__ void updateSrc(int Nx, int Ny, int Nz, int Nzpit,
float *Ex, int tstep) {
int idx, ijk;
idx = blockIdx.x*blockDim.x + threadIdx.x;
ijk = idx*(Ny)*(Nzpit) + (Ny/2)*(Nzpit) + (Nz/2);
//printf("idx=%d, ijk=%d\n", idx, ijk);
//Ex[ijk] += __sinf(0.1*tstep);
if ( idx < Nx ) {
Ex[ijk] += sin(0.1*tstep);
}
}
__global__ void updateH(int Nx, int Ny, int Nz, int Nzpit,
float *Ex, float *Ey, float *Ez,
float *Hx, float *Hy, float *Hz) {
int tk, idx;
tk = threadIdx.x;
idx = blockIdx.x*TPB + tk;
if ( idx < Nx*Ny*Nzpit ) {
int i,j,k;
int Nyz = Ny*Nzpit;
i = idx/Nyz;
j = ( idx - i*Nyz )/Nzpit;
k = idx - i*Nyz - j*Nzpit;
__shared__ float ex[TPB+1], ey[TPB+1], ez[TPB];
ex[tk+1] = Ex[idx];
ey[tk+1] = Ey[idx];
ez[tk] = Ez[idx];
if ( tk==0 && k>0 ) {
ex[0] = Ex[idx-1];
ey[0] = Ey[idx-1];
}
__syncthreads();
if ( k < Nz ) {
if ( j>0 && k>0 ) Hx[idx] -= 0.5*( ez[tk] - Ez[idx-Nzpit] - ey[tk+1] + ey[tk] );
if ( i>0 && k>0 ) Hy[idx] -= 0.5*( ex[tk+1] - ex[tk] - ez[tk] + Ez[idx-Nyz] );
if ( i>0 && j>0 ) Hz[idx] -= 0.5*( ey[tk+1] - Ey[idx-Nyz] - ex[tk+1] + Ex[idx-Nzpit] );
}
}
}
int main() {
int tstep;
char time_str[32];
time_t t0;
// Set the parameters
int Nx, Ny, Nz, TMAX;
Nx = 100;
Ny = 200; //16;
Nz = 500; //20;
TMAX = 1000;
// Allocate host memory
//float ***Ex;
float ***CEx, ***CEy, ***CEz;
//Ex = makeArray(Nx, Ny, Nz);
CEx = makeArray(Nx, Ny, Nz);
CEy = makeArray(Nx, Ny, Nz);
CEz = makeArray(Nx, Ny, Nz);
// Geometry
set_geometry(Nx, Ny, Nz, CEx, CEy, CEz);
// Allocate device memory
float *devEx, *devEy, *devEz;
float *devHx, *devHy, *devHz;
float *devCEx, *devCEy, *devCEz;
int z_size = Nz*sizeof(float);
size_t pitch;
cudaMallocPitch ( (void**) &devEx, &pitch, z_size, Nx*Ny );
cudaMallocPitch ( (void**) &devEy, &pitch, z_size, Nx*Ny );
cudaMallocPitch ( (void**) &devEz, &pitch, z_size, Nx*Ny );
cudaMallocPitch ( (void**) &devCEx, &pitch, z_size, Nx*Ny );
cudaMallocPitch ( (void**) &devCEy, &pitch, z_size, Nx*Ny );
cudaMallocPitch ( (void**) &devCEz, &pitch, z_size, Nx*Ny );
cudaMallocPitch ( (void**) &devHx, &pitch, z_size, Nx*Ny );
cudaMallocPitch ( (void**) &devHy, &pitch, z_size, Nx*Ny );
cudaMallocPitch ( (void**) &devHz, &pitch, z_size, Nx*Ny );
// Copy arrays from host to device
cudaMemcpy2D ( devCEx, pitch, CEx[0][0], z_size, z_size, Nx*Ny, cudaMemcpyHostToDevice );
cudaMemcpy2D ( devCEy, pitch, CEy[0][0], z_size, z_size, Nx*Ny, cudaMemcpyHostToDevice );
cudaMemcpy2D ( devCEz, pitch, CEz[0][0], z_size, z_size, Nx*Ny, cudaMemcpyHostToDevice );
int Nz_pitch = pitch/4;
printf("pitch= %u, Nz_pitch= %d\n", pitch, Nz_pitch);
// Number of thread blocks in the grid
int N = Nx*Ny*Nz_pitch;
int BPG = N%TPB == 0 ? N/TPB : N/TPB + 1;
printf("TPB=%d, BPG=%d\n", TPB, BPG);
dim3 gridDim(BPG);
// Number of threads per block
dim3 blockDim(TPB);
//int BPGsrc = Nx%TPB == 0 ? Nx/TPB : Nx/TPB + 1;
int BPGsrc = 1;
dim3 gridDimsrc(BPGsrc);
dim3 blockDimsrc(Nx);
// Initialize the device arrays
initArrays <<<gridDim,blockDim>>> ( Nx, Ny, Nz_pitch, devEx, devEy, devEz, devHx, devHy, devHz );
// Main time loop
t0 = time(0);
//for ( tstep=1; tstep<=TMAX; tstep++) {
for ( tstep=1; tstep<=10; tstep++) {
// Update on the GPU
updateE <<<gridDim,blockDim>>> ( Nx, Ny, Nz, Nz_pitch, devEx, devEy, devEz, devHx, devHy, devHz, devCEx, devCEy, devCEz );
//updateSrc <<<gridDimsrc,blockDimsrc>>> ( Nx, Ny, Nz, Nz_pitch, devEx, tstep );
//updateH <<<gridDim,blockDim>>> ( Nx, Ny, Nz, Nz_pitch, devEx, devEy, devEz, devHx, devHy, devHz );
/*
//if ( tstep/10*10 == tstep ) {
// Copy arrays from device to host
cudaMemcpy2D( Ex[0][0], z_size, devEx, pitch, z_size, Nx*Ny, cudaMemcpyDeviceToHost );
//print_array(Nx, Ny, Nz, Ex);
dumpToH5(Nx, Ny, Nz, Nx/2, 0, 0, Nx/2, Ny-1, Nz-1, Ex, "gpu_png/Ex-%05d.h5", tstep);
exec("h5topng -ZM0.1 -x0 -S4 -c /usr/share/h5utils/colormaps/dkbluered gpu_png/Ex-%05d.h5", tstep);
updateTimer(t0, tstep, time_str);
printf("tstep=%d\t%s\n", tstep, time_str);
//}
*/
}
updateTimer(t0, tstep, time_str);
printf("tstep=%d\t%s\n", tstep, time_str);
}
|
CalPathInfoGain.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/***
Simple implementation of information gains for specified topological branch
***/
extern "C" {
#include "tensor_field_nav_core/CalPathInfoGain.h"
}
__global__ void calPathInfoGain_device(const float *pointCollection,const float *pathPoints,int *pathInfoGain_tmp,int *pathInfoGain, int pointCollectionSize,int pathPointSize){
int idx=blockIdx.x*blockDim.x+threadIdx.x;
if(idx< pathPointSize){
int curPointInfoGain=0;
for(int i=0; i<pointCollectionSize;i++){
float deta_x=pathPoints[2*idx]-pointCollection[3*i];
float deta_y=pathPoints[2*idx+1]-pointCollection[3*i+1];
float deta_z=0.3-pointCollection[3*i+2];
float dist_quad=(deta_x*deta_x+deta_y*deta_y+deta_z*deta_z);
if(dist_quad<1)
curPointInfoGain++;
}
pathInfoGain_tmp[idx]=curPointInfoGain;
}
__syncthreads();
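// Note: __syncthreads() only synchronizes threads within one block, while the 5-point
// smoothing below also reads pathInfoGain_tmp entries owned by neighbouring blocks, which
// are not guaranteed to have been written yet.  Splitting the two phases into separate
// kernel launches would give the neighbour reads a well-defined ordering.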
if(idx<3){
pathInfoGain[idx]=pathInfoGain_tmp[idx];
}else if(idx>pathPointSize-3 && idx<pathPointSize){
pathInfoGain[idx]=pathInfoGain_tmp[idx];
}else if(idx>=3 &&idx<=pathPointSize-3){
int sum_infoGain=0;
for(int i=idx-2;i<idx+3;i++){
sum_infoGain=sum_infoGain+pathInfoGain_tmp[i];
}
pathInfoGain[idx]=int(sum_infoGain/5);
}
}
//extern "C"
void calPathInfoGain(const float *pointCollection, const float *pathPoints, int *pathInfoGain,int pointCollectionSize, int pathPointsSize){
float *pointCollection_dev,*pathPoints_dev;
int *pathInfoGain_dev,*pathInfoGain_tmp_dev;
hipError_t cudaStatus= hipMalloc((void**)&pointCollection_dev, sizeof(float)*pointCollectionSize*3);
// hipMalloc((void**)&pointCollection_dev, sizeof(float)*pointCollectionSize*3);
if(cudaStatus !=hipSuccess){
fprintf(stderr, "memory malloc to pointCollection failed ");
return;
}
cudaStatus=hipMalloc((void**)&pathPoints_dev,sizeof(float)*pathPointsSize*2);
if(cudaStatus !=hipSuccess){
fprintf(stderr, "memory malloc to pathPoints failed");
return;
}
cudaStatus=hipMalloc((void**)&pathInfoGain_dev,sizeof(int)*pathPointsSize);
if(cudaStatus !=hipSuccess){
fprintf(stderr, "memory malloc to pathInfoGain_dev failed");
return;
}
cudaStatus=hipMalloc((void**)&pathInfoGain_tmp_dev,sizeof(int)*pathPointsSize);
if(cudaStatus !=hipSuccess){
fprintf(stderr, "memory malloc to pathInfoGain_dev failed");
return;
}
cudaStatus=hipMemcpy(pointCollection_dev,pointCollection,sizeof(float)*pointCollectionSize*3,hipMemcpyHostToDevice);
if(cudaStatus !=hipSuccess){
fprintf(stderr, "pointCollection memory host to device failed");
return;
}
cudaStatus=hipMemcpy(pathPoints_dev,pathPoints, sizeof(float)*pathPointsSize*2,hipMemcpyHostToDevice);
if(cudaStatus !=hipSuccess){
fprintf(stderr, "pathPoints memory host to device failed");
return;
}
hipLaunchKernelGGL(( calPathInfoGain_device), dim3((pathPointsSize+63)/64),dim3(64), 0, 0, pointCollection_dev,pathPoints_dev,pathInfoGain_tmp_dev,pathInfoGain_dev,pointCollectionSize,pathPointsSize);
cudaStatus=hipMemcpy(pathInfoGain,pathInfoGain_dev, sizeof(int)*pathPointsSize,hipMemcpyDeviceToHost);
if(cudaStatus !=hipSuccess){
fprintf(stderr, "pathPoints memory device to host failed");
return;
}
hipFree(pointCollection_dev);
hipFree(pathPoints_dev);
hipFree(pathInfoGain_dev);
hipFree(pathInfoGain_tmp_dev);
}
|
CalPathInfoGain.cu
|
/***
Simple implementation of information gains for specified topological branch
***/
extern "C" {
#include "tensor_field_nav_core/CalPathInfoGain.h"
}
__global__ void calPathInfoGain_device(const float *pointCollection,const float *pathPoints,int *pathInfoGain_tmp,int *pathInfoGain, int pointCollectionSize,int pathPointSize){
int idx=blockIdx.x*blockDim.x+threadIdx.x;
if(idx< pathPointSize){
int curPointInfoGain=0;
for(int i=0; i<pointCollectionSize;i++){
float deta_x=pathPoints[2*idx]-pointCollection[3*i];
float deta_y=pathPoints[2*idx+1]-pointCollection[3*i+1];
float deta_z=0.3-pointCollection[3*i+2];
float dist_quad=(deta_x*deta_x+deta_y*deta_y+deta_z*deta_z);
if(dist_quad<1)
curPointInfoGain++;
}
pathInfoGain_tmp[idx]=curPointInfoGain;
}
__syncthreads();
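// Note: __syncthreads() only synchronizes threads within one block, while the 5-point
// smoothing below also reads pathInfoGain_tmp entries owned by neighbouring blocks, which
// are not guaranteed to have been written yet.  Splitting the two phases into separate
// kernel launches would give the neighbour reads a well-defined ordering.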
if(idx<3){
pathInfoGain[idx]=pathInfoGain_tmp[idx];
}else if(idx>pathPointSize-3 && idx<pathPointSize){
pathInfoGain[idx]=pathInfoGain_tmp[idx];
}else if(idx>=3 &&idx<=pathPointSize-3){
int sum_infoGain=0;
for(int i=idx-2;i<idx+3;i++){
sum_infoGain=sum_infoGain+pathInfoGain_tmp[i];
}
pathInfoGain[idx]=int(sum_infoGain/5);
}
}
//extern "C"
void calPathInfoGain(const float *pointCollection, const float *pathPoints, int *pathInfoGain,int pointCollectionSize, int pathPointsSize){
float *pointCollection_dev,*pathPoints_dev;
int *pathInfoGain_dev,*pathInfoGain_tmp_dev;
cudaError_t cudaStatus= cudaMalloc((void**)&pointCollection_dev, sizeof(float)*pointCollectionSize*3);
// cudaMalloc((void**)&pointCollection_dev, sizeof(float)*pointCollectionSize*3);
if(cudaStatus !=cudaSuccess){
fprintf(stderr, "memory malloc to pointCollection failed ");
return;
}
cudaStatus=cudaMalloc((void**)&pathPoints_dev,sizeof(float)*pathPointsSize*2);
if(cudaStatus !=cudaSuccess){
fprintf(stderr, "memory malloc to pathPoints failed");
return;
}
cudaStatus=cudaMalloc((void**)&pathInfoGain_dev,sizeof(int)*pathPointsSize);
if(cudaStatus !=cudaSuccess){
fprintf(stderr, "memory malloc to pathInfoGain_dev failed");
return;
}
cudaStatus=cudaMalloc((void**)&pathInfoGain_tmp_dev,sizeof(int)*pathPointsSize);
if(cudaStatus !=cudaSuccess){
fprintf(stderr, "memory malloc to pathInfoGain_dev failed");
return;
}
cudaStatus=cudaMemcpy(pointCollection_dev,pointCollection,sizeof(float)*pointCollectionSize*3,cudaMemcpyHostToDevice);
if(cudaStatus !=cudaSuccess){
fprintf(stderr, "pointCollection memory host to device failed");
return;
}
cudaStatus=cudaMemcpy(pathPoints_dev,pathPoints, sizeof(float)*pathPointsSize*2,cudaMemcpyHostToDevice);
if(cudaStatus !=cudaSuccess){
fprintf(stderr, "pathPoints memory host to device failed");
return;
}
calPathInfoGain_device<<<(pathPointsSize+63)/64,64>>>(pointCollection_dev,pathPoints_dev,pathInfoGain_tmp_dev,pathInfoGain_dev,pointCollectionSize,pathPointsSize);
cudaStatus=cudaMemcpy(pathInfoGain,pathInfoGain_dev, sizeof(int)*pathPointsSize,cudaMemcpyDeviceToHost);
if(cudaStatus !=cudaSuccess){
fprintf(stderr, "pathPoints memory device to host failed");
return;
}
cudaFree(pointCollection_dev);
cudaFree(pathPoints_dev);
cudaFree(pathInfoGain_dev);
cudaFree(pathInfoGain_tmp_dev);
}
|
6e77ab1f8888ab92fc19972c46ef5c50a4d4a562.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "cunn_CriterionFilter_updateOutput_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *target = NULL;
hipMalloc(&target, XSIZE*YSIZE);
float *ignored_label = NULL;
hipMalloc(&ignored_label, XSIZE*YSIZE);
int bound = 1;
int batch_size = XSIZE*YSIZE;
int map_nelem = 1;
int blocks_per_sample = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL(( cunn_CriterionFilter_updateOutput_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, target,ignored_label,bound,batch_size,map_nelem,blocks_per_sample);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL(( cunn_CriterionFilter_updateOutput_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, target,ignored_label,bound,batch_size,map_nelem,blocks_per_sample);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL(( cunn_CriterionFilter_updateOutput_kernel), dim3(gridBlock),dim3(threadBlock), 0, 0, target,ignored_label,bound,batch_size,map_nelem,blocks_per_sample);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
6e77ab1f8888ab92fc19972c46ef5c50a4d4a562.cu
|
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "cunn_CriterionFilter_updateOutput_kernel.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
float *target = NULL;
cudaMalloc(&target, XSIZE*YSIZE);
float *ignored_label = NULL;
cudaMalloc(&ignored_label, XSIZE*YSIZE);
int bound = 1;
int batch_size = XSIZE*YSIZE;
int map_nelem = 1;
int blocks_per_sample = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
cunn_CriterionFilter_updateOutput_kernel<<<gridBlock,threadBlock>>>(target,ignored_label,bound,batch_size,map_nelem,blocks_per_sample);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
cunn_CriterionFilter_updateOutput_kernel<<<gridBlock,threadBlock>>>(target,ignored_label,bound,batch_size,map_nelem,blocks_per_sample);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
cunn_CriterionFilter_updateOutput_kernel<<<gridBlock,threadBlock>>>(target,ignored_label,bound,batch_size,map_nelem,blocks_per_sample);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}}
|
2f8c99a8ddd714a8c95015349acb2a4b589375f1.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/char_types/char_types.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/string.cuh>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <strings/utilities.cuh>
#include <strings/utilities.hpp>
#include <thrust/logical.h>
//
namespace cudf {
namespace strings {
namespace detail {
//
std::unique_ptr<column> all_characters_of_type(
strings_column_view const& strings,
string_character_types types,
string_character_types verify_types,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0)
{
auto strings_count = strings.size();
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_column = *strings_column;
// create output column
auto results = make_numeric_column(data_type{BOOL8},
strings_count,
copy_bitmask(strings.parent(), stream, mr),
strings.null_count(),
stream,
mr);
auto results_view = results->mutable_view();
auto d_results = results_view.data<bool>();
// get the static character types table
auto d_flags = detail::get_character_flags_table();
// set the output values by checking the character types for each string
thrust::transform(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_results,
[d_column, d_flags, types, verify_types, d_results] __device__(size_type idx) {
if (d_column.is_null(idx)) return false;
auto d_str = d_column.element<string_view>(idx);
bool check = !d_str.empty(); // require at least one character
size_type check_count = 0;
for (auto itr = d_str.begin(); check && (itr != d_str.end()); ++itr) {
auto code_point = detail::utf8_to_codepoint(*itr);
// lookup flags in table by code-point
auto flag = code_point <= 0x00FFFF ? d_flags[code_point] : 0;
if ((verify_types & flag) || // should flag be verified
(flag == 0 && verify_types == ALL_TYPES)) // special edge case
{
check = (types & flag) > 0;
++check_count;
}
}
return check && (check_count > 0);
});
//
results->set_null_count(strings.null_count());
return results;
}
std::unique_ptr<column> is_integer(
strings_column_view const& strings,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0)
{
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_column = *strings_column;
// create output column
auto results = make_numeric_column(data_type{BOOL8},
strings.size(),
copy_bitmask(strings.parent(), stream, mr),
strings.null_count(),
stream,
mr);
auto d_results = results->mutable_view().data<bool>();
thrust::transform(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings.size()),
d_results,
[d_column] __device__(size_type idx) {
if (d_column.is_null(idx)) return false;
return string::is_integer(d_column.element<string_view>(idx));
});
results->set_null_count(strings.null_count());
return results;
}
bool all_integer(strings_column_view const& strings, hipStream_t stream = 0)
{
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_column = *strings_column;
auto transformer_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator<size_type>(0), [d_column] __device__(size_type idx) {
if (d_column.is_null(idx)) return false;
return string::is_integer(d_column.element<string_view>(idx));
});
return thrust::all_of(rmm::exec_policy(stream)->on(stream),
transformer_itr,
transformer_itr + strings.size(),
thrust::identity<bool>());
}
std::unique_ptr<column> is_float(
strings_column_view const& strings,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
hipStream_t stream = 0)
{
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_column = *strings_column;
// create output column
auto results = make_numeric_column(data_type{BOOL8},
strings.size(),
copy_bitmask(strings.parent(), stream, mr),
strings.null_count(),
stream,
mr);
auto d_results = results->mutable_view().data<bool>();
// check strings for valid float chars
thrust::transform(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings.size()),
d_results,
[d_column] __device__(size_type idx) {
if (d_column.is_null(idx)) return false;
return string::is_float(d_column.element<string_view>(idx));
});
results->set_null_count(strings.null_count());
return results;
}
bool all_float(strings_column_view const& strings, hipStream_t stream = 0)
{
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_column = *strings_column;
auto transformer_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator<size_type>(0), [d_column] __device__(size_type idx) {
if (d_column.is_null(idx)) return false;
return string::is_float(d_column.element<string_view>(idx));
});
return thrust::all_of(rmm::exec_policy(stream)->on(stream),
transformer_itr,
transformer_itr + strings.size(),
thrust::identity<bool>());
}
} // namespace detail
// external API
std::unique_ptr<column> all_characters_of_type(strings_column_view const& strings,
string_character_types types,
string_character_types verify_types,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::all_characters_of_type(strings, types, verify_types, mr);
}
std::unique_ptr<column> is_integer(strings_column_view const& strings,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::is_integer(strings, mr);
}
std::unique_ptr<column> is_float(strings_column_view const& strings,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::is_float(strings, mr);
}
bool all_integer(strings_column_view const& strings)
{
CUDF_FUNC_RANGE();
return detail::all_integer(strings);
}
bool all_float(strings_column_view const& strings)
{
CUDF_FUNC_RANGE();
return detail::all_float(strings);
}
} // namespace strings
} // namespace cudf
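// Example call site (a sketch; assumes a strings_column_view `scv` built from an existing
// strings column and that the character-type enum values match this cudf version):
//   auto is_alpha = cudf::strings::all_characters_of_type(
//       scv, cudf::strings::string_character_types::ALPHA, cudf::strings::ALL_TYPES);
// The result is a BOOL8 column with one entry per string: true when the string is non-empty
// and every character has the requested type.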
|
2f8c99a8ddd714a8c95015349acb2a4b589375f1.cu
|
/*
* Copyright (c) 2019-2020, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cudf/column/column.hpp>
#include <cudf/column/column_device_view.cuh>
#include <cudf/column/column_factories.hpp>
#include <cudf/detail/nvtx/ranges.hpp>
#include <cudf/strings/char_types/char_types.hpp>
#include <cudf/strings/detail/utilities.hpp>
#include <cudf/strings/string.cuh>
#include <cudf/strings/string_view.cuh>
#include <cudf/strings/strings_column_view.hpp>
#include <strings/utilities.cuh>
#include <strings/utilities.hpp>
#include <thrust/logical.h>
//
namespace cudf {
namespace strings {
namespace detail {
//
std::unique_ptr<column> all_characters_of_type(
strings_column_view const& strings,
string_character_types types,
string_character_types verify_types,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0)
{
auto strings_count = strings.size();
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_column = *strings_column;
// create output column
auto results = make_numeric_column(data_type{BOOL8},
strings_count,
copy_bitmask(strings.parent(), stream, mr),
strings.null_count(),
stream,
mr);
auto results_view = results->mutable_view();
auto d_results = results_view.data<bool>();
// get the static character types table
auto d_flags = detail::get_character_flags_table();
// set the output values by checking the character types for each string
thrust::transform(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings_count),
d_results,
[d_column, d_flags, types, verify_types, d_results] __device__(size_type idx) {
if (d_column.is_null(idx)) return false;
auto d_str = d_column.element<string_view>(idx);
bool check = !d_str.empty(); // require at least one character
size_type check_count = 0;
for (auto itr = d_str.begin(); check && (itr != d_str.end()); ++itr) {
auto code_point = detail::utf8_to_codepoint(*itr);
// lookup flags in table by code-point
auto flag = code_point <= 0x00FFFF ? d_flags[code_point] : 0;
if ((verify_types & flag) || // should flag be verified
(flag == 0 && verify_types == ALL_TYPES)) // special edge case
{
check = (types & flag) > 0;
++check_count;
}
}
return check && (check_count > 0);
});
//
results->set_null_count(strings.null_count());
return results;
}
std::unique_ptr<column> is_integer(
strings_column_view const& strings,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0)
{
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_column = *strings_column;
// create output column
auto results = make_numeric_column(data_type{BOOL8},
strings.size(),
copy_bitmask(strings.parent(), stream, mr),
strings.null_count(),
stream,
mr);
auto d_results = results->mutable_view().data<bool>();
thrust::transform(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings.size()),
d_results,
[d_column] __device__(size_type idx) {
if (d_column.is_null(idx)) return false;
return string::is_integer(d_column.element<string_view>(idx));
});
results->set_null_count(strings.null_count());
return results;
}
bool all_integer(strings_column_view const& strings, cudaStream_t stream = 0)
{
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_column = *strings_column;
auto transformer_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator<size_type>(0), [d_column] __device__(size_type idx) {
if (d_column.is_null(idx)) return false;
return string::is_integer(d_column.element<string_view>(idx));
});
return thrust::all_of(rmm::exec_policy(stream)->on(stream),
transformer_itr,
transformer_itr + strings.size(),
thrust::identity<bool>());
}
std::unique_ptr<column> is_float(
strings_column_view const& strings,
rmm::mr::device_memory_resource* mr = rmm::mr::get_default_resource(),
cudaStream_t stream = 0)
{
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_column = *strings_column;
// create output column
auto results = make_numeric_column(data_type{BOOL8},
strings.size(),
copy_bitmask(strings.parent(), stream, mr),
strings.null_count(),
stream,
mr);
auto d_results = results->mutable_view().data<bool>();
// check strings for valid float chars
thrust::transform(rmm::exec_policy(stream)->on(stream),
thrust::make_counting_iterator<size_type>(0),
thrust::make_counting_iterator<size_type>(strings.size()),
d_results,
[d_column] __device__(size_type idx) {
if (d_column.is_null(idx)) return false;
return string::is_float(d_column.element<string_view>(idx));
});
results->set_null_count(strings.null_count());
return results;
}
bool all_float(strings_column_view const& strings, cudaStream_t stream = 0)
{
auto strings_column = column_device_view::create(strings.parent(), stream);
auto d_column = *strings_column;
auto transformer_itr = thrust::make_transform_iterator(
thrust::make_counting_iterator<size_type>(0), [d_column] __device__(size_type idx) {
if (d_column.is_null(idx)) return false;
return string::is_float(d_column.element<string_view>(idx));
});
return thrust::all_of(rmm::exec_policy(stream)->on(stream),
transformer_itr,
transformer_itr + strings.size(),
thrust::identity<bool>());
}
} // namespace detail
// external API
std::unique_ptr<column> all_characters_of_type(strings_column_view const& strings,
string_character_types types,
string_character_types verify_types,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::all_characters_of_type(strings, types, verify_types, mr);
}
std::unique_ptr<column> is_integer(strings_column_view const& strings,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::is_integer(strings, mr);
}
std::unique_ptr<column> is_float(strings_column_view const& strings,
rmm::mr::device_memory_resource* mr)
{
CUDF_FUNC_RANGE();
return detail::is_float(strings, mr);
}
bool all_integer(strings_column_view const& strings)
{
CUDF_FUNC_RANGE();
return detail::all_integer(strings);
}
bool all_float(strings_column_view const& strings)
{
CUDF_FUNC_RANGE();
return detail::all_float(strings);
}
} // namespace strings
} // namespace cudf
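// Example call site (a sketch; assumes a strings_column_view `scv` built from an existing
// strings column and that the character-type enum values match this cudf version):
//   auto is_alpha = cudf::strings::all_characters_of_type(
//       scv, cudf::strings::string_character_types::ALPHA, cudf::strings::ALL_TYPES);
// The result is a BOOL8 column with one entry per string: true when the string is non-empty
// and every character has the requested type.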
|
a0528ccb3505381f560c1ede43311237141697af.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "cuda_link.h"
#include "quadrature.h"
#include "mesh.h"
#include "xsection.h"
#include "sourceparams.h"
#include "solverparams.h"
#include "outwriter.h"
//#include <string>
#include <stdio.h>
#include <hip/hip_runtime_api.h>
void reportGpuData()
{
std::cout << "Reporting GPU resources" << std::endl;
// Check the number of GPU resources
int nDevices;
hipGetDeviceCount(&nDevices);
std::cout << "Found " << nDevices << " CUDA devices" << std::endl;
for(unsigned int i = 0; i < nDevices; i++)
{
// Find a gpu
hipDeviceProp_t props;
checkCudaErrors(hipGetDeviceProperties(&props, i));
std::cout << "Device " << i << ": " << props.name << " with compute "
<< props.major << "." << props.minor << " capability" << std::endl;
std::cout << "Max threads per block: " << props.maxThreadsPerBlock << std::endl;
std::cout << "Max grid size: " << props.maxGridSize[0] << " x " << props.maxGridSize[1] << " x " << props.maxGridSize[2] << std::endl;
std::cout << "Memory Clock Rate (KHz): " << props.memoryClockRate << std::endl;
std::cout << "Memory Bus Width (bits): " << props.memoryBusWidth << std::endl;
std::cout << "Peak Memory Bandwidth (GB/s): " << (2.0*props.memoryClockRate*(props.memoryBusWidth/8)/1.0e6) << std::endl;
int cores = 0;
int mp = props.multiProcessorCount;
switch (props.major){
case 2: // Fermi
if (props.minor == 1) cores = mp * 48;
else cores = mp * 32;
break;
case 3: // Kepler
cores = mp * 192;
break;
case 5: // Maxwell
cores = mp * 128;
break;
case 6: // Pascal
if (props.minor == 1) cores = mp * 128;
else if (props.minor == 0) cores = mp * 64;
else printf("Unknown device type\n");
break;
default:
printf("Unknown device type\n");
break;
}
std::cout << "SMs: " << mp << std::endl;
std::cout << "CUDA Cores: " << cores << '\n' << std::endl;
}
}
int *alloc_gpuInt(const int gpuId, const int elements, const int *data)
{
hipError_t cudaerr;
if((cudaerr = hipSetDevice(gpuId)) != hipSuccess)
std::cout << "alloc_gpuInt failed to set the device with error code: " << cudaerr << ": " << hipGetErrorString(cudaerr) << std::endl;
int *gpu_data;
if((cudaerr = hipMalloc(&gpu_data, elements*sizeof(int))) != hipSuccess)
std::cout << "alloc_gpuInt threw an error while allocating CUDA memory with error code: " << cudaerr << ": " << hipGetErrorString(cudaerr) << std::endl;
if(data != NULL)
{
if((cudaerr = hipMemcpyAsync(gpu_data, data, elements*sizeof(int), hipMemcpyHostToDevice)) != hipSuccess)
std::cout << "alloc_gpuInt failed while copying data with error code: " << cudaerr << ": " << hipGetErrorString(cudaerr) << std::endl;
}
return gpu_data;
}
/*
RAY_T *alloc_gpuFloat(const int gpuId, const int elements, const RAY_T *cpuData)
{
hipError_t cudaerr;
if((cudaerr = hipSetDevice(gpuId)) != hipSuccess)
std::cout << "alloc_gpuFloat failed to set the device with error code: " << cudaerr << ": " << hipGetErrorString(cudaerr) << std::endl;
RAY_T *gpuData;
if((cudaerr = hipMalloc(&gpuData, elements*sizeof(RAY_T))) != hipSuccess)
std::cout << "alloc_gpuFloat threw an error while allocating CUDA memory with error code: " << cudaerr << ": " << hipGetErrorString(cudaerr) << std::endl;
if(cpuData != NULL)
{
if((cudaerr = hipMemcpyAsync(gpuData, cpuData, elements*sizeof(RAY_T), hipMemcpyHostToDevice)) != hipSuccess)
std::cout << "alloc_gpuFloat failed while copying data with error code: " << cudaerr << ": " << hipGetErrorString(cudaerr) << std::endl;
}
return gpuData;
}
*/
/*
void release_gpu(int gpuId, float *gpu_data)
{
hipError_t cudaerr;
if((cudaerr = hipSetDevice(gpuId)) != hipSuccess)
std::cout << "release_gpu (float) failed to set the device with error code: " << cudaerr << ": " << hipGetErrorString(cudaerr) << std::endl;
if((cudaerr = hipFree(gpu_data)) != hipSuccess)
std::cout << "relase_gpu (float) threw an error while deallocating CUDA memory with error code: " << cudaerr << ": " << hipGetErrorString(cudaerr) << std::endl;
}
void release_gpu(int gpuId, int *gpu_data)
{
hipError_t cudaerr;
if((cudaerr = hipSetDevice(gpuId)) != hipSuccess)
std::cout << "release_gpu (int) failed to set the device with error code: " << cudaerr << ": " << hipGetErrorString(cudaerr) << std::endl;
if((cudaerr = hipFree(gpu_data)) != hipSuccess)
std::cout << "relase_gpu (int) threw an error while deallocating int CUDA memory with error code: " << cudaerr << ": " << hipGetErrorString(cudaerr) << std::endl;
}
*/
/*
void updateCpuData(int gpuId, float *cpuData, float *gpuData, size_t elements, int cpuOffset)
{
hipError_t cudaerr;
if((cudaerr = hipSetDevice(gpuId)) != hipSuccess)
std::cout << "updateCpuData (float) failed to set the device with error code: " << cudaerr << ": " << hipGetErrorString(cudaerr) << std::endl;
if((cudaerr = hipMemcpyAsync(cpuData+cpuOffset, gpuData, elements*sizeof(float), hipMemcpyDeviceToHost)) != hipSuccess)
std::cout << "updateCpuData (float) MemcpyAsync failed with error code: " << cudaerr << ": " << hipGetErrorString(cudaerr) << std::endl;
}
void updateCpuData(int gpuId, int *cpuData, int *gpuData, size_t elements, int cpuOffset)
{
hipError_t cudaerr;
if((cudaerr = hipSetDevice(gpuId)) != hipSuccess)
std::cout << "updateCpuData (int) failed to set the device with error code: " << cudaerr << ": " << hipGetErrorString(cudaerr) << std::endl;
if((cudaerr = hipMemcpyAsync(cpuData+cpuOffset, gpuData, elements*sizeof(int), hipMemcpyDeviceToHost)) != hipSuccess)
std::cout << "updateCpuData (int) MemcpyAsync failed with error code: " << cudaerr << ": " << hipGetErrorString(cudaerr) << std::endl;
}
*/
int launch_isoRayKernel(const Quadrature *quad, const Mesh *mesh, const XSection *xs, const SolverParams *solPar, const SourceParams *srcPar, std::vector<RAY_T> *uflux)
{
reportGpuData();
if(uflux == NULL)
{
std::cout << "STOP!" << std::endl;
return -1;
}
int gpuId = 0;
// Allocate memory space for the solution vector
//std::cout << "Allocating uflux" << std::endl;
RAY_T *gpuUflux = alloc_gpuFloat<RAY_T>(gpuId, mesh->voxelCount() * xs->groupCount(), NULL);
// Copy the xyzNode values
float *gpuXNodes = alloc_gpuFloat<float>(gpuId, mesh->xNodes.size(), &mesh->xNodes[0]);
float *gpuYNodes = alloc_gpuFloat<float>(gpuId, mesh->yNodes.size(), &mesh->yNodes[0]);
float *gpuZNodes = alloc_gpuFloat<float>(gpuId, mesh->zNodes.size(), &mesh->zNodes[0]);
// Copy the dxyz values
float *gpuDx = alloc_gpuFloat<float>(gpuId, mesh->dx.size(), &mesh->dx[0]);
float *gpuDy = alloc_gpuFloat<float>(gpuId, mesh->dy.size(), &mesh->dy[0]);
float *gpuDz = alloc_gpuFloat<float>(gpuId, mesh->dz.size(), &mesh->dz[0]);
// Copy the zone id number
int *gpuZoneId = alloc_gpuInt(gpuId, mesh->zoneId.size(), &mesh->zoneId[0]);
// Copy the atom density
float *gpuAtomDensity = alloc_gpuFloat<float>(gpuId, mesh->atomDensity.size(), &mesh->atomDensity[0]);
// Copy the xs data
float *gpuTot1d = alloc_gpuFloat<float>(gpuId, xs->m_tot1d.size(), &xs->m_tot1d[0]);
// Copy the source strength
//std::cout << "Allocating source strength" << std::endl;
float *gpuSrcStrength = alloc_gpuFloat<float>(gpuId, srcPar->spectraIntensity.size(), &srcPar->spectraIntensity[0]);
//int ixSrc, iySrc, izSrc;
unsigned int ixSrc = 0;
unsigned int iySrc = 0;
unsigned int izSrc = 0;
while(mesh->xNodes[ixSrc+1] < srcPar->sourceX)
ixSrc++;
while(mesh->yNodes[iySrc+1] < srcPar->sourceY)
iySrc++;
while(mesh->zNodes[izSrc+1] < srcPar->sourceZ)
izSrc++;
dim3 dimGrid(mesh->xElemCt, mesh->yElemCt);
dim3 dimBlock(mesh->zElemCt);
//std::cout << "Grid: " << dimGrid.x << "x" << dimGrid.y << ", Block: " << dimBlock.x << "x" << dimBlock.y << std::endl;
hipLaunchKernelGGL(( isoRayKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
gpuUflux,
gpuXNodes, gpuYNodes, gpuZNodes,
gpuDx, gpuDy, gpuDz,
gpuZoneId,
gpuAtomDensity,
gpuTot1d,
gpuSrcStrength,
xs->groupCount(),
mesh->xElemCt, mesh->yElemCt, mesh->zElemCt,
srcPar->sourceX, srcPar->sourceY, srcPar->sourceZ,
ixSrc, iySrc, izSrc);
size_t elements = mesh->voxelCount() * xs->groupCount();
//uflux = new RAY_T[elements];
uflux->resize(elements);
//hipDeviceSynchronize();
updateCpuDataBlocking(gpuId, &(*uflux)[0], gpuUflux, elements);
//OutWriter::writeArray("uflux.dat", *uflux);
release_gpu(gpuId, gpuUflux);
release_gpu(gpuId, gpuXNodes);
release_gpu(gpuId, gpuYNodes);
release_gpu(gpuId, gpuZNodes);
release_gpu(gpuId, gpuDx);
release_gpu(gpuId, gpuDy);
release_gpu(gpuId, gpuDz);
release_gpu(gpuId, gpuZoneId);
release_gpu(gpuId, gpuAtomDensity);
release_gpu(gpuId, gpuTot1d);
release_gpu(gpuId, gpuSrcStrength);
std::cout << "Most recent CUDA Error: " << hipGetErrorString(hipGetLastError()) << std::endl;
//if(hipFree(gpu_data) != hipSuccess)
// std::cout << "alloc_gpuInt failed while copying data" << std::endl;
return EXIT_SUCCESS;
}
int launch_isoSolKernel(const Quadrature *quad, const Mesh *mesh, const XSection *xs, const SolverParams *solPar, const SourceParams *srcPar, const std::vector<RAY_T> *cpuUFlux, std::vector<SOL_T> *cpuCFlux)
{
//std::cout << "Launching solver kernel" << std::endl;
if(cpuUFlux == NULL)
{
std::cout << "STOP!" << std::endl;
return -1;
}
if(cpuCFlux == NULL)
{
std::cout << "STOP!" << std::endl;
return -2;
}
int gpuId = 0;
std::vector<SOL_T> errMaxList;
std::vector<std::vector<SOL_T> > errList;
std::vector<std::vector<SOL_T> > errIntList;
std::vector<int> converganceIters;
std::vector<SOL_T> converganceTracker;
errMaxList.resize(xs->groupCount());
errList.resize(xs->groupCount());
errIntList.resize(xs->groupCount());
converganceIters.resize(xs->groupCount());
converganceTracker.resize(xs->groupCount());
std::clock_t startTime = std::clock();
const int maxIterations = 25;
const SOL_T epsilon = static_cast<SOL_T>(0.001);
cpuCFlux->resize(xs->groupCount() * mesh->voxelCount());
std::vector<SOL_T> cpuCFluxTmp(mesh->voxelCount(), static_cast<SOL_T>(0.0));
//std::vector<SOL_T> errMaxList;
//errMaxList.resize(xs->groupCount());
if(cpuUFlux == NULL && srcPar == NULL)
{
std::cout << "uFlux and srcPar cannot both be NULL" << std::endl;
return 55;
}
// Compute the highest energy group actually used
bool noDownscatterYet = true;
unsigned int highestEnergy = 0;
//std::cout << "About to do high check" << std::endl;
while(noDownscatterYet)
{
SOL_T dmax = 0.0;
unsigned int vc = mesh->voxelCount();
for(unsigned int ir = 0; ir < vc; ir++)
{
dmax = (dmax > (*cpuUFlux)[highestEnergy*vc + ir]) ? dmax : (*cpuUFlux)[highestEnergy*vc + ir];
}
if(dmax <= 0.0)
{
std::cout << "No external source or downscatter, skipping energy group " << highestEnergy << std::endl;
highestEnergy++;
}
else
{
noDownscatterYet = false;
}
if(highestEnergy >= xs->groupCount())
{
std::cout << "Zero flux everywhere from the raytracer" << std::endl;
return 57;
}
}
// Allocate GPU resources for the external source computation
RAY_T *gpuUFlux = alloc_gpuFloat<RAY_T>(gpuId, cpuUFlux->size(), &(*cpuUFlux)[0]);
SOL_T *gpuColFlux = alloc_gpuFloat<SOL_T>(gpuId, cpuCFlux->size(), NULL);
float *gpuVol = alloc_gpuFloat<float>(gpuId, mesh->vol.size(), &mesh->vol[0]);
float *gpuAtomDensity = alloc_gpuFloat<float>(gpuId, mesh->atomDensity.size(), &mesh->atomDensity[0]);
int *gpuZoneId = alloc_gpuInt(gpuId, mesh->zoneId.size(), &mesh->zoneId[0]);
float *gpuScatXs2d = alloc_gpuFloat<float>(gpuId, xs->m_scat2d.size(), &xs->m_scat2d[0]);
// Allocate additional GPU resources for the solver
SOL_T *gpuTempFlux = alloc_gpuFloat<SOL_T>(gpuId, mesh->voxelCount(), NULL);
SOL_T *gpu1stSource = alloc_gpuFloat<SOL_T>(gpuId, mesh->voxelCount(), NULL);
SOL_T *gpuTotalSource = alloc_gpuFloat<SOL_T>(gpuId, mesh->voxelCount(), NULL);
//unsigned int anglesPerOctant = quad->angleCount()/8;
//SOL_T *gpuOutboundFluxX = alloc_gpuFloat<SOL_T>(gpuId, mesh->yElemCt * mesh->zElemCt * anglesPerOctant, NULL);
//SOL_T *gpuOutboundFluxY = alloc_gpuFloat<SOL_T>(gpuId, mesh->yElemCt * mesh->zElemCt * anglesPerOctant, NULL);
//SOL_T *gpuOutboundFluxZ = alloc_gpuFloat<SOL_T>(gpuId, mesh->yElemCt * mesh->zElemCt * anglesPerOctant, NULL);
SOL_T *gpuOutboundFluxX = alloc_gpuFloat<SOL_T>(gpuId, mesh->voxelCount(), NULL);
SOL_T *gpuOutboundFluxY = alloc_gpuFloat<SOL_T>(gpuId, mesh->voxelCount(), NULL);
SOL_T *gpuOutboundFluxZ = alloc_gpuFloat<SOL_T>(gpuId, mesh->voxelCount(), NULL);
float *gpuAxy = alloc_gpuFloat<float>(gpuId, mesh->Axy.size(), &mesh->Axy[0]);
float *gpuAxz = alloc_gpuFloat<float>(gpuId, mesh->Axz.size(), &mesh->Axz[0]);
float *gpuAyz = alloc_gpuFloat<float>(gpuId, mesh->Ayz.size(), &mesh->Ayz[0]);
float *gpuMu = alloc_gpuFloat<float>(gpuId, quad->mu.size(), &quad->mu[0]);
float *gpuEta = alloc_gpuFloat<float>(gpuId, quad->eta.size(), &quad->eta[0]);
float *gpuXi = alloc_gpuFloat<float>(gpuId, quad->zi.size(), &quad->zi[0]);
float *gpuWt = alloc_gpuFloat<float>(gpuId, quad->wt.size(), &quad->wt[0]);
float *gpuTotXs1d = alloc_gpuFloat<float>(gpuId, xs->m_tot1d.size(), &xs->m_tot1d[0]);
// Zero the scalar flux
int erblocks = 64;
int ergrids = cpuCFlux->size() / erblocks;
if(cpuCFlux->size() % erblocks != 0)
ergrids += 1; // Account for lengths not divisible by 64
hipLaunchKernelGGL(( zeroKernel), dim3(dim3(ergrids)), dim3(dim3(erblocks)), 0, 0, cpuCFlux->size(), gpuColFlux);
// Generate the sweep index block
int totalSubsweeps = mesh->xElemCt + mesh->yElemCt + mesh->zElemCt - 2;
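// The transport sweep is decomposed into diagonal "wavefront" planes ix+iy+iz = const;
// every voxel on a plane depends only on voxels from earlier planes in the sweep order,
// so a whole plane can be processed in parallel by one kernel launch. An
// xElemCt x yElemCt x zElemCt grid has xElemCt+yElemCt+zElemCt-2 such planes.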
std::vector<int> threadIndexToGlobalIndex(mesh->voxelCount());
std::vector<int> subSweepStartIndex(totalSubsweeps);
std::vector<int> subSweepVoxelCount(totalSubsweeps);
// Trivial edge cases that aren't computed during the loop
subSweepStartIndex[0] = 0;
threadIndexToGlobalIndex[0] = 0;
subSweepVoxelCount[totalSubsweeps-1] = 1;
for(unsigned int iSubSweep = 1; iSubSweep < totalSubsweeps; iSubSweep++) // start at 1: sub-sweep 0 is seeded above and iSubSweepPrev must not go negative
{
int iSubSweepPrev = iSubSweep - 1;
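// Count the voxels on the previous diagonal plane by inclusion-exclusion:
// C is the unclipped triangular count of ix+iy+iz = iSubSweepPrev, the L terms remove
// solutions that overrun a single dimension, and the G terms add back corners removed twice.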
int C = (iSubSweepPrev+1) * (iSubSweepPrev+2) / 2;
int dx = max(iSubSweepPrev+1 - (signed)mesh->xElemCt, 0);
int dy = max(iSubSweepPrev+1 - (signed)mesh->yElemCt, 0);
int dz = max(iSubSweepPrev+1 - (signed)mesh->zElemCt, 0);
int dxy = max(iSubSweepPrev+1 - (signed)mesh->xElemCt - (signed)mesh->yElemCt, 0);
int dxz = max(iSubSweepPrev+1 - (signed)mesh->xElemCt - (signed)mesh->zElemCt, 0);
int dyz = max(iSubSweepPrev+1 - (signed)mesh->yElemCt - (signed)mesh->zElemCt, 0);
int Lx = dx * (dx + 1) / 2;
int Ly = dy * (dy + 1) / 2;
int Lz = dz * (dz + 1) / 2;
int Gxy = dxy * (dxy + 1) / 2;
int Gxz = dxz * (dxz + 1) / 2;
int Gyz = dyz * (dyz + 1) / 2;
int voxPrevSubSweep = C - Lx - Ly - Lz + Gxy + Gxz + Gyz;
subSweepStartIndex[iSubSweep] = subSweepStartIndex[iSubSweepPrev] + voxPrevSubSweep;
subSweepVoxelCount[iSubSweepPrev] = voxPrevSubSweep;
int voxelsSoFar = 0;
for(int ix = 0; ix <= min(mesh->xElemCt-1, iSubSweep); ix++)
for(int iy = 0; iy <= min(mesh->yElemCt-1, iSubSweep-ix); iy++)
{
int iz = iSubSweep - ix - iy;
if(iz >= mesh->zElemCt)
continue;
int ir = ix*mesh->yElemCt*mesh->zElemCt + iy*mesh->zElemCt + iz;
threadIndexToGlobalIndex[subSweepStartIndex[iSubSweep] + voxelsSoFar] = ir;
voxelsSoFar++;
}
}
int *gpuThreadIndexToGlobalIndex = alloc_gpuInt(gpuId, threadIndexToGlobalIndex.size(), &threadIndexToGlobalIndex[0]);
dim3 dimGrid(mesh->xElemCt, mesh->yElemCt);
dim3 dimBlock(mesh->zElemCt);
for(unsigned int ie = highestEnergy; ie < xs->groupCount(); ie++) // for every energy group
{
std::cout << "ie=" << ie << std::endl;
int iterNum = 1;
SOL_T maxDiff = 1.0;
SOL_T totDiff = 1.0E10; // Should be very large
SOL_T totDiffPre = 1.0E11; // Should be larger than totDiff
// Needs to be done before the first clearSweepKernel<<<>>> call
int rblocks = 64;
int rgrids = mesh->voxelCount() / rblocks;
if(mesh->voxelCount() % rblocks != 0)
rgrids += 1; // Account for lengths not divisible by 64
hipLaunchKernelGGL(( zeroKernel), dim3(dim3(rgrids)), dim3(dim3(rblocks)), 0, 0, mesh->voxelCount(), gpuTempFlux);
//zeroKernel<<<dim3(rgrids), dim3(rblocks)>>>(mesh->voxelCount(), gpu1stSource);
hipLaunchKernelGGL(( isoSrcKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
gpuUFlux,
gpu1stSource,
gpuVol, gpuAtomDensity, gpuZoneId,
gpuScatXs2d,
mesh->voxelCount(), xs->groupCount(), solPar->pn+1, highestEnergy, ie,
mesh->xElemCt, mesh->yElemCt, mesh->zElemCt);
//hipDeviceSynchronize();
//std::cout << "About to write the source results" << std::endl;
//std::vector<float> cpuExtSrc;
//cpuExtSrc.resize(mesh->voxelCount());
//updateCpuDataBlocking(gpuId, &cpuExtSrc[0], gpu1stSource, mesh->voxelCount());
//char ieString[256];
//sprintf(ieString, "%d", ie);
//OutWriter::writeArray(std::string("gpuExtSrc") + ieString + ".dat", cpuExtSrc);
hipLaunchKernelGGL(( downscatterKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
gpuTotalSource,
highestEnergy, ie,
mesh->xElemCt, mesh->yElemCt, mesh->zElemCt, xs->groupCount(), solPar->pn+1,
gpuZoneId,
gpuColFlux,
gpuScatXs2d,
gpuAtomDensity, gpuVol,
gpu1stSource);
for(unsigned int i = 0; i < cpuCFluxTmp.size(); i++)
cpuCFluxTmp[i] = 0.0f;
//printf("Begin Total crit=%e\n", totDiff/totDiffPre);
while(iterNum <= maxIterations && maxDiff > epsilon && totDiff/totDiffPre < 1.0) // while not converged
{
//std::cout << "it: " << iterNum << std::endl;
hipLaunchKernelGGL(( clearSweepKernel), dim3(dimGrid), dim3(dimBlock), 0, 0,
gpuColFlux, gpuTempFlux,
mesh->xElemCt, mesh->yElemCt, mesh->zElemCt, ie);
for(unsigned int i = 0; i < cpuCFluxTmp.size(); i++)
{
//cpuCFlux[ie*mesh->voxelCount() + i] = cpuCFluxTmp[i];
cpuCFluxTmp[i] = 0.0f;
}
for(unsigned int iang = 0; iang < quad->angleCount(); iang++) // for every angle
//for(unsigned int io = 0; io < 8; io++) // for every octant
{
// Find the correct direction to sweep
int diz = 1; // Sweep direction
if(quad->eta[iang] < 0) // Condition to sweep backward
{
diz = -1; // Sweep toward zero
}
int diy = 1;
if(quad->zi[iang] < 0)
{
diy = -1;
}
int dix = 1;
if(quad->mu[iang] < 0)
{
dix = -1;
}
//int dix = io / 4 == 0 ? 1 : -1; // + x x, positive are first
//int diy = (io/2) % 2 == 0 ? 1 : -1; // x + x, alternate every other octant
//int diz = io % 2 == 0 ? 1 : -1; // x x + alternate every octant
//std::cout << "io = " << io << ", dix = " << dix << ", diy = " << diy << ", diz = " << diz << std::endl;
for(unsigned int subSweepId = 0; subSweepId < totalSubsweeps; subSweepId++)
{
//std::cout << "Subsweep " << subSweepId << std::endl;
int raise = subSweepVoxelCount[subSweepId] % 64 == 0 ? 0 : 1;
dim3 dimGridS(subSweepVoxelCount[subSweepId] / 64 + raise);
dim3 dimBlockS(64);
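// One thread per voxel on this diagonal plane, rounded up to whole 64-thread blocks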
hipLaunchKernelGGL(( isoSolKernel), dim3(dimGridS), dim3(dimBlockS), 0, 0,
gpuColFlux, gpuTempFlux,
gpuTotalSource,
gpuTotXs1d, gpuScatXs2d,
gpuAxy, gpuAxz, gpuAyz,
gpuZoneId, gpuAtomDensity, gpuVol,
gpuMu, gpuEta, gpuXi, gpuWt,
gpuOutboundFluxX, gpuOutboundFluxY, gpuOutboundFluxZ,
ie, iang,
mesh->xElemCt, mesh->yElemCt, mesh->zElemCt, xs->groupCount(), quad->angleCount(), solPar->pn+1, // Pass pn+1, the number of Pn moment groups
dix, diy, diz,
subSweepStartIndex[subSweepId], subSweepVoxelCount[subSweepId], gpuThreadIndexToGlobalIndex);
//hipDeviceSynchronize();
//std::cout << "Launched subSweepId=" << subSweepId << "(" << dimGridS.x << ", " << dimGridS.y << " : " << dimBlockS.x << ", " << dimBlockS.y << " )" << std::endl;
//std::cin.ignore(1024, '\n');
//std::cout << "Ran angle " << iang << std::endl;
//std::cin.get();
}
updateCpuData(gpuId, &cpuCFluxTmp[0], gpuTempFlux, mesh->voxelCount());
} // end of all angles
//updateCpuDataBlocking(gpuId, &cpuCFluxTmp[0], gpuTempFlux, mesh->voxelCount());
//char iterString[3]; // 2 digits + NULL
//char ieString[3]; // 2 digits + NULL
//sprintf(iterString, "%d", iterNum);
//sprintf(ieString, "%d", ie);
//OutWriter::writeArray(std::string("gpuScalarFlux_") + std::string(ieString) + "_" + std::string(iterString) + ".dat", cpuCFluxTmp);
// Make sure all kernels and data transfers finish before advancing
hipDeviceSynchronize();
maxDiff = -1.0e35f;
totDiffPre = totDiff;
totDiff = 0.0f;
//float totDiff = 0.0f;
for(unsigned int i = 0; i < mesh->voxelCount(); i++)
{
maxDiff = max( abs((cpuCFluxTmp[i]-(*cpuCFlux)[ie*mesh->voxelCount() + i])/cpuCFluxTmp[i]), maxDiff);
totDiff += abs(cpuCFluxTmp[i]-(*cpuCFlux)[ie*mesh->voxelCount() + i]);
}
for(unsigned int i = 0; i < cpuCFluxTmp.size(); i++)
{
(*cpuCFlux)[ie*mesh->voxelCount() + i] = cpuCFluxTmp[i];
}
//printf("Diff=%e, tot=%e, pre=%e, condition=%e\n", maxDiff, totDiff, totDiffPre, totDiff/totDiffPre);
errList[ie].push_back(maxDiff);
errIntList[ie].push_back(totDiff);
errMaxList[ie] = maxDiff;
converganceIters[ie] = iterNum;
//for(unsigned int i = 0; i < mesh->voxelCount(); i++)
// cpuCFluxTmp[i] = (*cpuCFlux)[ie*mesh->voxelCount() + i];
//std::cout << "Max diff = " << maxDiff << std::endl;
iterNum++;
} // end not converged
if(!(iterNum <= maxIterations))
{
std::cout << "Max iterations hit" << std::endl;
}
else if(!(maxDiff > epsilon))
{
std::cout << "Converged on relative error" << std::endl;
}
else
{
std::cout << "Converged on precsion bound" << std::endl;
}
//iterNum <= maxIterations && maxDiff > epsilon && totDiff/totDiffPre < 1.0
//printf("End Total crit=%e\n", totDiff/totDiffPre);
} // end each energy group
std::cout << "Time to complete: " << (std::clock() - startTime)/(double)(CLOCKS_PER_SEC/1000) << " ms" << std::endl;
// Release the GPU resources
release_gpu(gpuId, gpuUFlux);
release_gpu(gpuId, gpuZoneId);
release_gpu(gpuId, gpuAtomDensity);
release_gpu(gpuId, gpuAxy);
release_gpu(gpuId, gpuAxz);
release_gpu(gpuId, gpuAyz);
release_gpu(gpuId, gpuMu);
release_gpu(gpuId, gpuEta);
release_gpu(gpuId, gpuXi);
release_gpu(gpuId, gpuWt);
release_gpu(gpuId, gpuTotXs1d);
release_gpu(gpuId, gpuScatXs2d);
for(unsigned int i = 0; i < errList.size(); i++)
{
std::cout << "%Group: " << i << " maxDiff: " << errMaxList[i] << " Iterations: " << converganceIters[i] << '\n';
std::cout << "gpu" << i << " = [";
for(unsigned int j = 0; j < errList[i].size(); j++)
std::cout << errList[i][j] << ",\t";
std::cout << "];\ngpu" << i << "i = [";
for(unsigned int j = 0; j < errIntList[i].size(); j++)
std::cout << errIntList[i][j] << ",\t";
std::cout << "];" << std::endl;
}
std::cout << "Most recent CUDA Error: " << hipGetErrorString(hipGetLastError()) << std::endl;
hipDeviceReset();
hipProfilerStop();
return EXIT_SUCCESS;
}
/*
template <class T>
void reduce(int size, int threads, int blocks, T *d_idata, T *d_odata)
{
int numBlocks = 0;
int numThreads = 0;
int maxBlocks = 64;
int maxThreads = 256;
//getNumBlocksAndThreads(0, size, maxBlocks, maxThreads, numBlocks, numThreads);
hipDeviceProp_t prop;
//int device;
// checkCudaErrors(hipGetDevice(&gpuId));
checkCudaErrors(hipGetDeviceProperties(&prop, gpuId));
numThreads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
numBlocks = (n + (numThreads * 2 - 1)) / (numThreads * 2);
if ((float)numThreads*numBlocks > (float)prop.maxGridSize[0] * prop.maxThreadsPerBlock)
{
printf("n is too large, please choose a smaller number!\n");
}
if (numBlocks > prop.maxGridSize[0])
{
printf("Grid size <%d> excceeds the device capability <%d>, set block size as %d (original %d)\n",
numBlocks, prop.maxGridSize[0], numThreads*2, numThreads);
numBlocks /= 2;
numThreads *= 2;
}
numBlocks = MIN(maxBlocks, numBlocks);
// allocate mem for the result on host side
T *h_odata = (T *) malloc(numBlocks*sizeof(T));
printf("%d blocks\n\n", numBlocks);
// allocate device memory and data
T *d_idata = NULL;
T *d_odata = NULL;
checkCudaErrors(hipMalloc((void **) &d_idata, bytes));
checkCudaErrors(hipMalloc((void **) &d_odata, numBlocks*sizeof(T)));
// copy data directly to device memory
checkCudaErrors(hipMemcpy(d_idata, h_idata, bytes, hipMemcpyHostToDevice));
checkCudaErrors(hipMemcpy(d_odata, h_idata, numBlocks*sizeof(T), hipMemcpyHostToDevice));
// warm-up
//reduce<T>(size, numThreads, numBlocks, whichKernel, d_idata, d_odata);
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
if (isPow2(size))
{
switch (threads)
{
case 512:
reduce6<T, 512, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 256:
reduce6<T, 256, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 128:
reduce6<T, 128, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 64:
reduce6<T, 64, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 32:
reduce6<T, 32, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 16:
reduce6<T, 16, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 8:
reduce6<T, 8, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
reduce6<T, 4, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
reduce6<T, 2, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
reduce6<T, 1, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
}
}
else
{
switch (threads)
{
case 512:
reduce6<T, 512, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 256:
reduce6<T, 256, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 128:
reduce6<T, 128, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 64:
reduce6<T, 64, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 32:
reduce6<T, 32, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 16:
reduce6<T, 16, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 8:
reduce6<T, 8, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
reduce6<T, 4, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
reduce6<T, 2, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
reduce6<T, 1, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
}
}
}
*/
/*
void getNumBlocksAndThreads(int gpuId, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
//get device capability, to avoid block/grid size exceeding the upper bound
hipDeviceProp_t prop;
//int device;
// checkCudaErrors(hipGetDevice(&gpuId));
checkCudaErrors(hipGetDeviceProperties(&prop, gpuId));
threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
if ((float)threads*blocks > (float)prop.maxGridSize[0] * prop.maxThreadsPerBlock)
{
printf("n is too large, please choose a smaller number!\n");
}
if (blocks > prop.maxGridSize[0])
{
printf("Grid size <%d> excceeds the device capability <%d>, set block size as %d (original %d)\n",
blocks, prop.maxGridSize[0], threads*2, threads);
blocks /= 2;
threads *= 2;
}
blocks = MIN(maxBlocks, blocks);
}
*/
|
a0528ccb3505381f560c1ede43311237141697af.cu
|
#include "cuda_link.h"
#include "quadrature.h"
#include "mesh.h"
#include "xsection.h"
#include "sourceparams.h"
#include "solverparams.h"
#include "outwriter.h"
//#include <string>
#include <stdio.h>
#include <cuda_profiler_api.h>
void reportGpuData()
{
std::cout << "Reporting GPU resources" << std::endl;
// Check the number of GPU resources
int nDevices;
cudaGetDeviceCount(&nDevices);
std::cout << "Found " << nDevices << " CUDA devices" << std::endl;
for(unsigned int i = 0; i < nDevices; i++)
{
// Find a gpu
cudaDeviceProp props;
checkCudaErrors(cudaGetDeviceProperties(&props, i));
std::cout << "Device " << i << ": " << props.name << " with compute "
<< props.major << "." << props.minor << " capability" << std::endl;
std::cout << "Max threads per block: " << props.maxThreadsPerBlock << std::endl;
std::cout << "Max grid size: " << props.maxGridSize[0] << " x " << props.maxGridSize[1] << " x " << props.maxGridSize[2] << std::endl;
std::cout << "Memory Clock Rate (KHz): " << props.memoryClockRate << std::endl;
std::cout << "Memory Bus Width (bits): " << props.memoryBusWidth << std::endl;
std::cout << "Peak Memory Bandwidth (GB/s): " << (2.0*props.memoryClockRate*(props.memoryBusWidth/8)/1.0e6) << std::endl;
int cores = 0;
int mp = props.multiProcessorCount;
switch (props.major){
case 2: // Fermi
if (props.minor == 1) cores = mp * 48;
else cores = mp * 32;
break;
case 3: // Kepler
cores = mp * 192;
break;
case 5: // Maxwell
cores = mp * 128;
break;
case 6: // Pascal
if (props.minor == 1) cores = mp * 128;
else if (props.minor == 0) cores = mp * 64;
else printf("Unknown device type\n");
break;
default:
printf("Unknown device type\n");
break;
}
std::cout << "SMs: " << mp << std::endl;
std::cout << "CUDA Cores: " << cores << '\n' << std::endl;
}
}
int *alloc_gpuInt(const int gpuId, const int elements, const int *data)
{
cudaError_t cudaerr;
if((cudaerr = cudaSetDevice(gpuId)) != cudaSuccess)
std::cout << "alloc_gpuInt failed to set the device with error code: " << cudaerr << ": " << cudaGetErrorString(cudaerr) << std::endl;
int *gpu_data;
if((cudaerr = cudaMalloc(&gpu_data, elements*sizeof(int))) != cudaSuccess)
std::cout << "alloc_gpuInt threw an error while allocating CUDA memory with error code: " << cudaerr << ": " << cudaGetErrorString(cudaerr) << std::endl;
if(data != NULL)
{
if((cudaerr = cudaMemcpyAsync(gpu_data, data, elements*sizeof(int), cudaMemcpyHostToDevice)) != cudaSuccess)
std::cout << "alloc_gpuInt failed while copying data with error code: " << cudaerr << ": " << cudaGetErrorString(cudaerr) << std::endl;
}
return gpu_data;
}
/*
RAY_T *alloc_gpuFloat(const int gpuId, const int elements, const RAY_T *cpuData)
{
cudaError_t cudaerr;
if((cudaerr = cudaSetDevice(gpuId)) != cudaSuccess)
std::cout << "alloc_gpuFloat failed to set the device with error code: " << cudaerr << ": " << cudaGetErrorString(cudaerr) << std::endl;
RAY_T *gpuData;
if((cudaerr = cudaMalloc(&gpuData, elements*sizeof(RAY_T))) != cudaSuccess)
std::cout << "alloc_gpuFloat threw an error while allocating CUDA memory with error code: " << cudaerr << ": " << cudaGetErrorString(cudaerr) << std::endl;
if(cpuData != NULL)
{
if((cudaerr = cudaMemcpyAsync(gpuData, cpuData, elements*sizeof(RAY_T), cudaMemcpyHostToDevice)) != cudaSuccess)
std::cout << "alloc_gpuFloat failed while copying data with error code: " << cudaerr << ": " << cudaGetErrorString(cudaerr) << std::endl;
}
return gpuData;
}
*/
/*
void release_gpu(int gpuId, float *gpu_data)
{
cudaError_t cudaerr;
if((cudaerr = cudaSetDevice(gpuId)) != cudaSuccess)
std::cout << "release_gpu (float) failed to set the device with error code: " << cudaerr << ": " << cudaGetErrorString(cudaerr) << std::endl;
if((cudaerr = cudaFree(gpu_data)) != cudaSuccess)
std::cout << "relase_gpu (float) threw an error while deallocating CUDA memory with error code: " << cudaerr << ": " << cudaGetErrorString(cudaerr) << std::endl;
}
void release_gpu(int gpuId, int *gpu_data)
{
cudaError_t cudaerr;
if((cudaerr = cudaSetDevice(gpuId)) != cudaSuccess)
std::cout << "release_gpu (int) failed to set the device with error code: " << cudaerr << ": " << cudaGetErrorString(cudaerr) << std::endl;
if((cudaerr = cudaFree(gpu_data)) != cudaSuccess)
std::cout << "relase_gpu (int) threw an error while deallocating int CUDA memory with error code: " << cudaerr << ": " << cudaGetErrorString(cudaerr) << std::endl;
}
*/
/*
void updateCpuData(int gpuId, float *cpuData, float *gpuData, size_t elements, int cpuOffset)
{
cudaError_t cudaerr;
if((cudaerr = cudaSetDevice(gpuId)) != cudaSuccess)
std::cout << "updateCpuData (float) failed to set the device with error code: " << cudaerr << ": " << cudaGetErrorString(cudaerr) << std::endl;
if((cudaerr = cudaMemcpyAsync(cpuData+cpuOffset, gpuData, elements*sizeof(float), cudaMemcpyDeviceToHost)) != cudaSuccess)
std::cout << "updateCpuData (float) MemcpyAsync failed with error code: " << cudaerr << ": " << cudaGetErrorString(cudaerr) << std::endl;
}
void updateCpuData(int gpuId, int *cpuData, int *gpuData, size_t elements, int cpuOffset)
{
cudaError_t cudaerr;
if((cudaerr = cudaSetDevice(gpuId)) != cudaSuccess)
std::cout << "updateCpuData (int) failed to set the device with error code: " << cudaerr << ": " << cudaGetErrorString(cudaerr) << std::endl;
if((cudaerr = cudaMemcpyAsync(cpuData+cpuOffset, gpuData, elements*sizeof(int), cudaMemcpyDeviceToHost)) != cudaSuccess)
std::cout << "updateCpuData (int) MemcpyAsync failed with error code: " << cudaerr << ": " << cudaGetErrorString(cudaerr) << std::endl;
}
*/
int launch_isoRayKernel(const Quadrature *quad, const Mesh *mesh, const XSection *xs, const SolverParams *solPar, const SourceParams *srcPar, std::vector<RAY_T> *uflux)
{
reportGpuData();
if(uflux == NULL)
{
std::cout << "STOP!" << std::endl;
return -1;
}
int gpuId = 0;
// Allocate memory space for the solution vector
//std::cout << "Allocating uflux" << std::endl;
RAY_T *gpuUflux = alloc_gpuFloat<RAY_T>(gpuId, mesh->voxelCount() * xs->groupCount(), NULL);
// Copy the xyzNode values
float *gpuXNodes = alloc_gpuFloat<float>(gpuId, mesh->xNodes.size(), &mesh->xNodes[0]);
float *gpuYNodes = alloc_gpuFloat<float>(gpuId, mesh->yNodes.size(), &mesh->yNodes[0]);
float *gpuZNodes = alloc_gpuFloat<float>(gpuId, mesh->zNodes.size(), &mesh->zNodes[0]);
// Copy the dxyz values
float *gpuDx = alloc_gpuFloat<float>(gpuId, mesh->dx.size(), &mesh->dx[0]);
float *gpuDy = alloc_gpuFloat<float>(gpuId, mesh->dy.size(), &mesh->dy[0]);
float *gpuDz = alloc_gpuFloat<float>(gpuId, mesh->dz.size(), &mesh->dz[0]);
// Copy the zone id number
int *gpuZoneId = alloc_gpuInt(gpuId, mesh->zoneId.size(), &mesh->zoneId[0]);
// Copy the atom density
float *gpuAtomDensity = alloc_gpuFloat<float>(gpuId, mesh->atomDensity.size(), &mesh->atomDensity[0]);
// Copy the xs data
float *gpuTot1d = alloc_gpuFloat<float>(gpuId, xs->m_tot1d.size(), &xs->m_tot1d[0]);
// Copy the source strength
//std::cout << "Allocating source strength" << std::endl;
float *gpuSrcStrength = alloc_gpuFloat<float>(gpuId, srcPar->spectraIntensity.size(), &srcPar->spectraIntensity[0]);
//int ixSrc, iySrc, izSrc;
unsigned int ixSrc = 0;
unsigned int iySrc = 0;
unsigned int izSrc = 0;
while(mesh->xNodes[ixSrc+1] < srcPar->sourceX)
ixSrc++;
while(mesh->yNodes[iySrc+1] < srcPar->sourceY)
iySrc++;
while(mesh->zNodes[izSrc+1] < srcPar->sourceZ)
izSrc++;
dim3 dimGrid(mesh->xElemCt, mesh->yElemCt);
dim3 dimBlock(mesh->zElemCt);
//std::cout << "Grid: " << dimGrid.x << "x" << dimGrid.y << ", Block: " << dimBlock.x << "x" << dimBlock.y << std::endl;
isoRayKernel<<<dimGrid, dimBlock>>>(
gpuUflux,
gpuXNodes, gpuYNodes, gpuZNodes,
gpuDx, gpuDy, gpuDz,
gpuZoneId,
gpuAtomDensity,
gpuTot1d,
gpuSrcStrength,
xs->groupCount(),
mesh->xElemCt, mesh->yElemCt, mesh->zElemCt,
srcPar->sourceX, srcPar->sourceY, srcPar->sourceZ,
ixSrc, iySrc, izSrc);
size_t elements = mesh->voxelCount() * xs->groupCount();
//uflux = new RAY_T[elements];
uflux->resize(elements);
//cudaDeviceSynchronize();
updateCpuDataBlocking(gpuId, &(*uflux)[0], gpuUflux, elements);
//OutWriter::writeArray("uflux.dat", *uflux);
release_gpu(gpuId, gpuUflux);
release_gpu(gpuId, gpuXNodes);
release_gpu(gpuId, gpuYNodes);
release_gpu(gpuId, gpuZNodes);
release_gpu(gpuId, gpuDx);
release_gpu(gpuId, gpuDy);
release_gpu(gpuId, gpuDz);
release_gpu(gpuId, gpuZoneId);
release_gpu(gpuId, gpuAtomDensity);
release_gpu(gpuId, gpuTot1d);
release_gpu(gpuId, gpuSrcStrength);
std::cout << "Most recent CUDA Error: " << cudaGetErrorString(cudaGetLastError()) << std::endl;
//if(cudaFree(gpu_data) != cudaSuccess)
// std::cout << "alloc_gpuInt failed while copying data" << std::endl;
return EXIT_SUCCESS;
}
int launch_isoSolKernel(const Quadrature *quad, const Mesh *mesh, const XSection *xs, const SolverParams *solPar, const SourceParams *srcPar, const std::vector<RAY_T> *cpuUFlux, std::vector<SOL_T> *cpuCFlux)
{
//std::cout << "Launching solver kernel" << std::endl;
if(cpuUFlux == NULL)
{
std::cout << "STOP!" << std::endl;
return -1;
}
if(cpuCFlux == NULL)
{
std::cout << "STOP!" << std::endl;
return -2;
}
int gpuId = 0;
std::vector<SOL_T> errMaxList;
std::vector<std::vector<SOL_T> > errList;
std::vector<std::vector<SOL_T> > errIntList;
std::vector<int> converganceIters;
std::vector<SOL_T> converganceTracker;
errMaxList.resize(xs->groupCount());
errList.resize(xs->groupCount());
errIntList.resize(xs->groupCount());
converganceIters.resize(xs->groupCount());
converganceTracker.resize(xs->groupCount());
std::clock_t startTime = std::clock();
const int maxIterations = 25;
const SOL_T epsilon = static_cast<SOL_T>(0.001);
cpuCFlux->resize(xs->groupCount() * mesh->voxelCount());
std::vector<SOL_T> cpuCFluxTmp(mesh->voxelCount(), static_cast<SOL_T>(0.0));
//std::vector<SOL_T> errMaxList;
//errMaxList.resize(xs->groupCount());
if(cpuUFlux == NULL && srcPar == NULL)
{
std::cout << "uFlux and srcPar cannot both be NULL" << std::endl;
return 55;
}
// Compute the highest energy group actually used
bool noDownscatterYet = true;
unsigned int highestEnergy = 0;
//std::cout << "About to do high check" << std::endl;
while(noDownscatterYet)
{
SOL_T dmax = 0.0;
unsigned int vc = mesh->voxelCount();
for(unsigned int ir = 0; ir < vc; ir++)
{
dmax = (dmax > (*cpuUFlux)[highestEnergy*vc + ir]) ? dmax : (*cpuUFlux)[highestEnergy*vc + ir];
}
if(dmax <= 0.0)
{
std::cout << "No external source or downscatter, skipping energy group " << highestEnergy << std::endl;
highestEnergy++;
}
else
{
noDownscatterYet = false;
}
if(highestEnergy >= xs->groupCount())
{
std::cout << "Zero flux everywhere from the raytracer" << std::endl;
return 57;
}
}
// Allocate GPU resources for the external source computation
RAY_T *gpuUFlux = alloc_gpuFloat<RAY_T>(gpuId, cpuUFlux->size(), &(*cpuUFlux)[0]);
SOL_T *gpuColFlux = alloc_gpuFloat<SOL_T>(gpuId, cpuCFlux->size(), NULL);
float *gpuVol = alloc_gpuFloat<float>(gpuId, mesh->vol.size(), &mesh->vol[0]);
float *gpuAtomDensity = alloc_gpuFloat<float>(gpuId, mesh->atomDensity.size(), &mesh->atomDensity[0]);
int *gpuZoneId = alloc_gpuInt(gpuId, mesh->zoneId.size(), &mesh->zoneId[0]);
float *gpuScatXs2d = alloc_gpuFloat<float>(gpuId, xs->m_scat2d.size(), &xs->m_scat2d[0]);
// Allocate additional GPU resources for the solver
SOL_T *gpuTempFlux = alloc_gpuFloat<SOL_T>(gpuId, mesh->voxelCount(), NULL);
SOL_T *gpu1stSource = alloc_gpuFloat<SOL_T>(gpuId, mesh->voxelCount(), NULL);
SOL_T *gpuTotalSource = alloc_gpuFloat<SOL_T>(gpuId, mesh->voxelCount(), NULL);
//unsigned int anglesPerOctant = quad->angleCount()/8;
//SOL_T *gpuOutboundFluxX = alloc_gpuFloat<SOL_T>(gpuId, mesh->yElemCt * mesh->zElemCt * anglesPerOctant, NULL);
//SOL_T *gpuOutboundFluxY = alloc_gpuFloat<SOL_T>(gpuId, mesh->yElemCt * mesh->zElemCt * anglesPerOctant, NULL);
//SOL_T *gpuOutboundFluxZ = alloc_gpuFloat<SOL_T>(gpuId, mesh->yElemCt * mesh->zElemCt * anglesPerOctant, NULL);
SOL_T *gpuOutboundFluxX = alloc_gpuFloat<SOL_T>(gpuId, mesh->voxelCount(), NULL);
SOL_T *gpuOutboundFluxY = alloc_gpuFloat<SOL_T>(gpuId, mesh->voxelCount(), NULL);
SOL_T *gpuOutboundFluxZ = alloc_gpuFloat<SOL_T>(gpuId, mesh->voxelCount(), NULL);
float *gpuAxy = alloc_gpuFloat<float>(gpuId, mesh->Axy.size(), &mesh->Axy[0]);
float *gpuAxz = alloc_gpuFloat<float>(gpuId, mesh->Axz.size(), &mesh->Axz[0]);
float *gpuAyz = alloc_gpuFloat<float>(gpuId, mesh->Ayz.size(), &mesh->Ayz[0]);
float *gpuMu = alloc_gpuFloat<float>(gpuId, quad->mu.size(), &quad->mu[0]);
float *gpuEta = alloc_gpuFloat<float>(gpuId, quad->eta.size(), &quad->eta[0]);
float *gpuXi = alloc_gpuFloat<float>(gpuId, quad->zi.size(), &quad->zi[0]);
float *gpuWt = alloc_gpuFloat<float>(gpuId, quad->wt.size(), &quad->wt[0]);
float *gpuTotXs1d = alloc_gpuFloat<float>(gpuId, xs->m_tot1d.size(), &xs->m_tot1d[0]);
// Zero the scalar flux
int erblocks = 64;
int ergrids = cpuCFlux->size() / erblocks;
if(cpuCFlux->size() % erblocks != 0)
ergrids += 1; // Account for lengths not divisible by 64
zeroKernel<<<dim3(ergrids), dim3(erblocks)>>>(cpuCFlux->size(), gpuColFlux);
// Generate the sweep index block
int totalSubsweeps = mesh->xElemCt + mesh->yElemCt + mesh->zElemCt - 2;
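// The transport sweep is decomposed into diagonal "wavefront" planes ix+iy+iz = const;
// every voxel on a plane depends only on voxels from earlier planes in the sweep order,
// so a whole plane can be processed in parallel by one kernel launch. An
// xElemCt x yElemCt x zElemCt grid has xElemCt+yElemCt+zElemCt-2 such planes.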
std::vector<int> threadIndexToGlobalIndex(mesh->voxelCount());
std::vector<int> subSweepStartIndex(totalSubsweeps);
std::vector<int> subSweepVoxelCount(totalSubsweeps);
// Trivial edge cases that aren't computed during the loop
subSweepStartIndex[0] = 0;
threadIndexToGlobalIndex[0] = 0;
subSweepVoxelCount[totalSubsweeps-1] = 1;
for(unsigned int iSubSweep = 1; iSubSweep < totalSubsweeps; iSubSweep++) // start at 1: sub-sweep 0 is seeded above and iSubSweepPrev must not go negative
{
int iSubSweepPrev = iSubSweep - 1;
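// Count the voxels on the previous diagonal plane by inclusion-exclusion:
// C is the unclipped triangular count of ix+iy+iz = iSubSweepPrev, the L terms remove
// solutions that overrun a single dimension, and the G terms add back corners removed twice.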
int C = (iSubSweepPrev+1) * (iSubSweepPrev+2) / 2;
int dx = max(iSubSweepPrev+1 - (signed)mesh->xElemCt, 0);
int dy = max(iSubSweepPrev+1 - (signed)mesh->yElemCt, 0);
int dz = max(iSubSweepPrev+1 - (signed)mesh->zElemCt, 0);
int dxy = max(iSubSweepPrev+1 - (signed)mesh->xElemCt - (signed)mesh->yElemCt, 0);
int dxz = max(iSubSweepPrev+1 - (signed)mesh->xElemCt - (signed)mesh->zElemCt, 0);
int dyz = max(iSubSweepPrev+1 - (signed)mesh->yElemCt - (signed)mesh->zElemCt, 0);
int Lx = dx * (dx + 1) / 2;
int Ly = dy * (dy + 1) / 2;
int Lz = dz * (dz + 1) / 2;
int Gxy = dxy * (dxy + 1) / 2;
int Gxz = dxz * (dxz + 1) / 2;
int Gyz = dyz * (dyz + 1) / 2;
int voxPrevSubSweep = C - Lx - Ly - Lz + Gxy + Gxz + Gyz;
subSweepStartIndex[iSubSweep] = subSweepStartIndex[iSubSweepPrev] + voxPrevSubSweep;
subSweepVoxelCount[iSubSweepPrev] = voxPrevSubSweep;
int voxelsSoFar = 0;
for(int ix = 0; ix <= min(mesh->xElemCt-1, iSubSweep); ix++)
for(int iy = 0; iy <= min(mesh->yElemCt-1, iSubSweep-ix); iy++)
{
int iz = iSubSweep - ix - iy;
if(iz >= mesh->zElemCt)
continue;
int ir = ix*mesh->yElemCt*mesh->zElemCt + iy*mesh->zElemCt + iz;
threadIndexToGlobalIndex[subSweepStartIndex[iSubSweep] + voxelsSoFar] = ir;
voxelsSoFar++;
}
}
int *gpuThreadIndexToGlobalIndex = alloc_gpuInt(gpuId, threadIndexToGlobalIndex.size(), &threadIndexToGlobalIndex[0]);
dim3 dimGrid(mesh->xElemCt, mesh->yElemCt);
dim3 dimBlock(mesh->zElemCt);
for(unsigned int ie = highestEnergy; ie < xs->groupCount(); ie++) // for every energy group
{
std::cout << "ie=" << ie << std::endl;
int iterNum = 1;
SOL_T maxDiff = 1.0;
SOL_T totDiff = 1.0E10; // Should be very large
SOL_T totDiffPre = 1.0E11; // Should be larger than totDiff
// Needs to be done before the first clearSweepKernel<<<>>> call
int rblocks = 64;
int rgrids = mesh->voxelCount() / rblocks;
if(mesh->voxelCount() % rblocks != 0)
rgrids += 1; // Account for lengths not divisible by 64
zeroKernel<<<dim3(rgrids), dim3(rblocks)>>>(mesh->voxelCount(), gpuTempFlux);
//zeroKernel<<<dim3(rgrids), dim3(rblocks)>>>(mesh->voxelCount(), gpu1stSource);
isoSrcKernel<<<dimGrid, dimBlock>>>(
gpuUFlux,
gpu1stSource,
gpuVol, gpuAtomDensity, gpuZoneId,
gpuScatXs2d,
mesh->voxelCount(), xs->groupCount(), solPar->pn+1, highestEnergy, ie,
mesh->xElemCt, mesh->yElemCt, mesh->zElemCt);
//cudaDeviceSynchronize();
//std::cout << "About to write the source results" << std::endl;
//std::vector<float> cpuExtSrc;
//cpuExtSrc.resize(mesh->voxelCount());
//updateCpuDataBlocking(gpuId, &cpuExtSrc[0], gpu1stSource, mesh->voxelCount());
//char ieString[256];
//sprintf(ieString, "%d", ie);
//OutWriter::writeArray(std::string("gpuExtSrc") + ieString + ".dat", cpuExtSrc);
downscatterKernel<<<dimGrid, dimBlock>>>(
gpuTotalSource,
highestEnergy, ie,
mesh->xElemCt, mesh->yElemCt, mesh->zElemCt, xs->groupCount(), solPar->pn+1,
gpuZoneId,
gpuColFlux,
gpuScatXs2d,
gpuAtomDensity, gpuVol,
gpu1stSource);
for(unsigned int i = 0; i < cpuCFluxTmp.size(); i++)
cpuCFluxTmp[i] = 0.0f;
//printf("Begin Total crit=%e\n", totDiff/totDiffPre);
while(iterNum <= maxIterations && maxDiff > epsilon && totDiff/totDiffPre < 1.0) // while not converged
{
//std::cout << "it: " << iterNum << std::endl;
clearSweepKernel<<<dimGrid, dimBlock>>>(
gpuColFlux, gpuTempFlux,
mesh->xElemCt, mesh->yElemCt, mesh->zElemCt, ie);
for(unsigned int i = 0; i < cpuCFluxTmp.size(); i++)
{
//cpuCFlux[ie*mesh->voxelCount() + i] = cpuCFluxTmp[i];
cpuCFluxTmp[i] = 0.0f;
}
for(unsigned int iang = 0; iang < quad->angleCount(); iang++) // for every angle
//for(unsigned int io = 0; io < 8; io++) // for every octant
{
// Find the correct direction to sweep
int diz = 1; // Sweep direction
if(quad->eta[iang] < 0) // Condition to sweep backward
{
diz = -1; // Sweep toward zero
}
int diy = 1;
if(quad->zi[iang] < 0)
{
diy = -1;
}
int dix = 1;
if(quad->mu[iang] < 0)
{
dix = -1;
}
//int dix = io / 4 == 0 ? 1 : -1; // + x x, positive are first
//int diy = (io/2) % 2 == 0 ? 1 : -1; // x + x, alternate every other octant
//int diz = io % 2 == 0 ? 1 : -1; // x x + alternate every octant
//std::cout << "io = " << io << ", dix = " << dix << ", diy = " << diy << ", diz = " << diz << std::endl;
for(unsigned int subSweepId = 0; subSweepId < totalSubsweeps; subSweepId++)
{
//std::cout << "Subsweep " << subSweepId << std::endl;
int raise = subSweepVoxelCount[subSweepId] % 64 == 0 ? 0 : 1;
dim3 dimGridS(subSweepVoxelCount[subSweepId] / 64 + raise);
dim3 dimBlockS(64);
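// One thread per voxel on this diagonal plane, rounded up to whole 64-thread blocks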
isoSolKernel<<<dimGridS, dimBlockS>>>(
gpuColFlux, gpuTempFlux,
gpuTotalSource,
gpuTotXs1d, gpuScatXs2d,
gpuAxy, gpuAxz, gpuAyz,
gpuZoneId, gpuAtomDensity, gpuVol,
gpuMu, gpuEta, gpuXi, gpuWt,
gpuOutboundFluxX, gpuOutboundFluxY, gpuOutboundFluxZ,
ie, iang,
mesh->xElemCt, mesh->yElemCt, mesh->zElemCt, xs->groupCount(), quad->angleCount(), solPar->pn+1, // Pass pn+1, the number of Pn moment groups
dix, diy, diz,
subSweepStartIndex[subSweepId], subSweepVoxelCount[subSweepId], gpuThreadIndexToGlobalIndex);
//cudaDeviceSynchronize();
//std::cout << "Launched subSweepId=" << subSweepId << "(" << dimGridS.x << ", " << dimGridS.y << " : " << dimBlockS.x << ", " << dimBlockS.y << " )" << std::endl;
//std::cin.ignore(1024, '\n');
//std::cout << "Ran angle " << iang << std::endl;
//std::cin.get();
}
updateCpuData(gpuId, &cpuCFluxTmp[0], gpuTempFlux, mesh->voxelCount());
} // end of all angles
//updateCpuDataBlocking(gpuId, &cpuCFluxTmp[0], gpuTempFlux, mesh->voxelCount());
//char iterString[3]; // 2 digits + NULL
//char ieString[3]; // 2 digits + NULL
//sprintf(iterString, "%d", iterNum);
//sprintf(ieString, "%d", ie);
//OutWriter::writeArray(std::string("gpuScalarFlux_") + std::string(ieString) + "_" + std::string(iterString) + ".dat", cpuCFluxTmp);
// Make sure all kernels and data transfers finish before advancing
cudaDeviceSynchronize();
maxDiff = -1.0e35f;
totDiffPre = totDiff;
totDiff = 0.0f;
//float totDiff = 0.0f;
for(unsigned int i = 0; i < mesh->voxelCount(); i++)
{
maxDiff = max( abs((cpuCFluxTmp[i]-(*cpuCFlux)[ie*mesh->voxelCount() + i])/cpuCFluxTmp[i]), maxDiff);
totDiff += abs(cpuCFluxTmp[i]-(*cpuCFlux)[ie*mesh->voxelCount() + i]);
}
for(unsigned int i = 0; i < cpuCFluxTmp.size(); i++)
{
(*cpuCFlux)[ie*mesh->voxelCount() + i] = cpuCFluxTmp[i];
}
//printf("Diff=%e, tot=%e, pre=%e, condition=%e\n", maxDiff, totDiff, totDiffPre, totDiff/totDiffPre);
errList[ie].push_back(maxDiff);
errIntList[ie].push_back(totDiff);
errMaxList[ie] = maxDiff;
converganceIters[ie] = iterNum;
//for(unsigned int i = 0; i < mesh->voxelCount(); i++)
// cpuCFluxTmp[i] = (*cpuCFlux)[ie*mesh->voxelCount() + i];
//std::cout << "Max diff = " << maxDiff << std::endl;
iterNum++;
} // end not converged
if(!(iterNum <= maxIterations))
{
std::cout << "Max iterations hit" << std::endl;
}
else if(!(maxDiff > epsilon))
{
std::cout << "Converged on relative error" << std::endl;
}
else
{
std::cout << "Converged on precsion bound" << std::endl;
}
//iterNum <= maxIterations && maxDiff > epsilon && totDiff/totDiffPre < 1.0
//printf("End Total crit=%e\n", totDiff/totDiffPre);
} // end each energy group
std::cout << "Time to complete: " << (std::clock() - startTime)/(double)(CLOCKS_PER_SEC/1000) << " ms" << std::endl;
// Release the GPU resources
release_gpu(gpuId, gpuUFlux);
release_gpu(gpuId, gpuZoneId);
release_gpu(gpuId, gpuAtomDensity);
release_gpu(gpuId, gpuAxy);
release_gpu(gpuId, gpuAxz);
release_gpu(gpuId, gpuAyz);
release_gpu(gpuId, gpuMu);
release_gpu(gpuId, gpuEta);
release_gpu(gpuId, gpuXi);
release_gpu(gpuId, gpuWt);
release_gpu(gpuId, gpuTotXs1d);
release_gpu(gpuId, gpuScatXs2d);
for(unsigned int i = 0; i < errList.size(); i++)
{
std::cout << "%Group: " << i << " maxDiff: " << errMaxList[i] << " Iterations: " << converganceIters[i] << '\n';
std::cout << "gpu" << i << " = [";
for(unsigned int j = 0; j < errList[i].size(); j++)
std::cout << errList[i][j] << ",\t";
std::cout << "];\ngpu" << i << "i = [";
for(unsigned int j = 0; j < errIntList[i].size(); j++)
std::cout << errIntList[i][j] << ",\t";
std::cout << "];" << std::endl;
}
std::cout << "Most recent CUDA Error: " << cudaGetErrorString(cudaGetLastError()) << std::endl;
cudaDeviceReset();
cudaProfilerStop();
return EXIT_SUCCESS;
}
/*
template <class T>
void reduce(int size, int threads, int blocks, T *d_idata, T *d_odata)
{
int numBlocks = 0;
int numThreads = 0;
int maxBlocks = 64;
int maxThreads = 256;
//getNumBlocksAndThreads(0, size, maxBlocks, maxThreads, numBlocks, numThreads);
cudaDeviceProp prop;
//int device;
// checkCudaErrors(cudaGetDevice(&gpuId));
checkCudaErrors(cudaGetDeviceProperties(&prop, gpuId));
numThreads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
numBlocks = (n + (numThreads * 2 - 1)) / (numThreads * 2);
if ((float)numThreads*numBlocks > (float)prop.maxGridSize[0] * prop.maxThreadsPerBlock)
{
printf("n is too large, please choose a smaller number!\n");
}
if (numBlocks > prop.maxGridSize[0])
{
printf("Grid size <%d> excceeds the device capability <%d>, set block size as %d (original %d)\n",
numBlocks, prop.maxGridSize[0], numThreads*2, numThreads);
numBlocks /= 2;
numThreads *= 2;
}
numBlocks = MIN(maxBlocks, numBlocks);
// allocate mem for the result on host side
T *h_odata = (T *) malloc(numBlocks*sizeof(T));
printf("%d blocks\n\n", numBlocks);
// allocate device memory and data
T *d_idata = NULL;
T *d_odata = NULL;
checkCudaErrors(cudaMalloc((void **) &d_idata, bytes));
checkCudaErrors(cudaMalloc((void **) &d_odata, numBlocks*sizeof(T)));
// copy data directly to device memory
checkCudaErrors(cudaMemcpy(d_idata, h_idata, bytes, cudaMemcpyHostToDevice));
checkCudaErrors(cudaMemcpy(d_odata, h_idata, numBlocks*sizeof(T), cudaMemcpyHostToDevice));
// warm-up
//reduce<T>(size, numThreads, numBlocks, whichKernel, d_idata, d_odata);
dim3 dimBlock(threads, 1, 1);
dim3 dimGrid(blocks, 1, 1);
// when there is only one warp per block, we need to allocate two warps
// worth of shared memory so that we don't index shared memory out of bounds
int smemSize = (threads <= 32) ? 2 * threads * sizeof(T) : threads * sizeof(T);
if (isPow2(size))
{
switch (threads)
{
case 512:
reduce6<T, 512, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 256:
reduce6<T, 256, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 128:
reduce6<T, 128, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 64:
reduce6<T, 64, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 32:
reduce6<T, 32, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 16:
reduce6<T, 16, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 8:
reduce6<T, 8, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
reduce6<T, 4, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
reduce6<T, 2, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
reduce6<T, 1, true><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
}
}
else
{
switch (threads)
{
case 512:
reduce6<T, 512, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 256:
reduce6<T, 256, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 128:
reduce6<T, 128, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 64:
reduce6<T, 64, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 32:
reduce6<T, 32, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 16:
reduce6<T, 16, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 8:
reduce6<T, 8, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 4:
reduce6<T, 4, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 2:
reduce6<T, 2, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
case 1:
reduce6<T, 1, false><<< dimGrid, dimBlock, smemSize >>>(d_idata, d_odata, size);
break;
}
}
}
*/
/*
void getNumBlocksAndThreads(int gpuId, int n, int maxBlocks, int maxThreads, int &blocks, int &threads)
{
//get device capability, to avoid block/grid size exceeding the upper bound
cudaDeviceProp prop;
//int device;
// checkCudaErrors(cudaGetDevice(&gpuId));
checkCudaErrors(cudaGetDeviceProperties(&prop, gpuId));
threads = (n < maxThreads*2) ? nextPow2((n + 1)/ 2) : maxThreads;
blocks = (n + (threads * 2 - 1)) / (threads * 2);
if ((float)threads*blocks > (float)prop.maxGridSize[0] * prop.maxThreadsPerBlock)
{
printf("n is too large, please choose a smaller number!\n");
}
if (blocks > prop.maxGridSize[0])
{
printf("Grid size <%d> excceeds the device capability <%d>, set block size as %d (original %d)\n",
blocks, prop.maxGridSize[0], threads*2, threads);
blocks /= 2;
threads *= 2;
}
blocks = MIN(maxBlocks, blocks);
}
*/
|
0f53080760b6b2009fd0104fd4f57faf7eee388e.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include<stdio.h>
#include<stdlib.h>
#include <opencv2/opencv.hpp>
#include <cfloat>
#include <opencv2/core/cuda/common.hpp>
#include <opencv2/core/cuda/border_interpolate.hpp>
#include <opencv2/core/cuda/vec_traits.hpp>
#include <opencv2/core/cuda/vec_math.hpp>
__device__ float getTheta(float x, float y)
{
float PI = 3.14159265358979323846;
float rtn = 0;
if (y < 0)
{
rtn = atan2(y, x) * -1;
}
else
{
rtn = PI + (PI - atan2(y, x));
}
return rtn;
}
__global__ void equi2cube(const cv::cuda::PtrStep<uchar3> src, cv::cuda::PtrStep<uchar3> dst, int drows, int dcols, int srows, int scols)
{
float PI = 3.14159265358979323846;
float inputHeight = srows;
float inputWidth = scols;
float sqr = inputWidth / 4.0;
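// The output cube map is a 3x2 grid of square faces, each sqr pixels on a side:
// top row [Y+][X+][Y-], bottom row [X-][Z-][Z+], matching the branches below.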
int dst_x = blockDim.x * blockIdx.x + threadIdx.x;
int dst_y = blockDim.y * blockIdx.y + threadIdx.y;
float tx = 0;
float ty = 0;
float x = 0;
float y = 0;
float z = 0;
float rho = 0;
float normTheta = 0;
float normPhi = 0;
float iX;
float iY;
// iterate over pixels of the output image
// height and width inclusive
if (dst_x < dcols && dst_y < drows)
{
dst_x += 1;
dst_y += 1;
// local coordinates for the cube map face.
tx = 0;
ty = 0;
// normalized local coordinates
x = 0;
y = 0;
z = 0;
// top half
if (dst_y < sqr + 1) {
// top left box[Y + ]
if (dst_x < sqr + 1) {
tx = dst_x;
ty = dst_y;
x = tx - 0.5 * sqr;
y = 0.5 * sqr;
z = ty - 0.5 * sqr;
}
// top middle[X + ]
else if (dst_x < 2 * sqr + 1) {
tx = dst_x - sqr;
ty = dst_y;
x = 0.5 * sqr;
y = (tx - 0.5 * sqr) * -1;
z = ty - 0.5 * sqr;
}
// top right[Y - ]
else {
tx = dst_x - sqr * 2;
ty = dst_y;
x = (tx - 0.5 * sqr) * -1;
y = -0.5 * sqr;
z = ty - 0.5 * sqr;
}
}
// bottom half
else {
// bottom left box[X - ]
if (dst_x < sqr + 1) {
tx = dst_x;
ty = dst_y - sqr;
x = int(-0.5 * sqr);
y = int(tx - 0.5 * sqr);
z = int(ty - 0.5 * sqr);
}
// bottom middle[Z - ]
else if (dst_x < 2 * sqr + 1) {
tx = dst_x - sqr;
ty = dst_y - sqr;
x = (ty - 0.5 * sqr) * -1;
y = (tx - 0.5 * sqr) * -1;
z = 0.5 * sqr;
}
// bottom right[Z + ]
else {
tx = dst_x - sqr * 2;
ty = dst_y - sqr;
x = ty - 0.5 * sqr;
y = (tx - 0.5 * sqr) * -1;
z = -0.5 * sqr;
}
}
// now find out the polar coordinates
rho = sqrt(x * x + y * y + z * z);
normTheta = getTheta(x, y) / (2 * PI);
normPhi = (PI - acos(z / rho)) / PI;
iX = normTheta * inputWidth;
iY = normPhi * inputHeight;
// catch possible overflows
if (iX >= inputWidth) {
iX = iX - (inputWidth);
}
if (iY >= inputHeight) {
iY = iY - (inputHeight);
}
dst(dst_y - 1, dst_x - 1).x = src(int(iY), int(iX)).x;
dst(dst_y - 1, dst_x - 1).y = src(int(iY), int(iX)).y;
dst(dst_y - 1, dst_x - 1).z = src(int(iY), int(iX)).z;
}
}
int divUp(int a, int b)
{
return ((a % b) != 0) ? (a / b + 1) : (a / b);
}
void equi2cubeCUDA(cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst)
{
const dim3 block(32, 32);
const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
equi2cube << <grid, block >> > (src, dst, dst.rows, dst.cols, src.rows, src.cols);
}
|
0f53080760b6b2009fd0104fd4f57faf7eee388e.cu
|
#include<stdio.h>
#include<stdlib.h>
#include <opencv2/opencv.hpp>
#include <cfloat>
#include <opencv2/core/cuda/common.hpp>
#include <opencv2/core/cuda/border_interpolate.hpp>
#include <opencv2/core/cuda/vec_traits.hpp>
#include <opencv2/core/cuda/vec_math.hpp>
__device__ float getTheta(float x, float y)
{
float PI = 3.14159265358979323846;
float rtn = 0;
if (y < 0)
{
rtn = atan2(y, x) * -1;
}
else
{
rtn = PI + (PI - atan2(y, x));
}
return rtn;
}
__global__ void equi2cube(const cv::cuda::PtrStep<uchar3> src, cv::cuda::PtrStep<uchar3> dst, int drows, int dcols, int srows, int scols)
{
float PI = 3.14159265358979323846;
float inputHeight = srows;
float inputWidth = scols;
float sqr = inputWidth / 4.0;
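// The output cube map is a 3x2 grid of square faces, each sqr pixels on a side:
// top row [Y+][X+][Y-], bottom row [X-][Z-][Z+], matching the branches below.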
int dst_x = blockDim.x * blockIdx.x + threadIdx.x;
int dst_y = blockDim.y * blockIdx.y + threadIdx.y;
float tx = 0;
float ty = 0;
float x = 0;
float y = 0;
float z = 0;
float rho = 0;
float normTheta = 0;
float normPhi = 0;
float iX;
float iY;
// iterate over pixels of the output image
// height and width inclusive
if (dst_x < dcols && dst_y < drows)
{
dst_x += 1;
dst_y += 1;
// local coordinates for the cube map face.
tx = 0;
ty = 0;
// normalized local coordinates
x = 0;
y = 0;
z = 0;
// top half
if (dst_y < sqr + 1) {
// top left box[Y + ]
if (dst_x < sqr + 1) {
tx = dst_x;
ty = dst_y;
x = tx - 0.5 * sqr;
y = 0.5 * sqr;
z = ty - 0.5 * sqr;
}
// top middle[X + ]
else if (dst_x < 2 * sqr + 1) {
tx = dst_x - sqr;
ty = dst_y;
x = 0.5 * sqr;
y = (tx - 0.5 * sqr) * -1;
z = ty - 0.5 * sqr;
}
// top right[Y - ]
else {
tx = dst_x - sqr * 2;
ty = dst_y;
x = (tx - 0.5 * sqr) * -1;
y = -0.5 * sqr;
z = ty - 0.5 * sqr;
}
}
// bottom half
else {
// bottom left box[X - ]
if (dst_x < sqr + 1) {
tx = dst_x;
ty = dst_y - sqr;
x = int(-0.5 * sqr);
y = int(tx - 0.5 * sqr);
z = int(ty - 0.5 * sqr);
}
// bottom middle[Z - ]
else if (dst_x < 2 * sqr + 1) {
tx = dst_x - sqr;
ty = dst_y - sqr;
x = (ty - 0.5 * sqr) * -1;
y = (tx - 0.5 * sqr) * -1;
z = 0.5 * sqr;
}
// bottom right[Z + ]
else {
tx = dst_x - sqr * 2;
ty = dst_y - sqr;
x = ty - 0.5 * sqr;
y = (tx - 0.5 * sqr) * -1;
z = -0.5 * sqr;
}
}
// now find out the polar coordinates
rho = sqrt(x * x + y * y + z * z);
normTheta = getTheta(x, y) / (2 * PI);
normPhi = (PI - acos(z / rho)) / PI;
iX = normTheta * inputWidth;
iY = normPhi * inputHeight;
// catch possible overflows
if (iX >= inputWidth) {
iX = iX - (inputWidth);
}
if (iY >= inputHeight) {
iY = iY - (inputHeight);
}
dst(dst_y - 1, dst_x - 1).x = src(int(iY), int(iX)).x;
dst(dst_y - 1, dst_x - 1).y = src(int(iY), int(iX)).y;
dst(dst_y - 1, dst_x - 1).z = src(int(iY), int(iX)).z;
}
}
int divUp(int a, int b)
{
return ((a % b) != 0) ? (a / b + 1) : (a / b);
}
void equi2cubeCUDA(cv::cuda::GpuMat& src, cv::cuda::GpuMat& dst)
{
const dim3 block(32, 32);
const dim3 grid(divUp(dst.cols, block.x), divUp(dst.rows, block.y));
equi2cube << <grid, block >> > (src, dst, dst.rows, dst.cols, src.rows, src.cols);
}
|
0e1b05c449e62cb4caf08607b345758f957e6eb1.hip
|
// !!! This is a file automatically generated by hipify!!!
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file adabelief.cu
* \brief Optimizer operators
* \author khaotik
*/
#include "./adabelief-inl.h"
namespace mxnet {
namespace op {
namespace adabelief {
template <>
void GetScaleFloat<gpu>(mshadow::Stream<gpu>* s, const TBlob& scale_blob, float* pScalef) {
MSHADOW_REAL_TYPE_SWITCH(scale_blob.type_flag_, DType, {
DType scale = 0;
hipStream_t stream = mshadow::Stream<gpu>::GetStream(s);
CUDA_CALL(hipMemcpyAsync(
&scale, scale_blob.dptr<DType>(), sizeof(DType), hipMemcpyDeviceToHost, stream));
CUDA_CALL(hipStreamSynchronize(stream));
*pScalef = static_cast<float>(scale);
})
}
} // namespace adabelief
NNVM_REGISTER_OP(_adabelief_update)
.set_attr<FCompute>("FCompute<gpu>", adabelief::MPUpdate<gpu, adabelief::AdaBeliefUpdate<gpu>>);
NNVM_REGISTER_OP(_mp_adabelief_update)
.set_attr<FCompute>("FCompute<gpu>",
adabelief::MPUpdate<gpu, adabelief::MPAdaBeliefUpdate<gpu>>);
NNVM_REGISTER_OP(_multi_adabelief_update)
.set_attr<FCompute>("FCompute<gpu>", adabelief::multiMPUpdate<gpu, false>);
NNVM_REGISTER_OP(_multi_mp_adabelief_update)
.set_attr<FCompute>("FCompute<gpu>", adabelief::multiMPUpdate<gpu, true>);
} // namespace op
} // namespace mxnet
|
0e1b05c449e62cb4caf08607b345758f957e6eb1.cu
|
/*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
/*!
* \file adabelief.cu
* \brief Optimizer operators
* \author khaotik
*/
#include "./adabelief-inl.h"
namespace mxnet {
namespace op {
namespace adabelief {
template <>
void GetScaleFloat<gpu>(mshadow::Stream<gpu>* s, const TBlob& scale_blob, float* pScalef) {
MSHADOW_REAL_TYPE_SWITCH(scale_blob.type_flag_, DType, {
DType scale = 0;
cudaStream_t stream = mshadow::Stream<gpu>::GetStream(s);
CUDA_CALL(cudaMemcpyAsync(
&scale, scale_blob.dptr<DType>(), sizeof(DType), cudaMemcpyDeviceToHost, stream));
CUDA_CALL(cudaStreamSynchronize(stream));
*pScalef = static_cast<float>(scale);
})
}
} // namespace adabelief
NNVM_REGISTER_OP(_adabelief_update)
.set_attr<FCompute>("FCompute<gpu>", adabelief::MPUpdate<gpu, adabelief::AdaBeliefUpdate<gpu>>);
NNVM_REGISTER_OP(_mp_adabelief_update)
.set_attr<FCompute>("FCompute<gpu>",
adabelief::MPUpdate<gpu, adabelief::MPAdaBeliefUpdate<gpu>>);
NNVM_REGISTER_OP(_multi_adabelief_update)
.set_attr<FCompute>("FCompute<gpu>", adabelief::multiMPUpdate<gpu, false>);
NNVM_REGISTER_OP(_multi_mp_adabelief_update)
.set_attr<FCompute>("FCompute<gpu>", adabelief::multiMPUpdate<gpu, true>);
} // namespace op
} // namespace mxnet
|
516f9f7972b76cb8f47611dd28402b4d18a18e86.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// See 'LICENSE_PHANTASY_ENGINE' for copyright and contributors.
#include "kernels/ProcessGBufferKernel.hpp"
#include "CudaHelpers.hpp"
#include "CudaSfzVectorCompatibility.cuh"
#include "GBufferRead.cuh"
namespace phe {
using sfz::vec3;
using sfz::vec4;
// Helper functions
// ------------------------------------------------------------------------------------------------
// Assumes both parameters are normalized
static __device__ vec3 reflect(vec3 in, vec3 normal) noexcept
{
return in - 2.0f * dot(normal, in) * normal;
}
// Kernels
// ------------------------------------------------------------------------------------------------
static __global__ void tempWriteColorKernel(hipSurfaceObject_t surface, vec2i res,
hipSurfaceObject_t normalTex)
{
// Calculate surface coordinates
vec2i loc = vec2i(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (loc.x >= res.x || loc.y >= res.y) return;
float4 tmp = surf2Dread<float4>(normalTex, loc.x * sizeof(float4), loc.y);
vec4 color = vec4(1.0f, 0.0f, 0.0f, 1.0f);
//surf2Dwrite(toFloat4(color), surface, loc.x * sizeof(float4), loc.y)
surf2Dwrite(tmp, surface, loc.x * sizeof(float4), loc.y);
}
static __global__ void createReflectRaysKernel(vec3 camPos, vec2i res,
hipSurfaceObject_t posTex,
hipSurfaceObject_t normalTex,
hipSurfaceObject_t albedoTex,
hipSurfaceObject_t materialTex,
RayIn* raysOut)
{
// Calculate surface coordinates
vec2u loc = vec2u(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (loc.x >= res.x || loc.y >= res.y) return;
// Read GBuffer
GBufferValue pixelVal = readGBuffer(posTex, normalTex, albedoTex, materialTex, loc);
// Calculate reflect direction
vec3 camDir = normalize(pixelVal.pos - camPos);
vec3 reflected = reflect(camDir, pixelVal.normal);
// Create ray
RayIn ray;
ray.setDir(reflected);
ray.setOrigin(pixelVal.pos);
ray.setMinDist(0.0001f);
ray.setMaxDist(FLT_MAX);
// Write ray to array
uint32_t id = loc.y * res.x + loc.x;
raysOut[id] = ray;
}
// Kernel cpu interfaces
// ------------------------------------------------------------------------------------------------
void launchCreateReflectRaysKernel(const CreateReflectRaysInput& input, RayIn* raysOut) noexcept
{
// Calculate number of threads and blocks to run
dim3 threadsPerBlock(8, 8);
dim3 numBlocks((input.res.x + threadsPerBlock.x - 1) / threadsPerBlock.x,
(input.res.y + threadsPerBlock.y - 1) / threadsPerBlock.y);
// Run cuda ray tracer kernel
hipLaunchKernelGGL(( createReflectRaysKernel), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, input.camPos, input.res,
input.posTex, input.normalTex,
input.albedoTex, input.materialTex,
raysOut);
CHECK_CUDA_ERROR(hipGetLastError());
CHECK_CUDA_ERROR(hipDeviceSynchronize());
}
} // namespace phe
|
516f9f7972b76cb8f47611dd28402b4d18a18e86.cu
|
// See 'LICENSE_PHANTASY_ENGINE' for copyright and contributors.
#include "kernels/ProcessGBufferKernel.hpp"
#include "CudaHelpers.hpp"
#include "CudaSfzVectorCompatibility.cuh"
#include "GBufferRead.cuh"
namespace phe {
using sfz::vec3;
using sfz::vec4;
// Helper functions
// ------------------------------------------------------------------------------------------------
// Assumes both parameters are normalized
static __device__ vec3 reflect(vec3 in, vec3 normal) noexcept
{
return in - 2.0f * dot(normal, in) * normal;
}
// Kernels
// ------------------------------------------------------------------------------------------------
static __global__ void tempWriteColorKernel(cudaSurfaceObject_t surface, vec2i res,
cudaSurfaceObject_t normalTex)
{
// Calculate surface coordinates
vec2i loc = vec2i(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (loc.x >= res.x || loc.y >= res.y) return;
float4 tmp = surf2Dread<float4>(normalTex, loc.x * sizeof(float4), loc.y);
vec4 color = vec4(1.0f, 0.0f, 0.0f, 1.0f);
//surf2Dwrite(toFloat4(color), surface, loc.x * sizeof(float4), loc.y)
surf2Dwrite(tmp, surface, loc.x * sizeof(float4), loc.y);
}
static __global__ void createReflectRaysKernel(vec3 camPos, vec2i res,
cudaSurfaceObject_t posTex,
cudaSurfaceObject_t normalTex,
cudaSurfaceObject_t albedoTex,
cudaSurfaceObject_t materialTex,
RayIn* raysOut)
{
// Calculate surface coordinates
vec2u loc = vec2u(blockIdx.x * blockDim.x + threadIdx.x,
blockIdx.y * blockDim.y + threadIdx.y);
if (loc.x >= res.x || loc.y >= res.y) return;
// Read GBuffer
GBufferValue pixelVal = readGBuffer(posTex, normalTex, albedoTex, materialTex, loc);
// Calculate reflect direction
vec3 camDir = normalize(pixelVal.pos - camPos);
vec3 reflected = reflect(camDir, pixelVal.normal);
// Create ray
RayIn ray;
ray.setDir(reflected);
ray.setOrigin(pixelVal.pos);
ray.setMinDist(0.0001f);
ray.setMaxDist(FLT_MAX);
// Write ray to array
uint32_t id = loc.y * res.x + loc.x;
raysOut[id] = ray;
}
// Kernel cpu interfaces
// ------------------------------------------------------------------------------------------------
void launchCreateReflectRaysKernel(const CreateReflectRaysInput& input, RayIn* raysOut) noexcept
{
// Calculate number of threads and blocks to run
dim3 threadsPerBlock(8, 8);
dim3 numBlocks((input.res.x + threadsPerBlock.x - 1) / threadsPerBlock.x,
(input.res.y + threadsPerBlock.y - 1) / threadsPerBlock.y);
// Run cuda ray tracer kernel
createReflectRaysKernel<<<numBlocks, threadsPerBlock>>>(input.camPos, input.res,
input.posTex, input.normalTex,
input.albedoTex, input.materialTex,
raysOut);
CHECK_CUDA_ERROR(cudaGetLastError());
CHECK_CUDA_ERROR(cudaDeviceSynchronize());
}
} // namespace phe
|
1d03124d8a04088fe4d3ad611a335c464a1f2f40.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialSubSampling.cu"
#else
#include "../common.h"
void THNN_(SpatialSubSampling_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCTensor *weight,
THCTensor *bias,
int kW, int kH,
int dW, int dH)
{
real *weight_data = THCTensor_(data)(state, weight);
real *bias_data = THCTensor_(data)(state, bias);
real *output_data;
real *input_data;
int nInputPlane = THCTensor_(size)(state, weight, 0);
THCUNN_assertSameGPU(state, 4, input, output, weight, bias);
THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected");
if (input->nDimension == 3) {
long nInputCols = input->size[2];
long nInputRows = input->size[1];
long nOutputCols = (nInputCols - kW) / dW + 1;
long nOutputRows = (nInputRows - kH) / dH + 1;
THArgCheck(input->size[0] == nInputPlane, 2, "invalid number of input planes");
THArgCheck(nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size");
input = THCTensor_(newContiguous)(state, input);
input_data = THCTensor_(data)(state, input);
THCTensor_(resize3d)(state, output, nInputPlane, nOutputRows, nOutputCols);
output_data = THCTensor_(data)(state, output);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane,yblocks);
dim3 threads(32,8);
// run subsample kernel
hipLaunchKernelGGL(( subsample<real, accreal>) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state),
input_data, output_data, weight_data, bias_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
THCudaCheck(hipGetLastError());
} else {
long nInputCols = input->size[3];
long nInputRows = input->size[2];
long nbatch = input->size[0];
long nOutputCols = (nInputCols - kW) / dW + 1;
long nOutputRows = (nInputRows - kH) / dH + 1;
THArgCheck(input->size[1] == nInputPlane, 2, "invalid number of input planes");
THArgCheck(nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size");
input = THCTensor_(newContiguous)(state, input);
input_data = THCTensor_(data)(state, input);
THCTensor_(resize4d)(state, output, nbatch, nInputPlane, nOutputRows, nOutputCols);
output_data = THCTensor_(data)(state, output);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane*nbatch,yblocks);
dim3 threads(32,8);
// run subsample kernel
hipLaunchKernelGGL(( subsample<real, accreal>) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state),
input_data, output_data, weight_data, bias_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
THCudaCheck(hipGetLastError());
}
// clean
THCTensor_(free)(state, input);
}
void THNN_(SpatialSubSampling_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *weight,
int kW, int kH,
int dW, int dH)
{
THCUNN_assertSameGPU(state, 4, input, gradOutput, weight, gradInput);
int nInputPlane = THCTensor_(size)(state, weight, 0);
if (input->nDimension == 3) {
long nInputCols = input->size[2];
long nInputRows = input->size[1];
real *weight_data = THCTensor_(data)(state, weight);
real *gradOutput_data = THCTensor_(data)(state, gradOutput);
real *gradInput_data;
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
gradInput_data = THCTensor_(data)(state, gradInput);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane,yblocks);
dim3 threads(32,8);
// run updateGradInput kernel
if (kH <= dH && kW <= dW) {
hipLaunchKernelGGL(( subgradinput) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state),
gradInput_data, gradOutput_data, weight_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
} else {
hipLaunchKernelGGL(( subgradinputAtomic) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state),
gradInput_data, gradOutput_data, weight_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
}
THCudaCheck(hipGetLastError());
} else {
long nInputCols = input->size[3];
long nInputRows = input->size[2];
long nbatch = input->size[0];
real *weight_data = THCTensor_(data)(state, weight);
real *gradOutput_data = THCTensor_(data)(state, gradOutput);
real *gradInput_data;
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
gradInput_data = THCTensor_(data)(state, gradInput);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane*nbatch,yblocks);
dim3 threads(32,8);
// run updateGradInput kernel
if (kH <= dH && kW <= dW) {
hipLaunchKernelGGL(( subgradinput) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state),
gradInput_data, gradOutput_data, weight_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
} else {
hipLaunchKernelGGL(( subgradinputAtomic) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state),
gradInput_data, gradOutput_data, weight_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
}
THCudaCheck(hipGetLastError());
}
}
void THNN_(SpatialSubSampling_accGradParameters)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradWeight,
THCTensor *gradBias,
int kW, int kH,
int dW, int dH,
float scale)
{
THCUNN_assertSameGPU(state, 4, input, gradOutput, gradWeight, gradBias);
int nInputPlane = THCTensor_(size)(state, gradWeight, 0);
if (input->nDimension == 3) {
long nInputCols = input->size[2];
long nInputRows = input->size[1];
real *gradWeight_data = THCTensor_(data)(state, gradWeight);
real *gradBias_data = THCTensor_(data)(state, gradBias);
real *gradOutput_data = THCTensor_(data)(state, gradOutput);
real *input_data;
input = THCTensor_(newContiguous)(state, input);
input_data = THCTensor_(data)(state, input);
// cuda blocks & threads:
dim3 blocks(nInputPlane);
dim3 threads(32,8);
// run gradweight kernel
hipLaunchKernelGGL(( subgradweight<real, accreal>) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state),
input_data, gradOutput_data, gradWeight_data, gradBias_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW, scale);
THCudaCheck(hipGetLastError());
} else {
long nInputCols = input->size[3];
long nInputRows = input->size[2];
long nbatch = input->size[0];
real *gradWeight_data = THCTensor_(data)(state, gradWeight);
real *gradBias_data = THCTensor_(data)(state, gradBias);
real *gradOutput_data = THCTensor_(data)(state, gradOutput);
real *input_data;
input = THCTensor_(newContiguous)(state, input);
input_data = THCTensor_(data)(state, input);
// cuda blocks & threads:
dim3 blocks(nInputPlane);
dim3 threads(32,8);
// run gradweight kernel
long sl;
for (sl=0; sl<nbatch; sl++) {
hipLaunchKernelGGL(( subgradweight<real, accreal>) , dim3(blocks), dim3(threads), 0, THCState_getCurrentStream(state),
input_data + sl*input->stride[0],
gradOutput_data + sl*gradOutput->stride[0],
gradWeight_data, gradBias_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW, scale);
}
THCudaCheck(hipGetLastError());
}
// clean
THCTensor_(free)(state, input);
}
#endif
|
1d03124d8a04088fe4d3ad611a335c464a1f2f40.cu
|
#ifndef THC_GENERIC_FILE
#define THC_GENERIC_FILE "generic/SpatialSubSampling.cu"
#else
#include "../common.h"
void THNN_(SpatialSubSampling_updateOutput)(
THCState *state,
THCTensor *input,
THCTensor *output,
THCTensor *weight,
THCTensor *bias,
int kW, int kH,
int dW, int dH)
{
real *weight_data = THCTensor_(data)(state, weight);
real *bias_data = THCTensor_(data)(state, bias);
real *output_data;
real *input_data;
int nInputPlane = THCTensor_(size)(state, weight, 0);
THCUNN_assertSameGPU(state, 4, input, output, weight, bias);
THArgCheck(input->nDimension == 3 || input->nDimension == 4, 2, "3D or 4D (batch) tensor expected");
if (input->nDimension == 3) {
long nInputCols = input->size[2];
long nInputRows = input->size[1];
long nOutputCols = (nInputCols - kW) / dW + 1;
long nOutputRows = (nInputRows - kH) / dH + 1;
THArgCheck(input->size[0] == nInputPlane, 2, "invalid number of input planes");
THArgCheck(nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size");
input = THCTensor_(newContiguous)(state, input);
input_data = THCTensor_(data)(state, input);
THCTensor_(resize3d)(state, output, nInputPlane, nOutputRows, nOutputCols);
output_data = THCTensor_(data)(state, output);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane,yblocks);
dim3 threads(32,8);
// run subsample kernel
subsample<real, accreal> <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (
input_data, output_data, weight_data, bias_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
THCudaCheck(cudaGetLastError());
} else {
long nInputCols = input->size[3];
long nInputRows = input->size[2];
long nbatch = input->size[0];
long nOutputCols = (nInputCols - kW) / dW + 1;
long nOutputRows = (nInputRows - kH) / dH + 1;
THArgCheck(input->size[1] == nInputPlane, 2, "invalid number of input planes");
THArgCheck(nInputCols >= kW && nInputRows >= kH, 2, "input image smaller than kernel size");
input = THCTensor_(newContiguous)(state, input);
input_data = THCTensor_(data)(state, input);
THCTensor_(resize4d)(state, output, nbatch, nInputPlane, nOutputRows, nOutputCols);
output_data = THCTensor_(data)(state, output);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane*nbatch,yblocks);
dim3 threads(32,8);
// run subsample kernel
subsample<real, accreal> <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (
input_data, output_data, weight_data, bias_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
THCudaCheck(cudaGetLastError());
}
// clean
THCTensor_(free)(state, input);
}
void THNN_(SpatialSubSampling_updateGradInput)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradInput,
THCTensor *weight,
int kW, int kH,
int dW, int dH)
{
THCUNN_assertSameGPU(state, 4, input, gradOutput, weight, gradInput);
int nInputPlane = THCTensor_(size)(state, weight, 0);
if (input->nDimension == 3) {
long nInputCols = input->size[2];
long nInputRows = input->size[1];
real *weight_data = THCTensor_(data)(state, weight);
real *gradOutput_data = THCTensor_(data)(state, gradOutput);
real *gradInput_data;
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
gradInput_data = THCTensor_(data)(state, gradInput);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane,yblocks);
dim3 threads(32,8);
// run updateGradInput kernel
if (kH <= dH && kW <= dW) {
subgradinput <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (
gradInput_data, gradOutput_data, weight_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
} else {
subgradinputAtomic <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (
gradInput_data, gradOutput_data, weight_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
}
THCudaCheck(cudaGetLastError());
} else {
long nInputCols = input->size[3];
long nInputRows = input->size[2];
long nbatch = input->size[0];
real *weight_data = THCTensor_(data)(state, weight);
real *gradOutput_data = THCTensor_(data)(state, gradOutput);
real *gradInput_data;
THCTensor_(resizeAs)(state, gradInput, input);
THCTensor_(zero)(state, gradInput);
gradInput_data = THCTensor_(data)(state, gradInput);
// cuda blocks & threads:
int yblocks = (int)(16L / nInputPlane);
yblocks = yblocks < 1 ? 1 : yblocks;
dim3 blocks(nInputPlane*nbatch,yblocks);
dim3 threads(32,8);
// run updateGradInput kernel
if (kH <= dH && kW <= dW) {
subgradinput <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (
gradInput_data, gradOutput_data, weight_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
} else {
subgradinputAtomic <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (
gradInput_data, gradOutput_data, weight_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW);
}
THCudaCheck(cudaGetLastError());
}
}
void THNN_(SpatialSubSampling_accGradParameters)(
THCState *state,
THCTensor *input,
THCTensor *gradOutput,
THCTensor *gradWeight,
THCTensor *gradBias,
int kW, int kH,
int dW, int dH,
float scale)
{
THCUNN_assertSameGPU(state, 4, input, gradOutput, gradWeight, gradBias);
int nInputPlane = THCTensor_(size)(state, gradWeight, 0);
if (input->nDimension == 3) {
long nInputCols = input->size[2];
long nInputRows = input->size[1];
real *gradWeight_data = THCTensor_(data)(state, gradWeight);
real *gradBias_data = THCTensor_(data)(state, gradBias);
real *gradOutput_data = THCTensor_(data)(state, gradOutput);
real *input_data;
input = THCTensor_(newContiguous)(state, input);
input_data = THCTensor_(data)(state, input);
// cuda blocks & threads:
dim3 blocks(nInputPlane);
dim3 threads(32,8);
// run gradweight kernel
subgradweight<real, accreal> <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (
input_data, gradOutput_data, gradWeight_data, gradBias_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW, scale);
THCudaCheck(cudaGetLastError());
} else {
long nInputCols = input->size[3];
long nInputRows = input->size[2];
long nbatch = input->size[0];
real *gradWeight_data = THCTensor_(data)(state, gradWeight);
real *gradBias_data = THCTensor_(data)(state, gradBias);
real *gradOutput_data = THCTensor_(data)(state, gradOutput);
real *input_data;
input = THCTensor_(newContiguous)(state, input);
input_data = THCTensor_(data)(state, input);
// cuda blocks & threads:
dim3 blocks(nInputPlane);
dim3 threads(32,8);
// run gradweight kernel
long sl;
for (sl=0; sl<nbatch; sl++) {
subgradweight<real, accreal> <<<blocks, threads, 0, THCState_getCurrentStream(state)>>> (
input_data + sl*input->stride[0],
gradOutput_data + sl*gradOutput->stride[0],
gradWeight_data, gradBias_data,
nInputPlane, nInputRows, nInputCols, kH, kW, dH, dW, scale);
}
THCudaCheck(cudaGetLastError());
}
// clean
THCTensor_(free)(state, input);
}
#endif
|
aaa66b8572982b731f3972d46e099653c2160c4f.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
extern "C" {
#include "regex.h"
#include "nfa.h"
}
#define MAX_FILE_SIZE 1 << 30
const int CHUNK = 1 << 15;
const int MAX_CONTEXT_SIZE = 500;
const int N_RESULTS = 150;
const int MAX_THREADS_PER_BLOCK = 1024;
typedef struct search_result {
char* context;
int line;
} res;
__global__ void regex_kernel(char** contents, res*** results, const re_t __restrict__ pattern, int file_no){
int res_idx = 0;
int line = 1;
/* Local variables that keep track of the start and end of the context */
/* TODO: out_before: needs to be initialized by going back until previous newline is found */
int out_before = -1;
uint8_t matched = 0;
/* Read the ith file, check for pattern and write to result */
char* start = &(contents[file_no][threadIdx.x * CHUNK]);
res* result_loc = &(results[file_no][threadIdx.x][0]);
char c;
int i;
for(i = 0; i < threadIdx.x * CHUNK && *(start-i) != '\n' && *(start-i) != '\0'; i++);
out_before = -1 * i - (i == 0);
for(i = 0; i < CHUNK && ((c = *(start + i)) != '\0'); i++){
line += (c == '\n');
if(matched && (c == '\n')){
/* Copy context from the previous newline character to the present character */
/* NOTE: Each line is only counted once - irrespective of the number of occurrences */
/* TODO: only MAX_CONTEXT_SIZE bytes of context are allocated - if a line exceeds it, add a bounds check and grow the buffer as necessary */
memcpy((result_loc + res_idx)->context, (void*)(start + out_before+1), i - out_before - 1);
(result_loc + res_idx)->line = line - 1;
res_idx += 1;
matched = 0;
}
/* Complicated way of avoiding control divergence to keep track of the previous newline occurrence */
out_before = out_before * (c != '\n') + i * (c == '\n');
/* Need to remember whether some valid match occurred on this line before - so || to not lose previous data */
matched += re_matchp(pattern, (start + i));
}
/* There might be some matched string still waiting to find its ending newline character */
if(matched){
for(; ((c = *(start + i)) != '\n') && (c != '\0'); i++);
memcpy((result_loc + res_idx)->context, (void*)(start + out_before+1), i - out_before - 1);
(result_loc + res_idx)->line = line - 1;
}
}
extern "C" void regex_match(char** file_names, file_info* info, int n_files, char* pattern){
/* Copying file related data to device memory */
char** device_contents;
char** temp = (char**) malloc(n_files * sizeof(char*));
hipMalloc(&device_contents, n_files * sizeof(char*));
hipStream_t streams[n_files];
for(int i = 0; i < n_files; i++){
/* hipMalloc(&temp[i], MAX_FILE_SIZE * sizeof(char)); */
/* hipMemcpy(temp[i], info[i].mmap, info[i].size, hipMemcpyHostToDevice); */
/* hipMemcpy(device_contents + i, &(temp[i]), sizeof(char*), hipMemcpyHostToDevice); */
hipStreamCreate(&streams[i]);
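/* Pin the mmapped file pages and stage them in page-locked host memory so the
   host-to-device copies below can run asynchronously on this file's stream */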
hipHostRegister(info[i].mmap, info[i].size, 0);
hipHostMalloc(&temp[i], info[i].size);
hipMemcpyAsync(temp[i], info[i].mmap, info[i].size, hipMemcpyHostToDevice, streams[i]);
hipMemcpyAsync(device_contents + i, &(temp[i]), sizeof(char*), hipMemcpyHostToDevice, streams[i]);
}
re_t re_pattern = re_compile(pattern);
/* Creating an array of array of array of results: */
res*** results;
int* threads_size = (int*) malloc(n_files * sizeof(int));
/* First pointer to index the file being grepped */
hipMallocManaged(&results, n_files * sizeof(res**));
for(int i = 0; i < n_files; i++){
/* Second malloc to index the thread doing the computation */
int n_chunks = info[i].size/CHUNK + 1;
hipMallocManaged(&(results[i]), n_chunks * sizeof(res*));
threads_size[i] = n_chunks;
for(int j = 0; j < n_chunks; j++){
/* Third to index the result that the thread found */
/* TODO: Fourth to index the dynamic array for that result which will have a next pointer */
hipMallocManaged(&(results[i][j]), N_RESULTS * sizeof(res));
for(int k = 0; k < N_RESULTS; k++){
hipMallocManaged(&(results[i][j][k].context), MAX_CONTEXT_SIZE);
}
}
}
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
for(int i = 0; i < n_files; i++){
if(threads_size[i] > MAX_THREADS_PER_BLOCK){
hipEventRecord(start, streams[0]);
int n_blocks = threads_size[i]/MAX_THREADS_PER_BLOCK + 1;
hipLaunchKernelGGL(( regex_kernel) , dim3(n_blocks), dim3(MAX_THREADS_PER_BLOCK), 0, streams[i] , device_contents, results, re_pattern, i);
hipEventRecord(stop, streams[0]);
}
else{
hipLaunchKernelGGL(( regex_kernel) , dim3(1), dim3(threads_size[i]), 0, streams[i] , device_contents, results, re_pattern, i);
}
/* Unpinning the memory */
hipHostUnregister(info[i].mmap);
}
hipEventSynchronize(stop);
float milliseconds = 0;
hipEventElapsedTime(&milliseconds, start, stop);
printf("Kernel: %f\n", milliseconds);
hipDeviceSynchronize();
res result;
for(int i = 0; i < n_files; i++){
for(int j = 0; j < threads_size[i]; j++){
for(int k = 0; k < N_RESULTS; k++){
result = results[i][j][k];
if(result.line != 0)
printf("%s\n", result.context);
}
}
}
hipFree(results);
hipFree(device_contents);
hipFree(re_pattern);
/* printf("Kernel: %f\n", (end.tv_sec - start.tv_sec) * 1000 + (end.tv_usec - start.tv_usec)/(double)1000); */
}
|
aaa66b8572982b731f3972d46e099653c2160c4f.cu
|
#include <stdio.h>
#include <cuda.h>
#include <sys/time.h>
extern "C" {
#include "regex.h"
#include "nfa.h"
}
#define MAX_FILE_SIZE 1 << 30
const int CHUNK = 1 << 15;
const int MAX_CONTEXT_SIZE = 500;
const int N_RESULTS = 150;
const int MAX_THREADS_PER_BLOCK = 1024;
typedef struct search_result {
char* context;
int line;
} res;
__global__ void regex_kernel(char** contents, res*** results, const re_t __restrict__ pattern, int file_no){
int res_idx = 0;
int line = 1;
/* Local variables that keep track of the start and end of the context */
/* TODO: out_before: needs to be initialized by going back until previous newline is found */
int out_before = -1;
uint8_t matched = 0;
/* Read the ith file, check for pattern and write to result */
char* start = &(contents[file_no][threadIdx.x * CHUNK]);
res* result_loc = &(results[file_no][threadIdx.x][0]);
char c;
int i;
for(i = 0; i < threadIdx.x * CHUNK && *(start-i) != '\n' && *(start-i) != '\0'; i++);
out_before = -1 * i - (i == 0);
for(i = 0; i < CHUNK && ((c = *(start + i)) != '\0'); i++){
line += (c == '\n');
if(matched && (c == '\n')){
/* Copy context from the previous newline character to the present character */
/* NOTE: Each line is only counted once - irrespective of the number of occurrences */
/* TODO: only MAX_CONTEXT_SIZE bytes of context are allocated - if a line exceeds it, add a bounds check and grow the buffer as necessary */
memcpy((result_loc + res_idx)->context, (void*)(start + out_before+1), i - out_before - 1);
(result_loc + res_idx)->line = line - 1;
res_idx += 1;
matched = 0;
}
/* Complicated way of avoiding control divergence to keep track of the previous newline occurrence */
out_before = out_before * (c != '\n') + i * (c == '\n');
/* Need to remember whether some valid match occurred on this line before - so || to not lose previous data */
matched += re_matchp(pattern, (start + i));
}
/* There might be some matched string still waiting to find its ending newline character */
if(matched){
for(; ((c = *(start + i)) != '\n') && (c != '\0'); i++);
memcpy((result_loc + res_idx)->context, (void*)(start + out_before+1), i - out_before - 1);
(result_loc + res_idx)->line = line - 1;
}
}
extern "C" void regex_match(char** file_names, file_info* info, int n_files, char* pattern){
/* Copying file related data to device memory */
char** device_contents;
char** temp = (char**) malloc(n_files * sizeof(char*));
cudaMalloc(&device_contents, n_files * sizeof(char*));
cudaStream_t streams[n_files];
for(int i = 0; i < n_files; i++){
/* cudaMalloc(&temp[i], MAX_FILE_SIZE * sizeof(char)); */
/* cudaMemcpy(temp[i], info[i].mmap, info[i].size, cudaMemcpyHostToDevice); */
/* cudaMemcpy(device_contents + i, &(temp[i]), sizeof(char*), cudaMemcpyHostToDevice); */
cudaStreamCreate(&streams[i]);
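/* Pin the mmapped file pages and stage them in page-locked host memory so the
   host-to-device copies below can run asynchronously on this file's stream */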
cudaHostRegister(info[i].mmap, info[i].size, 0);
cudaMallocHost(&temp[i], info[i].size);
cudaMemcpyAsync(temp[i], info[i].mmap, info[i].size, cudaMemcpyHostToDevice, streams[i]);
cudaMemcpyAsync(device_contents + i, &(temp[i]), sizeof(char*), cudaMemcpyHostToDevice, streams[i]);
}
re_t re_pattern = re_compile(pattern);
/* Creating an array of array of array of results: */
res*** results;
int* threads_size = (int*) malloc(n_files * sizeof(int));
/* First pointer to index the file being grepped */
cudaMallocManaged(&results, n_files * sizeof(res**));
for(int i = 0; i < n_files; i++){
/* Second malloc to index the thread doing the computation */
int n_chunks = info[i].size/CHUNK + 1;
cudaMallocManaged(&(results[i]), n_chunks * sizeof(res*));
threads_size[i] = n_chunks;
for(int j = 0; j < n_chunks; j++){
/* Third to index the result that the thread found */
/* TODO: Fourth to index the dynamic array for that result which will have a next pointer */
cudaMallocManaged(&(results[i][j]), N_RESULTS * sizeof(res));
for(int k = 0; k < N_RESULTS; k++){
cudaMallocManaged(&(results[i][j][k].context), MAX_CONTEXT_SIZE);
}
}
}
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
for(int i = 0; i < n_files; i++){
if(threads_size[i] > MAX_THREADS_PER_BLOCK){
cudaEventRecord(start, streams[0]);
int n_blocks = threads_size[i]/MAX_THREADS_PER_BLOCK + 1;
regex_kernel <<< n_blocks, MAX_THREADS_PER_BLOCK, 0, streams[i] >>> (device_contents, results, re_pattern, i);
cudaEventRecord(stop, streams[0]);
}
else{
regex_kernel <<< 1, threads_size[i], 0, streams[i] >>> (device_contents, results, re_pattern, i);
}
/* Unpinning the memory */
cudaHostUnregister(info[i].mmap);
}
cudaEventSynchronize(stop);
float milliseconds = 0;
cudaEventElapsedTime(&milliseconds, start, stop);
printf("Kernel: %f\n", milliseconds);
cudaDeviceSynchronize();
res result;
for(int i = 0; i < n_files; i++){
for(int j = 0; j < threads_size[i]; j++){
for(int k = 0; k < N_RESULTS; k++){
result = results[i][j][k];
if(result.line != 0)
printf("%s\n", result.context);
}
}
}
cudaFree(results);
cudaFree(device_contents);
cudaFree(re_pattern);
/* printf("Kernel: %f\n", (end.tv_sec - start.tv_sec) * 1000 + (end.tv_usec - start.tv_usec)/(double)1000); */
}
|
52b0cb8bcb07d6f0522804c79fdc6470e64b946a.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <sstream>
#include <chrono>
#include <SDL2/SDL.h>
#include "Parametres.hpp"
#include "Mandel.hpp"
#include "Events.hpp"
#include "Affichage.hpp"
#include "BigFloat.hpp"
using namespace std;
int main(int argc, char** argv)
{
Affichage display;
if (display.initSDLAffichage() < 0)
return 0;
/* Calcul de la fractale */
Events::initialDisplay(&display);
/* Affichage de la fractale */
display.dessin();
/* Boucle des evenements */
bool quit = false;
SDL_Event event;
while (!quit)
{
SDL_WaitEvent(&event);
bool buttonDown;
switch (event.type)
{
case SDL_MOUSEBUTTONDOWN:
switch (event.button.button)
{
case SDL_BUTTON_LEFT:
buttonDown = true;
Events::clicGauche(event, &display);
while (buttonDown)
{
SDL_PumpEvents();
if (SDL_GetMouseState(&(event.button.x), &(event.button.y)) & SDL_BUTTON(SDL_BUTTON_LEFT)) {
Events::clicGauche(event, &display);
}
else {
buttonDown = false;
}
}
break;
case SDL_BUTTON_RIGHT:
buttonDown = true;
Events::clicDroit(event, &display);
while (buttonDown)
{
SDL_PumpEvents();
if (SDL_GetMouseState(&(event.button.x), &(event.button.y)) & SDL_BUTTON(SDL_BUTTON_RIGHT)) {
Events::clicDroit(event, &display);
}
else {
buttonDown = false;
}
}
break;
default:
SDL_ShowSimpleMessageBox(0, "Mouse", "Some other button was pressed!", display.win);
break;
}
break;
case SDL_QUIT:
quit = true;
break;
}
}
return 0;
}
|
52b0cb8bcb07d6f0522804c79fdc6470e64b946a.cu
|
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
#include <iostream>
#include <sstream>
#include <chrono>
#include <SDL2/SDL.h>
#include "Parametres.hpp"
#include "Mandel.hpp"
#include "Events.hpp"
#include "Affichage.hpp"
#include "BigFloat.hpp"
using namespace std;
int main(int argc, char** argv)
{
Affichage display;
if (display.initSDLAffichage() < 0)
return 0;
/* Calcul de la fractale */
Events::initialDisplay(&display);
/* Affichage de la fractale */
display.dessin();
/* Boucle des evenements */
bool quit = false;
SDL_Event event;
while (!quit)
{
SDL_WaitEvent(&event);
bool buttonDown;
switch (event.type)
{
case SDL_MOUSEBUTTONDOWN:
switch (event.button.button)
{
case SDL_BUTTON_LEFT:
buttonDown = true;
Events::clicGauche(event, &display);
while (buttonDown)
{
SDL_PumpEvents();
if (SDL_GetMouseState(&(event.button.x), &(event.button.y)) & SDL_BUTTON(SDL_BUTTON_LEFT)) {
Events::clicGauche(event, &display);
}
else {
buttonDown = false;
}
}
break;
case SDL_BUTTON_RIGHT:
buttonDown = true;
Events::clicDroit(event, &display);
while (buttonDown)
{
SDL_PumpEvents();
if (SDL_GetMouseState(&(event.button.x), &(event.button.y)) & SDL_BUTTON(SDL_BUTTON_RIGHT)) {
Events::clicDroit(event, &display);
}
else {
buttonDown = false;
}
}
break;
default:
SDL_ShowSimpleMessageBox(0, "Mouse", "Some other button was pressed!", display.win);
break;
}
break;
case SDL_QUIT:
quit = true;
break;
}
}
return 0;
}
|
19d231d146790e3cfdfa3d6ab8498843010d0a73.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "device_launch_parameters.h"
#include <hip/hip_runtime.h>
#include <hiprand/hiprand_kernel.h>
#include "htpysupport.cuh"
#include "rungekutta.cuh"
// Runge-Kutta settings
#define sinit 1. // Initial step size
#define smax 100. // Maximum step size
#define smin 0.000001 // Minimum step size
#define RKtol 0.001 // Runge-Kutta tolerance
#define cautionSteps 2 // After a step rejection, stepsize is not allowed to increase again until after a few successful steps
// Other settings
#define NewtIts 10 // Max number of Newton iterations during the corrector step
#define NewtTol 0.00000001 // Desired accuracy for constraint satisfaction during the Newton corrector step
#define NewtResidual 1 // 0 to use norm(H), 1 to use norm(Ydiff)
#define NewtAttempts 3 // Number of times to try Newton's method with different patches
#define SharpIts 30 // Max number of Newton iterations for final sharpening
#define SharpTol 0.000000000001 // Desired accuracy for constraint satisfaction for final sharpening
#define SharpResidual 1 // 0 to use norm(H), 1 to use norm(Ydiff)
#define SharpAttempts 3 // Number of times to try sharpening with different patches
#define SharpTrigger 0.00001 // How close to t = 1 we must be to trigger final sharpening
#define NudgeToOneTrigger 0.00001 // If t is very close to 1, push it to one
#define maxSteps 2000 // max number of steps, counting each advancement of t
#define maxMinisteps 3000 // max number of ministeps, counting every call to either RK or Newton functions
// Constants
#define UpperRand 2. // upper limit of randomly generated numbers
#define LowerRand -2. // lower limit of randomly generated numbers
__device__ void TrackPath(Vect<nV> *spt, Vect<nP> *spm, Vect<nP> *fpm, Cplx *gamma,
hiprandState_t randState, Vect<nV + 2> *Yfinal, int *errorCode){
Vect<nV + 2> space[4];
Vect<nV + 2> *Vc = &space[0];
Vect<nV + 2> *Yn = &space[1];
Vect<nV + 2> *Vn = &space[2];
Vect<nV + 2> *YN = &space[3];
/*----------Initialization----------*/
// Initialize the tracked root Yc
Vect<nV + 2> *Yc = Yfinal;
#pragma unroll
for (int i = 0; i < nV; ++i){ Yc[0].vals[i] = spt[0].vals[i]; } // copy in start point values
Yc[0].vals[nV] = { 1, 0 }; // set homogeneous coordinate to 1
Yc[0].vals[nV + 1] = { 0, 0 }; // set t=0
Vect<nV + 2> u; // assign random patch values
#pragma unroll 1
for (int i = 0; i < nV + 2; ++i){
u.vals[i].x = (UpperRand - LowerRand)*hiprand_uniform_double(&randState) + LowerRand;
u.vals[i].y = (UpperRand - LowerRand)*hiprand_uniform_double(&randState) + LowerRand;
}
SwitchPatch(&u, Yc, Yc); // adjust Yc for the new patch
// assign a random velocity patch for initially computing Vc, the result is the same for any patch
#pragma unroll 1
for (int i = 0; i < nV + 2; ++i){
Vc[0].vals[i].x = (UpperRand - LowerRand)*hiprand_uniform_double(&randState) + LowerRand;
Vc[0].vals[i].y = (UpperRand - LowerRand)*hiprand_uniform_double(&randState) + LowerRand;
}
IVP(false, Yc, Vc, &u, spm, fpm, gamma, Vc); // compute initial velocity
Real s, err, scale, residual;
s = sinit; // initial step size
bool tstepQ = false; // track via arclength, not t
int streak = cautionSteps - 1; // streak of successful steps
int step = 0; // step number
int ministep = 0;
*errorCode = 0;
while (true){ // Begin main loop
if (step == maxSteps){ *errorCode = 1; return; }
while (true){ // Begin correction loop
while (true){ // Begin final steps loop
while (true){ // Begin predictor loop
// Perform Runge-Kutta prediction
DormandPrince65(s, RKtol, tstepQ, Yc, Vc, &u, spm, fpm, gamma, Yn, Vn, &err, &scale);
if (ministep++ == maxMinisteps){ *errorCode = 2; return; } // to prevent non-exiting loop
if (err <= RKtol){ break; } // If within tolerance, the prediction is accepted
// If it was unsuccessful, check for error condition
if (s <= smin){ *errorCode = 3; return; }
s *= scale;// If it was unsuccessful and there is no error condition, adjust the step size and try again
if (s < smin){ s = smin*0.9; }
streak = 0; // Mark the step rejection by setting the success streak to zero
} // End predictor loop
// If t is less than 0.99..., continue to Newton iterations
if (Yn[0].vals[nV + 1].x <= 1. - NudgeToOneTrigger){ break; }
// If t is very close to 1, head to sharpening
if (abs(1. - Yn[0].vals[nV + 1].x) < SharpTrigger){ break; }
// Set up a final prediction step to land on t = 1
tstepQ = true;
//s = 1 - Yc[0].vals[nV + 1].x - SharpTrigger*0.1;
s = 1 - Yc[0].vals[nV + 1].x;
} // End final steps loop
// If t is very close to 1, head to sharpening
if (abs(1. - Yn[0].vals[nV + 1].x) < SharpTrigger){ break; }
int i = 0; // Begin Newton patch switching loop
while (i < NewtAttempts){
// Perform Newton's method
*YN = *Yn; // set Newton start point
Newton(Yn[0].vals[nV + 1].x, Vn, &u, spm, fpm, gamma, NewtIts, NewtTol, NewtResidual, YN, &residual);
if (ministep++ == maxMinisteps){ *errorCode = 2; return; } // to prevent non-exiting loop
if (residual < NewtTol){ break; } // If Newton's method was successful, move on
// If not, generate a new projective patch, then try again
#pragma unroll 1
for (int j = 0; j < nV + 2; ++j){
u.vals[j].x = (UpperRand - LowerRand)*hiprand_uniform_double(&randState) + LowerRand;
u.vals[j].y = (UpperRand - LowerRand)*hiprand_uniform_double(&randState) + LowerRand;
}
SwitchPatch(&u, Yn, Yn); // update Yn for new patch
i++;
} // End Newton patch switching loop
if (residual < NewtTol){ break; } // If Newton's method was successful, move on
// If it was unsuccessful, check for error condition
if (s <= smin){ *errorCode = 4; return; }
s *= 0.5; // If it was unsuccessful and there is no error condition, then halve the stepsize and try again
if (s < smin) { s = smin*0.9; }
streak = 0; // Mark the step rejection by setting the success streak to zero
} // End correction loop
// If t is very close to 1, head to sharpening
if (abs(1. - Yn[0].vals[nV + 1].x) < SharpTrigger){ break; }
*Yc = *YN; // update position
*Vc = *Vn; // update velocity
streak++; // increment the streak of successes
if (streak < cautionSteps && scale > 1.){ scale = 1.; } // limit step scale if the streak of successes is too low
s *= scale; // update step size
if (s > smax){ s = smax; } // step size must obey limits
else if (s < smin){ s = smin*0.9; }
step++; // increment homotopy step number
} // End main loop
// Begin Sharpening
int i = 0;
while (i < SharpAttempts){ // Begin sharpening patch switching loop
// Perform final sharpening
*Yfinal = *Yn;
Newton(1., Vn, &u, spm, fpm, gamma, SharpIts, SharpTol, SharpResidual, Yfinal, &residual); // consider normalizing Vn
if (ministep++ == maxMinisteps){ *errorCode = 2; return; } // to prevent non-exiting loop
if (residual < SharpTol){ break; } // Check if sharpening was successful
// If not, generate a new projective patch, then try again
#pragma unroll 1
for (int j = 0; j < nV + 2; ++j){
u.vals[j].x = (UpperRand - LowerRand)*hiprand_uniform_double(&randState) + LowerRand;
u.vals[j].y = (UpperRand - LowerRand)*hiprand_uniform_double(&randState) + LowerRand;
}
SwitchPatch(&u, Yn, Yn); // update Yn for new patch
i++;
} // End sharpening patch switching loop
// Check if sharpening was successful
if (residual >= SharpTol){ *errorCode = 5; return; }
}
|
19d231d146790e3cfdfa3d6ab8498843010d0a73.cu
|
#include "device_launch_parameters.h"
#include <cuda_runtime.h>
#include <curand_kernel.h>
#include "htpysupport.cuh"
#include "rungekutta.cuh"
// Runge-Kutta settings
#define sinit 1. // Initial step size
#define smax 100. // Maximum step size
#define smin 0.000001 // Minimum step size
#define RKtol 0.001 // Runge-Kutta tolerance
#define cautionSteps 2 // After a step rejection, stepsize is not allowed to increase again until after a few successful steps
// Other settings
#define NewtIts 10 // Max number of Newton iterations during the corrector step
#define NewtTol 0.00000001 // Desired accuracy for constraint satisfaction during the Newton corrector step
#define NewtResidual 1 // 0 to use norm(H), 1 to use norm(Ydiff)
#define NewtAttempts 3 // Number of times to try Newton's method with different patches
#define SharpIts 30 // Max number of Newton iterations for final sharpening
#define SharpTol 0.000000000001 // Desired accuracy for constraint satisfaction for final sharpening
#define SharpResidual 1 // 0 to use norm(H), 1 to use norm(Ydiff)
#define SharpAttempts 3 // Number of times to try sharpening with different patches
#define SharpTrigger 0.00001 // How close to t = 1 we must be to trigger final sharpening
#define NudgeToOneTrigger 0.00001 // If t is very close to 1, push it to one
#define maxSteps 2000 // max number of steps, counting each advancement of t
#define maxMinisteps 3000 // max number of ministeps, counting every call to either RK or Newton functions
// Constants
#define UpperRand 2. // upper limit of randomly generated numbers
#define LowerRand -2. // lower limit of randomly generated numbers
__device__ void TrackPath(Vect<nV> *spt, Vect<nP> *spm, Vect<nP> *fpm, Cplx *gamma,
curandState randState, Vect<nV + 2> *Yfinal, int *errorCode){
Vect<nV + 2> space[4];
Vect<nV + 2> *Vc = &space[0];
Vect<nV + 2> *Yn = &space[1];
Vect<nV + 2> *Vn = &space[2];
Vect<nV + 2> *YN = &space[3];
/*----------Initialization----------*/
// Initialize the tracked root Yc
Vect<nV + 2> *Yc = Yfinal;
#pragma unroll
for (int i = 0; i < nV; ++i){ Yc[0].vals[i] = spt[0].vals[i]; } // copy in start point values
Yc[0].vals[nV] = { 1, 0 }; // set homogeneous coordinate to 1
Yc[0].vals[nV + 1] = { 0, 0 }; // set t=0
Vect<nV + 2> u; // assign random patch values
#pragma unroll 1
for (int i = 0; i < nV + 2; ++i){
u.vals[i].x = (UpperRand - LowerRand)*curand_uniform_double(&randState) + LowerRand;
u.vals[i].y = (UpperRand - LowerRand)*curand_uniform_double(&randState) + LowerRand;
}
SwitchPatch(&u, Yc, Yc); // adjust Yc for the new patch
// assign a random velocity patch for initially computing Vc, the result is the same for any patch
#pragma unroll 1
for (int i = 0; i < nV + 2; ++i){
Vc[0].vals[i].x = (UpperRand - LowerRand)*curand_uniform_double(&randState) + LowerRand;
Vc[0].vals[i].y = (UpperRand - LowerRand)*curand_uniform_double(&randState) + LowerRand;
}
IVP(false, Yc, Vc, &u, spm, fpm, gamma, Vc); // compute initial velocity
Real s, err, scale, residual;
s = sinit; // initial step size
bool tstepQ = false; // track via arclength, not t
int streak = cautionSteps - 1; // streak of successful steps
int step = 0; // step number
int ministep = 0;
*errorCode = 0;
while (true){ // Begin main loop
if (step == maxSteps){ *errorCode = 1; return; }
while (true){ // Begin correction loop
while (true){ // Begin final steps loop
while (true){ // Begin predictor loop
// Perform Runge-Kutta prediction
DormandPrince65(s, RKtol, tstepQ, Yc, Vc, &u, spm, fpm, gamma, Yn, Vn, &err, &scale);
if (ministep++ == maxMinisteps){ *errorCode = 2; return; } // to prevent non-exiting loop
if (err <= RKtol){ break; } // If within tolerance, the prediction is accepted
// If it was unsuccessful, check for error condition
if (s <= smin){ *errorCode = 3; return; }
s *= scale;// If it was unsuccessful and there is no error condition, adjust the step size and try again
if (s < smin){ s = smin*0.9; }
streak = 0; // Mark the step rejection by setting the success streak to zero
} // End predictor loop
// If t is less than 0.99..., continue to Newton iterations
if (Yn[0].vals[nV + 1].x <= 1. - NudgeToOneTrigger){ break; }
// If t is very close to 1, head to sharpening
if (abs(1. - Yn[0].vals[nV + 1].x) < SharpTrigger){ break; }
// Set up a final prediction step to land on t = 1
tstepQ = true;
//s = 1 - Yc[0].vals[nV + 1].x - SharpTrigger*0.1;
s = 1 - Yc[0].vals[nV + 1].x;
} // End final steps loop
// If t is very close to 1, head to sharpening
if (abs(1. - Yn[0].vals[nV + 1].x) < SharpTrigger){ break; }
int i = 0; // Begin Newton patch switching loop
while (i < NewtAttempts){
// Perform Newton's method
*YN = *Yn; // set Newton start point
Newton(Yn[0].vals[nV + 1].x, Vn, &u, spm, fpm, gamma, NewtIts, NewtTol, NewtResidual, YN, &residual);
if (ministep++ == maxMinisteps){ *errorCode = 2; return; } // to prevent non-exiting loop
if (residual < NewtTol){ break; } // If Newton's method was successful, move on
// If not, generate a new projective patch, then try again
#pragma unroll 1
for (int j = 0; j < nV + 2; ++j){
u.vals[j].x = (UpperRand - LowerRand)*curand_uniform_double(&randState) + LowerRand;
u.vals[j].y = (UpperRand - LowerRand)*curand_uniform_double(&randState) + LowerRand;
}
SwitchPatch(&u, Yn, Yn); // update Yn for new patch
i++;
} // End Newton patch switching loop
if (residual < NewtTol){ break; } // If Newton's method was successful, move on
// If it was unsuccessful, check for error condition
if (s <= smin){ *errorCode = 4; return; }
s *= 0.5; // If it was unsuccessful and there is no error condition, then halve the stepsize and try again
if (s < smin) { s = smin*0.9; }
streak = 0; // Mark the step rejection by setting the success streak to zero
} // End correction loop
// If t is very close to 1, head to sharpening
if (abs(1. - Yn[0].vals[nV + 1].x) < SharpTrigger){ break; }
*Yc = *YN; // update position
*Vc = *Vn; // update velocity
streak++; // increment the streak of successes
if (streak < cautionSteps && scale > 1.){ scale = 1.; } // limit step scale if the streak of successes is too low
s *= scale; // update step size
if (s > smax){ s = smax; } // step size must obey limits
else if (s < smin){ s = smin*0.9; }
step++; // increment homotopy step number
} // End main loop
// Begin Sharpening
int i = 0;
while (i < SharpAttempts){ // Begin sharpening patch switching loop
// Perform final sharpening
*Yfinal = *Yn;
Newton(1., Vn, &u, spm, fpm, gamma, SharpIts, SharpTol, SharpResidual, Yfinal, &residual); // consider normalizing Vn
if (ministep++ == maxMinisteps){ *errorCode = 2; return; } // to prevent non-exiting loop
if (residual < SharpTol){ break; } // Check if sharpening was successful
// If not, generate a new projective patch, then try again
#pragma unroll 1
for (int j = 0; j < nV + 2; ++j){
u.vals[j].x = (UpperRand - LowerRand)*curand_uniform_double(&randState) + LowerRand;
u.vals[j].y = (UpperRand - LowerRand)*curand_uniform_double(&randState) + LowerRand;
}
SwitchPatch(&u, Yn, Yn); // update Yn for new patch
i++;
} // End sharpening patch switching loop
// Check if sharpening was successful
if (residual >= SharpTol){ *errorCode = 5; return; }
}
|
c9b1ddccb39438c8d28625bbd1e2fce5e56202d6.hip
|
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <utils/tools.hpp>
#include <utils/common.hpp>
#include <processing/process_normalized.hpp>
#include <chrono/chronoGPU.hpp>
namespace process {
__device__ float4 normalizeRGB(float r, float g, float b, float o) {
float4 RGBColorNormalized = make_float4(r / 255.f, g / 255.f, b / 255.f, o / 255.f);
return RGBColorNormalized;
}
__global__ void
normalizePixel(const size_t imgWidth, const size_t imgHeight, float4 *output) {
uint32_t idx = (blockIdx.x * blockDim.x + threadIdx.x);
uint32_t idy = (blockIdx.y * blockDim.y + threadIdx.y);
uint32_t gridBlockDimX = gridDim.x * blockDim.x;
uint32_t gridBlockDimY = gridDim.y * blockDim.y;
float4 RGBcolorNormalized = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
if (idx < imgWidth && idy < imgHeight) {
for (uint32_t x = idy; x < imgHeight; x += gridBlockDimX) {
for (uint32_t y = idx; y < imgWidth; y += gridBlockDimY) {
uchar4 imgInput = tex2D<uchar4>(texInput, float(y), float(x));
RGBcolorNormalized = normalizeRGB(float(imgInput.x), float(imgInput.y), float(imgInput.z),
float(imgInput.w));
const uint32_t idOut = x * imgWidth + y;
output[idOut] = RGBcolorNormalized;
}
}
}
}
void processNormalizer(const std::vector<uchar4> &inputImg, // Input image
const uint imgWidth, const uint imgHeight, // Image size
std::vector<float4> &output) {
uchar4 *dev_inputU4 = nullptr;
float4 *dev_outputF4 = nullptr;
chrono::ChronoGPU chrGPU;
const size_t ImgSize = imgHeight * imgWidth;
size_t ImgBytes = ImgSize * sizeof(float4);
size_t width = imgWidth;
size_t height = imgHeight;
size_t widthBytes = width * sizeof(uchar4);
size_t offset = 0;
size_t pitch;
size_t spitch = widthBytes;
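// Configure the global texture reference: border addressing (out-of-range reads return zero),
// point (nearest-neighbour) filtering, and unnormalized integer coordinates.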
texInput.addressMode[0] = hipAddressModeBorder;
texInput.addressMode[1] = hipAddressModeBorder;
texInput.filterMode = hipFilterModePoint;
texInput.normalized = false;
hipChannelFormatDesc channelDesc = hipCreateChannelDesc<uchar4>();
const uint32_t blockSizeX = (imgWidth % 32 == 0 ? imgWidth / 32 : imgWidth / 32 + 1);
const uint32_t blockSizeY = (imgHeight % 32 == 0 ? imgHeight / 32 : imgHeight / 32 + 1);
/*********************************************************************************/
std::cout << "Allocating arrays: " << (ImgBytes >> 20) << " MB on Device" << std::endl;
chrGPU.start();
HANDLE_ERROR(hipMalloc((void **) &dev_outputF4, ImgBytes));
HANDLE_ERROR(hipMallocPitch((void **) &dev_inputU4, &pitch, widthBytes, height));
chrGPU.stop();
std::cout << " -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl << std::endl;
/*********************************************************************************/
/*********************************************************************************/
std::cout << "Copy data from host to devices (input arrays) " << (ImgBytes >> 20) << " MB on Device"
<< std::endl;
/*for (auto &it : inputImg){
std::cout << static_cast<int>(it.x) << " " << static_cast<int>(it.y) << " " << static_cast<int>(it.z) << std::endl;
}*/
chrGPU.start();
HANDLE_ERROR(hipMemcpy2D((void **) dev_inputU4, pitch, (void **) inputImg.data(), spitch, widthBytes, height,
hipMemcpyHostToDevice));
chrGPU.stop();
std::cout << "Bind 2D Texture with devices Input " << (ImgBytes >> 20) << " MB on Device" << std::endl;
chrGPU.start();
HANDLE_ERROR(hipBindTexture2D(&offset, texInput, dev_inputU4, channelDesc, width, height,
pitch)); // pitch instead ImgBytes
chrGPU.stop();
std::cout << " -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl << std::endl;
/*********************************************************************************/
/*********************************************************************************/
std::cout << "Process on GPU -- Kernel " << std::endl;
std::cout << "width : " << width << " height : " << height << std::endl;
chrGPU.start();
hipLaunchKernelGGL(( normalizePixel) , dim3(dim3(blockSizeX, blockSizeY)), dim3(dim3(32, 32)) , 0, 0, width, height, dev_outputF4);
chrGPU.stop();
std::cout << " -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl << std::endl;
hipDeviceSynchronize();
/*********************************************************************************/
std::cout << "Copy data from devices to host (output arrays) " << (ImgBytes >> 20) << " MB on Device"
<< std::endl;
//chrGPU.start();
HANDLE_ERROR(hipMemcpy(output.data(), dev_outputF4, ImgBytes, hipMemcpyDeviceToHost));
//chrGPU.stop();
std::cout << " -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl << std::endl;
/***********************************************outputArray**********************************/
/**FREE**AND**UNBIND**/
hipUnbindTexture(texInput);
hipFree(dev_inputU4);
hipFree(dev_outputF4);
}
}
|
c9b1ddccb39438c8d28625bbd1e2fce5e56202d6.cu
|
#include <iostream>
#include <utils/tools.hpp>
#include <utils/common.hpp>
#include <processing/process_normalized.hpp>
#include <chrono/chronoGPU.hpp>
namespace process {
__device__ float4 normalizeRGB(float r, float g, float b, float o) {
float4 RGBColorNormalized = make_float4(r / 255.f, g / 255.f, b / 255.f, o / 255.f);
return RGBColorNormalized;
}
__global__ void
normalizePixel(const size_t imgWidth, const size_t imgHeight, float4 *output) {
uint32_t idx = (blockIdx.x * blockDim.x + threadIdx.x);
uint32_t idy = (blockIdx.y * blockDim.y + threadIdx.y);
uint32_t gridBlockDimX = gridDim.x * blockDim.x;
uint32_t gridBlockDimY = gridDim.y * blockDim.y;
float4 RGBcolorNormalized = make_float4(0.0f, 0.0f, 0.0f, 0.0f);
if (idx < imgWidth && idy < imgHeight) {
for (uint32_t x = idy; x < imgHeight; x += gridBlockDimX) {
for (uint32_t y = idx; y < imgWidth; y += gridBlockDimY) {
uchar4 imgInput = tex2D<uchar4>(texInput, float(y), float(x));
RGBcolorNormalized = normalizeRGB(float(imgInput.x), float(imgInput.y), float(imgInput.z),
float(imgInput.w));
const uint32_t idOut = x * imgWidth + y;
output[idOut] = RGBcolorNormalized;
}
}
}
}
void processNormalizer(const std::vector<uchar4> &inputImg, // Input image
const uint imgWidth, const uint imgHeight, // Image size
std::vector<float4> &output) {
uchar4 *dev_inputU4 = nullptr;
float4 *dev_outputF4 = nullptr;
chrono::ChronoGPU chrGPU;
const size_t ImgSize = imgHeight * imgWidth;
size_t ImgBytes = ImgSize * sizeof(float4);
size_t width = imgWidth;
size_t height = imgHeight;
size_t widthBytes = width * sizeof(uchar4);
size_t offset = 0;
size_t pitch;
size_t spitch = widthBytes;
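// Configure the global texture reference: border addressing (out-of-range reads return zero),
// point (nearest-neighbour) filtering, and unnormalized integer coordinates.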
texInput.addressMode[0] = cudaAddressModeBorder;
texInput.addressMode[1] = cudaAddressModeBorder;
texInput.filterMode = cudaFilterModePoint;
texInput.normalized = false;
cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<uchar4>();
const uint32_t blockSizeX = (imgWidth % 32 == 0 ? imgWidth / 32 : imgWidth / 32 + 1);
const uint32_t blockSizeY = (imgHeight % 32 == 0 ? imgHeight / 32 : imgHeight / 32 + 1);
/*********************************************************************************/
std::cout << "Allocating arrays: " << (ImgBytes >> 20) << " MB on Device" << std::endl;
chrGPU.start();
HANDLE_ERROR(cudaMalloc((void **) &dev_outputF4, ImgBytes));
HANDLE_ERROR(cudaMallocPitch((void **) &dev_inputU4, &pitch, widthBytes, height));
chrGPU.stop();
std::cout << " -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl << std::endl;
/*********************************************************************************/
/*********************************************************************************/
std::cout << "Copy data from host to devices (input arrays) " << (ImgBytes >> 20) << " MB on Device"
<< std::endl;
/*for (auto &it : inputImg){
std::cout << static_cast<int>(it.x) << " " << static_cast<int>(it.y) << " " << static_cast<int>(it.z) << std::endl;
}*/
chrGPU.start();
HANDLE_ERROR(cudaMemcpy2D((void **) dev_inputU4, pitch, (void **) inputImg.data(), spitch, widthBytes, height,
cudaMemcpyHostToDevice));
chrGPU.stop();
std::cout << "Bind 2D Texture with devices Input " << (ImgBytes >> 20) << " MB on Device" << std::endl;
chrGPU.start();
HANDLE_ERROR(cudaBindTexture2D(&offset, texInput, dev_inputU4, channelDesc, width, height,
pitch)); // pitch instead ImgBytes
chrGPU.stop();
std::cout << " -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl << std::endl;
/*********************************************************************************/
/*********************************************************************************/
std::cout << "Process on GPU -- Kernel " << std::endl;
std::cout << "width : " << width << " height : " << height << std::endl;
chrGPU.start();
normalizePixel <<< dim3(blockSizeX, blockSizeY), dim3(32, 32) >>>(width, height, dev_outputF4);
chrGPU.stop();
std::cout << " -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl << std::endl;
cudaDeviceSynchronize();
/*********************************************************************************/
std::cout << "Copy data from devices to host (output arrays) " << (ImgBytes >> 20) << " MB on Device"
<< std::endl;
  chrGPU.start();
  HANDLE_ERROR(cudaMemcpy(output.data(), dev_outputF4, ImgBytes, cudaMemcpyDeviceToHost));
  chrGPU.stop();
  std::cout << " -> Done : " << chrGPU.elapsedTime() << " ms" << std::endl << std::endl;
/***********************************************outputArray**********************************/
/**FREE**AND**UNBIND**/
cudaUnbindTexture(texInput);
cudaFree(dev_inputU4);
cudaFree(dev_outputF4);
}
}
|
26e2ec9560ebb9fbf5189c3adfcc8248f1ed8124.hip
|
// !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2015-2022 by XGBoost Contributors
* \file regression_obj.cu
* \brief Definition of single-value regression and classification objectives.
* \author Tianqi Chen, Kailong Chen
*/
#include <dmlc/omp.h>
#include <xgboost/logging.h>
#include <xgboost/objective.h>
#include <xgboost/tree_model.h>
#include <cmath>
#include <memory>
#include <vector>
#include "../common/common.h"
#include "../common/linalg_op.h"
#include "../common/pseudo_huber.h"
#include "../common/threading_utils.h"
#include "../common/transform.h"
#include "./regression_loss.h"
#include "adaptive.h"
#include "xgboost/base.h"
#include "xgboost/data.h"
#include "xgboost/generic_parameters.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/json.h"
#include "xgboost/linalg.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#if defined(XGBOOST_USE_CUDA)
#include "../common/device_helpers.cuh"
#include "../common/linalg_op.cuh"
#endif // defined(XGBOOST_USE_CUDA)
namespace xgboost {
namespace obj {
namespace {
void CheckRegInputs(MetaInfo const& info, HostDeviceVector<bst_float> const& preds) {
CHECK_EQ(info.labels.Shape(0), info.num_row_) << "Invalid shape of labels.";
CHECK_EQ(info.labels.Size(), preds.Size()) << "Invalid shape of labels.";
if (!info.weights_.Empty()) {
CHECK_EQ(info.weights_.Size(), info.num_row_)
<< "Number of weights should be equal to number of data points.";
}
}
} // anonymous namespace
#if defined(XGBOOST_USE_CUDA)
DMLC_REGISTRY_FILE_TAG(regression_obj_gpu);
#endif // defined(XGBOOST_USE_CUDA)
struct RegLossParam : public XGBoostParameter<RegLossParam> {
float scale_pos_weight;
// declare parameters
DMLC_DECLARE_PARAMETER(RegLossParam) {
DMLC_DECLARE_FIELD(scale_pos_weight).set_default(1.0f).set_lower_bound(0.0f)
.describe("Scale the weight of positive examples by this factor");
}
};
template<typename Loss>
class RegLossObj : public ObjFunction {
protected:
HostDeviceVector<float> additional_input_;
public:
// 0 - label_correct flag, 1 - scale_pos_weight, 2 - is_null_weight
RegLossObj(): additional_input_(3) {}
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.UpdateAllowUnknown(args);
}
ObjInfo Task() const override { return Loss::Info(); }
uint32_t Targets(MetaInfo const& info) const override {
// Multi-target regression.
    return std::max(static_cast<size_t>(1), info.labels.Shape(1));
}
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair>* out_gpair) override {
CheckRegInputs(info, preds);
size_t const ndata = preds.Size();
out_gpair->Resize(ndata);
auto device = ctx_->gpu_id;
additional_input_.HostVector().begin()[0] = 1; // Fill the label_correct flag
bool is_null_weight = info.weights_.Size() == 0;
auto scale_pos_weight = param_.scale_pos_weight;
additional_input_.HostVector().begin()[1] = scale_pos_weight;
additional_input_.HostVector().begin()[2] = is_null_weight;
const size_t nthreads = ctx_->Threads();
bool on_device = device >= 0;
    // On CPU we run the transformation with each thread processing a contiguous block of data
// for better performance.
    const size_t n_data_blocks = std::max(static_cast<size_t>(1), (on_device ? ndata : nthreads));
const size_t block_size = ndata / n_data_blocks + !!(ndata % n_data_blocks);
    auto const n_targets = std::max(info.labels.Shape(1), static_cast<size_t>(1));
common::Transform<>::Init(
[block_size, ndata, n_targets] XGBOOST_DEVICE(
size_t data_block_idx, common::Span<float> _additional_input,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
const bst_float* preds_ptr = _preds.data();
const bst_float* labels_ptr = _labels.data();
const bst_float* weights_ptr = _weights.data();
GradientPair* out_gpair_ptr = _out_gpair.data();
const size_t begin = data_block_idx*block_size;
          const size_t end = std::min(ndata, begin + block_size);
const float _scale_pos_weight = _additional_input[1];
const bool _is_null_weight = _additional_input[2];
for (size_t idx = begin; idx < end; ++idx) {
bst_float p = Loss::PredTransform(preds_ptr[idx]);
bst_float w = _is_null_weight ? 1.0f : weights_ptr[idx / n_targets];
bst_float label = labels_ptr[idx];
if (label == 1.0f) {
w *= _scale_pos_weight;
}
if (!Loss::CheckLabel(label)) {
// If there is an incorrect label, the host code will know.
_additional_input[0] = 0;
}
out_gpair_ptr[idx] = GradientPair(Loss::FirstOrderGradient(p, label) * w,
Loss::SecondOrderGradient(p, label) * w);
}
},
common::Range{0, static_cast<int64_t>(n_data_blocks)}, nthreads, device)
.Eval(&additional_input_, out_gpair, &preds, info.labels.Data(),
&info.weights_);
auto const flag = additional_input_.HostVector().begin()[0];
if (flag == 0) {
LOG(FATAL) << Loss::LabelErrorMsg();
}
}
public:
const char* DefaultEvalMetric() const override {
return Loss::DefaultEvalMetric();
}
void PredTransform(HostDeviceVector<float> *io_preds) const override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<float> _preds) {
_preds[_idx] = Loss::PredTransform(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(),
io_preds->DeviceIdx())
.Eval(io_preds);
}
float ProbToMargin(float base_score) const override {
return Loss::ProbToMargin(base_score);
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String(Loss::Name());
out["reg_loss_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
FromJson(in["reg_loss_param"], ¶m_);
}
protected:
RegLossParam param_;
};
// register the objective functions
DMLC_REGISTER_PARAMETER(RegLossParam);
XGBOOST_REGISTER_OBJECTIVE(SquaredLossRegression, LinearSquareLoss::Name())
.describe("Regression with squared error.")
.set_body([]() { return new RegLossObj<LinearSquareLoss>(); });
XGBOOST_REGISTER_OBJECTIVE(SquareLogError, SquaredLogError::Name())
.describe("Regression with root mean squared logarithmic error.")
.set_body([]() { return new RegLossObj<SquaredLogError>(); });
XGBOOST_REGISTER_OBJECTIVE(LogisticRegression, LogisticRegression::Name())
.describe("Logistic regression for probability regression task.")
.set_body([]() { return new RegLossObj<LogisticRegression>(); });
XGBOOST_REGISTER_OBJECTIVE(LogisticClassification, LogisticClassification::Name())
.describe("Logistic regression for binary classification task.")
.set_body([]() { return new RegLossObj<LogisticClassification>(); });
XGBOOST_REGISTER_OBJECTIVE(LogisticRaw, LogisticRaw::Name())
.describe("Logistic regression for classification, output score "
"before logistic transformation.")
.set_body([]() { return new RegLossObj<LogisticRaw>(); });
// Deprecated functions
XGBOOST_REGISTER_OBJECTIVE(LinearRegression, "reg:linear")
.describe("Regression with squared error.")
.set_body([]() {
LOG(WARNING) << "reg:linear is now deprecated in favor of reg:squarederror.";
return new RegLossObj<LinearSquareLoss>(); });
// End deprecated
class PseudoHuberRegression : public ObjFunction {
PesudoHuberParam param_;
public:
void Configure(Args const& args) override { param_.UpdateAllowUnknown(args); }
ObjInfo Task() const override { return ObjInfo::kRegression; }
uint32_t Targets(MetaInfo const& info) const override {
    return std::max(static_cast<size_t>(1), info.labels.Shape(1));
}
void GetGradient(HostDeviceVector<bst_float> const& preds, const MetaInfo& info, int /*iter*/,
HostDeviceVector<GradientPair>* out_gpair) override {
CheckRegInputs(info, preds);
auto slope = param_.huber_slope;
CHECK_NE(slope, 0.0) << "slope for pseudo huber cannot be 0.";
auto labels = info.labels.View(ctx_->gpu_id);
out_gpair->SetDevice(ctx_->gpu_id);
out_gpair->Resize(info.labels.Size());
auto gpair = linalg::MakeVec(out_gpair);
preds.SetDevice(ctx_->gpu_id);
auto predt = linalg::MakeVec(&preds);
info.weights_.SetDevice(ctx_->gpu_id);
common::OptionalWeights weight{ctx_->IsCPU() ? info.weights_.ConstHostSpan()
: info.weights_.ConstDeviceSpan()};
linalg::ElementWiseKernel(ctx_, labels, [=] XGBOOST_DEVICE(size_t i, float const y) mutable {
auto sample_id = std::get<0>(linalg::UnravelIndex(i, labels.Shape()));
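      // Pseudo-Huber loss L(z) = slope^2 * (sqrt(1 + (z/slope)^2) - 1) with z = predt - y,
      // so grad = z / sqrt(1 + (z/slope)^2) and hess = (1 + (z/slope)^2)^(-3/2).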
const float z = predt(i) - y;
const float scale_sqrt = std::sqrt(1 + common::Sqr(z) / common::Sqr(slope));
float grad = z / scale_sqrt;
auto scale = common::Sqr(slope) + common::Sqr(z);
float hess = common::Sqr(slope) / (scale * scale_sqrt);
auto w = weight[sample_id];
gpair(i) = {grad * w, hess * w};
});
}
const char* DefaultEvalMetric() const override { return "mphe"; }
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("reg:pseudohubererror");
out["pseudo_huber_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
if (config.find("pseudo_huber_param") == config.cend()) {
// The parameter is added in 1.6.
return;
}
FromJson(in["pseudo_huber_param"], ¶m_);
}
};
XGBOOST_REGISTER_OBJECTIVE(PseudoHuberRegression, "reg:pseudohubererror")
.describe("Regression Pseudo Huber error.")
.set_body([]() { return new PseudoHuberRegression(); });
// declare parameter
struct PoissonRegressionParam : public XGBoostParameter<PoissonRegressionParam> {
float max_delta_step;
DMLC_DECLARE_PARAMETER(PoissonRegressionParam) {
DMLC_DECLARE_FIELD(max_delta_step).set_lower_bound(0.0f).set_default(0.7f)
.describe("Maximum delta step we allow each weight estimation to be." \
" This parameter is required for possion regression.");
}
};
// poisson regression for count
class PoissonRegression : public ObjFunction {
public:
// declare functions
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.UpdateAllowUnknown(args);
}
ObjInfo Task() const override { return ObjInfo::kRegression; }
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided";
size_t const ndata = preds.Size();
out_gpair->Resize(ndata);
auto device = ctx_->gpu_id;
label_correct_.Resize(1);
label_correct_.Fill(1);
bool is_null_weight = info.weights_.Size() == 0;
if (!is_null_weight) {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
bst_float max_delta_step = param_.max_delta_step;
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<int> _label_correct,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
bst_float p = _preds[_idx];
bst_float w = is_null_weight ? 1.0f : _weights[_idx];
bst_float y = _labels[_idx];
if (y < 0.0f) {
_label_correct[0] = 0;
}
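          // Poisson nll with log link: grad = exp(p) - y; the hessian exp(p) is inflated by
          // exp(max_delta_step) to bound the resulting Newton step.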
_out_gpair[_idx] = GradientPair{(expf(p) - y) * w,
expf(p + max_delta_step) * w};
},
common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval(
&label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
for (auto const flag : label_correct_h) {
if (flag == 0) {
LOG(FATAL) << "PoissonRegression: label must be nonnegative";
}
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = expf(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(),
io_preds->DeviceIdx())
.Eval(io_preds);
}
void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {
PredTransform(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
    return std::log(base_score);
}
const char* DefaultEvalMetric() const override {
return "poisson-nloglik";
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("count:poisson");
out["poisson_regression_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
FromJson(in["poisson_regression_param"], ¶m_);
}
private:
PoissonRegressionParam param_;
HostDeviceVector<int> label_correct_;
};
// register the objective functions
DMLC_REGISTER_PARAMETER(PoissonRegressionParam);
XGBOOST_REGISTER_OBJECTIVE(PoissonRegression, "count:poisson")
.describe("Poisson regression for count data.")
.set_body([]() { return new PoissonRegression(); });
// cox regression for survival data (negative values mean they are censored)
class CoxRegression : public ObjFunction {
public:
void Configure(Args const&) override {}
ObjInfo Task() const override { return ObjInfo::kRegression; }
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided";
const auto& preds_h = preds.HostVector();
out_gpair->Resize(preds_h.size());
auto& gpair = out_gpair->HostVector();
const std::vector<size_t> &label_order = info.LabelAbsSort();
const omp_ulong ndata = static_cast<omp_ulong>(preds_h.size()); // NOLINT(*)
const bool is_null_weight = info.weights_.Size() == 0;
if (!is_null_weight) {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
// pre-compute a sum
double exp_p_sum = 0; // we use double because we might need the precision with large datasets
for (omp_ulong i = 0; i < ndata; ++i) {
      exp_p_sum += std::exp(preds_h[label_order[i]]);
}
// start calculating grad and hess
const auto& labels = info.labels.HostView();
double r_k = 0;
double s_k = 0;
double last_exp_p = 0.0;
double last_abs_y = 0.0;
double accumulated_sum = 0;
for (omp_ulong i = 0; i < ndata; ++i) { // NOLINT(*)
const size_t ind = label_order[i];
const double p = preds_h[ind];
      const double exp_p = std::exp(p);
const double w = info.GetWeight(ind);
const double y = labels(ind);
const double abs_y = std::abs(y);
// only update the denominator after we move forward in time (labels are sorted)
// this is Breslow's method for ties
accumulated_sum += last_exp_p;
if (last_abs_y < abs_y) {
exp_p_sum -= accumulated_sum;
accumulated_sum = 0;
} else {
CHECK(last_abs_y <= abs_y) << "CoxRegression: labels must be in sorted order, " <<
"MetaInfo::LabelArgsort failed!";
}
if (y > 0) {
r_k += 1.0/exp_p_sum;
s_k += 1.0/(exp_p_sum*exp_p_sum);
}
const double grad = exp_p*r_k - static_cast<bst_float>(y > 0);
const double hess = exp_p*r_k - exp_p*exp_p * s_k;
gpair.at(ind) = GradientPair(grad * w, hess * w);
last_abs_y = abs_y;
last_exp_p = exp_p;
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
std::vector<bst_float> &preds = io_preds->HostVector();
const long ndata = static_cast<long>(preds.size()); // NOLINT(*)
common::ParallelFor(ndata, ctx_->Threads(), [&](long j) { // NOLINT(*)
      preds[j] = std::exp(preds[j]);
});
}
void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {
PredTransform(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
    return std::log(base_score);
}
const char* DefaultEvalMetric() const override {
return "cox-nloglik";
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("survival:cox");
}
void LoadConfig(Json const&) override {}
};
// register the objective function
XGBOOST_REGISTER_OBJECTIVE(CoxRegression, "survival:cox")
.describe("Cox regression for censored survival data (negative labels are considered censored).")
.set_body([]() { return new CoxRegression(); });
// gamma regression
class GammaRegression : public ObjFunction {
public:
void Configure(Args const&) override {}
ObjInfo Task() const override { return ObjInfo::kRegression; }
void GetGradient(const HostDeviceVector<bst_float> &preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided";
const size_t ndata = preds.Size();
auto device = ctx_->gpu_id;
out_gpair->Resize(ndata);
label_correct_.Resize(1);
label_correct_.Fill(1);
const bool is_null_weight = info.weights_.Size() == 0;
if (!is_null_weight) {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<int> _label_correct,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
bst_float p = _preds[_idx];
bst_float w = is_null_weight ? 1.0f : _weights[_idx];
bst_float y = _labels[_idx];
if (y <= 0.0f) {
_label_correct[0] = 0;
}
_out_gpair[_idx] = GradientPair((1 - y / expf(p)) * w, y / expf(p) * w);
},
common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval(
&label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
for (auto const flag : label_correct_h) {
if (flag == 0) {
LOG(FATAL) << "GammaRegression: label must be positive.";
}
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = expf(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(),
io_preds->DeviceIdx())
.Eval(io_preds);
}
void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {
PredTransform(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
    return std::log(base_score);
}
const char* DefaultEvalMetric() const override {
return "gamma-nloglik";
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("reg:gamma");
}
void LoadConfig(Json const&) override {}
private:
HostDeviceVector<int> label_correct_;
};
// register the objective functions
XGBOOST_REGISTER_OBJECTIVE(GammaRegression, "reg:gamma")
.describe("Gamma regression for severity data.")
.set_body([]() { return new GammaRegression(); });
// declare parameter
struct TweedieRegressionParam : public XGBoostParameter<TweedieRegressionParam> {
float tweedie_variance_power;
DMLC_DECLARE_PARAMETER(TweedieRegressionParam) {
DMLC_DECLARE_FIELD(tweedie_variance_power).set_range(1.0f, 2.0f).set_default(1.5f)
.describe("Tweedie variance power. Must be between in range [1, 2).");
}
};
// tweedie regression
class TweedieRegression : public ObjFunction {
public:
// declare functions
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.UpdateAllowUnknown(args);
std::ostringstream os;
os << "tweedie-nloglik@" << param_.tweedie_variance_power;
metric_ = os.str();
}
ObjInfo Task() const override { return ObjInfo::kRegression; }
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided";
const size_t ndata = preds.Size();
out_gpair->Resize(ndata);
auto device = ctx_->gpu_id;
label_correct_.Resize(1);
label_correct_.Fill(1);
const bool is_null_weight = info.weights_.Size() == 0;
if (!is_null_weight) {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
const float rho = param_.tweedie_variance_power;
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<int> _label_correct,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
bst_float p = _preds[_idx];
bst_float w = is_null_weight ? 1.0f : _weights[_idx];
bst_float y = _labels[_idx];
if (y < 0.0f) {
_label_correct[0] = 0;
}
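          // Tweedie deviance with log link: grad = -y*exp((1-rho)*p) + exp((2-rho)*p);
          // hess below is its derivative with respect to p.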
bst_float grad = -y * expf((1 - rho) * p) + expf((2 - rho) * p);
bst_float hess =
-y * (1 - rho) * \
              std::exp((1 - rho) * p) + (2 - rho) * expf((2 - rho) * p);
_out_gpair[_idx] = GradientPair(grad * w, hess * w);
},
common::Range{0, static_cast<int64_t>(ndata), 1}, this->ctx_->Threads(), device)
.Eval(&label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
for (auto const flag : label_correct_h) {
if (flag == 0) {
LOG(FATAL) << "TweedieRegression: label must be nonnegative";
}
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = expf(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(),
io_preds->DeviceIdx())
.Eval(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
    return std::log(base_score);
}
const char* DefaultEvalMetric() const override {
return metric_.c_str();
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("reg:tweedie");
out["tweedie_regression_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
FromJson(in["tweedie_regression_param"], ¶m_);
}
private:
std::string metric_;
TweedieRegressionParam param_;
HostDeviceVector<int> label_correct_;
};
// register the objective functions
DMLC_REGISTER_PARAMETER(TweedieRegressionParam);
XGBOOST_REGISTER_OBJECTIVE(TweedieRegression, "reg:tweedie")
.describe("Tweedie regression for insurance data.")
.set_body([]() { return new TweedieRegression(); });
class MeanAbsoluteError : public ObjFunction {
public:
void Configure(Args const&) override {}
ObjInfo Task() const override { return {ObjInfo::kRegression, true, true}; }
void GetGradient(HostDeviceVector<bst_float> const& preds, const MetaInfo& info, int /*iter*/,
HostDeviceVector<GradientPair>* out_gpair) override {
CheckRegInputs(info, preds);
auto labels = info.labels.View(ctx_->gpu_id);
out_gpair->SetDevice(ctx_->gpu_id);
out_gpair->Resize(info.labels.Size());
auto gpair = linalg::MakeVec(out_gpair);
preds.SetDevice(ctx_->gpu_id);
auto predt = linalg::MakeVec(&preds);
info.weights_.SetDevice(ctx_->gpu_id);
common::OptionalWeights weight{ctx_->IsCPU() ? info.weights_.ConstHostSpan()
: info.weights_.ConstDeviceSpan()};
linalg::ElementWiseKernel(ctx_, labels, [=] XGBOOST_DEVICE(size_t i, float const y) mutable {
auto sign = [](auto x) {
return (x > static_cast<decltype(x)>(0)) - (x < static_cast<decltype(x)>(0));
};
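      // |z| has no curvature, so the hessian is just the sample weight; leaf values are
      // refined afterwards in UpdateTreeLeaf (called with alpha = 0.5 below).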
auto sample_id = std::get<0>(linalg::UnravelIndex(i, labels.Shape()));
auto grad = sign(predt(i) - y) * weight[i];
auto hess = weight[sample_id];
gpair(i) = GradientPair{grad, hess};
});
}
void UpdateTreeLeaf(HostDeviceVector<bst_node_t> const& position, MetaInfo const& info,
HostDeviceVector<float> const& prediction, RegTree* p_tree) const override {
if (ctx_->IsCPU()) {
auto const& h_position = position.ConstHostVector();
detail::UpdateTreeLeafHost(ctx_, h_position, info, prediction, 0.5, p_tree);
} else {
#if defined(XGBOOST_USE_CUDA)
position.SetDevice(ctx_->gpu_id);
auto d_position = position.ConstDeviceSpan();
detail::UpdateTreeLeafDevice(ctx_, d_position, info, prediction, 0.5, p_tree);
#else
common::AssertGPUSupport();
#endif // defined(XGBOOST_USE_CUDA)
}
}
const char* DefaultEvalMetric() const override { return "mae"; }
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("reg:absoluteerror");
}
void LoadConfig(Json const& in) override {
CHECK_EQ(StringView{get<String const>(in["name"])}, StringView{"reg:absoluteerror"});
}
};
XGBOOST_REGISTER_OBJECTIVE(MeanAbsoluteError, "reg:absoluteerror")
.describe("Mean absoluate error.")
.set_body([]() { return new MeanAbsoluteError(); });
} // namespace obj
} // namespace xgboost
|
26e2ec9560ebb9fbf5189c3adfcc8248f1ed8124.cu
|
/*!
* Copyright 2015-2022 by XGBoost Contributors
* \file regression_obj.cu
* \brief Definition of single-value regression and classification objectives.
* \author Tianqi Chen, Kailong Chen
*/
#include <dmlc/omp.h>
#include <xgboost/logging.h>
#include <xgboost/objective.h>
#include <xgboost/tree_model.h>
#include <cmath>
#include <memory>
#include <vector>
#include "../common/common.h"
#include "../common/linalg_op.h"
#include "../common/pseudo_huber.h"
#include "../common/threading_utils.h"
#include "../common/transform.h"
#include "./regression_loss.h"
#include "adaptive.h"
#include "xgboost/base.h"
#include "xgboost/data.h"
#include "xgboost/generic_parameters.h"
#include "xgboost/host_device_vector.h"
#include "xgboost/json.h"
#include "xgboost/linalg.h"
#include "xgboost/parameter.h"
#include "xgboost/span.h"
#if defined(XGBOOST_USE_CUDA)
#include "../common/device_helpers.cuh"
#include "../common/linalg_op.cuh"
#endif // defined(XGBOOST_USE_CUDA)
namespace xgboost {
namespace obj {
namespace {
void CheckRegInputs(MetaInfo const& info, HostDeviceVector<bst_float> const& preds) {
CHECK_EQ(info.labels.Shape(0), info.num_row_) << "Invalid shape of labels.";
CHECK_EQ(info.labels.Size(), preds.Size()) << "Invalid shape of labels.";
if (!info.weights_.Empty()) {
CHECK_EQ(info.weights_.Size(), info.num_row_)
<< "Number of weights should be equal to number of data points.";
}
}
} // anonymous namespace
#if defined(XGBOOST_USE_CUDA)
DMLC_REGISTRY_FILE_TAG(regression_obj_gpu);
#endif // defined(XGBOOST_USE_CUDA)
struct RegLossParam : public XGBoostParameter<RegLossParam> {
float scale_pos_weight;
// declare parameters
DMLC_DECLARE_PARAMETER(RegLossParam) {
DMLC_DECLARE_FIELD(scale_pos_weight).set_default(1.0f).set_lower_bound(0.0f)
.describe("Scale the weight of positive examples by this factor");
}
};
template<typename Loss>
class RegLossObj : public ObjFunction {
protected:
HostDeviceVector<float> additional_input_;
public:
// 0 - label_correct flag, 1 - scale_pos_weight, 2 - is_null_weight
RegLossObj(): additional_input_(3) {}
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.UpdateAllowUnknown(args);
}
ObjInfo Task() const override { return Loss::Info(); }
uint32_t Targets(MetaInfo const& info) const override {
// Multi-target regression.
return std::max(static_cast<size_t>(1), info.labels.Shape(1));
}
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair>* out_gpair) override {
CheckRegInputs(info, preds);
size_t const ndata = preds.Size();
out_gpair->Resize(ndata);
auto device = ctx_->gpu_id;
additional_input_.HostVector().begin()[0] = 1; // Fill the label_correct flag
bool is_null_weight = info.weights_.Size() == 0;
auto scale_pos_weight = param_.scale_pos_weight;
additional_input_.HostVector().begin()[1] = scale_pos_weight;
additional_input_.HostVector().begin()[2] = is_null_weight;
const size_t nthreads = ctx_->Threads();
bool on_device = device >= 0;
    // On CPU we run the transformation with each thread processing a contiguous block of data
// for better performance.
const size_t n_data_blocks = std::max(static_cast<size_t>(1), (on_device ? ndata : nthreads));
const size_t block_size = ndata / n_data_blocks + !!(ndata % n_data_blocks);
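    // CPU path: one contiguous block per thread; GPU path: n_data_blocks == ndata, so each "block" is a single element.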
auto const n_targets = std::max(info.labels.Shape(1), static_cast<size_t>(1));
common::Transform<>::Init(
[block_size, ndata, n_targets] XGBOOST_DEVICE(
size_t data_block_idx, common::Span<float> _additional_input,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
const bst_float* preds_ptr = _preds.data();
const bst_float* labels_ptr = _labels.data();
const bst_float* weights_ptr = _weights.data();
GradientPair* out_gpair_ptr = _out_gpair.data();
const size_t begin = data_block_idx*block_size;
const size_t end = std::min(ndata, begin + block_size);
const float _scale_pos_weight = _additional_input[1];
const bool _is_null_weight = _additional_input[2];
for (size_t idx = begin; idx < end; ++idx) {
bst_float p = Loss::PredTransform(preds_ptr[idx]);
bst_float w = _is_null_weight ? 1.0f : weights_ptr[idx / n_targets];
bst_float label = labels_ptr[idx];
if (label == 1.0f) {
w *= _scale_pos_weight;
}
if (!Loss::CheckLabel(label)) {
// If there is an incorrect label, the host code will know.
_additional_input[0] = 0;
}
out_gpair_ptr[idx] = GradientPair(Loss::FirstOrderGradient(p, label) * w,
Loss::SecondOrderGradient(p, label) * w);
}
},
common::Range{0, static_cast<int64_t>(n_data_blocks)}, nthreads, device)
.Eval(&additional_input_, out_gpair, &preds, info.labels.Data(),
&info.weights_);
auto const flag = additional_input_.HostVector().begin()[0];
if (flag == 0) {
LOG(FATAL) << Loss::LabelErrorMsg();
}
}
public:
const char* DefaultEvalMetric() const override {
return Loss::DefaultEvalMetric();
}
void PredTransform(HostDeviceVector<float> *io_preds) const override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<float> _preds) {
_preds[_idx] = Loss::PredTransform(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(),
io_preds->DeviceIdx())
.Eval(io_preds);
}
float ProbToMargin(float base_score) const override {
return Loss::ProbToMargin(base_score);
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String(Loss::Name());
out["reg_loss_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
FromJson(in["reg_loss_param"], ¶m_);
}
protected:
RegLossParam param_;
};
// register the objective functions
DMLC_REGISTER_PARAMETER(RegLossParam);
XGBOOST_REGISTER_OBJECTIVE(SquaredLossRegression, LinearSquareLoss::Name())
.describe("Regression with squared error.")
.set_body([]() { return new RegLossObj<LinearSquareLoss>(); });
XGBOOST_REGISTER_OBJECTIVE(SquareLogError, SquaredLogError::Name())
.describe("Regression with root mean squared logarithmic error.")
.set_body([]() { return new RegLossObj<SquaredLogError>(); });
XGBOOST_REGISTER_OBJECTIVE(LogisticRegression, LogisticRegression::Name())
.describe("Logistic regression for probability regression task.")
.set_body([]() { return new RegLossObj<LogisticRegression>(); });
XGBOOST_REGISTER_OBJECTIVE(LogisticClassification, LogisticClassification::Name())
.describe("Logistic regression for binary classification task.")
.set_body([]() { return new RegLossObj<LogisticClassification>(); });
XGBOOST_REGISTER_OBJECTIVE(LogisticRaw, LogisticRaw::Name())
.describe("Logistic regression for classification, output score "
"before logistic transformation.")
.set_body([]() { return new RegLossObj<LogisticRaw>(); });
// Deprecated functions
XGBOOST_REGISTER_OBJECTIVE(LinearRegression, "reg:linear")
.describe("Regression with squared error.")
.set_body([]() {
LOG(WARNING) << "reg:linear is now deprecated in favor of reg:squarederror.";
return new RegLossObj<LinearSquareLoss>(); });
// End deprecated
class PseudoHuberRegression : public ObjFunction {
PesudoHuberParam param_;
public:
void Configure(Args const& args) override { param_.UpdateAllowUnknown(args); }
ObjInfo Task() const override { return ObjInfo::kRegression; }
uint32_t Targets(MetaInfo const& info) const override {
return std::max(static_cast<size_t>(1), info.labels.Shape(1));
}
void GetGradient(HostDeviceVector<bst_float> const& preds, const MetaInfo& info, int /*iter*/,
HostDeviceVector<GradientPair>* out_gpair) override {
CheckRegInputs(info, preds);
auto slope = param_.huber_slope;
CHECK_NE(slope, 0.0) << "slope for pseudo huber cannot be 0.";
auto labels = info.labels.View(ctx_->gpu_id);
out_gpair->SetDevice(ctx_->gpu_id);
out_gpair->Resize(info.labels.Size());
auto gpair = linalg::MakeVec(out_gpair);
preds.SetDevice(ctx_->gpu_id);
auto predt = linalg::MakeVec(&preds);
info.weights_.SetDevice(ctx_->gpu_id);
common::OptionalWeights weight{ctx_->IsCPU() ? info.weights_.ConstHostSpan()
: info.weights_.ConstDeviceSpan()};
linalg::ElementWiseKernel(ctx_, labels, [=] XGBOOST_DEVICE(size_t i, float const y) mutable {
auto sample_id = std::get<0>(linalg::UnravelIndex(i, labels.Shape()));
const float z = predt(i) - y;
const float scale_sqrt = std::sqrt(1 + common::Sqr(z) / common::Sqr(slope));
float grad = z / scale_sqrt;
auto scale = common::Sqr(slope) + common::Sqr(z);
float hess = common::Sqr(slope) / (scale * scale_sqrt);
auto w = weight[sample_id];
gpair(i) = {grad * w, hess * w};
});
}
const char* DefaultEvalMetric() const override { return "mphe"; }
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("reg:pseudohubererror");
out["pseudo_huber_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
auto const& config = get<Object const>(in);
if (config.find("pseudo_huber_param") == config.cend()) {
// The parameter is added in 1.6.
return;
}
FromJson(in["pseudo_huber_param"], ¶m_);
}
};
XGBOOST_REGISTER_OBJECTIVE(PseudoHuberRegression, "reg:pseudohubererror")
.describe("Regression Pseudo Huber error.")
.set_body([]() { return new PseudoHuberRegression(); });
// declare parameter
struct PoissonRegressionParam : public XGBoostParameter<PoissonRegressionParam> {
float max_delta_step;
DMLC_DECLARE_PARAMETER(PoissonRegressionParam) {
DMLC_DECLARE_FIELD(max_delta_step).set_lower_bound(0.0f).set_default(0.7f)
.describe("Maximum delta step we allow each weight estimation to be." \
" This parameter is required for possion regression.");
}
};
// poisson regression for count
class PoissonRegression : public ObjFunction {
public:
// declare functions
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.UpdateAllowUnknown(args);
}
ObjInfo Task() const override { return ObjInfo::kRegression; }
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided";
size_t const ndata = preds.Size();
out_gpair->Resize(ndata);
auto device = ctx_->gpu_id;
label_correct_.Resize(1);
label_correct_.Fill(1);
bool is_null_weight = info.weights_.Size() == 0;
if (!is_null_weight) {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
bst_float max_delta_step = param_.max_delta_step;
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<int> _label_correct,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
bst_float p = _preds[_idx];
bst_float w = is_null_weight ? 1.0f : _weights[_idx];
bst_float y = _labels[_idx];
if (y < 0.0f) {
_label_correct[0] = 0;
}
_out_gpair[_idx] = GradientPair{(expf(p) - y) * w,
expf(p + max_delta_step) * w};
},
common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval(
&label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
for (auto const flag : label_correct_h) {
if (flag == 0) {
LOG(FATAL) << "PoissonRegression: label must be nonnegative";
}
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = expf(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(),
io_preds->DeviceIdx())
.Eval(io_preds);
}
void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {
PredTransform(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
return std::log(base_score);
}
const char* DefaultEvalMetric() const override {
return "poisson-nloglik";
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("count:poisson");
out["poisson_regression_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
FromJson(in["poisson_regression_param"], ¶m_);
}
private:
PoissonRegressionParam param_;
HostDeviceVector<int> label_correct_;
};
// register the objective functions
DMLC_REGISTER_PARAMETER(PoissonRegressionParam);
XGBOOST_REGISTER_OBJECTIVE(PoissonRegression, "count:poisson")
.describe("Poisson regression for count data.")
.set_body([]() { return new PoissonRegression(); });
// cox regression for survival data (negative values mean they are censored)
class CoxRegression : public ObjFunction {
public:
void Configure(Args const&) override {}
ObjInfo Task() const override { return ObjInfo::kRegression; }
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided";
const auto& preds_h = preds.HostVector();
out_gpair->Resize(preds_h.size());
auto& gpair = out_gpair->HostVector();
const std::vector<size_t> &label_order = info.LabelAbsSort();
const omp_ulong ndata = static_cast<omp_ulong>(preds_h.size()); // NOLINT(*)
const bool is_null_weight = info.weights_.Size() == 0;
if (!is_null_weight) {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
// pre-compute a sum
double exp_p_sum = 0; // we use double because we might need the precision with large datasets
for (omp_ulong i = 0; i < ndata; ++i) {
exp_p_sum += std::exp(preds_h[label_order[i]]);
}
// start calculating grad and hess
const auto& labels = info.labels.HostView();
double r_k = 0;
double s_k = 0;
double last_exp_p = 0.0;
double last_abs_y = 0.0;
double accumulated_sum = 0;
for (omp_ulong i = 0; i < ndata; ++i) { // NOLINT(*)
const size_t ind = label_order[i];
const double p = preds_h[ind];
const double exp_p = std::exp(p);
const double w = info.GetWeight(ind);
const double y = labels(ind);
const double abs_y = std::abs(y);
// only update the denominator after we move forward in time (labels are sorted)
// this is Breslow's method for ties
accumulated_sum += last_exp_p;
if (last_abs_y < abs_y) {
exp_p_sum -= accumulated_sum;
accumulated_sum = 0;
} else {
CHECK(last_abs_y <= abs_y) << "CoxRegression: labels must be in sorted order, " <<
"MetaInfo::LabelArgsort failed!";
}
if (y > 0) {
r_k += 1.0/exp_p_sum;
s_k += 1.0/(exp_p_sum*exp_p_sum);
}
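      // Breslow partial likelihood: grad_i = exp(p_i) * r_k - 1{event}, where r_k accumulates
      // 1 / (risk-set sum of exp(p)) over the events seen so far.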
const double grad = exp_p*r_k - static_cast<bst_float>(y > 0);
const double hess = exp_p*r_k - exp_p*exp_p * s_k;
gpair.at(ind) = GradientPair(grad * w, hess * w);
last_abs_y = abs_y;
last_exp_p = exp_p;
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
std::vector<bst_float> &preds = io_preds->HostVector();
const long ndata = static_cast<long>(preds.size()); // NOLINT(*)
common::ParallelFor(ndata, ctx_->Threads(), [&](long j) { // NOLINT(*)
preds[j] = std::exp(preds[j]);
});
}
void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {
PredTransform(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
return std::log(base_score);
}
const char* DefaultEvalMetric() const override {
return "cox-nloglik";
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("survival:cox");
}
void LoadConfig(Json const&) override {}
};
// register the objective function
XGBOOST_REGISTER_OBJECTIVE(CoxRegression, "survival:cox")
.describe("Cox regression for censored survival data (negative labels are considered censored).")
.set_body([]() { return new CoxRegression(); });
// gamma regression
class GammaRegression : public ObjFunction {
public:
void Configure(Args const&) override {}
ObjInfo Task() const override { return ObjInfo::kRegression; }
void GetGradient(const HostDeviceVector<bst_float> &preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided";
const size_t ndata = preds.Size();
auto device = ctx_->gpu_id;
out_gpair->Resize(ndata);
label_correct_.Resize(1);
label_correct_.Fill(1);
const bool is_null_weight = info.weights_.Size() == 0;
if (!is_null_weight) {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<int> _label_correct,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
bst_float p = _preds[_idx];
bst_float w = is_null_weight ? 1.0f : _weights[_idx];
bst_float y = _labels[_idx];
if (y <= 0.0f) {
_label_correct[0] = 0;
}
_out_gpair[_idx] = GradientPair((1 - y / expf(p)) * w, y / expf(p) * w);
},
common::Range{0, static_cast<int64_t>(ndata)}, this->ctx_->Threads(), device).Eval(
&label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
for (auto const flag : label_correct_h) {
if (flag == 0) {
LOG(FATAL) << "GammaRegression: label must be positive.";
}
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = expf(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(),
io_preds->DeviceIdx())
.Eval(io_preds);
}
void EvalTransform(HostDeviceVector<bst_float> *io_preds) override {
PredTransform(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
return std::log(base_score);
}
const char* DefaultEvalMetric() const override {
return "gamma-nloglik";
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("reg:gamma");
}
void LoadConfig(Json const&) override {}
private:
HostDeviceVector<int> label_correct_;
};
// register the objective functions
XGBOOST_REGISTER_OBJECTIVE(GammaRegression, "reg:gamma")
.describe("Gamma regression for severity data.")
.set_body([]() { return new GammaRegression(); });
// declare parameter
struct TweedieRegressionParam : public XGBoostParameter<TweedieRegressionParam> {
float tweedie_variance_power;
DMLC_DECLARE_PARAMETER(TweedieRegressionParam) {
DMLC_DECLARE_FIELD(tweedie_variance_power).set_range(1.0f, 2.0f).set_default(1.5f)
.describe("Tweedie variance power. Must be between in range [1, 2).");
}
};
// tweedie regression
class TweedieRegression : public ObjFunction {
public:
// declare functions
void Configure(const std::vector<std::pair<std::string, std::string> >& args) override {
param_.UpdateAllowUnknown(args);
std::ostringstream os;
os << "tweedie-nloglik@" << param_.tweedie_variance_power;
metric_ = os.str();
}
ObjInfo Task() const override { return ObjInfo::kRegression; }
void GetGradient(const HostDeviceVector<bst_float>& preds,
const MetaInfo &info, int,
HostDeviceVector<GradientPair> *out_gpair) override {
CHECK_NE(info.labels.Size(), 0U) << "label set cannot be empty";
CHECK_EQ(preds.Size(), info.labels.Size()) << "labels are not correctly provided";
const size_t ndata = preds.Size();
out_gpair->Resize(ndata);
auto device = ctx_->gpu_id;
label_correct_.Resize(1);
label_correct_.Fill(1);
const bool is_null_weight = info.weights_.Size() == 0;
if (!is_null_weight) {
CHECK_EQ(info.weights_.Size(), ndata)
<< "Number of weights should be equal to number of data points.";
}
const float rho = param_.tweedie_variance_power;
common::Transform<>::Init(
[=] XGBOOST_DEVICE(size_t _idx,
common::Span<int> _label_correct,
common::Span<GradientPair> _out_gpair,
common::Span<const bst_float> _preds,
common::Span<const bst_float> _labels,
common::Span<const bst_float> _weights) {
bst_float p = _preds[_idx];
bst_float w = is_null_weight ? 1.0f : _weights[_idx];
bst_float y = _labels[_idx];
if (y < 0.0f) {
_label_correct[0] = 0;
}
bst_float grad = -y * expf((1 - rho) * p) + expf((2 - rho) * p);
bst_float hess =
-y * (1 - rho) * \
std::exp((1 - rho) * p) + (2 - rho) * expf((2 - rho) * p);
_out_gpair[_idx] = GradientPair(grad * w, hess * w);
},
common::Range{0, static_cast<int64_t>(ndata), 1}, this->ctx_->Threads(), device)
.Eval(&label_correct_, out_gpair, &preds, info.labels.Data(), &info.weights_);
// copy "label correct" flags back to host
std::vector<int>& label_correct_h = label_correct_.HostVector();
for (auto const flag : label_correct_h) {
if (flag == 0) {
LOG(FATAL) << "TweedieRegression: label must be nonnegative";
}
}
}
void PredTransform(HostDeviceVector<bst_float> *io_preds) const override {
common::Transform<>::Init(
[] XGBOOST_DEVICE(size_t _idx, common::Span<bst_float> _preds) {
_preds[_idx] = expf(_preds[_idx]);
},
common::Range{0, static_cast<int64_t>(io_preds->Size())}, this->ctx_->Threads(),
io_preds->DeviceIdx())
.Eval(io_preds);
}
bst_float ProbToMargin(bst_float base_score) const override {
return std::log(base_score);
}
const char* DefaultEvalMetric() const override {
return metric_.c_str();
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("reg:tweedie");
out["tweedie_regression_param"] = ToJson(param_);
}
void LoadConfig(Json const& in) override {
FromJson(in["tweedie_regression_param"], ¶m_);
}
private:
std::string metric_;
TweedieRegressionParam param_;
HostDeviceVector<int> label_correct_;
};
// register the objective functions
DMLC_REGISTER_PARAMETER(TweedieRegressionParam);
XGBOOST_REGISTER_OBJECTIVE(TweedieRegression, "reg:tweedie")
.describe("Tweedie regression for insurance data.")
.set_body([]() { return new TweedieRegression(); });
class MeanAbsoluteError : public ObjFunction {
public:
void Configure(Args const&) override {}
ObjInfo Task() const override { return {ObjInfo::kRegression, true, true}; }
void GetGradient(HostDeviceVector<bst_float> const& preds, const MetaInfo& info, int /*iter*/,
HostDeviceVector<GradientPair>* out_gpair) override {
CheckRegInputs(info, preds);
auto labels = info.labels.View(ctx_->gpu_id);
out_gpair->SetDevice(ctx_->gpu_id);
out_gpair->Resize(info.labels.Size());
auto gpair = linalg::MakeVec(out_gpair);
preds.SetDevice(ctx_->gpu_id);
auto predt = linalg::MakeVec(&preds);
info.weights_.SetDevice(ctx_->gpu_id);
common::OptionalWeights weight{ctx_->IsCPU() ? info.weights_.ConstHostSpan()
: info.weights_.ConstDeviceSpan()};
linalg::ElementWiseKernel(ctx_, labels, [=] XGBOOST_DEVICE(size_t i, float const y) mutable {
auto sign = [](auto x) {
return (x > static_cast<decltype(x)>(0)) - (x < static_cast<decltype(x)>(0));
};
auto sample_id = std::get<0>(linalg::UnravelIndex(i, labels.Shape()));
auto grad = sign(predt(i) - y) * weight[i];
auto hess = weight[sample_id];
gpair(i) = GradientPair{grad, hess};
});
}
void UpdateTreeLeaf(HostDeviceVector<bst_node_t> const& position, MetaInfo const& info,
HostDeviceVector<float> const& prediction, RegTree* p_tree) const override {
if (ctx_->IsCPU()) {
auto const& h_position = position.ConstHostVector();
detail::UpdateTreeLeafHost(ctx_, h_position, info, prediction, 0.5, p_tree);
} else {
#if defined(XGBOOST_USE_CUDA)
position.SetDevice(ctx_->gpu_id);
auto d_position = position.ConstDeviceSpan();
detail::UpdateTreeLeafDevice(ctx_, d_position, info, prediction, 0.5, p_tree);
#else
common::AssertGPUSupport();
#endif // defined(XGBOOST_USE_CUDA)
}
}
const char* DefaultEvalMetric() const override { return "mae"; }
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String("reg:absoluteerror");
}
void LoadConfig(Json const& in) override {
CHECK_EQ(StringView{get<String const>(in["name"])}, StringView{"reg:absoluteerror"});
}
};
XGBOOST_REGISTER_OBJECTIVE(MeanAbsoluteError, "reg:absoluteerror")
.describe("Mean absoluate error.")
.set_body([]() { return new MeanAbsoluteError(); });
} // namespace obj
} // namespace xgboost
|
b2c5e209640d0e4d46c1ae931d9a79dfe31ef0c0.hip
|
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <unistd.h>
#include <sys/wait.h>
#include <sys/time.h>
/**
1.5[MB]
div == 4, size = * 48000
2.0[MB]
div == 8, size = * 32000
2.4[MB]
div == 8, size = * 37000
**/
__global__ void __add(float* a,float* b,int size,int div){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
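  // Each thread updates div elements: one from each of the div equal-length segments of the vectors.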
#pragma unroll
for(int i = 0 ; i < div ; i ++){
a[idx + (size*i)/div] += b[idx + (size*i)/div];
}
}
static float elapsed(struct timeval tv0,struct timeval tv1){
return (float)(tv1.tv_sec - tv0.tv_sec)
+ (float)(tv1.tv_usec - tv0.tv_usec)
* 0.000001f;
}
int main(){
struct timeval t0,t1;
gettimeofday(&t0,NULL);
float *h_a = NULL;
float *h_b = NULL;
float *d_a = NULL;
float *d_b = NULL;
int div = 4;
int threadNum = 1024;
unsigned int size = (threadNum*div) * 48000;
int blockNum = size/(threadNum*div);
printf("blockNum : %d\n",blockNum);
printf("threadNum : %d\n",threadNum);
printf("size : %d\n",size);
printf("vector size : %d\n",sizeof(float)*size);
int ite = 180;
hipMalloc((void**)&d_a,sizeof(float)*size);
hipMalloc((void**)&d_b,sizeof(float)*size);
/*
h_a = (float*)malloc(sizeof(float)*size);
h_b = (float*)malloc(sizeof(float)*size);
*/
hipHostMalloc((void**)&h_a,sizeof(float)*size,0);
hipHostMalloc((void**)&h_b,sizeof(float)*size,0);
for(int i = 0 ; i < size ; i ++){
h_a[i] = 0.0f;
h_b[i] = 1.0f;
}
dim3 threads(threadNum,1,1);
dim3 blocks(blockNum,1,1);
for(int i = 0 ; i < ite ; i ++){
hipMemcpy(d_a,h_a,sizeof(float)*size,hipMemcpyHostToDevice);
hipMemcpy(d_b,h_b,sizeof(float)*size,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( __add), dim3(blocks),dim3(threads), 0, 0, d_a,d_b,size,div);
hipDeviceSynchronize();
hipMemcpy(h_a,d_a,sizeof(float)*size,hipMemcpyDeviceToHost);
}
int pass = 1;
int firstFailedIndex = 0;
for(int i = 0 ; i < size ; i ++){
// printf("h_a[%d]:%f ",i,h_a[i]);
if(h_a[i] != ite){
firstFailedIndex = i;
pass = 0;
break;
}
}
if(pass){
printf("Result test PASS!\n");
}else{
printf("Result test Failed\n");
printf("h_a[%d] == %f\n",firstFailedIndex,h_a[firstFailedIndex]);
}
gettimeofday(&t1,NULL);
printf("TIME RESULT : %f(MEM SMALL)\n",elapsed(t0,t1));
return 0;
}
|
b2c5e209640d0e4d46c1ae931d9a79dfe31ef0c0.cu
|
#include <stdio.h>
#include <stdlib.h>
#include <cuda_runtime.h>
#include <unistd.h>
#include <sys/wait.h>
#include <sys/time.h>
/**
1.5[MB]
div == 4, size = * 48000
2.0[MB]
div == 8, size = * 32000
2.4[MB]
div == 8, size = * 37000
**/
__global__ void __add(float* a,float* b,int size,int div){
int idx = blockDim.x * blockIdx.x + threadIdx.x;
#pragma unroll
for(int i = 0 ; i < div ; i ++){
a[idx + (size*i)/div] += b[idx + (size*i)/div];
}
}
static float elapsed(struct timeval tv0,struct timeval tv1){
return (float)(tv1.tv_sec - tv0.tv_sec)
+ (float)(tv1.tv_usec - tv0.tv_usec)
* 0.000001f;
}
int main(){
struct timeval t0,t1;
gettimeofday(&t0,NULL);
float *h_a = NULL;
float *h_b = NULL;
float *d_a = NULL;
float *d_b = NULL;
int div = 4;
int threadNum = 1024;
unsigned int size = (threadNum*div) * 48000;
int blockNum = size/(threadNum*div);
printf("blockNum : %d\n",blockNum);
printf("threadNum : %d\n",threadNum);
printf("size : %d\n",size);
printf("vector size : %d\n",sizeof(float)*size);
int ite = 180;
cudaMalloc((void**)&d_a,sizeof(float)*size);
cudaMalloc((void**)&d_b,sizeof(float)*size);
/*
h_a = (float*)malloc(sizeof(float)*size);
h_b = (float*)malloc(sizeof(float)*size);
*/
cudaHostAlloc((void**)&h_a,sizeof(float)*size,0);
cudaHostAlloc((void**)&h_b,sizeof(float)*size,0);
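  // Page-locked (pinned) host buffers speed up the repeated host<->device copies in the loop below.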
for(int i = 0 ; i < size ; i ++){
h_a[i] = 0.0f;
h_b[i] = 1.0f;
}
dim3 threads(threadNum,1,1);
dim3 blocks(blockNum,1,1);
for(int i = 0 ; i < ite ; i ++){
cudaMemcpy(d_a,h_a,sizeof(float)*size,cudaMemcpyHostToDevice);
cudaMemcpy(d_b,h_b,sizeof(float)*size,cudaMemcpyHostToDevice);
__add<<<blocks,threads>>>(d_a,d_b,size,div);
cudaDeviceSynchronize();
cudaMemcpy(h_a,d_a,sizeof(float)*size,cudaMemcpyDeviceToHost);
}
int pass = 1;
int firstFailedIndex = 0;
for(int i = 0 ; i < size ; i ++){
// printf("h_a[%d]:%f ",i,h_a[i]);
if(h_a[i] != ite){
firstFailedIndex = i;
pass = 0;
break;
}
}
if(pass){
printf("Result test PASS!\n");
}else{
printf("Result test Failed\n");
printf("h_a[%d] == %f\n",firstFailedIndex,h_a[firstFailedIndex]);
}
gettimeofday(&t1,NULL);
printf("TIME RESULT : %f(MEM SMALL)\n",elapsed(t0,t1));
return 0;
}
|