14 Commits

7 changed files with 335 additions and 115 deletions

View File

@@ -103,6 +103,11 @@ void singlesContribution
   // -- TIJK
   // , DataPtr<F> Tijk
   , DataFieldType<F>* Tijk_
+#if defined(HAVE_CUDA)
+  // -- tmp buffers
+  , DataFieldType<F>* _t_buffer
+  , DataFieldType<F>* _vhhh
+#endif
   );
 // Doubles contribution:1 ends here
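
Note: the temporary buffers move from locals allocated inside doublesContribution to caller-owned parameters, so the device allocation happens once in Atrip::run instead of on every tuple (see the Equations.cxx and Atrip.cxx hunks below). A sketch of the resulting call pattern in the CUDA build; names are those introduced by this patch, the loop body is elided:

    // Sketch only: allocate once, outside the tuple loop ...
    DataFieldType<F> *_t_buffer, *_vhhh;
    cuMemAlloc((CUdeviceptr*)&_t_buffer, No*No*No * sizeof(DataFieldType<F>));
    cuMemAlloc((CUdeviceptr*)&_vhhh,     No*No*No * sizeof(DataFieldType<F>));
    for (auto const& abc : tuples) {   // tuples: this rank's abc list
      doublesContribution<F>(abc, No, Nv,
                             /* ...slice pointers... */,
                             Tijk, _t_buffer, _vhhh);
    }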

View File

@@ -24,20 +24,13 @@ namespace acc {
 // cuda kernels
-template <typename F>
-__MAYBE_GLOBAL__
-void zeroing(F* a, size_t n) {
-  F zero = {0};
-  for (size_t i = 0; i < n; i++) {
-    a[i] = zero;
-  }
-}
 ////
 template <typename F>
 __MAYBE_DEVICE__ __MAYBE_HOST__ __INLINE__
 F maybeConjugateScalar(const F &a) { return a; }
+// TODO: instantiate for std::complex<double>
 #if defined(HAVE_CUDA)
 template <>
 __MAYBE_DEVICE__ __MAYBE_HOST__ __INLINE__

View File

@@ -352,7 +352,7 @@ Info info;
 // [[file:~/cuda/atrip/atrip.org::*Attributes][Attributes:2]]
 DataPtr<F> data;
-#if defined(HAVE_CUDA)
+#if defined(HAVE_CUDA) && !defined(ATRIP_SOURCES_IN_GPU)
 F* mpi_data;
 #endif
 // Attributes:2 ends here
@@ -456,7 +456,7 @@ void unwrapAndMarkReady() {
   if (errorCode != MPI_SUCCESS)
     throw "Atrip: Unexpected error MPI ERROR";
-#if defined(HAVE_CUDA)
+#if defined(HAVE_CUDA) && !defined(ATRIP_SOURCES_IN_GPU)
   // copy the retrieved mpi data to the device
   WITH_CHRONO("cuda:memcpy",
     _CHECK_CUDA_SUCCESS("copying mpi data to device",
@@ -488,7 +488,7 @@ void unwrapAndMarkReady() {
 Slice(size_t size_)
   : info({})
   , data(DataNullPtr)
-#if defined(HAVE_CUDA)
+#if defined(HAVE_CUDA) && !defined(ATRIP_SOURCES_IN_GPU)
   , mpi_data(nullptr)
 #endif
   , size(size_)

View File

@@ -200,7 +200,7 @@ template <typename F=double>
     : Slice<F>::Fetch
     ;
 if (blank.info.state == Slice<F>::SelfSufficient) {
-#if defined(HAVE_CUDA)
+#if defined(HAVE_CUDA) && !defined(ATRIP_SOURCES_IN_GPU)
   const size_t _size = sizeof(F) * sliceSize;
   // TODO: this is code duplication with downstairs
   if (freePointers.size() == 0) {
@@ -221,7 +221,6 @@ template <typename F=double>
                    (void*)SOURCES_DATA(sources[from.source]),
                    sizeof(F) * sliceSize));
   ))
-
 #else
   blank.data = SOURCES_DATA(sources[from.source]);
 #endif
@@ -388,6 +387,22 @@ template <typename F=double>
     }
   }

+  static size_t
+  getSize(const std::vector<size_t> sliceLength,
+          const std::vector<size_t> paramLength,
+          const size_t np,
+          const MPI_Comm global_world) {
+    const RankMap<F> rankMap(paramLength, np, global_world);
+    const size_t
+      nSources = rankMap.nSources(),
+      sliceSize = std::accumulate(sliceLength.begin(),
+                                  sliceLength.end(),
+                                  1UL,
+                                  std::multiplies<size_t>());
+    return nSources * sliceSize;
+  }
+
   // CONSTRUCTOR
   SliceUnion( std::vector<typename Slice<F>::Type> sliceTypes_
             , std::vector<size_t> sliceLength_
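
Note: the new getSize helper makes a union's total source footprint (nSources × sliceSize elements) computable before any SliceUnion is constructed, which Atrip.cxx uses below to size a single allocation for all unions. A minimal usage sketch; No, Nv, np and universe are assumed to be set up as in Atrip::run:

    // Sketch only: pre-compute the ABPH footprint in elements, then bytes.
    const size_t abphElements =
        SliceUnion<double>::getSize({Nv, No},   // slice dimensions
                                    {Nv, Nv},   // parametrizing dimensions
                                    (size_t)np,
                                    universe);
    const size_t abphBytes = abphElements * sizeof(double);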
@@ -405,6 +420,7 @@ template <typename F=double>
     , sliceSize(std::accumulate(sliceLength.begin(),
                                 sliceLength.end(),
                                 1UL, std::multiplies<size_t>()))
 #if defined(ATRIP_SOURCES_IN_GPU)
     , sources(rankMap.nSources())
 #else
@@ -417,6 +433,7 @@ template <typename F=double>
 { // constructor begin
   LOG(0,"Atrip") << "INIT SliceUnion: " << name << "\n";
+  printf("sliceSize %zu, number of slices %zu\n\n\n", sliceSize, sources.size());
 #if defined(ATRIP_SOURCES_IN_GPU)
   for (auto& ptr: sources) {
@@ -463,30 +480,30 @@ template <typename F=double>
 #if defined(HAVE_CUDA)
   LOG(1,"Atrip") << "warming communication up " << slices.size() << "\n";
   WITH_CHRONO("cuda:warmup",
     int nRanks=Atrip::np, requestCount=0;
     int nSends=sliceBuffers.size()*nRanks;
     MPI_Request *requests = (MPI_Request*) malloc(nSends*2 * sizeof(MPI_Request));
     MPI_Status *statuses = (MPI_Status*) malloc(nSends*2 * sizeof(MPI_Status));
     for (int sliceId=0; sliceId<sliceBuffers.size(); sliceId++){
       for (int rankId=0; rankId<nRanks; rankId++){
         MPI_Isend((void*)SOURCES_DATA(sources[0]),
                   sliceSize,
                   traits::mpi::datatypeOf<F>(),
                   rankId,
                   100,
                   universe,
                   &requests[requestCount++]);
         MPI_Irecv((void*)sliceBuffers[sliceId],
                   sliceSize,
                   traits::mpi::datatypeOf<F>(),
                   rankId,
                   100,
                   universe,
                   &requests[requestCount++]);
       }
     }
     MPI_Waitall(nSends*2, requests, statuses);
   )
 #endif
@@ -571,14 +588,16 @@ template <typename F=double>
       if (slice.info.state == Slice<F>::Fetch) { // if-1
         // TODO: do it through the slice class
         slice.info.state = Slice<F>::Dispatched;
-#if defined(HAVE_CUDA)
-# if !defined(ATRIP_CUDA_AWARE_MPI) && defined(ATRIP_SOURCES_IN_GPU)
+#if defined(HAVE_CUDA) && defined(ATRIP_SOURCES_IN_GPU)
+# if !defined(ATRIP_CUDA_AWARE_MPI)
 #  error "You need CUDA aware MPI to have slices on the GPU"
 # endif
+        MPI_Irecv((void*)slice.data,
+#elif defined(HAVE_CUDA) && !defined(ATRIP_SOURCES_IN_GPU)
         slice.mpi_data = (F*)malloc(sizeof(F) * slice.size);
         MPI_Irecv(slice.mpi_data,
 #else
-        MPI_Irecv(slice.data,
+        MPI_Irecv((void*)slice.data,
 #endif
                   slice.size,
                   traits::mpi::datatypeOf<F>(),
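
Note: with this hunk the receive path splits three ways: GPU-resident sources over CUDA-aware MPI (receive straight into the device pointer slice.data), GPU build without GPU-resident sources (receive into the malloc'ed host buffer slice.mpi_data, copied to the device later in unwrapAndMarkReady, see the Slice.hpp hunk above), and the plain CPU build. A condensed sketch of how the staged path completes; finishStagedReceive is a hypothetical name for the step unwrapAndMarkReady performs:

    // Hypothetical condensation of the staged completion (host-staging case).
    template <typename F>
    void finishStagedReceive(Slice<F> &slice) {
      // copy the retrieved MPI data to the device ...
      cuMemcpyHtoD((CUdeviceptr)slice.data,
                   slice.mpi_data,
                   sizeof(F) * slice.size);
      // ... and release the host staging buffer
      free(slice.mpi_data);
      slice.mpi_data = nullptr;
    }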

View File

@@ -160,9 +160,9 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
   LOG(0,"Atrip") << "ooo blocks: "
                  << Atrip::kernelDimensions.ooo.blocks << "\n";
   LOG(0,"Atrip") << "ooo threads per block: "
                  << Atrip::kernelDimensions.ooo.threads << "\n";
 #endif
   // allocate the three scratches, see piecuch
@@ -202,7 +202,7 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
   _CHECK_CUDA_SUCCESS("Zijk",
                       cuMemAlloc(&Zijk, sizeof(F) * No * No * No));
 #else
-  std::vector<F> &Tai = _Tai, &epsi = _epsi, &epsa = _epsa;
+  DataPtr<F> Tai = _Tai.data(), epsi = _epsi.data(), epsa = _epsa.data();
   Zijk = (DataFieldType<F>*)malloc(No*No*No * sizeof(DataFieldType<F>));
   Tijk = (DataFieldType<F>*)malloc(No*No*No * sizeof(DataFieldType<F>));
 #endif
@@ -235,11 +235,54 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
     MPI_Comm_size(child_comm, &child_size);
   }

+  // a, b, c, d, e, f and P => Nv
+  // H => No
+  // total_source_sizes contains a list of the number of elements
+  // in all sources of every tensor union, therefore nSlices * sliceSize
+  const std::vector<size_t> total_source_sizes = {
+    // ABPH
+    SliceUnion<F>::getSize({Nv, No}, {Nv, Nv}, (size_t)np, universe),
+    // ABHH
+    SliceUnion<F>::getSize({No, No}, {Nv, Nv}, (size_t)np, universe),
+    // TABHH
+    SliceUnion<F>::getSize({No, No}, {Nv, Nv}, (size_t)np, universe),
+    // TAPHH
+    SliceUnion<F>::getSize({Nv, No, No}, {Nv}, (size_t)np, universe),
+    // HHHA
+    SliceUnion<F>::getSize({No, No, No}, {Nv}, (size_t)np, universe),
+  };
+  const size_t
+    total_source_size = sizeof(DataFieldType<F>)
+                      * std::accumulate(total_source_sizes.begin(),
+                                        total_source_sizes.end(),
+                                        0UL);
+#if defined(HAVE_CUDA)
+  DataPtr<F> all_sources_pointer;
+  cuMemAlloc(&all_sources_pointer, total_source_size);
+#else
+  DataPtr<F>
+    all_sources_pointer = (DataPtr<F>)malloc(total_source_size);
+#endif
+  size_t _source_pointer_idx = 0;
+
   // BUILD SLICES PARAMETRIZED BY NV x NV =============================={{{1
   WITH_CHRONO("nv-nv-slices",
     LOG(0,"Atrip") << "building NV x NV slices\n";
+    // TODO
+    // DataPtr<F> offseted_pointer = all_sources_pointer
+    //                             * total_source_sizes[_source_pointer_idx++];
     ABPH<F> abph(*in.Vppph, (size_t)No, (size_t)Nv, (size_t)np, child_comm, universe);
+    // TODO
+    // DataPtr<F> offseted_pointer = all_sources_pointer
+    //                             * total_source_sizes[_source_pointer_idx++];
     ABHH<F> abhh(*in.Vpphh, (size_t)No, (size_t)Nv, (size_t)np, child_comm, universe);
+    // TODO
+    // DataPtr<F> offseted_pointer = all_sources_pointer
+    //                             * total_source_sizes[_source_pointer_idx++];
     TABHH<F> tabhh(*in.Tpphh, (size_t)No, (size_t)Nv, (size_t)np, child_comm, universe);
   )
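
Note: the commented TODOs sketch handing each union a sub-range of all_sources_pointer, but as written they multiply the base pointer by a size, which reads like a typo for an additive offset. A hedged sketch of the intended carving, assuming offsets accumulate additively and that total_source_sizes holds element counts; the regions vector and the loop are illustrative, this patch still passes no pointer into the union constructors:

    // Hypothetical: carve the single allocation into per-union regions.
    size_t offset_bytes = 0;
    std::vector<DataPtr<F>> regions(total_source_sizes.size());
    for (size_t i = 0; i < total_source_sizes.size(); ++i) {
      // byte offsets work for both CUdeviceptr and host pointers
      regions[i] = (DataPtr<F>)((size_t)all_sources_pointer + offset_bytes);
      offset_bytes += total_source_sizes[i] * sizeof(DataFieldType<F>);
    }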
@@ -251,13 +294,38 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
   // BUILD SLICES PARAMETRIZED BY NV ==================================={{{1
   WITH_CHRONO("nv-slices",
     LOG(0,"Atrip") << "building NV slices\n";
+    // TODO
+    // DataPtr<F> offseted_pointer = all_sources_pointer
+    //                             * total_source_sizes[_source_pointer_idx++];
     TAPHH<F> taphh(*in.Tpphh, (size_t)No, (size_t)Nv, (size_t)np, child_comm, universe);
+    // TODO
+    // DataPtr<F> offseted_pointer = all_sources_pointer
+    //                             * total_source_sizes[_source_pointer_idx++];
     HHHA<F> hhha(*in.Vhhhp, (size_t)No, (size_t)Nv, (size_t)np, child_comm, universe);
   )

   // all tensors
   std::vector< SliceUnion<F>* > unions = {&taphh, &hhha, &abph, &abhh, &tabhh};

+#ifdef HAVE_CUDA
+  // TODO: free buffers
+  DataFieldType<F>* _t_buffer;
+  DataFieldType<F>* _vhhh;
+  WITH_CHRONO("double:cuda:alloc",
+    _CHECK_CUDA_SUCCESS("Allocating _t_buffer",
+                        cuMemAlloc((CUdeviceptr*)&_t_buffer,
+                                   No*No*No * sizeof(DataFieldType<F>)));
+    _CHECK_CUDA_SUCCESS("Allocating _vhhh",
+                        cuMemAlloc((CUdeviceptr*)&_vhhh,
+                                   No*No*No * sizeof(DataFieldType<F>)));
+  )
+  //const size_t
+  //    bs = Atrip::kernelDimensions.ooo.blocks,
+  //   ths = Atrip::kernelDimensions.ooo.threads;
+  //cuda::zeroing<<<bs, ths>>>((DataFieldType<F>*)_t_buffer, NoNoNo);
+  //cuda::zeroing<<<bs, ths>>>((DataFieldType<F>*)_vhhh, NoNoNo);
+#endif
+
   // get tuples for the current rank
   TuplesDistribution *distribution;
@@ -639,7 +707,14 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
                 tabhh.unwrapSlice(Slice<F>::AC, abc),
                 tabhh.unwrapSlice(Slice<F>::BC, abc),
                 // -- TIJK
-                (DataFieldType<F>*)Tijk);
+                (DataFieldType<F>*)Tijk
+#if defined(HAVE_CUDA)
+                // -- tmp buffers
+                ,(DataFieldType<F>*)_t_buffer
+                ,(DataFieldType<F>*)_vhhh
+#endif
+                );
   WITH_RANK << iteration << "-th doubles done\n";
 ))
 }
@@ -667,7 +742,7 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
                          (DataFieldType<F>*)Tai,
 #else
   singlesContribution<F>(No, Nv, abc[0], abc[1], abc[2],
-                         Tai.data(),
+                         Tai,
 #endif
                          (DataFieldType<F>*)abhh.unwrapSlice(Slice<F>::AB,
                                                              abc),
@@ -707,18 +782,30 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
               1, 1, // for cuda
               _epsabc,
               No,
+#if defined(HAVE_CUDA)
               (DataFieldType<F>*)epsi,
               (DataFieldType<F>*)Tijk,
               (DataFieldType<F>*)Zijk,
+#else
+              epsi,
+              Tijk,
+              Zijk,
+#endif
               tupleEnergy);
 } else {
   ACC_FUNCALL(getEnergySame<DataFieldType<F>>,
               1, 1, // for cuda
               _epsabc,
               No,
+#if defined(HAVE_CUDA)
               (DataFieldType<F>*)epsi,
               (DataFieldType<F>*)Tijk,
               (DataFieldType<F>*)Zijk,
+#else
+              epsi,
+              Tijk,
+              Zijk,
+#endif
               tupleEnergy);
 })
@@ -865,5 +952,5 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
 }
 // instantiate
 template Atrip::Output Atrip::run(Atrip::Input<double> const& in);
-template Atrip::Output Atrip::run(Atrip::Input<Complex> const& in);
+// template Atrip::Output Atrip::run(Atrip::Input<Complex> const& in);
 // Main:1 ends here

View File

@@ -21,11 +21,6 @@ namespace atrip {
 template <> double maybeConjugate(const double a) { return a; }
 template <> Complex maybeConjugate(const Complex a) { return std::conj(a); }
-
-#if defined(HAVE_CUDA)
-#endif
-
 namespace traits {
   template <typename F> bool isComplex() { return false; }
   template <> bool isComplex<double>() { return false; }

View File

@@ -13,6 +13,8 @@
 // limitations under the License.
 // [[file:~/cuda/atrip/atrip.org::*Prolog][Prolog:2]]
+#include <cstring>
+
 #include<atrip/Equations.hpp>
 #include<atrip/CUDA.hpp>
@@ -25,11 +27,8 @@ namespace atrip {
 #if defined(HAVE_CUDA)
 #define FOR_K()                                                  \
-  for (size_t kmin = blockIdx.x * blockDim.x + threadIdx.x,      \
-              k = kmin,                                          \
-              idx = kmin * size * size * size;                   \
-       k < (kmin < size) ? kmin + 1 : size;                      \
-       k++)
+  const size_t k = blockIdx.x * blockDim.x + threadIdx.x;        \
+  size_t idx = k*size*size;
 #else
 #define FOR_K() for (size_t k=0, idx=0; k < size; k++)
 #endif
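
Note: the old CUDA FOR_K was broken by operator precedence: `k < (kmin < size) ? kmin + 1 : size` parses as `(k < (kmin < size)) ? kmin + 1 : size`, so the loop condition never meant what it appears to say. The new macro gives each thread exactly one k slab, with idx pointing at the k-th size×size block. A sketch of a kernel body using it; the bounds guard is an assumption of mine, since the macro itself no longer checks k against size:

    // Illustrative kernel shape only; `size` plays the role of No.
    template <typename F>
    __global__ void exampleKernel(F* out, size_t size) {
      FOR_K()                  // defines k and idx = k*size*size
      if (k >= size) return;   // assumed guard: the launch may overshoot
      for (size_t j = 0; j < size; ++j)
        for (size_t i = 0; i < size; ++i)
          out[idx + i + size*j] = F(0);
    }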
@@ -102,6 +101,7 @@ namespace atrip {
 # define MIN(a, b) std::min((a), (b))
 #endif

+#if defined(ATRIP_NEW_ENERGY)
 // [[file:~/cuda/atrip/atrip.org::*Energy][Energy:2]]
 template <typename F>
@@ -250,6 +250,131 @@ void getEnergySame
 }
 // Energy:2 ends here
+#else
+// [[file:~/cuda/atrip/atrip.org::*Energy][Energy:2]]
+template <typename F>
+__MAYBE_GLOBAL__
+void getEnergyDistinct
+  ( F const epsabc
+  , size_t const No
+  , F* const epsi
+  , F* const Tijk
+  , F* const Zijk
+  , double* _energy
+  ) {
+  constexpr size_t blockSize=16;
+  F energy(0.);
+  for (size_t kk=0; kk<No; kk+=blockSize){
+    const size_t kend( MIN(No, kk+blockSize) );
+    for (size_t jj(kk); jj<No; jj+=blockSize){
+      const size_t jend( MIN( No, jj+blockSize) );
+      for (size_t ii(jj); ii<No; ii+=blockSize){
+        const size_t iend( MIN( No, ii+blockSize) );
+        for (size_t k(kk); k < kend; k++){
+          const F ek(epsi[k]);
+          const size_t jstart = jj > k ? jj : k;
+          for (size_t j(jstart); j < jend; j++){
+            F const ej(epsi[j]);
+            F const facjk = j == k ? F(0.5) : F(1.0);
+            size_t istart = ii > j ? ii : j;
+            for (size_t i(istart); i < iend; i++){
+              const F
+                  ei(epsi[i])
+                , facij = i == j ? F(0.5) : F(1.0)
+                , denominator(epsabc - ei - ej - ek)
+                , U(Zijk[i + No*j + No*No*k])
+                , V(Zijk[i + No*k + No*No*j])
+                , W(Zijk[j + No*i + No*No*k])
+                , X(Zijk[j + No*k + No*No*i])
+                , Y(Zijk[k + No*i + No*No*j])
+                , Z(Zijk[k + No*j + No*No*i])
+                , A(acc::maybeConjugateScalar<F>(Tijk[i + No*j + No*No*k]))
+                , B(acc::maybeConjugateScalar<F>(Tijk[i + No*k + No*No*j]))
+                , C(acc::maybeConjugateScalar<F>(Tijk[j + No*i + No*No*k]))
+                , D(acc::maybeConjugateScalar<F>(Tijk[j + No*k + No*No*i]))
+                , E(acc::maybeConjugateScalar<F>(Tijk[k + No*i + No*No*j]))
+                , _F(acc::maybeConjugateScalar<F>(Tijk[k + No*j + No*No*i]))
+                , value
+                    = 3.0 * ( A * U
+                            + B * V
+                            + C * W
+                            + D * X
+                            + E * Y
+                            + _F * Z )
+                    + ( ( U + X + Y )
+                      - 2.0 * ( V + W + Z )
+                      ) * ( A + D + E )
+                    + ( ( V + W + Z )
+                      - 2.0 * ( U + X + Y )
+                      ) * ( B + C + _F )
+                ;
+              energy += 2.0 * value / denominator * facjk * facij;
+            } // i
+          } // j
+        } // k
+      } // ii
+    } // jj
+  } // kk
+  *_energy = acc::real(energy);
+}
+
+template <typename F>
+__MAYBE_GLOBAL__
+void getEnergySame
+  ( F const epsabc
+  , size_t const No
+  , F* const epsi
+  , F* const Tijk
+  , F* const Zijk
+  , double* _energy
+  ) {
+  constexpr size_t blockSize = 16;
+  F energy = F(0.);
+  for (size_t kk=0; kk<No; kk+=blockSize){
+    const size_t kend( MIN( kk+blockSize, No) );
+    for (size_t jj(kk); jj<No; jj+=blockSize){
+      const size_t jend( MIN( jj+blockSize, No) );
+      for (size_t ii(jj); ii<No; ii+=blockSize){
+        const size_t iend( MIN( ii+blockSize, No) );
+        for (size_t k(kk); k < kend; k++){
+          const F ek(epsi[k]);
+          const size_t jstart = jj > k ? jj : k;
+          for(size_t j(jstart); j < jend; j++){
+            const F facjk( j == k ? F(0.5) : F(1.0));
+            const F ej(epsi[j]);
+            const size_t istart = ii > j ? ii : j;
+            for(size_t i(istart); i < iend; i++){
+              const F
+                  ei(epsi[i])
+                , facij ( i==j ? F(0.5) : F(1.0))
+                , denominator(epsabc - ei - ej - ek)
+                , U(Zijk[i + No*j + No*No*k])
+                , V(Zijk[j + No*k + No*No*i])
+                , W(Zijk[k + No*i + No*No*j])
+                , A(acc::maybeConjugateScalar<F>(Tijk[i + No*j + No*No*k]))
+                , B(acc::maybeConjugateScalar<F>(Tijk[j + No*k + No*No*i]))
+                , C(acc::maybeConjugateScalar<F>(Tijk[k + No*i + No*No*j]))
+                , value
+                    = F(3.0) * ( A * U
+                               + B * V
+                               + C * W )
+                    - ( A + B + C ) * ( U + V + W )
+                ;
+              energy += F(2.0) * value / denominator * facjk * facij;
+            } // i
+          } // j
+        } // k
+      } // ii
+    } // jj
+  } // kk
+  *_energy = acc::real(energy);
+}
+// Energy:2 ends here
+#endif /* defined(ATRIP_NEW_ENERGY) */
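
Note: for reference, the quantity both fallback kernels accumulate, read directly off the code above (notation mine), is

    E = \sum_{k \le j \le i} f_{jk}\, f_{ij}\,
        \frac{2\,\mathrm{value}(i,j,k)}{\epsilon_{abc} - \epsilon_i - \epsilon_j - \epsilon_k},
    \qquad f_{pq} = \begin{cases} 1/2 & p = q \\ 1 & \text{otherwise} \end{cases}

where getEnergyDistinct uses value = 3(AU + BV + CW + DX + EY + FZ) + [(U+X+Y) - 2(V+W+Z)](A+D+E) + [(V+W+Z) - 2(U+X+Y)](B+C+F), and getEnergySame uses value = 3(AU + BV + CW) - (A+B+C)(U+V+W); the blockSize = 16 tiling only reorders the summation for locality.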
 // [[file:~/cuda/atrip/atrip.org::*Energy][Energy:3]]
 // instantiate double
 template
@@ -274,6 +399,8 @@ void getEnergySame
   , DataFieldType<double>* energy
   );

+// TODO: put this back in
+#if defined(ATRIP_WITH_COMPLEX)
 // instantiate Complex
 template
 __MAYBE_GLOBAL__
@@ -297,6 +424,7 @@ void getEnergySame
   , DataFieldType<double>* energy
   );
 // Energy:3 ends here
+#endif

 // [[file:~/cuda/atrip/atrip.org::*Singles%20contribution][Singles contribution:2]]
 template <typename F> __MAYBE_GLOBAL__
@@ -401,16 +529,22 @@ void getEnergySame
   // -- TIJK
   // , DataPtr<F> Tijk
   , DataFieldType<F>* Tijk_
-  ) {
-
-  const size_t NoNo = No*No;
+#if defined(HAVE_CUDA)
+  // -- tmp buffers
+  , DataFieldType<F>* _t_buffer
+  , DataFieldType<F>* _vhhh
+#endif
+  ) {
+  const size_t a = abc[0], b = abc[1], c = abc[2]
+             , NoNo = No*No
+             ;
   DataFieldType<F>* Tijk = (DataFieldType<F>*)Tijk_;

 #if defined(ATRIP_USE_DGEMM)
 #if defined(HAVE_CUDA)
 #define REORDER(__II, __JJ, __KK)                      \
-  reorder<<<bs, ths>>>(reorder_proxy<                  \
+  reorder<<<1, No>>>(reorder_proxy<                    \
                        DataFieldType<F>,               \
                        __II ## __JJ ## __KK            \
                        >{},                            \
@@ -448,13 +582,8 @@ void getEnergySame
                        )

 #define MAYBE_CONJ(_conj, _buffer)                     \
   do {                                                 \
-    acc::maybeConjugate<<<                             \
-                                                       \
-      Atrip::kernelDimensions.ooo.blocks,              \
-                                                       \
-      Atrip::kernelDimensions.ooo.threads              \
-                                                       \
-    >>>((DataFieldType<F>*)_conj,                      \
+    acc::maybeConjugate<<<1, 1                         \
+      >>>((DataFieldType<F>*)_conj,                    \
          (DataFieldType<F>*)_buffer,                   \
          NoNoNo);                                      \
   } while (0)
@@ -505,59 +634,39 @@ void getEnergySame
                _t_buffer,                              \
                (int const*)&NoNo                       \
                )

 #define MAYBE_CONJ(_conj, _buffer)                     \
-  do {                                                 \
-    for (size_t __i = 0; __i < NoNoNo; ++__i) {        \
-      _conj[__i]                                       \
-        = maybeConjugate<F>(_buffer[__i]);             \
-    }                                                  \
-  } while (0)
+  acc::maybeConjugate((DataFieldType<F>*)_conj,        \
+                      (DataFieldType<F>*)_buffer,      \
+                      NoNoNo);
 #endif

   F one{1.0}, m_one{-1.0}, zero{0.0};
   const size_t NoNoNo = No*NoNo;

-  // Zeroing vectors
 #ifdef HAVE_CUDA
-  DataFieldType<F>* _t_buffer;
-  DataFieldType<F>* _vhhh;
-  WITH_CHRONO("double:cuda:alloc",
-    _CHECK_CUDA_SUCCESS("Allocating _t_buffer",
-                        cuMemAlloc((CUdeviceptr*)&_t_buffer,
-                                   NoNoNo * sizeof(DataFieldType<F>)));
-    _CHECK_CUDA_SUCCESS("Allocating _vhhh",
-                        cuMemAlloc((CUdeviceptr*)&_vhhh,
-                                   NoNoNo * sizeof(DataFieldType<F>)));
-  )
-  const size_t
-    bs = Atrip::kernelDimensions.ooo.blocks,
-    ths = Atrip::kernelDimensions.ooo.threads;
 #if !defined(ATRIP_ONLY_DGEMM)
-  acc::zeroing<<<bs, ths>>>((DataFieldType<F>*)_t_buffer, NoNoNo);
-  acc::zeroing<<<bs, ths>>>((DataFieldType<F>*)_vhhh, NoNoNo);
+  {
+    const size_t elements = NoNoNo * sizeof(DataFieldType<F>)/4;
+    WITH_CHRONO("double:zeroing",
+      _CHECK_CUDA_SUCCESS("Zeroing Tijk",
+                          cuMemsetD32_v2((CUdeviceptr)Tijk, 0x00, elements));
+      _CHECK_CUDA_SUCCESS("Zeroing t buffer",
+                          cuMemsetD32_v2((CUdeviceptr)_t_buffer, 0x00, elements));
+      _CHECK_CUDA_SUCCESS("Zeroing vhhh buffer",
+                          cuMemsetD32_v2((CUdeviceptr)_vhhh, 0x00, elements));
+    )
+  }
 #endif
 #else
   DataFieldType<F>* _t_buffer = (DataFieldType<F>*)malloc(NoNoNo * sizeof(F));
   DataFieldType<F>* _vhhh = (DataFieldType<F>*)malloc(NoNoNo * sizeof(F));
-  DataFieldType<F> zero_h{0.0};
-  for (size_t i=0; i < NoNoNo; i++) {
-    _t_buffer[i] = zero_h;
-    _vhhh[i] = zero_h;
-  }
-#endif
-
-  // Set Tijk to zero
-#if defined(HAVE_CUDA) && !defined(ATRIP_ONLY_DGEMM)
-  WITH_CHRONO("double:reorder",
-    acc::zeroing<<<bs, ths>>>((DataFieldType<F>*)Tijk,
-                              NoNoNo);
-  )
-#else
-  WITH_CHRONO("double:reorder",
-    for (size_t k = 0; k < NoNoNo; k++) {
-      Tijk[k] = DataFieldType<F>{0.0};
-    })
-#endif /* defined(HAVE_CUDA) && !defined(ATRIP_ONLY_DGEMM) */
+  std::memset((void*)_t_buffer, 0x00, NoNoNo * sizeof(DataFieldType<F>));
+  std::memset((void*)_vhhh, 0x00, NoNoNo * sizeof(DataFieldType<F>));
+  std::memset((void*)Tijk, 0x00, NoNoNo * sizeof(DataFieldType<F>));
+#endif /* HAVE_CUDA */

 #if defined(ATRIP_ONLY_DGEMM)
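
Note: cuMemsetD32_v2 counts 32-bit words, hence elements = NoNoNo * sizeof(DataFieldType<F>)/4 above; this assumes sizeof(DataFieldType<F>) is a multiple of 4 (true for double and the CUDA complex type) and that an all-zero bit pattern means 0.0 (true for IEEE-754). A minimal sketch of the same zeroing factored into a helper, under those assumptions; zeroDeviceBuffer is a hypothetical name:

    // Sketch: zero nElements of T on the device via 32-bit stores.
    template <typename T>
    CUresult zeroDeviceBuffer(CUdeviceptr buf, size_t nElements) {
      const size_t words = nElements * sizeof(T) / 4; // 32-bit words
      return cuMemsetD32_v2(buf, 0x00, words);
    }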
@@ -649,12 +758,12 @@ void getEnergySame
 #ifdef HAVE_CUDA
   // we need to synchronize here since we need
   // the Tijk for next process in the pipeline
-  _CHECK_CUDA_SUCCESS("Synchronizing",
-                      cuCtxSynchronize());
-  _CHECK_CUDA_SUCCESS("Freeing _vhhh",
-                      cuMemFree((CUdeviceptr)_vhhh));
-  _CHECK_CUDA_SUCCESS("Freeing _t_buffer",
-                      cuMemFree((CUdeviceptr)_t_buffer));
+  //_CHECK_CUDA_SUCCESS("Synchronizing",
+  //                    cuCtxSynchronize());
+  //_CHECK_CUDA_SUCCESS("Freeing _vhhh",
+  //                    cuMemFree((CUdeviceptr)_vhhh));
+  //_CHECK_CUDA_SUCCESS("Freeing _t_buffer",
+  //                    cuMemFree((CUdeviceptr)_t_buffer));
 #else
   free(_vhhh);
   free(_t_buffer);
@@ -741,6 +850,12 @@ void getEnergySame
   , DataPtr<double> const TBChh
   // -- TIJK
   , DataFieldType<double>* Tijk
+#if defined(HAVE_CUDA)
+  // -- tmp buffers
+  , DataFieldType<double>* _t_buffer
+  , DataFieldType<double>* _vhhh
+#endif
   );

 template
@@ -769,6 +884,12 @@ void getEnergySame
   , DataPtr<Complex> const TBChh
   // -- TIJK
   , DataFieldType<Complex>* Tijk
+#if defined(HAVE_CUDA)
+  // -- tmp buffers
+  , DataFieldType<Complex>* _t_buffer
+  , DataFieldType<Complex>* _vhhh
+#endif
   );
 // Doubles contribution:2 ends here