8 Commits

5 changed files with 139 additions and 59 deletions

File 1 of 5

@@ -103,6 +103,11 @@ void singlesContribution
 // -- TIJK
 // , DataPtr<F> Tijk
 , DataFieldType<F>* Tijk_
+#if defined(HAVE_CUDA)
+// -- tmp buffers
+, DataFieldType<F>* _t_buffer
+, DataFieldType<F>* _vhhh
+#endif
 );
 // Doubles contribution:1 ends here

File 2 of 5

@@ -352,7 +352,7 @@ Info info;
 // [[file:~/cuda/atrip/atrip.org::*Attributes][Attributes:2]]
 DataPtr<F> data;
-#if defined(HAVE_CUDA)
+#if defined(HAVE_CUDA) && !defined (ATRIP_SOURCES_IN_GPU)
 F* mpi_data;
 #endif
 // Attributes:2 ends here
@@ -456,7 +456,7 @@ void unwrapAndMarkReady() {
 if (errorCode != MPI_SUCCESS)
   throw "Atrip: Unexpected error MPI ERROR";
-#if defined(HAVE_CUDA)
+#if defined(HAVE_CUDA) && !defined(ATRIP_SOURCES_IN_GPU)
 // copy the retrieved mpi data to the device
 WITH_CHRONO("cuda:memcpy",
   _CHECK_CUDA_SUCCESS("copying mpi data to device",
@@ -488,7 +488,7 @@ void unwrapAndMarkReady() {
 Slice(size_t size_)
   : info({})
   , data(DataNullPtr)
-#if defined(HAVE_CUDA)
+#if defined(HAVE_CUDA) && !defined(ATRIP_SOURCES_IN_GPU)
   , mpi_data(nullptr)
 #endif
   , size(size_)
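Note on the two guards above: mpi_data is only a host-side staging buffer, so it is compiled out once the sources themselves live on the GPU. A minimal sketch of the staging path it supports, assuming a plain MPI receive followed by a host-to-device copy (the helper name and signature are illustrative, not Atrip's API):

#include <cuda.h>
#include <mpi.h>
#include <cstdlib>

// Illustrative only: receive a slice into a host buffer, then stage it onto
// the GPU. This is the path that needs a separate mpi_data pointer.
void recvViaHostStaging(CUdeviceptr dev, size_t count, int source, int tag,
                        MPI_Comm comm) {
  double *host = static_cast<double*>(std::malloc(count * sizeof(double)));
  MPI_Request request;
  MPI_Irecv(host, static_cast<int>(count), MPI_DOUBLE, source, tag, comm,
            &request);
  MPI_Wait(&request, MPI_STATUS_IGNORE);            // data must be complete first
  cuMemcpyHtoD(dev, host, count * sizeof(double));  // host -> device copy
  std::free(host);
}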

File 3 of 5

@@ -405,6 +405,7 @@ template <typename F=double>
 , sliceSize(std::accumulate(sliceLength.begin(),
                             sliceLength.end(),
                             1UL, std::multiplies<size_t>()))
 #if defined(ATRIP_SOURCES_IN_GPU)
 , sources(rankMap.nSources())
 #else
@@ -417,6 +418,23 @@ template <typename F=double>
 { // constructor begin
   LOG(0,"Atrip") << "INIT SliceUnion: " << name << "\n";
+  printf("sliceSize %d, number of slices %d\n\n\n", sliceSize, sources.size());
+#if defined(ATRIP_SOURCES_IN_GPU)
+  for (auto& ptr: sources) {
+    const CUresult sourceError =
+      cuMemAlloc(&ptr, sizeof(F) * sliceSize);
+    if (ptr == 0UL) {
+      throw "UNSUFICCIENT MEMORY ON THE GRAPHIC CARD FOR SOURCES";
+    }
+    if (sourceError != CUDA_SUCCESS) {
+      std::stringstream s;
+      s << "Error allocating memory for sources "
+        << "code " << sourceError << "\n";
+      throw s.str();
+    }
+  }
+#endif
   for (auto& ptr: sliceBuffers) {
 #if defined(HAVE_CUDA)
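The allocation loop above checks both the returned pointer and the CUresult by hand. The same check can be factored into a small helper; this is only a sketch of that idea (allocOrThrow is a hypothetical name, not an Atrip function), assuming throwing a std::string fits the surrounding error handling:

#include <cuda.h>
#include <sstream>
#include <string>

// Illustrative helper: allocate `bytes` on the device or throw a message
// carrying the CUresult code, mirroring the inline check in the patch.
static CUdeviceptr allocOrThrow(size_t bytes) {
  CUdeviceptr ptr = 0;
  const CUresult err = cuMemAlloc(&ptr, bytes);
  if (err != CUDA_SUCCESS || ptr == 0) {
    std::stringstream s;
    s << "Error allocating " << bytes << " bytes on the device, code " << err;
    throw s.str();
  }
  return ptr;
}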
@@ -447,30 +465,30 @@ template <typename F=double>
 #if defined(HAVE_CUDA)
 LOG(1,"Atrip") << "warming communication up " << slices.size() << "\n";
 WITH_CHRONO("cuda:warmup",
   int nRanks=Atrip::np, requestCount=0;
   int nSends=sliceBuffers.size()*nRanks;
   MPI_Request *requests = (MPI_Request*) malloc(nSends*2 * sizeof(MPI_Request));
   MPI_Status *statuses = (MPI_Status*) malloc(nSends*2 * sizeof(MPI_Status));
   for (int sliceId=0; sliceId<sliceBuffers.size(); sliceId++){
     for (int rankId=0; rankId<nRanks; rankId++){
       MPI_Isend((void*)SOURCES_DATA(sources[0]),
                 sliceSize,
                 traits::mpi::datatypeOf<F>(),
                 rankId,
                 100,
                 universe,
                 &requests[requestCount++]);
       MPI_Irecv((void*)sliceBuffers[sliceId],
                 sliceSize,
                 traits::mpi::datatypeOf<F>(),
                 rankId,
                 100,
                 universe,
                 &requests[requestCount++]);
     }
   }
   MPI_Waitall(nSends*2, requests, statuses);
 )
 #endif
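The warm-up block exercises every send/receive path once before the timed loop starts, so later transfers do not pay connection-setup costs. A stripped-down sketch of the same Isend/Irecv/Waitall pattern, with illustrative buffer names rather than the SliceUnion members (recv_buf is assumed to hold np*count doubles so each peer writes into its own chunk):

#include <mpi.h>
#include <cstddef>
#include <vector>

// Illustrative warm-up: every rank posts one send and one receive per peer,
// then waits for all of them, so the first timed transfer hits warm links.
void warmup(double *send_buf, double *recv_buf, int count, MPI_Comm comm) {
  int np = 0;
  MPI_Comm_size(comm, &np);
  std::vector<MPI_Request> requests(2 * np);
  int r = 0;
  for (int rank = 0; rank < np; ++rank) {
    MPI_Isend(send_buf, count, MPI_DOUBLE, rank, 100, comm, &requests[r++]);
    MPI_Irecv(recv_buf + static_cast<std::size_t>(rank) * count, count,
              MPI_DOUBLE, rank, 100, comm, &requests[r++]);
  }
  MPI_Waitall(r, requests.data(), MPI_STATUSES_IGNORE);
}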
@@ -555,12 +573,11 @@ template <typename F=double>
 if (slice.info.state == Slice<F>::Fetch) { // if-1
   // TODO: do it through the slice class
   slice.info.state = Slice<F>::Dispatched;
-#if defined(HAVE_CUDA)
-#  if !defined(ATRIP_CUDA_AWARE_MPI) && defined(ATRIP_SOURCES_IN_GPU)
+#if defined(HAVE_CUDA) && defined(ATRIP_SOURCES_IN_GPU)
+#  if !defined(ATRIP_CUDA_AWARE_MPI)
 #  error "You need CUDA aware MPI to have slices on the GPU"
 #  endif
-  slice.mpi_data = (F*)malloc(sizeof(F) * slice.size);
-  MPI_Irecv(slice.mpi_data,
+  MPI_Irecv((void*)slice.data,
 #else
   MPI_Irecv(slice.data,
 #endif
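With ATRIP_SOURCES_IN_GPU the receive above now targets slice.data, i.e. device memory, which is why the #error guard insists on a CUDA-aware MPI build and why the malloc of the host staging buffer goes away. A hedged sketch of that direct-to-device receive (the helper name is illustrative):

#include <cuda.h>
#include <mpi.h>

// Illustrative only: with a CUDA-aware MPI build, the device pointer can be
// handed to MPI_Irecv directly and no host staging buffer is needed.
void recvIntoDevice(CUdeviceptr dev, int count, int source, int tag,
                    MPI_Comm comm, MPI_Request *request) {
  MPI_Irecv(reinterpret_cast<void*>(dev),  // device address, no cuMemcpyHtoD
            count, MPI_DOUBLE, source, tag, comm, request);
}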

File 4 of 5

@@ -202,7 +202,7 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
_CHECK_CUDA_SUCCESS("Zijk", _CHECK_CUDA_SUCCESS("Zijk",
cuMemAlloc(&Zijk, sizeof(F) * No * No * No)); cuMemAlloc(&Zijk, sizeof(F) * No * No * No));
#else #else
std::vector<F> &Tai = _Tai, &epsi = _epsi, &epsa = _epsa; DataPtr<F> Tai = _Tai.data(), epsi = _epsi.data(), epsa = _epsa.data();
Zijk = (DataFieldType<F>*)malloc(No*No*No * sizeof(DataFieldType<F>)); Zijk = (DataFieldType<F>*)malloc(No*No*No * sizeof(DataFieldType<F>));
Tijk = (DataFieldType<F>*)malloc(No*No*No * sizeof(DataFieldType<F>)); Tijk = (DataFieldType<F>*)malloc(No*No*No * sizeof(DataFieldType<F>));
#endif #endif
@@ -258,6 +258,25 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
 // all tensors
 std::vector< SliceUnion<F>* > unions = {&taphh, &hhha, &abph, &abhh, &tabhh};
+#ifdef HAVE_CUDA
+// TODO: free buffers
+DataFieldType<F>* _t_buffer;
+DataFieldType<F>* _vhhh;
+WITH_CHRONO("double:cuda:alloc",
+  _CHECK_CUDA_SUCCESS("Allocating _t_buffer",
+                      cuMemAlloc((CUdeviceptr*)&_t_buffer,
+                                 No*No*No * sizeof(DataFieldType<F>)));
+  _CHECK_CUDA_SUCCESS("Allocating _vhhh",
+                      cuMemAlloc((CUdeviceptr*)&_vhhh,
+                                 No*No*No * sizeof(DataFieldType<F>)));
+)
+//const size_t
+//  bs = Atrip::kernelDimensions.ooo.blocks,
+//ths = Atrip::kernelDimensions.ooo.threads;
+//cuda::zeroing<<<bs, ths>>>((DataFieldType<F>*)_t_buffer, NoNoNo);
+//cuda::zeroing<<<bs, ths>>>((DataFieldType<F>*)_vhhh, NoNoNo);
+#endif
 // get tuples for the current rank
 TuplesDistribution *distribution;
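This block allocates the two No*No*No scratch buffers once per run instead of once per doublesContribution call; the matching free is still a TODO in the patch. A minimal sketch of the allocate-once / reuse / free-after-the-loop pattern, with illustrative names and the loop body elided:

#include <cuda.h>
#include <cstddef>

// Illustrative only: allocate scratch buffers once, reuse them for every
// tuple, and release them after the loop instead of inside each call.
void runTuples(std::size_t NoNoNo, std::size_t nTuples) {
  CUdeviceptr t_buffer = 0, vhhh = 0;
  cuMemAlloc(&t_buffer, NoNoNo * sizeof(double));
  cuMemAlloc(&vhhh, NoNoNo * sizeof(double));
  for (std::size_t i = 0; i < nTuples; ++i) {
    // ... pass t_buffer and vhhh into the per-tuple kernels here ...
  }
  cuMemFree(t_buffer);
  cuMemFree(vhhh);
}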
@@ -639,7 +658,14 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
 tabhh.unwrapSlice(Slice<F>::AC, abc),
 tabhh.unwrapSlice(Slice<F>::BC, abc),
 // -- TIJK
-(DataFieldType<F>*)Tijk);
+(DataFieldType<F>*)Tijk
+#if defined(HAVE_CUDA)
+// -- tmp buffers
+,(DataFieldType<F>*)_t_buffer
+,(DataFieldType<F>*)_vhhh
+#endif
+);
 WITH_RANK << iteration << "-th doubles done\n";
 ))
 }
@@ -667,7 +693,7 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
 (DataFieldType<F>*)Tai,
 #else
 singlesContribution<F>(No, Nv, abc[0], abc[1], abc[2],
-                       Tai.data(),
+                       Tai,
 #endif
                        (DataFieldType<F>*)abhh.unwrapSlice(Slice<F>::AB,
                                                            abc),
@@ -688,7 +714,7 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
 #if defined(HAVE_CUDA)
 double *tupleEnergy;
 cuMemAlloc((DataPtr<double>*)&tupleEnergy, sizeof(double));
-#elif
+#else
 double _tupleEnergy(0.);
 double *tupleEnergy = &_tupleEnergy;
 #endif /* defined(HAVE_CUDA) */
@@ -707,18 +733,30 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
             1, 1, // for cuda
             _epsabc,
             No,
+#if defined(HAVE_CUDA)
             (DataFieldType<F>*)epsi,
             (DataFieldType<F>*)Tijk,
             (DataFieldType<F>*)Zijk,
+#else
+            epsi,
+            Tijk,
+            Zijk,
+#endif
             tupleEnergy);
 } else {
 ACC_FUNCALL(getEnergySame<DataFieldType<F>>,
             1, 1, // for cuda
             _epsabc,
             No,
+#if defined(HAVE_CUDA)
             (DataFieldType<F>*)epsi,
             (DataFieldType<F>*)Tijk,
             (DataFieldType<F>*)Zijk,
+#else
+            epsi,
+            Tijk,
+            Zijk,
+#endif
             tupleEnergy);
 })
@@ -727,7 +765,7 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
 cuMemcpyDtoH((void*)&host_tuple_energy,
              (DataPtr<double>)tupleEnergy,
              sizeof(double));
-#elif
+#else
 double host_tuple_energy = *tupleEnergy;
 #endif /* defined(HAVE_CUDA) */

File 5 of 5

@@ -401,9 +401,15 @@ void getEnergySame
 // -- TIJK
 // , DataPtr<F> Tijk_
 , DataFieldType<F>* Tijk_
-) {
-  const size_t NoNo = No*No;
+#if defined(HAVE_CUDA)
+// -- tmp buffers
+, DataFieldType<F>* _t_buffer
+, DataFieldType<F>* _vhhh
+#endif
+) {
+  const size_t a = abc[0], b = abc[1], c = abc[2]
+             , NoNo = No*No
+             ;
 DataFieldType<F>* Tijk = (DataFieldType<F>*)Tijk_;
@@ -517,21 +523,21 @@ void getEnergySame
 F one{1.0}, m_one{-1.0}, zero{0.0};
 const size_t NoNoNo = No*NoNo;
 #ifdef HAVE_CUDA
-DataFieldType<F>* _t_buffer;
-DataFieldType<F>* _vhhh;
-WITH_CHRONO("double:cuda:alloc",
-  _CHECK_CUDA_SUCCESS("Allocating _t_buffer",
-                      cuMemAlloc((CUdeviceptr*)&_t_buffer,
-                                 NoNoNo * sizeof(DataFieldType<F>)));
-  _CHECK_CUDA_SUCCESS("Allocating _vhhh",
-                      cuMemAlloc((CUdeviceptr*)&_vhhh,
-                                 NoNoNo * sizeof(DataFieldType<F>)));
-)
+// DataFieldType<F>* _t_buffer;
+// DataFieldType<F>* _vhhh;
+// WITH_CHRONO("double:cuda:alloc",
+//   _CHECK_CUDA_SUCCESS("Allocating _t_buffer",
+//                       cuMemAlloc((CUdeviceptr*)&_t_buffer,
+//                                  NoNoNo * sizeof(DataFieldType<F>)));
+//   _CHECK_CUDA_SUCCESS("Allocating _vhhh",
+//                       cuMemAlloc((CUdeviceptr*)&_vhhh,
+//                                  NoNoNo * sizeof(DataFieldType<F>)));
+// )
+#if !defined(ATRIP_ONLY_DGEMM)
+// we still have to zero this
 const size_t
   bs = Atrip::kernelDimensions.ooo.blocks,
   ths = Atrip::kernelDimensions.ooo.threads;
-#if !defined(ATRIP_ONLY_DGEMM)
 acc::zeroing<<<bs, ths>>>((DataFieldType<F>*)_t_buffer, NoNoNo);
 acc::zeroing<<<bs, ths>>>((DataFieldType<F>*)_vhhh, NoNoNo);
 #endif
@@ -552,12 +558,14 @@ void getEnergySame
 acc::zeroing<<<bs, ths>>>((DataFieldType<F>*)Tijk,
                           NoNoNo);
 )
-#else
+#endif
+#if !defined(HAVE_CUDA)
 WITH_CHRONO("double:reorder",
   for (size_t k = 0; k < NoNoNo; k++) {
     Tijk[k] = DataFieldType<F>{0.0};
   })
-#endif /* defined(HAVE_CUDA) && !defined(ATRIP_ONLY_DGEMM) */
+#endif /* !defined(HAVE_CUDA) */
 #if defined(ATRIP_ONLY_DGEMM)
@@ -649,12 +657,12 @@ void getEnergySame
 #ifdef HAVE_CUDA
 // we need to synchronize here since we need
 // the Tijk for next process in the pipeline
-_CHECK_CUDA_SUCCESS("Synchronizing",
-                    cuCtxSynchronize());
-_CHECK_CUDA_SUCCESS("Freeing _vhhh",
-                    cuMemFree((CUdeviceptr)_vhhh));
-_CHECK_CUDA_SUCCESS("Freeing _t_buffer",
-                    cuMemFree((CUdeviceptr)_t_buffer));
+//_CHECK_CUDA_SUCCESS("Synchronizing",
+//                    cuCtxSynchronize());
+//_CHECK_CUDA_SUCCESS("Freeing _vhhh",
+//                    cuMemFree((CUdeviceptr)_vhhh));
+//_CHECK_CUDA_SUCCESS("Freeing _t_buffer",
+//                    cuMemFree((CUdeviceptr)_t_buffer));
 #else
 free(_vhhh);
 free(_t_buffer);
@@ -741,6 +749,12 @@ void getEnergySame
 , DataPtr<double> const TBChh
 // -- TIJK
 , DataFieldType<double>* Tijk
+#if defined(HAVE_CUDA)
+// -- tmp buffers
+, DataFieldType<double>* _t_buffer
+, DataFieldType<double>* _vhhh
+#endif
 );
 template
@@ -769,6 +783,12 @@ void getEnergySame
 , DataPtr<Complex> const TBChh
 // -- TIJK
 , DataFieldType<Complex>* Tijk
+#if defined(HAVE_CUDA)
+// -- tmp buffers
+, DataFieldType<Complex>* _t_buffer
+, DataFieldType<Complex>* _vhhh
+#endif
 );
 // Doubles contribution:2 ends here
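The two hunks above extend the explicit instantiations so they match the widened template signature. A tiny self-contained sketch of that pattern (doublesLike is a hypothetical template, not the Atrip function):

#include <cstddef>

// Illustrative only: when a function template gains parameters, every
// explicit instantiation below it must repeat the new, full signature.
template <typename F>
void doublesLike(std::size_t No, F *Tijk, F *t_buffer, F *vhhh) {
  (void)No; (void)Tijk; (void)t_buffer; (void)vhhh;  // body elided
}

// explicit instantiation, kept in sync with the signature above
template void doublesLike<double>(std::size_t, double*, double*, double*);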