Compare commits


4 Commits

Author           SHA1        Message                                     Date
Gallo Alejandro  7241bbe9fb  Implement reordering on the GPU             2022-08-12 18:32:32 +02:00
Gallo Alejandro  c2e9e930ba  Update main Atrip.cxx using several gpus    2022-08-12 18:30:55 +02:00
Gallo Alejandro  b4aef4db9e  Fix compilation issues and add KernelSizes  2022-08-12 18:29:21 +02:00
Gallo Alejandro  4651231d3b  Update test bench for CUDA                  2022-08-12 18:28:20 +02:00
7 changed files with 295 additions and 170 deletions

View File

@@ -45,6 +45,16 @@ int main(int argc, char** argv) {
                  checkpoint_percentage,
                  "Percentage for checkpoints");
+#if defined(HAVE_CUDA)
+  size_t ooo_threads = 0, ooo_blocks = 0;
+  app.add_option("--ooo-blocks",
+                 ooo_blocks,
+                 "CUDA: Number of blocks for kernels going through ooo tensors");
+  app.add_option("--ooo-threads",
+                 ooo_threads,
+                 "CUDA: Number of threads per block for kernels going through ooo tensors");
+#endif
 
   CLI11_PARSE(app, argc, argv);
 
   CTF::World world(argc, argv);
@@ -154,15 +164,24 @@ int main(int argc, char** argv) {
     .with_checkpointAtPercentage(checkpoint_percentage)
     .with_checkpointPath(checkpoint_path)
     .with_readCheckpointIfExists(!noCheckpoint)
+#if defined(HAVE_CUDA)
+    .with_oooThreads(ooo_threads)
+    .with_oooBlocks(ooo_blocks)
+#endif
     ;
 
-  auto out = atrip::Atrip::run(in);
+  try {
+    auto out = atrip::Atrip::run(in);
+    if (atrip::Atrip::rank == 0)
+      std::cout << "Energy: " << out.energy << std::endl;
+  } catch (const char* msg) {
+    if (atrip::Atrip::rank == 0)
+      std::cout << "Atrip threw with msg:\n\t\t " << msg << "\n";
+  }
 
   if (!in.deleteVppph)
     delete Vppph;
 
-  if (atrip::Atrip::rank == 0)
-    std::cout << "Energy: " << out.energy << std::endl;
-
   MPI_Finalize();
   return 0;
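
For context, the two new flags ride on the CLI11 parser already used by the test bench. Below is a minimal, self-contained sketch of the same pattern; only the option names mirror the diff, the rest of the program is illustrative and not part of the commit.

    #include <CLI/CLI.hpp>
    #include <cstddef>
    #include <iostream>

    int main(int argc, char** argv) {
      CLI::App app{"ooo kernel-dimension flags (illustrative sketch)"};
      // 0 means "let the library pick a default at runtime"
      std::size_t ooo_blocks = 0, ooo_threads = 0;
      app.add_option("--ooo-blocks", ooo_blocks,
                     "CUDA: number of blocks for kernels going through ooo tensors");
      app.add_option("--ooo-threads", ooo_threads,
                     "CUDA: number of threads per block for kernels going through ooo tensors");
      CLI11_PARSE(app, argc, argv);
      std::cout << "blocks=" << ooo_blocks << " threads=" << ooo_threads << "\n";
      return 0;
    }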

View File

@@ -51,6 +51,9 @@ namespace atrip {
       cublasHandle_t handle;
     };
     static CudaContext cuda;
+    static struct KernelDimensions {
+      struct { size_t blocks, threads; } ooo;
+    } kernelDimensions;
 #endif
 
     static void init(MPI_Comm);
@@ -92,6 +95,10 @@ namespace atrip {
     ADD_ATTRIBUTE(bool, writeCheckpoint, true)
     ADD_ATTRIBUTE(float, checkpointAtPercentage, 10)
     ADD_ATTRIBUTE(size_t, checkpointAtEveryIteration, 0)
+#if defined(HAVE_CUDA)
+    ADD_ATTRIBUTE(size_t, oooThreads, 0)
+    ADD_ATTRIBUTE(size_t, oooBlocks, 0)
+#endif
   };
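
ADD_ATTRIBUTE itself is not shown in this diff; judging from the chained .with_oooThreads()/.with_oooBlocks() calls in the test bench above, it presumably generates a member plus a chainable setter. A hypothetical expansion, written only to illustrate that builder pattern and not taken from the atrip sources:

    #include <cstddef>

    // Hypothetical sketch of a builder-style attribute macro; the real
    // ADD_ATTRIBUTE in atrip may differ.
    #define ADD_ATTRIBUTE(_type, _name, _default)   \
      _type _name = _default;                       \
      Input& with_##_name(_type value) {            \
        _name = value;                              \
        return *this;                               \
      }

    struct Input {
      ADD_ATTRIBUTE(std::size_t, oooThreads, 0)
      ADD_ATTRIBUTE(std::size_t, oooBlocks, 0)
    };

    // usage:
    //   Input in;
    //   in.with_oooThreads(32).with_oooBlocks(8);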

View File

@@ -378,12 +378,16 @@ template <typename F=double>
     LOG(0,"Atrip") << "INIT SliceUnion: " << name << "\n";
 
-    for (auto& ptr: sliceBuffers)
+    for (auto& ptr: sliceBuffers) {
 #if defined(HAVE_CUDA)
       cuMemAlloc(&ptr, sizeof(F) * sources[0].size());
+      if (ptr == 0UL) {
+        throw "INSUFFICIENT MEMORY ON THE GRAPHICS CARD FOR FREE POINTERS";
+      }
 #else
       ptr = (DataPtr<F>)malloc(sizeof(F) * sources[0].size());
 #endif
+    }
 
     slices
       = std::vector<Slice<F>>(2 * sliceTypes.size(), { sources[0].size() });
@@ -396,24 +400,12 @@ template <typename F=double>
-    LOG(1,"Atrip") << "rankMap.nSources "
-                   << rankMap.nSources() << "\n";
-    LOG(1,"Atrip") << "#slices "
-                   << slices.size() << "\n";
-    LOG(1,"Atrip") << "#slices[0] "
-                   << slices[0].size << "\n";
-    LOG(1,"Atrip") << "#sources "
-                   << sources.size() << "\n";
-    LOG(1,"Atrip") << "#sources[0] "
-                   << sources[0].size() << "\n";
-    LOG(1,"Atrip") << "#freePointers "
-                   << freePointers.size() << "\n";
-    LOG(1,"Atrip") << "#sliceBuffers "
-                   << sliceBuffers.size() << "\n";
-    LOG(1,"Atrip") << "#sliceLength "
-                   << sliceLength.size() << "\n";
-    LOG(1,"Atrip") << "#paramLength "
-                   << paramLength.size() << "\n";
+    LOG(1,"Atrip") << "#slices " << slices.size() << "\n";
+    WITH_RANK << "#slices[0] " << slices[0].size << "\n";
+    LOG(1,"Atrip") << "#sources " << sources.size() << "\n";
+    WITH_RANK << "#sources[0] " << sources[0].size() << "\n";
+    WITH_RANK << "#freePointers " << freePointers.size() << "\n";
+    LOG(1,"Atrip") << "#sliceBuffers " << sliceBuffers.size() << "\n";
     LOG(1,"Atrip") << "GB*" << np << " "
                    << double(sources.size() + sliceBuffers.size())
                     * sources[0].size()
@@ -434,7 +426,8 @@ template <typename F=double>
                  __sliceLength.data(),
                  syms.data(),
                  w);
-    LOG(1,"Atrip") << "slicing... \n";
+    WITH_OCD WITH_RANK << "slicing... \n";
 
     // setUp sources
     for (size_t it(0); it < rankMap.nSources(); ++it) {
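
The new out-of-memory guard above inspects the returned device pointer; the CUDA driver API also reports failure through the CUresult return value of cuMemAlloc. A hedged sketch of that stricter check follows; the helper name and the `bytes` parameter are illustrative, not from the commit.

    #include <cuda.h>
    #include <cstddef>

    // Sketch: check cuMemAlloc via its CUresult in addition to the pointer.
    // Assumes cuInit and a current context have already been set up.
    static CUdeviceptr allocate_or_throw(std::size_t bytes) {
      CUdeviceptr ptr = 0;
      const CUresult err = cuMemAlloc(&ptr, bytes);
      if (err != CUDA_SUCCESS || ptr == 0UL) {
        throw "INSUFFICIENT MEMORY ON THE GRAPHICS CARD FOR FREE POINTERS";
      }
      return ptr;
    }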

View File

@@ -19,13 +19,27 @@
 #include <map>
 #include <chrono>
 
-#pragma GCC diagnostic push
-#pragma GCC diagnostic ignored "-Wvla"
-#pragma GCC diagnostic ignored "-Wint-in-bool-context"
-#pragma GCC diagnostic ignored "-Wunused-parameter"
-#pragma GCC diagnostic ignored "-Wdeprecated-copy"
-#include <ctf.hpp>
-#pragma GCC diagnostic pop
+#if defined(__NVCC__)
+# pragma nv_diagnostic_push
+# if defined __NVCC_DIAG_PRAGMA_SUPPORT__
+// http://www.ssl.berkeley.edu/~jimm/grizzly_docs/SSL/opt/intel/cc/9.0/lib/locale/en_US/mcpcom.msg
+#  pragma nv_diag_suppress partial_override
+# else
+#  pragma diag_suppress partial_override
+# endif
+# include <ctf.hpp>
+# pragma nv_diagnostic_pop
+#else
+# pragma GCC diagnostic push
+# pragma GCC diagnostic ignored "-Wvla"
+# pragma GCC diagnostic ignored "-Wnonnull"
+# pragma GCC diagnostic ignored "-Wall"
+# pragma GCC diagnostic ignored "-Wint-in-bool-context"
+# pragma GCC diagnostic ignored "-Wunused-parameter"
+# pragma GCC diagnostic ignored "-Wdeprecated-copy"
+# include <ctf.hpp>
+# pragma GCC diagnostic pop
+#endif
 
 #include <atrip/Debug.hpp>

View File

@@ -24,13 +24,7 @@
 using namespace atrip;
 
 #if defined(HAVE_CUDA)
-#include <cuda.h>
-
-namespace atrip {
-  namespace cuda {
-  };
-};
 #endif
 
 template <typename F> bool RankMap<F>::RANK_ROUND_ROBIN;
@@ -40,6 +34,7 @@ size_t Atrip::rank;
 size_t Atrip::np;
 #if defined(HAVE_CUDA)
 typename Atrip::CudaContext Atrip::cuda;
+typename Atrip::KernelDimensions Atrip::kernelDimensions;
 #endif
 MPI_Comm Atrip::communicator;
 Timings Atrip::chrono;
@@ -74,20 +69,99 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
   LOG(0,"Atrip") << "Nv: " << Nv << "\n";
   LOG(0,"Atrip") << "np: " << np << "\n";
 
+#if defined(HAVE_CUDA)
+  int ngcards;
+  cuDeviceGetCount(&ngcards);
+  LOG(0,"Atrip") << "ngcards: " << ngcards << "\n";
+  if (np > ngcards) {
+    std::cerr << "ATRIP: You are running on more ranks than the number of graphics cards\n"
+              << "You have " << ngcards << " cards at your disposal\n";
+    throw "";
+  }
+  if (np < ngcards) {
+    std::cerr << "You have " << ngcards << " cards at your disposal\n"
+              << "You will only be using " << np << ", i.e., the number of ranks.\n";
+  }
+
+  for (size_t _rank = 0; _rank < np; _rank++) {
+    if (rank == _rank) {
+      CUcontext ctx;
+      CUdevice dev;
+      CUdevprop prop;
+      struct { struct { size_t free, total; } avail; size_t total; } memory;
+      char *name = (char*)malloc(256);
+
+      // set current device
+      cuDeviceGet(&dev, rank);
+      cuCtxCreate(&ctx, 0, dev);
+      cuCtxSetCurrent(ctx);
+
+      // get information of the device
+      cuDeviceGetProperties(&prop, dev);
+      cuMemGetInfo(&memory.avail.free, &memory.avail.total);
+      cuDeviceGetName(name, 256, dev);
+      cuDeviceTotalMem(&memory.total, dev);
+
+      printf("\n"
+             "CUDA CARD RANK %d\n"
+             "=================\n"
+             "\tnumber: %1$d\n"
+             "\tname: %s\n"
+             "\tClock rate (KHz): %d\n"
+             "\tShared Mem Per Block (KB): %f\n"
+             "\tAvail. Free/Total mem (GB): %f/%f\n"
+             "\tTotal memory (GB): %f\n"
+             "\n",
+             Atrip::rank,
+             name,
+             prop.clockRate,
+             prop.sharedMemPerBlock / 1024.0,
+             memory.avail.free / 1024.0 / 1024.0 / 1024.0,
+             memory.avail.total / 1024.0 / 1024.0 / 1024.0,
+             memory.total / 1024.0 / 1024.0 / 1024.0);
+      std::free((void*)name);
+    }
+    MPI_Barrier(universe);
+  }
+
+  if (in.oooThreads > 0) {
+    Atrip::kernelDimensions.ooo.threads = in.oooThreads;
+  }
+  if (in.oooBlocks > 0) {
+    Atrip::kernelDimensions.ooo.blocks = in.oooBlocks;
+  }
+  if (Atrip::kernelDimensions.ooo.threads <= 0 ||
+      Atrip::kernelDimensions.ooo.blocks <= 0) {
+    Atrip::kernelDimensions.ooo.blocks = No / 32 + No % 32;
+    Atrip::kernelDimensions.ooo.threads = 32;
+  }
+  LOG(0,"Atrip") << "ooo blocks: "
+                 << Atrip::kernelDimensions.ooo.blocks << "\n";
+  LOG(0,"Atrip") << "ooo threads per block: "
+                 << Atrip::kernelDimensions.ooo.threads << "\n";
+#endif
   // allocate the three scratches, see piecuch
   // we need local copies of the following tensors on every
   // rank
-  std::vector<F> _epsi(No)
-               , _epsa(Nv)
-               , _Tai(No * Nv)
-               ;
+  std::vector<F> _epsi(No), _epsa(Nv), _Tai(No * Nv);
+
+  // copy the data from the tensors into the vectors
   in.ei->read_all(_epsi.data());
   in.ea->read_all(_epsa.data());
   in.Tph->read_all(_Tai.data());
+
+  // TODO: free memory pointers in the end of the algorithm
+  DataPtr<F> Tijk, Zijk;
+
 #if defined(HAVE_CUDA)
   DataPtr<F> Tai, epsi, epsa;
-  // TODO: free memory pointers in the end of the algorithm
   cuMemAlloc(&Tai, sizeof(F) * _Tai.size());
   cuMemAlloc(&epsi, sizeof(F) * _epsi.size());
   cuMemAlloc(&epsa, sizeof(F) * _epsa.size());
@@ -96,13 +170,12 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
   cuMemcpyHtoD(epsi, (void*)_epsi.data(), sizeof(F) * _epsi.size());
   cuMemcpyHtoD(epsa, (void*)_epsa.data(), sizeof(F) * _epsa.size());
 
-  DataPtr<F> Tijk, Zijk;
-  // TODO: free memory
   cuMemAlloc(&Tijk, sizeof(F) * No * No * No);
   cuMemAlloc(&Zijk, sizeof(F) * No * No * No);
 #else
   std::vector<F> &Tai = _Tai, &epsi = _epsi, &epsa = _epsa;
-  std::vector<F> Tijk(No*No*No), Zijk(No*No*No);
+  Zijk = (DataFieldType<F>*)malloc(No*No*No * sizeof(DataFieldType<F>));
+  Tijk = (DataFieldType<F>*)malloc(No*No*No * sizeof(DataFieldType<F>));
 #endif
 
   RankMap<F>::RANK_ROUND_ROBIN = in.rankRoundRobin;
@@ -135,7 +208,7 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
   // BUILD SLICES PARAMETRIZED BY NV x NV =============================={{{1
   WITH_CHRONO("nv-nv-slices",
-    LOG(0,"Atrip") << "BUILD NV x NV-SLICES\n";
+    LOG(0,"Atrip") << "building NV x NV slices\n";
     ABPH<F> abph(*in.Vppph, (size_t)No, (size_t)Nv, (size_t)np, child_comm, universe);
     ABHH<F> abhh(*in.Vpphh, (size_t)No, (size_t)Nv, (size_t)np, child_comm, universe);
     TABHH<F> tabhh(*in.Tpphh, (size_t)No, (size_t)Nv, (size_t)np, child_comm, universe);
@@ -148,7 +221,7 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
   // BUILD SLICES PARAMETRIZED BY NV ==================================={{{1
   WITH_CHRONO("nv-slices",
-    LOG(0,"Atrip") << "BUILD NV-SLICES\n";
+    LOG(0,"Atrip") << "building NV slices\n";
     TAPHH<F> taphh(*in.Tpphh, (size_t)No, (size_t)Nv, (size_t)np, child_comm, universe);
     HHHA<F> hhha(*in.Vhhhp, (size_t)No, (size_t)Nv, (size_t)np, child_comm, universe);
   )
@@ -373,9 +446,6 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
     }
   }
 
-  LOG(0, "AtripCUDA") << "Starting iterations\n";
-
   for ( size_t
           i = first_iteration,
           iteration = first_iteration + 1
@@ -384,8 +454,6 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
       ) {
     Atrip::chrono["iterations"].start();
 
-    LOG(0, "AtripCUDA") << "iteration " << i << "\n";
-
     // check overhead from chrono over all iterations
     WITH_CHRONO("start:stop", {})
@@ -397,8 +465,8 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
     // write checkpoints
+    // TODO: ENABLE THIS
     if (iteration % checkpoint_mod == 0 && false) {
-      LOG(0, "AtripCUDA") << "checkpoints \n";
       double globalEnergy = 0;
       MPI_Reduce(&energy, &globalEnergy, 1, MPI_DOUBLE, MPI_SUM, 0, universe);
       Checkpoint out
@@ -410,10 +478,9 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
                   iteration - 1,
                   in.rankRoundRobin};
       LOG(0, "Atrip") << "Writing checkpoint\n";
-      //if (Atrip::rank == 0) write_checkpoint(out, in.checkpointPath);
+      if (Atrip::rank == 0) write_checkpoint(out, in.checkpointPath);
     }
 
-    LOG(0, "AtripCUDA") << "reporting \n";
-
     // write reporting
     if (iteration % iterationMod == 0 || iteration == iteration1Percent) {
@@ -467,32 +534,20 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
                      << "\n";
       )
 
-    LOG(0, "AtripCUDA") << "first database " << i << "\n";
-
     // COMM FIRST DATABASE ================================================{{{1
     if (i == first_iteration) {
-      LOG(0, "AtripCUDA") << "first database " << i << "\n";
      WITH_RANK << "__first__:first database ............ \n";
      const auto db = communicateDatabase(abc, universe);
-      LOG(0, "AtripCUDA") << "first database communicated" << i << "\n";
      WITH_RANK << "__first__:first database communicated \n";
      WITH_RANK << "__first__:first database io phase \n";
-      LOG(0, "AtripCUDA") << "doing io " << i << "\n";
      doIOPhase(db);
-      LOG(0, "AtripCUDA") << "io done " << i << "\n";
      WITH_RANK << "__first__:first database io phase DONE\n";
      WITH_RANK << "__first__::::Unwrapping all slices for first database\n";
-      LOG(0, "AtripCUDA") << "unrwapping " << i << "\n";
      for (auto& u: unions) u->unwrapAll(abc);
-      LOG(0, "AtripCUDA") << "unwrapped " << i << "\n";
      WITH_RANK << "__first__::::Unwrapping slices for first database DONE\n";
-      LOG(0, "AtripCUDA") << "barrier " << i << "\n";
      MPI_Barrier(universe);
-      LOG(0, "AtripCUDA") << "barriered " << i << "\n";
     }
 
-    LOG(0, "AtripCUDA") << "next database" << i << "\n";
-
     // COMM NEXT DATABASE ================================================={{{1
     if (abcNext) {
      WITH_RANK << "__comm__:" << iteration << "th communicating database\n";
@@ -508,9 +563,6 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
     // COMPUTE DOUBLES ===================================================={{{1
     OCD_Barrier(universe);
     if (!isFakeTuple(i)) {
-      LOG(0, "AtripCUDA") << "computing doubles " << i << "\n";
-
      WITH_RANK << iteration << "-th doubles\n";
      WITH_CHRONO("oneshot-unwrap",
      WITH_CHRONO("unwrap",
@@ -542,11 +594,7 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
                    , tabhh.unwrapSlice(Slice<F>::AC, abc)
                    , tabhh.unwrapSlice(Slice<F>::BC, abc)
                    // -- TIJK
-#if defined(HAVE_CUDA)
                    , (DataFieldType<F>*)Tijk
-#else
-                   , Tijk.data()
-#endif
                    );
        WITH_RANK << iteration << "-th doubles done\n";
      ))
@@ -563,16 +611,10 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
      WITH_CHRONO("reorder",
        int ooo = No*No*No, stride = 1;
        atrip::xcopy<F>(&ooo,
-#if defined(HAVE_CUDA)
                        (DataFieldType<F>*)Tijk, &stride,
                        (DataFieldType<F>*)Zijk, &stride);
-#else
-                       (DataFieldType<F>*)Tijk.data(), &stride,
-                       (DataFieldType<F>*)Zijk.data(), &stride);
-#endif
      )
 
      WITH_CHRONO("singles",
-        LOG(0, "AtripCUDA") << "doing singles" << i << "\n";
 #if defined(HAVE_CUDA)
        singlesContribution<F><<<1,1>>>( No, Nv, abc[0], abc[1], abc[2]
                                       , (DataFieldType<F>*)Tai
@@ -583,13 +625,8 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
                                       , (DataFieldType<F>*)abhh.unwrapSlice(Slice<F>::AB, abc)
                                       , (DataFieldType<F>*)abhh.unwrapSlice(Slice<F>::AC, abc)
                                       , (DataFieldType<F>*)abhh.unwrapSlice(Slice<F>::BC, abc)
-#if defined(HAVE_CUDA)
                                       , (DataFieldType<F>*)Zijk);
-#else
-                                      , Zijk.data());
-#endif
      )
-      LOG(0, "AtripCUDA") << "singles done" << i << "\n";
     }
@@ -602,7 +639,7 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
      if (abc[1] == abc[2]) distinct--;
      const F epsabc(_epsa[abc[0]] + _epsa[abc[1]] + _epsa[abc[2]]);
-      LOG(0, "AtripCUDA") << "doing energy " << i << "distinct " << distinct << "\n";
+      // LOG(0, "AtripCUDA") << "doing energy " << i << "distinct " << distinct << "\n";
      WITH_CHRONO("energy",
        /*
         TODO: think about how to do this on the GPU in the best way possible
@@ -686,6 +723,17 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
   }
   // END OF MAIN LOOP
 
+#if defined(HAVE_CUDA)
+  cuMemFree(Tai);
+  cuMemFree(epsi);
+  cuMemFree(epsa);
+  cuMemFree(Tijk);
+  cuMemFree(Zijk);
+#else
+  std::free(Zijk);
+  std::free(Tijk);
+#endif
+
   MPI_Barrier(universe);
 
   // PRINT TUPLES %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%{{{1
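
Aside on the default launch geometry chosen above: with 32 threads per block, `No / 32 + No % 32` blocks always cover the No hole indices (it over-provisions whenever No % 32 > 1). A standalone sketch of that arithmetic, purely illustrative and separate from the diff:

    #include <cassert>
    #include <cstddef>
    #include <iostream>

    int main() {
      // Default "ooo" kernel geometry: threads = 32,
      // blocks = No / 32 + No % 32 (a conservative cover of No).
      for (std::size_t No = 1; No <= 300; ++No) {
        const std::size_t threads = 32;
        const std::size_t blocks = No / 32 + No % 32;
        // every k in [0, No) gets at least one thread
        assert(blocks * threads >= No);
      }
      std::cout << "default geometry covers all No in [1, 300]\n";
      return 0;
    }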

View File

@@ -80,7 +80,6 @@ namespace atrip {
                typename DataField<Complex>::type *C,
                const int *ldc) {
 #if defined(HAVE_CUDA)
-#pragma warning HAVE_CUDA
     cuDoubleComplex
       cu_alpha = {std::real(*alpha), std::imag(*alpha)},
       cu_beta = {std::real(*beta), std::imag(*beta)};
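
The last file below introduces `reorder<F, R>` kernels that accumulate a permuted No^3 buffer into Tijk. As a host-side reference of the index mapping they implement, here is a small sketch; the function name and the fixed IKJ permutation are illustrative, not part of the commit:

    #include <cstddef>
    #include <vector>

    // Host-side sketch of the reordering done on the GPU:
    // to[i + j*n + k*n*n] += from[perm(i, j, k)], shown for the IKJ permutation.
    template <typename F>
    void reorder_ikj(std::size_t n, std::vector<F>& to, const std::vector<F>& from) {
      for (std::size_t k = 0; k < n; k++)
        for (std::size_t j = 0; j < n; j++)
          for (std::size_t i = 0; i < n; i++)
            // read the (i, k, j) element, accumulate at linear position (i, j, k)
            to[i + j*n + k*n*n] += from[i + k*n + j*n*n];
    }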

View File

@@ -14,6 +14,7 @@
 // [[file:~/cuda/atrip/atrip.org::*Prolog][Prolog:2]]
 #include<atrip/Equations.hpp>
+#include<atrip/CUDA.hpp>
 
 #if defined(HAVE_CUDA)
 #include <cuda.h>
@@ -22,6 +23,23 @@
 namespace atrip {
 // Prolog:2 ends here
+// Helper structures for templating the
+// reorder function
+enum reordering_t
+  {
+   IJK,
+   IKJ,
+   JIK,
+   JKI,
+   KIJ,
+   KJI
+  };
+
+template <typename F, reordering_t R>
+struct reorder_proxy {};
 
 #ifdef HAVE_CUDA
 namespace cuda {
@@ -110,11 +128,61 @@ namespace cuda {
     return lz;
   }
 };
 #endif
+#if defined(HAVE_CUDA)
+#define LIMS_KS                                       \
+  size_t                                              \
+    kmin = blockIdx.x * blockDim.x + threadIdx.x,     \
+    k = kmin,                                         \
+    idx = kmin * size * size                          \
+    ;                                                 \
+  k < ((kmin < size) ? kmin + 1 : size)
+#else
+#define LIMS_KS size_t k = 0, idx = 0; k < size
+#endif
+#define _IJK_(i, j, k) i + j*size + k*size*size
+
+#define _REORDER_BODY_(...)                           \
+  for (LIMS_KS ; k++)                                 \
+    for (size_t j = 0; j < size; j++)                 \
+      for (size_t i = 0; i < size; i++, idx++) {      \
+        __VA_ARGS__                                   \
+      }
+
+#define _MAKE_REORDER_(_enum, ...)                    \
+  template <typename F>                               \
+  __MAYBE_GLOBAL__                                    \
+  void reorder(reorder_proxy< F, _enum > p,           \
+               size_t size, F* to, F* from) {         \
+    _REORDER_BODY_(__VA_ARGS__)                       \
+  }
+
+#if defined(HAVE_CUDA)
+#define GO(__TO, __FROM) cuda::sum_in_place<F>(&__TO, &__FROM);
+#else
+#define GO(__TO, __FROM) __TO += __FROM;
+#endif
+
+template <typename F, reordering_t R>
+__MAYBE_GLOBAL__
+void reorder(reorder_proxy<F, R> proxy,
+             size_t size, F* to, F* from);
+
+_MAKE_REORDER_(IJK, GO(to[idx], from[_IJK_(i, j, k)]))
+_MAKE_REORDER_(IKJ, GO(to[idx], from[_IJK_(i, k, j)]))
+_MAKE_REORDER_(JIK, GO(to[idx], from[_IJK_(j, i, k)]))
+_MAKE_REORDER_(JKI, GO(to[idx], from[_IJK_(j, k, i)]))
+_MAKE_REORDER_(KIJ, GO(to[idx], from[_IJK_(k, i, j)]))
+_MAKE_REORDER_(KJI, GO(to[idx], from[_IJK_(k, j, i)]))
+
+#undef LIMS_KS
+#undef _MAKE_REORDER_
+#undef _REORDER_BODY_
+#undef _IJK_
+#undef GO
 // [[file:~/cuda/atrip/atrip.org::*Energy][Energy:2]]
 template <typename F>
 double getEnergyDistinct
@@ -274,10 +342,7 @@ double getEnergySame
 // Energy:3 ends here
 
 // [[file:~/cuda/atrip/atrip.org::*Singles%20contribution][Singles contribution:2]]
-template <typename F>
-#ifdef HAVE_CUDA
-__global__
-#endif
+template <typename F> __MAYBE_GLOBAL__
 void singlesContribution
   ( size_t No
   , size_t Nv
@@ -295,7 +360,7 @@
   for (size_t k = 0; k < No; k++)
   for (size_t i = 0; i < No; i++)
   for (size_t j = 0; j < No; j++) {
-    const size_t ijk = i + j*No + k*No*No;
+    const size_t ijk = i + j*No + k*NoNo;
 #ifdef HAVE_CUDA
 # define GO(__TPH, __VABIJ) \
@@ -316,10 +381,7 @@
 // instantiate
-template
-#ifdef HAVE_CUDA
-__global__
-#endif
+template __MAYBE_GLOBAL__
 void singlesContribution<double>( size_t No
                                 , size_t Nv
                                 , size_t a
@@ -332,10 +394,7 @@
                                 , double* Zijk
                                 );
-template
-#ifdef HAVE_CUDA
-__global__
-#endif
+template __MAYBE_GLOBAL__
 void singlesContribution<Complex>( size_t No
                                  , size_t Nv
                                  , size_t a
@@ -380,18 +439,18 @@ __global__
   ) {
     const size_t a = abc[0], b = abc[1], c = abc[2]
-               , NoNo = No*No, NoNv = No*Nv
+               , NoNo = No*No
                ;
-    typename DataField<F>::type* Tijk = (typename DataField<F>::type*) Tijk_;
-    LOG(0, "AtripCUDA") << "in doubles " << "\n";
+    DataFieldType<F>* Tijk = (DataFieldType<F>*)Tijk_;
 
 #if defined(ATRIP_USE_DGEMM)
-#define _IJK_(i, j, k) i + j*No + k*NoNo
 #if defined(HAVE_CUDA)
-  // TODO
-#define REORDER(__II, __JJ, __KK)
-#define __TO_DEVICEPTR(_v) (_v)
+#define REORDER(__II, __JJ, __KK)                                   \
+  reorder<<<                                                        \
+    bs, ths                                                         \
+  >>>(reorder_proxy<DataFieldType<F>, __II ## __JJ ## __KK >{},     \
+      No, Tijk, _t_buffer);
 #define DGEMM_PARTICLES(__A, __B)               \
   atrip::xgemm<F>("T",                          \
                   "N",                          \
@@ -404,7 +463,7 @@ __global__
                   (DataFieldType<F>*)__B,       \
                   (int const*)&Nv,              \
                   &zero,                        \
-                  _t_buffer_p,                  \
+                  _t_buffer,                    \
                   (int const*)&NoNo);
 
 #define DGEMM_HOLES(__A, __B, __TRANSB)         \
   atrip::xgemm<F>("N",                          \
@@ -413,26 +472,24 @@ __global__
                   (int const*)&No,              \
                   (int const*)&No,              \
                   &m_one,                       \
-                  __TO_DEVICEPTR(__A),          \
+                  __A,                          \
                   (int const*)&NoNo,            \
                   (DataFieldType<F>*)__B,       \
                   (int const*)&No,              \
                   &zero,                        \
-                  _t_buffer_p,                  \
+                  _t_buffer,                    \
                   (int const*)&NoNo             \
                   );
 
 #define MAYBE_CONJ(_conj, _buffer)              \
-  cuda::maybeConjugate<<<1,1>>>((DataFieldType<F>*)_conj, (DataFieldType<F>*)_buffer, NoNoNo);
+  cuda::maybeConjugate<<<                       \
+    Atrip::kernelDimensions.ooo.blocks,         \
+    Atrip::kernelDimensions.ooo.threads         \
+  >>>((DataFieldType<F>*)_conj, (DataFieldType<F>*)_buffer, NoNoNo);
 #else
-#define REORDER(__II, __JJ, __KK)                                      \
-  WITH_CHRONO("doubles:reorder",                                       \
-    for (size_t k = 0; k < No; k++)                                    \
-    for (size_t j = 0; j < No; j++)                                    \
-    for (size_t i = 0; i < No; i++) {                                  \
-      Tijk[_IJK_(i, j, k)] += _t_buffer_p[_IJK_(__II, __JJ, __KK)];    \
-    }                                                                  \
-  )
-#define __TO_DEVICEPTR(_v) (_v)
+// NONCUDA //////////////////////////////////////////////////////////////////
+#define REORDER(__II, __JJ, __KK)                                      \
+  reorder(reorder_proxy<DataFieldType<F>, __II ## __JJ ## __KK >{},    \
+          No, Tijk, _t_buffer);
 #define DGEMM_PARTICLES(__A, __B)               \
   atrip::xgemm<F>("T",                          \
                   "N",                          \
@@ -445,7 +502,7 @@ __global__
                   __B,                          \
                   (int const*)&Nv,              \
                   &zero,                        \
-                  _t_buffer_p,                  \
+                  _t_buffer,                    \
                   (int const*)&NoNo             \
                   );
 
 #define DGEMM_HOLES(__A, __B, __TRANSB)         \
@@ -460,7 +517,7 @@ __global__
                   __B,                          \
                   (int const*)&No,              \
                   &zero,                        \
-                  _t_buffer_p,                  \
+                  _t_buffer,                    \
                   (int const*)&NoNo             \
                   );
 #define MAYBE_CONJ(_conj, _buffer)              \
@@ -469,31 +526,33 @@ __global__
 #endif
 
   F one{1.0}, m_one{-1.0}, zero{0.0};
-  DataFieldType<F> zero_h{0.0};
   const size_t NoNoNo = No*NoNo;
 #ifdef HAVE_CUDA
   DataFieldType<F>* _t_buffer;
   DataFieldType<F>* _vhhh;
-  LOG(0, "AtripCUDA") << "getting memory" << "\n";
   cuMemAlloc((CUdeviceptr*)&_t_buffer, NoNoNo * sizeof(DataFieldType<F>));
   cuMemAlloc((CUdeviceptr*)&_vhhh, NoNoNo * sizeof(DataFieldType<F>));
-  LOG(0, "AtripCUDA") << "cuda::zeroing " << "\n";
-  cuda::zeroing<<<1,1>>>((DataFieldType<F>*)_t_buffer, NoNoNo);
-  cuda::zeroing<<<1,1>>>((DataFieldType<F>*)_vhhh, NoNoNo);
+  const size_t
+    bs = Atrip::kernelDimensions.ooo.blocks,
+    ths = Atrip::kernelDimensions.ooo.threads;
+  cuda::zeroing<<<bs, ths>>>((DataFieldType<F>*)_t_buffer, NoNoNo);
+  cuda::zeroing<<<bs, ths>>>((DataFieldType<F>*)_vhhh, NoNoNo);
 #else
-  F* _t_buffer = (F*)malloc(NoNoNo * sizeof(F));
-  F* _vhhh = (F*)malloc(NoNoNo * sizeof(F));
+  DataFieldType<F>* _t_buffer = (DataFieldType<F>*)malloc(NoNoNo * sizeof(F));
+  DataFieldType<F>* _vhhh = (DataFieldType<F>*)malloc(NoNoNo * sizeof(F));
+  DataFieldType<F> zero_h{0.0};
   for (size_t i=0; i < NoNoNo; i++) {
     _t_buffer[i] = zero_h;
     _vhhh[i] = zero_h;
   }
 #endif
-  //_t_buffer.reserve(NoNoNo);
-  DataFieldType<F>* _t_buffer_p = __TO_DEVICEPTR(_t_buffer);
-  // Set Tijk to zero
 #ifdef HAVE_CUDA
-  LOG(0, "AtripCUDA") << "cuda::zeroing Tijk" << "\n";
-  cuda::zeroing<<<1,1>>>((DataFieldType<F>*)Tijk, NoNoNo);
+  WITH_CHRONO("double:reorder",
+    cuda::zeroing<<<bs, ths>>>((DataFieldType<F>*)Tijk, NoNoNo);
+    // synchronize all initializations to zero
+  )
 #else
   WITH_CHRONO("double:reorder",
     for (size_t k = 0; k < NoNoNo; k++) {
@@ -501,103 +560,89 @@ __global__
   })
 #endif
 
-  LOG(0, "AtripCUDA") << "doing holes" << "\n";
+  // HOLES
+  // TOMERGE: replace chronos
   WITH_CHRONO("doubles:holes",
-    { // Holes part %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
+    {
      // VhhhC[i + k*No + L*NoNo] * TABhh[L + j*No]; H1
-     LOG(0, "AtripCUDA") << "conj 1" << "\n";
      MAYBE_CONJ(_vhhh, VhhhC)
-     LOG(0, "AtripCUDA") << "done" << "\n";
      WITH_CHRONO("doubles:holes:1",
-       LOG(0, "AtripCUDA") << "dgemm 1" << "\n";
       DGEMM_HOLES(_vhhh, TABhh, "N")
-       LOG(0, "AtripCUDA") << "reorder 1" << "\n";
-       REORDER(i, k, j)
+       REORDER(I, K, J)
      )
      // VhhhC[j + k*No + L*NoNo] * TABhh[i + L*No]; H0
      WITH_CHRONO("doubles:holes:2",
-       LOG(0, "AtripCUDA") << "dgemm 2" << "\n";
       DGEMM_HOLES(_vhhh, TABhh, "T")
-       REORDER(j, k, i)
+       REORDER(J, K, I)
      )
      // VhhhB[i + j*No + L*NoNo] * TAChh[L + k*No]; H5
-     LOG(0, "AtripCUDA") << "conj 2" << "\n";
      MAYBE_CONJ(_vhhh, VhhhB)
-     LOG(0, "AtripCUDA") << "done" << "\n";
      WITH_CHRONO("doubles:holes:3",
       DGEMM_HOLES(_vhhh, TAChh, "N")
-       REORDER(i, j, k)
+       REORDER(I, J, K)
      )
      // VhhhB[k + j*No + L*NoNo] * TAChh[i + L*No]; H3
      WITH_CHRONO("doubles:holes:4",
       DGEMM_HOLES(_vhhh, TAChh, "T")
-       REORDER(k, j, i)
+       REORDER(K, J, I)
      )
      // VhhhA[j + i*No + L*NoNo] * TBChh[L + k*No]; H1
-     LOG(0, "AtripCUDA") << "conj 3" << "\n";
      MAYBE_CONJ(_vhhh, VhhhA)
      WITH_CHRONO("doubles:holes:5",
       DGEMM_HOLES(_vhhh, TBChh, "N")
-       REORDER(j, i, k)
+       REORDER(J, I, K)
      )
      // VhhhA[k + i*No + L*NoNo] * TBChh[j + L*No]; H4
      WITH_CHRONO("doubles:holes:6",
       DGEMM_HOLES(_vhhh, TBChh, "T")
-       REORDER(k, i, j)
+       REORDER(K, I, J)
      )
     }
   )
 #undef MAYBE_CONJ
LOG(0, "AtripCUDA") << "doing particles" << "\n"; // PARTICLES
WITH_CHRONO("doubles:particles", WITH_CHRONO("doubles:particles",
{ // Particle part %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% {
// TAphh[E + i*Nv + j*NoNv] * VBCph[E + k*Nv]; P0 // TAphh[E + i*Nv + j*NoNv] * VBCph[E + k*Nv]; P0
WITH_CHRONO("doubles:particles:1", WITH_CHRONO("doubles:particles:1",
DGEMM_PARTICLES(TAphh, VBCph) DGEMM_PARTICLES(TAphh, VBCph)
REORDER(i, j, k) REORDER(I, J, K)
) )
// TAphh[E + i*Nv + k*NoNv] * VCBph[E + j*Nv]; P3 // TAphh[E + i*Nv + k*NoNv] * VCBph[E + j*Nv]; P3
WITH_CHRONO("doubles:particles:2", WITH_CHRONO("doubles:particles:2",
DGEMM_PARTICLES(TAphh, VCBph) DGEMM_PARTICLES(TAphh, VCBph)
REORDER(i, k, j) REORDER(I, K, J)
) )
// TCphh[E + k*Nv + i*NoNv] * VABph[E + j*Nv]; P5 // TCphh[E + k*Nv + i*NoNv] * VABph[E + j*Nv]; P5
WITH_CHRONO("doubles:particles:3", WITH_CHRONO("doubles:particles:3",
DGEMM_PARTICLES(TCphh, VABph) DGEMM_PARTICLES(TCphh, VABph)
REORDER(k, i, j) REORDER(K, I, J)
) )
// TCphh[E + k*Nv + j*NoNv] * VBAph[E + i*Nv]; P2 // TCphh[E + k*Nv + j*NoNv] * VBAph[E + i*Nv]; P2
WITH_CHRONO("doubles:particles:4", WITH_CHRONO("doubles:particles:4",
DGEMM_PARTICLES(TCphh, VBAph) DGEMM_PARTICLES(TCphh, VBAph)
REORDER(k, j, i) REORDER(K, J, I)
) )
// TBphh[E + j*Nv + i*NoNv] * VACph[E + k*Nv]; P1 // TBphh[E + j*Nv + i*NoNv] * VACph[E + k*Nv]; P1
WITH_CHRONO("doubles:particles:5", WITH_CHRONO("doubles:particles:5",
DGEMM_PARTICLES(TBphh, VACph) DGEMM_PARTICLES(TBphh, VACph)
REORDER(j, i, k) REORDER(J, I, K)
) )
// TBphh[E + j*Nv + k*NoNv] * VCAph[E + i*Nv]; P4 // TBphh[E + j*Nv + k*NoNv] * VCAph[E + i*Nv]; P4
WITH_CHRONO("doubles:particles:6", WITH_CHRONO("doubles:particles:6",
DGEMM_PARTICLES(TBphh, VCAph) DGEMM_PARTICLES(TBphh, VCAph)
REORDER(j, k, i) REORDER(J, K, I)
) )
} }
) )
LOG(0, "AtripCUDA") << "particles done" << "\n";
{ // free resources { // free resources
#ifdef HAVE_CUDA #ifdef HAVE_CUDA
LOG(0, "AtripCUDA") << "free mem" << "\n"; cuCtxSynchronize();
cuMemFree((CUdeviceptr)_vhhh); cuMemFree((CUdeviceptr)_vhhh);
cuMemFree((CUdeviceptr)_t_buffer); cuMemFree((CUdeviceptr)_t_buffer);
LOG(0, "AtripCUDA") << "free mem done" << "\n";
#else #else
free(_vhhh); free(_vhhh);
free(_t_buffer); free(_t_buffer);
@ -607,8 +652,8 @@ __global__
#undef REORDER #undef REORDER
#undef DGEMM_HOLES #undef DGEMM_HOLES
#undef DGEMM_PARTICLES #undef DGEMM_PARTICLES
#undef _IJK_
#else #else
const size_t NoNv = No*Nv;
for (size_t k = 0; k < No; k++) for (size_t k = 0; k < No; k++)
for (size_t j = 0; j < No; j++) for (size_t j = 0; j < No; j++)
for (size_t i = 0; i < No; i++){ for (size_t i = 0; i < No; i++){