Compare commits

...

4 Commits

Author           SHA1        Message                                      Date
Gallo Alejandro  7241bbe9fb  Implement reordering on the GPU              2022-08-12 18:32:32 +02:00
Gallo Alejandro  c2e9e930ba  Update main Atrip.cxx using several gpus     2022-08-12 18:30:55 +02:00
Gallo Alejandro  b4aef4db9e  Fix compilation issues and add KernelSizes   2022-08-12 18:29:21 +02:00
Gallo Alejandro  4651231d3b  Update test bench for CUDA                   2022-08-12 18:28:20 +02:00
7 changed files with 295 additions and 170 deletions

View File

@@ -45,6 +45,16 @@ int main(int argc, char** argv) {
checkpoint_percentage,
"Percentage for checkpoints");
#if defined(HAVE_CUDA)
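// Launch geometry for CUDA kernels that iterate over No*No*No ("ooo") sized buffers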
size_t ooo_threads = 0, ooo_blocks = 0;
app.add_option("--ooo-blocks",
ooo_blocks,
"CUDA: Number of blocks per block for kernels going through ooo tensors");
app.add_option("--ooo-threads",
ooo_threads,
"CUDA: Number of threads per block for kernels going through ooo tensors");
#endif
CLI11_PARSE(app, argc, argv);
CTF::World world(argc, argv);
@@ -154,15 +164,24 @@ int main(int argc, char** argv) {
.with_checkpointAtPercentage(checkpoint_percentage)
.with_checkpointPath(checkpoint_path)
.with_readCheckpointIfExists(!noCheckpoint)
#if defined(HAVE_CUDA)
.with_oooThreads(ooo_threads)
.with_oooBlocks(ooo_blocks)
#endif
;
auto out = atrip::Atrip::run(in);
try {
auto out = atrip::Atrip::run(in);
if (atrip::Atrip::rank == 0)
std::cout << "Energy: " << out.energy << std::endl;
} catch (const char* msg) {
if (atrip::Atrip::rank == 0)
std::cout << "Atrip throwed with msg:\n\t\t " << msg << "\n";
}
if (!in.deleteVppph)
delete Vppph;
if (atrip::Atrip::rank == 0)
std::cout << "Energy: " << out.energy << std::endl;
MPI_Finalize();
return 0;

View File

@@ -51,6 +51,9 @@ namespace atrip {
cublasHandle_t handle;
};
static CudaContext cuda;
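// Per-kernel launch geometry; ooo holds the block/thread counts for kernels over No*No*No buffers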
static struct KernelDimensions {
struct {size_t blocks, threads;} ooo;
} kernelDimensions;
#endif
static void init(MPI_Comm);
@@ -92,6 +95,10 @@ namespace atrip {
ADD_ATTRIBUTE(bool, writeCheckpoint, true)
ADD_ATTRIBUTE(float, checkpointAtPercentage, 10)
ADD_ATTRIBUTE(size_t, checkpointAtEveryIteration, 0)
#if defined(HAVE_CUDA)
ADD_ATTRIBUTE(size_t, oooThreads, 0)
ADD_ATTRIBUTE(size_t, oooBlocks, 0)
#endif
};

View File

@@ -378,12 +378,16 @@ template <typename F=double>
LOG(0,"Atrip") << "INIT SliceUnion: " << name << "\n";
for (auto& ptr: sliceBuffers)
for (auto& ptr: sliceBuffers) {
#if defined(HAVE_CUDA)
cuMemAlloc(&ptr, sizeof(F) * sources[0].size());
if (ptr == 0UL) {
throw "UNSUFICCIENT MEMORY ON THE GRAPHIC CARD FOR FREE POINTERS";
}
#else
ptr = (DataPtr<F>)malloc(sizeof(F) * sources[0].size());
#endif
}
slices
= std::vector<Slice<F>>(2 * sliceTypes.size(), { sources[0].size() });
@@ -396,24 +400,12 @@ template <typename F=double>
LOG(1,"Atrip") << "rankMap.nSources "
<< rankMap.nSources() << "\n";
LOG(1,"Atrip") << "#slices "
<< slices.size() << "\n";
LOG(1,"Atrip") << "#slices[0] "
<< slices[0].size << "\n";
LOG(1,"Atrip") << "#sources "
<< sources.size() << "\n";
LOG(1,"Atrip") << "#sources[0] "
<< sources[0].size() << "\n";
LOG(1,"Atrip") << "#freePointers "
<< freePointers.size() << "\n";
LOG(1,"Atrip") << "#sliceBuffers "
<< sliceBuffers.size() << "\n";
LOG(1,"Atrip") << "#sliceLength "
<< sliceLength.size() << "\n";
LOG(1,"Atrip") << "#paramLength "
<< paramLength.size() << "\n";
LOG(1,"Atrip") << "#slices " << slices.size() << "\n";
WITH_RANK << "#slices[0] " << slices[0].size << "\n";
LOG(1,"Atrip") << "#sources " << sources.size() << "\n";
WITH_RANK << "#sources[0] " << sources[0].size() << "\n";
WITH_RANK << "#freePointers " << freePointers.size() << "\n";
LOG(1,"Atrip") << "#sliceBuffers " << sliceBuffers.size() << "\n";
LOG(1,"Atrip") << "GB*" << np << " "
<< double(sources.size() + sliceBuffers.size())
* sources[0].size()
@@ -434,7 +426,8 @@ template <typename F=double>
__sliceLength.data(),
syms.data(),
w);
LOG(1,"Atrip") << "slicing... \n";
WITH_OCD WITH_RANK << "slicing... \n";
// setUp sources
for (size_t it(0); it < rankMap.nSources(); ++it) {

View File

@@ -19,13 +19,27 @@
#include <map>
#include <chrono>
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wvla"
#pragma GCC diagnostic ignored "-Wint-in-bool-context"
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wdeprecated-copy"
#include <ctf.hpp>
#pragma GCC diagnostic pop
#if defined(__NVCC__)
# pragma nv_diagnostic_push
# if defined __NVCC_DIAG_PRAGMA_SUPPORT__
// http://www.ssl.berkeley.edu/~jimm/grizzly_docs/SSL/opt/intel/cc/9.0/lib/locale/en_US/mcpcom.msg
# pragma nv_diag_suppress partial_override
# else
# pragma diag_suppress partial_override
# endif
# include <ctf.hpp>
# pragma nv_diagnostic_pop
#else
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wvla"
# pragma GCC diagnostic ignored "-Wnonnull"
# pragma GCC diagnostic ignored "-Wall"
# pragma GCC diagnostic ignored "-Wint-in-bool-context"
# pragma GCC diagnostic ignored "-Wunused-parameter"
# pragma GCC diagnostic ignored "-Wdeprecated-copy"
# include <ctf.hpp>
# pragma GCC diagnostic pop
#endif
#include <atrip/Debug.hpp>

View File

@@ -24,13 +24,7 @@
using namespace atrip;
#if defined(HAVE_CUDA)
namespace atrip {
namespace cuda {
};
};
#include <cuda.h>
#endif
template <typename F> bool RankMap<F>::RANK_ROUND_ROBIN;
@@ -40,6 +34,7 @@ size_t Atrip::rank;
size_t Atrip::np;
#if defined(HAVE_CUDA)
typename Atrip::CudaContext Atrip::cuda;
typename Atrip::KernelDimensions Atrip::kernelDimensions;
#endif
MPI_Comm Atrip::communicator;
Timings Atrip::chrono;
@@ -74,20 +69,99 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
LOG(0,"Atrip") << "Nv: " << Nv << "\n";
LOG(0,"Atrip") << "np: " << np << "\n";
#if defined(HAVE_CUDA)
int ngcards;
cuDeviceGetCount(&ngcards);
LOG(0,"Atrip") << "ngcards: " << ngcards << "\n";
if (np > ngcards) {
std::cerr << "ATRIP: You are running on more ranks than the number of graphic cards\n"
<< "You have " << ngcards << " cards at your disposal\n";
throw "";
}
if (np < ngcards) {
std::cerr << "You have " << ngcards << " cards at your disposal\n"
<< "You will be only using " << np << ", i.e., the nubmer of ranks.\n";
}
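// Bind every rank to the CUDA device with the same index and print its
// properties, one rank at a time (serialized by the barrier below).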
for (size_t _rank = 0; _rank < np; _rank++) {
if (rank == _rank) {
CUcontext ctx;
CUdevice dev;
CUdevprop prop;
struct { struct { size_t free, total; } avail; size_t total; } memory;
char *name = (char*)malloc(256);
// set current device
cuDeviceGet(&dev, rank);
cuCtxCreate(&ctx, 0, dev);
cuCtxSetCurrent(ctx);
// get information of the device
cuDeviceGetProperties(&prop, dev);
cuMemGetInfo(&memory.avail.free, &memory.avail.total);
cuDeviceGetName(name, 256, dev);
cuDeviceTotalMem(&memory.total, dev);
printf("\n"
"CUDA CARD RANK %d\n"
"=================\n"
"\tnumber: %1$d\n"
"\tname: %s\n"
"\tMem. clock rate (KHz): %d\n"
"\tShared Mem Per Block (KB): %f\n"
"\tAvail. Free/Total mem (GB): %f/%f\n"
"\tFree memory (GB): %f\n"
"\n",
Atrip::rank,
name,
prop.clockRate,
prop.sharedMemPerBlock / 1024.0,
memory.avail.free / 1024.0 / 1024.0 / 1024.0 ,
memory.avail.total / 1024.0 / 1024.0 / 1024.0,
memory.total / 1024.0 / 1024.0 / 1024.0
);
std::free((void*)name);
}
MPI_Barrier(universe);
}
if (in.oooThreads > 0) {
Atrip::kernelDimensions.ooo.threads = in.oooThreads;
}
if (in.oooBlocks > 0) {
Atrip::kernelDimensions.ooo.blocks = in.oooBlocks;
}
if (Atrip::kernelDimensions.ooo.threads == 0 ||
Atrip::kernelDimensions.ooo.blocks == 0) {
// default: 32 threads per block and enough blocks for one thread per k index (k < No)
Atrip::kernelDimensions.ooo.blocks = (No + 32 - 1) / 32;
Atrip::kernelDimensions.ooo.threads = 32;
}
LOG(0,"Atrip") << "ooo blocks: "
<< Atrip::kernelDimensions.ooo.blocks << "\n";
LOG(0,"Atrip") << "ooo threads per block: "
<< Atrip::kernelDimensions.ooo.threads << "\n";
#endif
// allocate the three scratches, see piecuch
// we need local copies of the following tensors on every
// rank
std::vector<F> _epsi(No)
, _epsa(Nv)
, _Tai(No * Nv)
;
std::vector<F> _epsi(No), _epsa(Nv), _Tai(No * Nv);
// copy the data from the tensors into the vectors
in.ei->read_all(_epsi.data());
in.ea->read_all(_epsa.data());
in.Tph->read_all(_Tai.data());
//TODO: free memory pointers at the end of the algorithm
DataPtr<F> Tijk, Zijk;
#if defined(HAVE_CUDA)
DataPtr<F> Tai, epsi, epsa;
//TODO: free memory pointers at the end of the algorithm
cuMemAlloc(&Tai, sizeof(F) * _Tai.size());
cuMemAlloc(&epsi, sizeof(F) * _epsi.size());
cuMemAlloc(&epsa, sizeof(F) * _epsa.size());
@@ -96,13 +170,12 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
cuMemcpyHtoD(epsi,(void*)_epsi.data(), sizeof(F) * _epsi.size());
cuMemcpyHtoD(epsa, (void*)_epsa.data(), sizeof(F) * _epsa.size());
DataPtr<F> Tijk, Zijk;
//TODO: free memory
cuMemAlloc(&Tijk, sizeof(F) * No * No * No);
cuMemAlloc(&Zijk, sizeof(F) * No * No * No);
#else
std::vector<F> &Tai = _Tai, &epsi = _epsi, &epsa = _epsa;
std::vector<F> Tijk(No*No*No), Zijk(No*No*No);
Zijk = (DataFieldType<F>*)malloc(No*No*No * sizeof(DataFieldType<F>));
Tijk = (DataFieldType<F>*)malloc(No*No*No * sizeof(DataFieldType<F>));
#endif
RankMap<F>::RANK_ROUND_ROBIN = in.rankRoundRobin;
@@ -135,7 +208,7 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
// BUILD SLICES PARAMETRIZED BY NV x NV =============================={{{1
WITH_CHRONO("nv-nv-slices",
LOG(0,"Atrip") << "BUILD NV x NV-SLICES\n";
LOG(0,"Atrip") << "building NV x NV slices\n";
ABPH<F> abph(*in.Vppph, (size_t)No, (size_t)Nv, (size_t)np, child_comm, universe);
ABHH<F> abhh(*in.Vpphh, (size_t)No, (size_t)Nv, (size_t)np, child_comm, universe);
TABHH<F> tabhh(*in.Tpphh, (size_t)No, (size_t)Nv, (size_t)np, child_comm, universe);
@@ -148,7 +221,7 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
// BUILD SLICES PARAMETRIZED BY NV ==================================={{{1
WITH_CHRONO("nv-slices",
LOG(0,"Atrip") << "BUILD NV-SLICES\n";
LOG(0,"Atrip") << "building NV slices\n";
TAPHH<F> taphh(*in.Tpphh, (size_t)No, (size_t)Nv, (size_t)np, child_comm, universe);
HHHA<F> hhha(*in.Vhhhp, (size_t)No, (size_t)Nv, (size_t)np, child_comm, universe);
)
@@ -373,9 +446,6 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
}
}
LOG(0, "AtripCUDA") << "Starting iterations\n";
for ( size_t
i = first_iteration,
iteration = first_iteration + 1
@@ -384,8 +454,6 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
) {
Atrip::chrono["iterations"].start();
LOG(0, "AtripCUDA") << "iteration " << i << "\n";
// check overhead from chrono over all iterations
WITH_CHRONO("start:stop", {})
@@ -397,8 +465,8 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
// write checkpoints
// TODO: ENABLE THIS
if (iteration % checkpoint_mod == 0 && false) {
LOG(0, "AtripCUDA") << "checkpoints \n";
double globalEnergy = 0;
MPI_Reduce(&energy, &globalEnergy, 1, MPI_DOUBLE, MPI_SUM, 0, universe);
Checkpoint out
@@ -410,10 +478,9 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
iteration - 1,
in.rankRoundRobin};
LOG(0, "Atrip") << "Writing checkpoint\n";
//if (Atrip::rank == 0) write_checkpoint(out, in.checkpointPath);
if (Atrip::rank == 0) write_checkpoint(out, in.checkpointPath);
}
LOG(0, "AtripCUDA") << "reporting \n";
// write reporting
if (iteration % iterationMod == 0 || iteration == iteration1Percent) {
@@ -467,32 +534,20 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
<< "\n";
)
LOG(0, "AtripCUDA") << "first database " << i << "\n";
// COMM FIRST DATABASE ================================================{{{1
if (i == first_iteration) {
LOG(0, "AtripCUDA") << "first database " << i << "\n";
WITH_RANK << "__first__:first database ............ \n";
const auto db = communicateDatabase(abc, universe);
LOG(0, "AtripCUDA") << "first database communicated" << i << "\n";
WITH_RANK << "__first__:first database communicated \n";
WITH_RANK << "__first__:first database io phase \n";
LOG(0, "AtripCUDA") << "doing io " << i << "\n";
doIOPhase(db);
LOG(0, "AtripCUDA") << "io done " << i << "\n";
WITH_RANK << "__first__:first database io phase DONE\n";
WITH_RANK << "__first__::::Unwrapping all slices for first database\n";
LOG(0, "AtripCUDA") << "unrwapping " << i << "\n";
for (auto& u: unions) u->unwrapAll(abc);
LOG(0, "AtripCUDA") << "unwrapped " << i << "\n";
WITH_RANK << "__first__::::Unwrapping slices for first database DONE\n";
LOG(0, "AtripCUDA") << "barrier " << i << "\n";
MPI_Barrier(universe);
LOG(0, "AtripCUDA") << "barriered " << i << "\n";
}
LOG(0, "AtripCUDA") << "next database" << i << "\n";
// COMM NEXT DATABASE ================================================={{{1
if (abcNext) {
WITH_RANK << "__comm__:" << iteration << "th communicating database\n";
@@ -508,9 +563,6 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
// COMPUTE DOUBLES ===================================================={{{1
OCD_Barrier(universe);
if (!isFakeTuple(i)) {
LOG(0, "AtripCUDA") << "computing doubles " << i << "\n";
WITH_RANK << iteration << "-th doubles\n";
WITH_CHRONO("oneshot-unwrap",
WITH_CHRONO("unwrap",
@@ -542,11 +594,7 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
, tabhh.unwrapSlice(Slice<F>::AC, abc)
, tabhh.unwrapSlice(Slice<F>::BC, abc)
// -- TIJK
#if defined(HAVE_CUDA)
, (DataFieldType<F>*)Tijk
#else
, Tijk.data()
#endif
);
WITH_RANK << iteration << "-th doubles done\n";
))
@@ -563,16 +611,10 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
WITH_CHRONO("reorder",
int ooo = No*No*No, stride = 1;
atrip::xcopy<F>(&ooo,
#if defined(HAVE_CUDA)
(DataFieldType<F>*)Tijk, &stride,
(DataFieldType<F>*)Zijk, &stride);
#else
(DataFieldType<F>*)Tijk.data(), &stride,
(DataFieldType<F>*)Zijk.data(), &stride);
#endif
)
WITH_CHRONO("singles",
LOG(0, "AtripCUDA") << "doing singles" << i << "\n";
#if defined(HAVE_CUDA)
singlesContribution<F><<<1,1>>>( No, Nv, abc[0], abc[1], abc[2]
, (DataFieldType<F>*)Tai
@@ -583,13 +625,8 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
, (DataFieldType<F>*)abhh.unwrapSlice(Slice<F>::AB, abc)
, (DataFieldType<F>*)abhh.unwrapSlice(Slice<F>::AC, abc)
, (DataFieldType<F>*)abhh.unwrapSlice(Slice<F>::BC, abc)
#if defined(HAVE_CUDA)
, (DataFieldType<F>*)Zijk);
#else
, Zijk.data());
#endif
)
LOG(0, "AtripCUDA") << "singles done" << i << "\n";
}
@@ -602,7 +639,7 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
if (abc[1] == abc[2]) distinct--;
const F epsabc(_epsa[abc[0]] + _epsa[abc[1]] + _epsa[abc[2]]);
LOG(0, "AtripCUDA") << "doing energy " << i << "distinct " << distinct << "\n";
// LOG(0, "AtripCUDA") << "doing energy " << i << "distinct " << distinct << "\n";
WITH_CHRONO("energy",
/*
TODO: think about how to do this on the GPU in the best way possible
@@ -686,6 +723,17 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
}
// END OF MAIN LOOP
#if defined(HAVE_CUDA)
cuMemFree(Tai);
cuMemFree(epsi);
cuMemFree(epsa);
cuMemFree(Tijk);
cuMemFree(Zijk);
#else
std::free(Zijk);
std::free(Tijk);
#endif
MPI_Barrier(universe);
// PRINT TUPLES %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%{{{1

View File

@@ -80,7 +80,6 @@ namespace atrip {
typename DataField<Complex>::type *C,
const int *ldc) {
#if defined(HAVE_CUDA)
#pragma warning HAVE_CUDA
cuDoubleComplex
cu_alpha = {std::real(*alpha), std::imag(*alpha)},
cu_beta = {std::real(*beta), std::imag(*beta)};

View File

@@ -14,6 +14,7 @@
// [[file:~/cuda/atrip/atrip.org::*Prolog][Prolog:2]]
#include<atrip/Equations.hpp>
#include<atrip/CUDA.hpp>
#if defined(HAVE_CUDA)
#include <cuda.h>
@@ -22,6 +23,23 @@
namespace atrip {
// Prolog:2 ends here
// These are just helper structures
// used to template the reorder
// function
enum reordering_t
{
IJK,
IKJ,
JIK,
JKI,
KIJ,
KJI
};
template <typename F, reordering_t R>
struct reorder_proxy {};
#ifdef HAVE_CUDA
namespace cuda {
@@ -110,11 +128,61 @@ namespace cuda {
return lz;
}
};
#endif
#if defined(HAVE_CUDA)
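// On the device each thread handles a single k index (kmin < size) and
// writes the kmin-th size*size slab of the output; the host fallback below
// loops over all k serially.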
#define LIMS_KS \
size_t \
kmin = blockIdx.x * blockDim.x + threadIdx.x, \
k = kmin, \
idx = kmin * size * size \
; \
k < ((kmin < size) ? kmin + 1 : size)
#else
#define LIMS_KS size_t k=0, idx=0; k < size
#endif
#define _IJK_(i, j, k) i + j*size + k*size*size
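// _REORDER_BODY_ accumulates a permuted copy of "from" into "to";
// _MAKE_REORDER_ stamps out one reorder() body per reordering_t value.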
#define _REORDER_BODY_(...) \
for (LIMS_KS ; k++) \
for (size_t j = 0; j < size; j++) \
for (size_t i = 0; i < size; i++, idx++) { \
__VA_ARGS__ \
}
#define _MAKE_REORDER_(_enum, ...) \
template <typename F> \
__MAYBE_GLOBAL__ \
void reorder(reorder_proxy< F, _enum > p, \
size_t size, F* to, F* from) { \
_REORDER_BODY_(__VA_ARGS__) \
}
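// GO(to, from) accumulates "from" into "to"; the CUDA build dispatches to cuda::sum_in_place<F>.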
#if defined(HAVE_CUDA)
#define GO(__TO, __FROM) cuda::sum_in_place<F>(&__TO, &__FROM);
#else
#define GO(__TO, __FROM) __TO += __FROM;
#endif
template <typename F, reordering_t R>
__MAYBE_GLOBAL__
void reorder(reorder_proxy<F, R> proxy,
size_t size, F* to, F* from);
_MAKE_REORDER_(IJK, GO(to[idx], from[_IJK_(i, j, k)]))
_MAKE_REORDER_(IKJ, GO(to[idx], from[_IJK_(i, k, j)]))
_MAKE_REORDER_(JIK, GO(to[idx], from[_IJK_(j, i, k)]))
_MAKE_REORDER_(JKI, GO(to[idx], from[_IJK_(j, k, i)]))
_MAKE_REORDER_(KIJ, GO(to[idx], from[_IJK_(k, i, j)]))
_MAKE_REORDER_(KJI, GO(to[idx], from[_IJK_(k, j, i)]))
#undef LIMS_KS
#undef _MAKE_REORDER_
#undef _REORDER_BODY_
#undef _IJK_
#undef GO
// [[file:~/cuda/atrip/atrip.org::*Energy][Energy:2]]
template <typename F>
double getEnergyDistinct
@@ -274,10 +342,7 @@ double getEnergySame
// Energy:3 ends here
// [[file:~/cuda/atrip/atrip.org::*Singles%20contribution][Singles contribution:2]]
template <typename F>
#ifdef HAVE_CUDA
__global__
#endif
template <typename F> __MAYBE_GLOBAL__
void singlesContribution
( size_t No
, size_t Nv
@@ -295,7 +360,7 @@ __global__
for (size_t k = 0; k < No; k++)
for (size_t i = 0; i < No; i++)
for (size_t j = 0; j < No; j++) {
const size_t ijk = i + j*No + k*No*No;
const size_t ijk = i + j*No + k*NoNo;
#ifdef HAVE_CUDA
# define GO(__TPH, __VABIJ) \
@@ -316,10 +381,7 @@ __global__
// instantiate
template
#ifdef HAVE_CUDA
__global__
#endif
template __MAYBE_GLOBAL__
void singlesContribution<double>( size_t No
, size_t Nv
, size_t a
@@ -332,10 +394,7 @@ __global__
, double* Zijk
);
template
#ifdef HAVE_CUDA
__global__
#endif
template __MAYBE_GLOBAL__
void singlesContribution<Complex>( size_t No
, size_t Nv
, size_t a
@@ -380,18 +439,18 @@ __global__
) {
const size_t a = abc[0], b = abc[1], c = abc[2]
, NoNo = No*No, NoNv = No*Nv
, NoNo = No*No
;
typename DataField<F>::type* Tijk = (typename DataField<F>::type*) Tijk_;
LOG(0, "AtripCUDA") << "in doubles " << "\n";
DataFieldType<F>* Tijk = (DataFieldType<F>*)Tijk_;
#if defined(ATRIP_USE_DGEMM)
#define _IJK_(i, j, k) i + j*No + k*NoNo
#if defined(HAVE_CUDA)
// TODO
#define REORDER(__II, __JJ, __KK)
#define __TO_DEVICEPTR(_v) (_v)
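// REORDER launches the reorder kernel on the ooo grid (bs blocks, ths threads)
// to accumulate the gemm result held in _t_buffer into Tijk in the permuted order.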
#define REORDER(__II, __JJ, __KK) \
reorder<<< \
bs, ths \
>>>(reorder_proxy<DataFieldType<F>, __II ## __JJ ## __KK >{}, \
No, Tijk, _t_buffer);
#define DGEMM_PARTICLES(__A, __B) \
atrip::xgemm<F>("T", \
"N", \
@@ -403,9 +462,9 @@ __global__
(int const*)&Nv, \
(DataFieldType<F>*)__B, \
(int const*)&Nv, \
&zero, \
_t_buffer_p, \
(int const*)&NoNo);
&zero, \
_t_buffer, \
(int const*)&NoNo);
#define DGEMM_HOLES(__A, __B, __TRANSB) \
atrip::xgemm<F>("N", \
__TRANSB, \
@@ -413,26 +472,24 @@ __global__
(int const*)&No, \
(int const*)&No, \
&m_one, \
__TO_DEVICEPTR(__A), \
__A, \
(int const*)&NoNo, \
(DataFieldType<F>*)__B, \
(int const*)&No, \
&zero, \
_t_buffer_p, \
_t_buffer, \
(int const*)&NoNo \
);
);
#define MAYBE_CONJ(_conj, _buffer) \
cuda::maybeConjugate<<<1,1>>>((DataFieldType<F>*)_conj, (DataFieldType<F>*)_buffer, NoNoNo);
cuda::maybeConjugate<<< \
Atrip::kernelDimensions.ooo.blocks, \
Atrip::kernelDimensions.ooo.threads \
>>>((DataFieldType<F>*)_conj, (DataFieldType<F>*)_buffer, NoNoNo);
#else
#define REORDER(__II, __JJ, __KK) \
WITH_CHRONO("doubles:reorder", \
for (size_t k = 0; k < No; k++) \
for (size_t j = 0; j < No; j++) \
for (size_t i = 0; i < No; i++) { \
Tijk[_IJK_(i, j, k)] += _t_buffer_p[_IJK_(__II, __JJ, __KK)]; \
} \
)
#define __TO_DEVICEPTR(_v) (_v)
// NONCUDA //////////////////////////////////////////////////////////////////////
#define REORDER(__II, __JJ, __KK) \
reorder(reorder_proxy<DataFieldType<F>, __II ## __JJ ## __KK >{}, \
No, Tijk, _t_buffer);
#define DGEMM_PARTICLES(__A, __B) \
atrip::xgemm<F>("T", \
"N", \
@@ -445,7 +502,7 @@ __global__
__B, \
(int const*)&Nv, \
&zero, \
_t_buffer_p, \
_t_buffer, \
(int const*)&NoNo \
);
#define DGEMM_HOLES(__A, __B, __TRANSB) \
@@ -460,8 +517,8 @@ __global__
__B, \
(int const*)&No, \
&zero, \
_t_buffer_p, \
(int const*)&NoNo \
_t_buffer, \
(int const*)&NoNo \
);
#define MAYBE_CONJ(_conj, _buffer) \
for (size_t __i = 0; __i < NoNoNo; ++__i) \
@@ -469,31 +526,33 @@ __global__
#endif
F one{1.0}, m_one{-1.0}, zero{0.0};
DataFieldType<F> zero_h{0.0};
const size_t NoNoNo = No*NoNo;
#ifdef HAVE_CUDA
DataFieldType<F>* _t_buffer;
DataFieldType<F>* _vhhh;
LOG(0, "AtripCUDA") << "getting memory" << "\n";
cuMemAlloc((CUdeviceptr*)&_t_buffer, NoNoNo * sizeof(DataFieldType<F>));
cuMemAlloc((CUdeviceptr*)&_vhhh, NoNoNo * sizeof(DataFieldType<F>));
LOG(0, "AtripCUDA") << "cuda::zeroing " << "\n";
cuda::zeroing<<<1,1>>>((DataFieldType<F>*)_t_buffer, NoNoNo);
cuda::zeroing<<<1,1>>>((DataFieldType<F>*)_vhhh, NoNoNo);
const size_t
bs = Atrip::kernelDimensions.ooo.blocks,
ths = Atrip::kernelDimensions.ooo.threads;
cuda::zeroing<<<bs, ths>>>((DataFieldType<F>*)_t_buffer, NoNoNo);
cuda::zeroing<<<bs, ths>>>((DataFieldType<F>*)_vhhh, NoNoNo);
#else
F* _t_buffer = (F*)malloc(NoNoNo * sizeof(F));
F* _vhhh = (F*)malloc(NoNoNo * sizeof(F));
DataFieldType<F>* _t_buffer = (DataFieldType<F>*)malloc(NoNoNo * sizeof(F));
DataFieldType<F>* _vhhh = (DataFieldType<F>*)malloc(NoNoNo * sizeof(F));
DataFieldType<F> zero_h{0.0};
for (size_t i=0; i < NoNoNo; i++) {
_t_buffer[i] = zero_h;
_vhhh[i] = zero_h;
}
#endif
//_t_buffer.reserve(NoNoNo);
DataFieldType<F>* _t_buffer_p = __TO_DEVICEPTR(_t_buffer);
// Set Tijk to zero
#ifdef HAVE_CUDA
LOG(0, "AtripCUDA") << "cuda::zeroing Tijk" << "\n";
cuda::zeroing<<<1,1>>>((DataFieldType<F>*)Tijk, NoNoNo);
WITH_CHRONO("double:reorder",
cuda::zeroing<<<bs, ths>>>((DataFieldType<F>*)Tijk, NoNoNo);
// synchronize all initializations to zero
)
#else
WITH_CHRONO("double:reorder",
for (size_t k = 0; k < NoNoNo; k++) {
@@ -501,103 +560,89 @@ __global__
})
#endif
LOG(0, "AtripCUDA") << "doing holes" << "\n";
// TOMERGE: replace chronos
// HOLES
WITH_CHRONO("doubles:holes",
{ // Holes part %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
{
// VhhhC[i + k*No + L*NoNo] * TABhh[L + j*No]; H1
LOG(0, "AtripCUDA") << "conj 1" << "\n";
MAYBE_CONJ(_vhhh, VhhhC)
LOG(0, "AtripCUDA") << "done" << "\n";
WITH_CHRONO("doubles:holes:1",
LOG(0, "AtripCUDA") << "dgemm 1" << "\n";
DGEMM_HOLES(_vhhh, TABhh, "N")
LOG(0, "AtripCUDA") << "reorder 1" << "\n";
REORDER(i, k, j)
REORDER(I, K, J)
)
// VhhhC[j + k*No + L*NoNo] * TABhh[i + L*No]; H0
WITH_CHRONO("doubles:holes:2",
LOG(0, "AtripCUDA") << "dgemm 2" << "\n";
DGEMM_HOLES(_vhhh, TABhh, "T")
REORDER(j, k, i)
REORDER(J, K, I)
)
// VhhhB[i + j*No + L*NoNo] * TAChh[L + k*No]; H5
LOG(0, "AtripCUDA") << "conj 2" << "\n";
MAYBE_CONJ(_vhhh, VhhhB)
LOG(0, "AtripCUDA") << "done" << "\n";
WITH_CHRONO("doubles:holes:3",
DGEMM_HOLES(_vhhh, TAChh, "N")
REORDER(i, j, k)
REORDER(I, J, K)
)
// VhhhB[k + j*No + L*NoNo] * TAChh[i + L*No]; H3
WITH_CHRONO("doubles:holes:4",
DGEMM_HOLES(_vhhh, TAChh, "T")
REORDER(k, j, i)
REORDER(K, J, I)
)
// VhhhA[j + i*No + L*NoNo] * TBChh[L + k*No]; H1
LOG(0, "AtripCUDA") << "conj 3" << "\n";
MAYBE_CONJ(_vhhh, VhhhA)
WITH_CHRONO("doubles:holes:5",
DGEMM_HOLES(_vhhh, TBChh, "N")
REORDER(j, i, k)
REORDER(J, I, K)
)
// VhhhA[k + i*No + L*NoNo] * TBChh[j + L*No]; H4
WITH_CHRONO("doubles:holes:6",
DGEMM_HOLES(_vhhh, TBChh, "T")
REORDER(k, i, j)
REORDER(K, I, J)
)
}
)
#undef MAYBE_CONJ
LOG(0, "AtripCUDA") << "doing particles" << "\n";
// PARTICLES
WITH_CHRONO("doubles:particles",
{ // Particle part %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
{
// TAphh[E + i*Nv + j*NoNv] * VBCph[E + k*Nv]; P0
WITH_CHRONO("doubles:particles:1",
DGEMM_PARTICLES(TAphh, VBCph)
REORDER(i, j, k)
REORDER(I, J, K)
)
// TAphh[E + i*Nv + k*NoNv] * VCBph[E + j*Nv]; P3
WITH_CHRONO("doubles:particles:2",
DGEMM_PARTICLES(TAphh, VCBph)
REORDER(i, k, j)
REORDER(I, K, J)
)
// TCphh[E + k*Nv + i*NoNv] * VABph[E + j*Nv]; P5
WITH_CHRONO("doubles:particles:3",
DGEMM_PARTICLES(TCphh, VABph)
REORDER(k, i, j)
REORDER(K, I, J)
)
// TCphh[E + k*Nv + j*NoNv] * VBAph[E + i*Nv]; P2
WITH_CHRONO("doubles:particles:4",
DGEMM_PARTICLES(TCphh, VBAph)
REORDER(k, j, i)
REORDER(K, J, I)
)
// TBphh[E + j*Nv + i*NoNv] * VACph[E + k*Nv]; P1
WITH_CHRONO("doubles:particles:5",
DGEMM_PARTICLES(TBphh, VACph)
REORDER(j, i, k)
REORDER(J, I, K)
)
// TBphh[E + j*Nv + k*NoNv] * VCAph[E + i*Nv]; P4
WITH_CHRONO("doubles:particles:6",
DGEMM_PARTICLES(TBphh, VCAph)
REORDER(j, k, i)
REORDER(J, K, I)
)
}
)
LOG(0, "AtripCUDA") << "particles done" << "\n";
{ // free resources
#ifdef HAVE_CUDA
LOG(0, "AtripCUDA") << "free mem" << "\n";
cuCtxSynchronize();
cuMemFree((CUdeviceptr)_vhhh);
cuMemFree((CUdeviceptr)_t_buffer);
LOG(0, "AtripCUDA") << "free mem done" << "\n";
#else
free(_vhhh);
free(_t_buffer);
@@ -607,8 +652,8 @@ __global__
#undef REORDER
#undef DGEMM_HOLES
#undef DGEMM_PARTICLES
#undef _IJK_
#else
const size_t NoNv = No*Nv;
for (size_t k = 0; k < No; k++)
for (size_t j = 0; j < No; j++)
for (size_t i = 0; i < No; i++){