4 Commits

5 changed files with 94 additions and 68 deletions

View File

@@ -24,15 +24,6 @@ namespace acc {
 // cuda kernels
-template <typename F>
-__MAYBE_GLOBAL__
-void zeroing(F* a, size_t n) {
-  F zero = {0};
-  for (size_t i = 0; i < n; i++) {
-    a[i] = zero;
-  }
-}
 ////
 template <typename F>
 __MAYBE_DEVICE__ __MAYBE_HOST__ __INLINE__

View File

@@ -387,6 +387,22 @@ template <typename F=double>
   }
 }
+static size_t
+getSize(const std::vector<size_t> sliceLength,
+        const std::vector<size_t> paramLength,
+        const size_t np,
+        const MPI_Comm global_world) {
+  const RankMap<F> rankMap(paramLength, np, global_world);
+  const size_t
+    nSources = rankMap.nSources(),
+    sliceSize = std::accumulate(sliceLength.begin(),
+                                sliceLength.end(),
+                                1UL,
+                                std::multiplies<size_t>());
+  return nSources * sliceSize;
+}
 // CONSTRUCTOR
 SliceUnion( std::vector<typename Slice<F>::Type> sliceTypes_
           , std::vector<size_t> sliceLength_
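Note on the new getSize helper: the element count it returns is simply the product of the slice dimensions times the number of sources the rank map assigns to a rank. A minimal standalone sketch of that arithmetic follows; here the source count is passed in directly instead of being queried from atrip's RankMap, purely to keep the example self-contained.

    // Sketch only: nSources * prod(sliceLength), mirroring the new getSize helper.
    // "nSources" is a plain parameter here (an assumption for illustration).
    #include <cstddef>
    #include <cstdio>
    #include <functional>
    #include <numeric>
    #include <vector>

    static size_t sliceElements(const std::vector<size_t>& sliceLength,
                                size_t nSources) {
      const size_t sliceSize = std::accumulate(sliceLength.begin(),
                                               sliceLength.end(),
                                               1UL,
                                               std::multiplies<size_t>());
      return nSources * sliceSize;
    }

    int main() {
      // e.g. an ABPH-like slice of shape {Nv, No} with 12 sources on this rank
      const size_t Nv = 100, No = 10;
      std::printf("%zu\n", sliceElements({Nv, No}, 12)); // prints 12 * 100 * 10
      return 0;
    }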

View File

@@ -160,9 +160,9 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
LOG(0,"Atrip") << "ooo blocks: " LOG(0,"Atrip") << "ooo blocks: "
<< Atrip::kernelDimensions.ooo.blocks << "\n"; << Atrip::kernelDimensions.ooo.blocks << "\n";
LOG(0,"Atrip") << "ooo threads per block: " LOG(0,"Atrip") << "ooo threads per block: "
<< Atrip::kernelDimensions.ooo.threads << "\n"; << Atrip::kernelDimensions.ooo.threads << "\n";
#endif #endif
// allocate the three scratches, see piecuch // allocate the three scratches, see piecuch
@@ -235,11 +235,54 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
   MPI_Comm_size(child_comm, &child_size);
 }
+// a, b, c, d, e, f and P => Nv
+// H => No
+// total_source_sizes contains a list of the number of elements
+// in all sources of every tensor union, therefore nSlices * sliceSize
+const std::vector<size_t> total_source_sizes = {
+  // ABPH
+  SliceUnion<F>::getSize({Nv, No}, {Nv, Nv}, (size_t)np, universe),
+  // ABHH
+  SliceUnion<F>::getSize({No, No}, {Nv, Nv}, (size_t)np, universe),
+  // TABHH
+  SliceUnion<F>::getSize({No, No}, {Nv, Nv}, (size_t)np, universe),
+  // TAPHH
+  SliceUnion<F>::getSize({Nv, No, No}, {Nv}, (size_t)np, universe),
+  // HHHA
+  SliceUnion<F>::getSize({No, No, No}, {Nv}, (size_t)np, universe),
+};
+const size_t
+  total_source_size = sizeof(DataFieldType<F>)
+                    * std::accumulate(total_source_sizes.begin(),
+                                      total_source_sizes.end(),
+                                      0UL);
+#if defined(HAVE_CUDA)
+DataPtr<F> all_sources_pointer;
+cuMemAlloc(&all_sources_pointer, total_source_size);
+#else
+DataPtr<F>
+  all_sources_pointer = (DataPtr<F>)malloc(total_source_size);
+#endif
+size_t _source_pointer_idx = 0;
 // BUILD SLICES PARAMETRIZED BY NV x NV =============================={{{1
 WITH_CHRONO("nv-nv-slices",
   LOG(0,"Atrip") << "building NV x NV slices\n";
+  // TODO
+  // DataPtr<F> offseted_pointer = all_sources_pointer
+  //   * total_source_sizes[_source_pointer_idx++];
   ABPH<F> abph(*in.Vppph, (size_t)No, (size_t)Nv, (size_t)np, child_comm, universe);
+  // TODO
+  // DataPtr<F> offseted_pointer = all_sources_pointer
+  //   * total_source_sizes[_source_pointer_idx++];
   ABHH<F> abhh(*in.Vpphh, (size_t)No, (size_t)Nv, (size_t)np, child_comm, universe);
+  // TODO
+  // DataPtr<F> offseted_pointer = all_sources_pointer
+  //   * total_source_sizes[_source_pointer_idx++];
   TABHH<F> tabhh(*in.Tpphh, (size_t)No, (size_t)Nv, (size_t)np, child_comm, universe);
 )
@@ -251,7 +294,13 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
 // BUILD SLICES PARAMETRIZED BY NV ==================================={{{1
 WITH_CHRONO("nv-slices",
   LOG(0,"Atrip") << "building NV slices\n";
+  // TODO
+  // DataPtr<F> offseted_pointer = all_sources_pointer
+  //   * total_source_sizes[_source_pointer_idx++];
   TAPHH<F> taphh(*in.Tpphh, (size_t)No, (size_t)Nv, (size_t)np, child_comm, universe);
+  // TODO
+  // DataPtr<F> offseted_pointer = all_sources_pointer
+  //   * total_source_sizes[_source_pointer_idx++];
   HHHA<F> hhha(*in.Vhhhp, (size_t)No, (size_t)Nv, (size_t)np, child_comm, universe);
 )
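Note on the commented-out TODO blocks above: they sketch handing each SliceUnion a region of the single all_sources_pointer allocation. As written they multiply the base pointer by a size; an additive offset into the buffer (base plus the sum of the preceding source sizes) is presumably what is intended. An illustrative host-side sketch of that kind of bookkeeping follows, with hypothetical names (sourceSizes, basePointer); it is not the commit's implementation, which the TODOs leave open.

    // Illustrative only: one backing buffer, one additive offset per tensor union.
    #include <cstddef>
    #include <cstdlib>
    #include <numeric>
    #include <vector>

    int main() {
      using Field = double;  // stands in for DataFieldType<F>
      const std::vector<size_t> sourceSizes = {400, 100, 100, 500, 300};

      // Total element count and a single backing allocation.
      const size_t total =
          std::accumulate(sourceSizes.begin(), sourceSizes.end(), 0UL);
      Field* basePointer =
          static_cast<Field*>(std::malloc(total * sizeof(Field)));

      // Exclusive prefix sums: union i starts at basePointer + offset[i],
      // i.e. base plus the sum of the previous sizes (addition, not a product).
      std::vector<size_t> offset(sourceSizes.size(), 0);
      for (size_t i = 1; i < sourceSizes.size(); ++i)
        offset[i] = offset[i - 1] + sourceSizes[i - 1];

      Field* firstUnionSources  = basePointer + offset[0];
      Field* secondUnionSources = basePointer + offset[1];
      (void)firstUnionSources; (void)secondUnionSources;

      std::free(basePointer);
      return 0;
    }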

View File

@@ -21,11 +21,6 @@ namespace atrip {
 template <> double maybeConjugate(const double a) { return a; }
 template <> Complex maybeConjugate(const Complex a) { return std::conj(a); }
-#if defined(HAVE_CUDA)
-#endif
 namespace traits {
 template <typename F> bool isComplex() { return false; }
 template <> bool isComplex<double>() { return false; }

View File

@@ -13,6 +13,8 @@
 // limitations under the License.
 // [[file:~/cuda/atrip/atrip.org::*Prolog][Prolog:2]]
+#include <cstring>
 #include<atrip/Equations.hpp>
 #include<atrip/CUDA.hpp>
@@ -26,7 +28,7 @@ namespace atrip {
 #if defined(HAVE_CUDA)
 #define FOR_K() \
   const size_t k = blockIdx.x * blockDim.x + threadIdx.x; \
-  size_t idx = 0;
+  size_t idx = k*size*size;
 #else
 #define FOR_K() for (size_t k=0, idx=0; k < size; k++)
 #endif
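Note on the new base index: assuming the macro body advances idx once per element of a size-by-size block, the serial branch reaches idx == k*size*size at the start of iteration k, so a CUDA thread that owns exactly one value of k must start at that same offset rather than at 0. A minimal sketch of the pattern follows (a generic kernel, not atrip's; names are illustrative).

    #include <cuda_runtime.h>

    // Each thread handles one k-slab of size*size contiguous elements,
    // mirroring what the serial loop would do at iteration k.
    __global__ void fill_slabs(double* out, size_t size) {
      const size_t k = blockIdx.x * blockDim.x + threadIdx.x;
      if (k >= size) return;
      size_t idx = k * size * size;            // start of this thread's slab
      for (size_t i = 0; i < size * size; ++i)
        out[idx++] = static_cast<double>(k);   // same writes as the host loop, one k per thread
    }

    int main() {
      const size_t size = 8;
      double* d_out;
      cudaMalloc(&d_out, size * size * size * sizeof(double));
      fill_slabs<<<1, size>>>(d_out, size);
      cudaDeviceSynchronize();
      cudaFree(d_out);
      return 0;
    }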
@@ -580,13 +582,8 @@ void getEnergySame
 )
 #define MAYBE_CONJ(_conj, _buffer) \
   do { \
-    acc::maybeConjugate<<< \
-      Atrip::kernelDimensions.ooo.blocks, \
-      Atrip::kernelDimensions.ooo.threads \
-    >>>((DataFieldType<F>*)_conj, \
+    acc::maybeConjugate<<<1, 1 \
+    >>>((DataFieldType<F>*)_conj, \
       (DataFieldType<F>*)_buffer, \
       NoNoNo); \
   } while (0)
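Note on the <<<1, 1>>> launch: if acc::maybeConjugate, like the acc::zeroing kernel removed in the first file, loops over all NoNoNo elements inside the kernel body, then a single block with a single thread performs the full conjugation, and the previous many-thread launch would only have had every thread redundantly write the same elements. A generic sketch of that shape of kernel follows (not atrip's acc::maybeConjugate; names are illustrative).

    #include <cuComplex.h>
    #include <cuda_runtime.h>

    // One thread conjugates the whole buffer, so <<<1, 1>>> is sufficient.
    __global__ void conjugate_all(cuDoubleComplex* out,
                                  const cuDoubleComplex* in,
                                  size_t n) {
      for (size_t i = 0; i < n; ++i)
        out[i] = cuConj(in[i]);
    }

    int main() {
      const size_t n = 1024;
      cuDoubleComplex *d_in, *d_out;
      cudaMalloc(&d_in, n * sizeof(cuDoubleComplex));
      cudaMalloc(&d_out, n * sizeof(cuDoubleComplex));
      cudaMemset(d_in, 0, n * sizeof(cuDoubleComplex));
      conjugate_all<<<1, 1>>>(d_out, d_in, n);   // one block, one thread
      cudaDeviceSynchronize();
      cudaFree(d_in);
      cudaFree(d_out);
      return 0;
    }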
@@ -637,61 +634,39 @@ void getEnergySame
       _t_buffer, \
       (int const*)&NoNo \
   )
 #define MAYBE_CONJ(_conj, _buffer) \
-  do { \
-    for (size_t __i = 0; __i < NoNoNo; ++__i) { \
-      _conj[__i] \
-        = maybeConjugate<F>(_buffer[__i]); \
-    } \
-  } while (0)
+  acc::maybeConjugate((DataFieldType<F>*)_conj, \
+                      (DataFieldType<F>*)_buffer, \
+                      NoNoNo);
 #endif
 F one{1.0}, m_one{-1.0}, zero{0.0};
 const size_t NoNoNo = No*NoNo;
+// Zeroing vectors
 #ifdef HAVE_CUDA
-// DataFieldType<F>* _t_buffer;
-// DataFieldType<F>* _vhhh;
-// WITH_CHRONO("double:cuda:alloc",
-//   _CHECK_CUDA_SUCCESS("Allocating _t_buffer",
-//     cuMemAlloc((CUdeviceptr*)&_t_buffer,
-//                NoNoNo * sizeof(DataFieldType<F>)));
-//   _CHECK_CUDA_SUCCESS("Allocating _vhhh",
-//     cuMemAlloc((CUdeviceptr*)&_vhhh,
-//                NoNoNo * sizeof(DataFieldType<F>)));
-// )
 #if !defined(ATRIP_ONLY_DGEMM)
-// we still have to zero this
-const size_t
-  bs = Atrip::kernelDimensions.ooo.blocks,
-  ths = Atrip::kernelDimensions.ooo.threads;
-acc::zeroing<<<bs, ths>>>((DataFieldType<F>*)_t_buffer, NoNoNo);
-acc::zeroing<<<bs, ths>>>((DataFieldType<F>*)_vhhh, NoNoNo);
+{
+  const size_t elements = NoNoNo * sizeof(DataFieldType<F>)/4;
+  WITH_CHRONO("double:zeroing",
+    _CHECK_CUDA_SUCCESS("Zeroing Tijk",
+                        cuMemsetD32_v2((CUdeviceptr)Tijk, 0x00, elements));
+    _CHECK_CUDA_SUCCESS("Zeroing t buffer",
+                        cuMemsetD32_v2((CUdeviceptr)_t_buffer, 0x00, elements));
+    _CHECK_CUDA_SUCCESS("Zeroing vhhh buffer",
+                        cuMemsetD32_v2((CUdeviceptr)_vhhh, 0x00, elements));
+  )
+}
 #endif
 #else
 DataFieldType<F>* _t_buffer = (DataFieldType<F>*)malloc(NoNoNo * sizeof(F));
 DataFieldType<F>* _vhhh = (DataFieldType<F>*)malloc(NoNoNo * sizeof(F));
-DataFieldType<F> zero_h{0.0};
-for (size_t i=0; i < NoNoNo; i++) {
-  _t_buffer[i] = zero_h;
-  _vhhh[i] = zero_h;
-}
-#endif
-// Set Tijk to zero
-#if defined(HAVE_CUDA) && !defined(ATRIP_ONLY_DGEMM)
-WITH_CHRONO("double:reorder",
-  acc::zeroing<<<bs, ths>>>((DataFieldType<F>*)Tijk,
-                            NoNoNo);
-)
-#endif
-#if !defined(HAVE_CUDA)
-WITH_CHRONO("double:reorder",
-  for (size_t k = 0; k < NoNoNo; k++) {
-    Tijk[k] = DataFieldType<F>{0.0};
-  })
-#endif /* !defined(HAVE_CUDA) */
+std::memset((void*)_t_buffer, 0x00, NoNoNo * sizeof(DataFieldType<F>));
+std::memset((void*)_vhhh, 0x00, NoNoNo * sizeof(DataFieldType<F>));
+std::memset((void*)Tijk, 0x00, NoNoNo * sizeof(DataFieldType<F>));
+#endif /* HAVE_CUDA */
 #if defined(ATRIP_ONLY_DGEMM)
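Note on the new CUDA zeroing: cuMemsetD32 counts 32-bit words, not bytes, which is why the new code computes elements = NoNoNo * sizeof(DataFieldType<F>) / 4 (two words per double). Writing 0x00000000 words produces the +0.0 bit pattern, so this is equivalent to the removed acc::zeroing kernel. A minimal, standalone driver-API sketch of that convention follows (not atrip code; error handling reduced to a single check).

    #include <cstdio>
    #include <cuda.h>

    int main() {
      const size_t n = 1000;                  // e.g. NoNoNo elements
      const size_t bytes = n * sizeof(double);
      const size_t words = bytes / 4;         // 2 x 32-bit words per double

      CUdevice dev; CUcontext ctx; CUdeviceptr buf;
      cuInit(0);
      cuDeviceGet(&dev, 0);
      cuCtxCreate(&ctx, 0, dev);
      cuMemAlloc(&buf, bytes);

      // Fill n doubles with the all-zero bit pattern, i.e. 0.0.
      if (cuMemsetD32(buf, 0x00, words) != CUDA_SUCCESS)
        std::fprintf(stderr, "zeroing failed\n");

      cuMemFree(buf);
      cuCtxDestroy(ctx);
      return 0;
    }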