Compare commits
No commits in common. "ad542fe856e99bb2ee2d7a813cc6c13510d31356" and "6143d1ae735bf5c24d680be1fe5adf1af403178d" have entirely different histories.
ad542fe856 ... 6143d1ae73
@@ -13,9 +13,7 @@
(format "%s/include/" root)
(format "%s/" root)
(format "%s/bench/" root)
(format "%s/build/main/" root)))
(setq-local flycheck-clang-include-path
flycheck-gcc-include-path)))
(format "%s/build/main/" root)))))
(eval . (flycheck-mode))
(eval . (outline-minor-mode))
(indent-tabs-mode . nil)
2 changes: .github/workflows/main.yml (vendored)
@@ -2,6 +2,8 @@
name: CI

on:
  push:
    branches: [ master, cuda ]
  pull_request:
    branches: [ master, cuda ]
@@ -19,7 +19,7 @@ BENCHES_LDADD = $(ATRIP_LIB) $(ATRIP_CTF)
## main entry point and bench
##
bin_PROGRAMS += atrip
atrip_SOURCES = main.cxx
atrip_SOURCES = test_main.cxx
atrip_CPPFLAGS = $(AM_CPPFLAGS)
atrip_LDADD = $(BENCHES_LDADD)
66 changes: configure.ac
@@ -21,6 +21,26 @@ AC_ARG_ENABLE(shared,
files (default=YES)]),
[], [enable_shared=yes])

AC_ARG_ENABLE(
[slice],
[AS_HELP_STRING(
[--disable-slice],
[Disable the step of slicing tensors for CTF, this is useful for example for benchmarking or testing.])],
[atrip_dont_slice=1
AC_DEFINE([ATRIP_DONT_SLICE],1,[Wether CTF will slice tensors or skip the step])
],
[atrip_dont_slice=0]
)

AC_ARG_ENABLE(
[atrip_dgemm],
[AS_HELP_STRING(
[--disable-dgemm],
[Disable using dgemm for the doubles equations])],
[],
[AC_DEFINE([ATRIP_USE_DGEMM],1,[Use dgemm for the doubles equations])]
)


AC_ARG_ENABLE([docs],
[AS_HELP_STRING([--enable-docs],
@@ -54,53 +74,13 @@ AC_ARG_VAR([NVCC], [Path to the nvidia cuda compiler.])
AC_ARG_VAR([CUDA_LDFLAGS], [LDFLAGS to find libraries -lcuda, -lcudart, -lcublas.])
AC_ARG_VAR([CUDA_CXXFLAGS], [CXXFLAGS to find the CUDA headers])

dnl -----------------------------------------------------------------------
dnl ATRIP CPP DEFINES
dnl -----------------------------------------------------------------------

AC_ARG_WITH([atrip-debug],
[AS_HELP_STRING([--with-atrip-debug],
[Debug level for atrip, possible values:
1, 2, 3, 4])],
[Debug level for atrip, possible values: 1, 2, 3, 4])],
[AC_DEFINE([ATRIP_DEBUG],[atrip-debug],[Atrip debug level])],
[AC_DEFINE([ATRIP_DEBUG],[1],[Atrip debug level])])


AC_ARG_ENABLE([atrip_dgemm],
[AS_HELP_STRING([--disable-dgemm],
[Disable using dgemm for the doubles equations])],
[],
[AC_DEFINE([ATRIP_USE_DGEMM],
1,
[Use dgemm for the doubles equations])])

ATRIP_DEF([slice], [disable],
[ATRIP_DONT_SLICE],
[Disable the step of slicing tensors for CTF, this is useful
for example for benchmarking or testing.])

ATRIP_DEF([only-dgemm], [enable],
[ATRIP_ONLY_DGEMM],
[Run only the parts of atrip that involve dgemm calls, this
is useful for benchmarking and testing the code, it is
intended for developers of Atrip.])

ATRIP_DEF([naive-slow], [enable],
[ATRIP_NAIVE_SLOW],
[Run slow but correct code for the mapping of (iteration,
rank) to tuple of the naive tuple distribution.])

ATRIP_DEF([sources-in-gpu], [enable],
[ATRIP_SOURCES_IN_GPU],
[When using CUDA, activate storing all sources (slices of
the input tensors) in the GPU. This means that a lot of GPUs
will be needed.])

ATRIP_DEF([cuda-aware-mpi], [enable],
[ATRIP_CUDA_AWARE_MPI],
[When using MPI, assume support for CUDA aware mpi by the
given MPI implementation.])

[AC_DEFINE([ATRIP_DEBUG],[1],[Atrip debug level])]
)

dnl -----------------------------------------------------------------------
@@ -1,8 +0,0 @@
AC_DEFUN([ATRIP_DEF],
[AC_ARG_ENABLE([$1],
[AS_HELP_STRING([--$2-$1],
[$4])],
[AC_DEFINE([$3],
1,
[$4])])])
@@ -18,12 +18,6 @@
#include <atrip/Slice.hpp>
#include <atrip/RankMap.hpp>

#if defined(ATRIP_SOURCES_IN_GPU)
# define SOURCES_DATA(s) (s)
#else
# define SOURCES_DATA(s) (s).data()
#endif

namespace atrip {
// Prolog:1 ends here
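The SOURCES_DATA macro removed in this hunk papers over the two storage models that appear further down in the diff: with ATRIP_SOURCES_IN_GPU the sources are already device pointers (DataPtr<F>), otherwise they are host std::vector<F>. A minimal standalone sketch of that idea follows; the types are mocked here (DataPtr is a plain pointer) and this is not the atrip header itself.

```cpp
#include <vector>

// Mocked stand-in for atrip's DataPtr<F>; in the real code this can be a
// CUDA device handle rather than a host pointer.
template <typename F>
using DataPtr = F*;

#if defined(ATRIP_SOURCES_IN_GPU)
// Sources already are raw (device) pointers: pass them through unchanged.
#  define SOURCES_DATA(s) (s)
using Source = DataPtr<double>;
#else
// Sources are host vectors: expose the underlying buffer.
#  define SOURCES_DATA(s) (s).data()
using Source = std::vector<double>;
#endif

// Either way a caller such as MPI_Isend or cuMemcpyHtoD receives a raw buffer.
double* raw_buffer(Source& s) { return SOURCES_DATA(s); }

int main() {
#if !defined(ATRIP_SOURCES_IN_GPU)
  Source s(16, 1.0);
  return raw_buffer(s) == s.data() ? 0 : 1;
#else
  return 0;
#endif
}
```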
@@ -201,7 +195,7 @@ template <typename F=double>
;
if (blank.info.state == Slice<F>::SelfSufficient) {
#if defined(HAVE_CUDA)
const size_t _size = sizeof(F) * sliceSize;
const size_t _size = sizeof(F) * sources[from.source].size();
// TODO: this is code duplication with downstairs
if (freePointers.size() == 0) {
std::stringstream stream;
@@ -218,12 +212,12 @@ template <typename F=double>
WITH_CHRONO("cuda:memcpy:self-sufficient",
_CHECK_CUDA_SUCCESS("copying mpi data to device",
cuMemcpyHtoD(blank.data,
(void*)SOURCES_DATA(sources[from.source]),
sizeof(F) * sliceSize));
(void*)sources[from.source].data(),
sizeof(F) * sources[from.source].size()));
))

#else
blank.data = SOURCES_DATA(sources[from.source]);
blank.data = sources[from.source].data();
#endif
} else {
if (freePointers.size() == 0) {
@@ -402,18 +396,15 @@ template <typename F=double>
, world(child_world)
, universe(global_world)
, sliceLength(sliceLength_)
, sliceSize(std::accumulate(sliceLength.begin(),
sliceLength.end(),
1UL, std::multiplies<size_t>()))
#if defined(ATRIP_SOURCES_IN_GPU)
, sources(rankMap.nSources())
#else
, sources(rankMap.nSources(),
std::vector<F>(sliceSize))
#endif
std::vector<F>
(std::accumulate(sliceLength.begin(),
sliceLength.end(),
1UL, std::multiplies<size_t>())))
, name(name_)
, sliceTypes(sliceTypes_)
, sliceBuffers(nSliceBuffers)
//, slices(2 * sliceTypes.size(), Slice<F>{ sources[0].size() })
{ // constructor begin

LOG(0,"Atrip") << "INIT SliceUnion: " << name << "\n";
@@ -421,7 +412,7 @@ template <typename F=double>
for (auto& ptr: sliceBuffers) {
#if defined(HAVE_CUDA)
const CUresult error =
cuMemAlloc(&ptr, sizeof(F) * sliceSize);
cuMemAlloc(&ptr, sizeof(F) * sources[0].size());
if (ptr == 0UL) {
throw "UNSUFICCIENT MEMORY ON THE GRAPHIC CARD FOR FREE POINTERS";
}
@@ -432,12 +423,12 @@ template <typename F=double>
throw s.str();
}
#else
ptr = (DataPtr<F>)malloc(sizeof(F) * sliceSize);
ptr = (DataPtr<F>)malloc(sizeof(F) * sources[0].size());
#endif
}

slices
= std::vector<Slice<F>>(2 * sliceTypes.size(), { sliceSize });
= std::vector<Slice<F>>(2 * sliceTypes.size(), { sources[0].size() });
// TODO: think exactly ^------------------- about this number

// initialize the freePointers with the pointers to the buffers
@@ -450,12 +441,12 @@ template <typename F=double>
LOG(1,"Atrip") << "#slices " << slices.size() << "\n";
WITH_RANK << "#slices[0] " << slices[0].size << "\n";
LOG(1,"Atrip") << "#sources " << sources.size() << "\n";
WITH_RANK << "#sources[0] " << sliceSize << "\n";
WITH_RANK << "#sources[0] " << sources[0].size() << "\n";
WITH_RANK << "#freePointers " << freePointers.size() << "\n";
LOG(1,"Atrip") << "#sliceBuffers " << sliceBuffers.size() << "\n";
LOG(1,"Atrip") << "GB*" << np << " "
<< double(sources.size() + sliceBuffers.size())
* sliceSize
* sources[0].size()
* 8 * np
/ 1073741824.0
<< "\n";
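In the constructor hunk above, sliceSize is the product of the entries of sliceLength, computed with std::accumulate and std::multiplies. A minimal standalone sketch of that expression, using hypothetical dimensions rather than atrip's:

```cpp
#include <cstddef>
#include <functional>
#include <numeric>
#include <vector>

int main() {
  // e.g. a (Nv, No, No) slice with hypothetical sizes Nv = 200, No = 20
  const std::vector<size_t> sliceLength = {200, 20, 20};

  // product of all dimensions, the same expression used for sliceSize above
  const size_t sliceSize = std::accumulate(sliceLength.begin(),
                                           sliceLength.end(),
                                           1UL,
                                           std::multiplies<size_t>());

  return sliceSize == 200 * 20 * 20 ? 0 : 1;  // 80000 elements per slice
}
```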
@@ -504,13 +495,14 @@ template <typename F=double>
if (otherRank == info.from.rank) sendData_p = false;
if (!sendData_p) return;

MPI_Isend((void*)SOURCES_DATA(sources[info.from.source]),
sliceSize,
traits::mpi::datatypeOf<F>(),
otherRank,
tag,
universe,
&request);
MPI_Isend( sources[info.from.source].data()
, sources[info.from.source].size()
, traits::mpi::datatypeOf<F>()
, otherRank
, tag
, universe
, &request
);
WITH_CRAZY_DEBUG
WITH_RANK << "sent to " << otherRank << "\n";

@@ -524,26 +516,25 @@ template <typename F=double>

if (Atrip::rank == info.from.rank) return;

if (slice.info.state == Slice<F>::Fetch) { // if-1
if (slice.info.state == Slice<F>::Fetch) {
// TODO: do it through the slice class
slice.info.state = Slice<F>::Dispatched;
#if defined(HAVE_CUDA)
# if !defined(ATRIP_CUDA_AWARE_MPI) && defined(ATRIP_SOURCES_IN_GPU)
# error "You need CUDA aware MPI to have slices on the GPU"
# endif
slice.mpi_data = (F*)malloc(sizeof(F) * slice.size);
MPI_Irecv(slice.mpi_data,
MPI_Irecv( slice.mpi_data
#else
MPI_Irecv(slice.data,
MPI_Irecv( slice.data
#endif
slice.size,
traits::mpi::datatypeOf<F>(),
info.from.rank,
tag,
universe,
&slice.request);
} // if-1
} // receive
, slice.size
, traits::mpi::datatypeOf<F>()
, info.from.rank
, tag
, universe
, &slice.request
//, MPI_STATUS_IGNORE
);
}
}

void unwrapAll(ABCTuple const& abc) {
for (auto type: sliceTypes) unwrapSlice(type, abc);
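Both hunks above post non-blocking MPI calls and keep the MPI_Request around for later completion. A minimal self-contained sketch of that send/receive pattern, using plain doubles and MPI_COMM_WORLD instead of atrip's slice types:

```cpp
#include <mpi.h>
#include <vector>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  int rank = 0, np = 0;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  MPI_Comm_size(MPI_COMM_WORLD, &np);

  const int tag = 0;
  std::vector<double> buffer(1024, rank == 0 ? 1.0 : 0.0);
  MPI_Request request;

  if (np >= 2 && rank == 0) {
    // post the send and keep working; completion is checked with MPI_Wait
    MPI_Isend(buffer.data(), (int)buffer.size(), MPI_DOUBLE, 1, tag,
              MPI_COMM_WORLD, &request);
    MPI_Wait(&request, MPI_STATUS_IGNORE);
  } else if (np >= 2 && rank == 1) {
    MPI_Irecv(buffer.data(), (int)buffer.size(), MPI_DOUBLE, 0, tag,
              MPI_COMM_WORLD, &request);
    MPI_Wait(&request, MPI_STATUS_IGNORE);
  }

  MPI_Finalize();
  return 0;
}
```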
@@ -606,12 +597,7 @@ template <typename F=double>
const MPI_Comm world;
const MPI_Comm universe;
const std::vector<size_t> sliceLength;
const size_t sliceSize;
#if defined(ATRIP_SOURCES_IN_GPU)
std::vector< DataPtr<F> > sources;
#else
std::vector< std::vector<F> > sources;
#endif
std::vector< Slice<F> > slices;
typename Slice<F>::Name name;
const std::vector<typename Slice<F>::Type> sliceTypes;
@@ -19,14 +19,8 @@
namespace atrip {

template <typename F=double>
static
void sliceIntoVector
#if defined(ATRIP_SOURCES_IN_GPU)
( DataPtr<F> &source
#else
( std::vector<F> &source
#endif
, size_t sliceSize
( std::vector<F> &v
, CTF::Tensor<F> &toSlice
, std::vector<int64_t> const low
, std::vector<int64_t> const up
@@ -50,30 +44,18 @@ namespace atrip {
<< "\n";

#ifndef ATRIP_DONT_SLICE
toSlice.slice(toSlice_.low.data(),
toSlice_.up.data(),
0.0,
origin,
origin_.low.data(),
origin_.up.data(),
1.0);
toSlice.slice( toSlice_.low.data()
, toSlice_.up.data()
, 0.0
, origin
, origin_.low.data()
, origin_.up.data()
, 1.0);
memcpy(v.data(), toSlice.data, sizeof(F) * v.size());
#else
# pragma message("WARNING: COMPILING WITHOUT SLICING THE TENSORS")
#endif

#if defined(ATRIP_SOURCES_IN_GPU)
WITH_CHRONO("cuda:sources",
_CHECK_CUDA_SUCCESS("copying sources data to device",
cuMemcpyHtoD(source,
toSlice.data,
sliceSize));
)
#else
memcpy(source.data(),
toSlice.data,
sizeof(F) * sliceSize);
#endif

}
@@ -98,15 +80,16 @@ namespace atrip {

void sliceIntoBuffer(size_t it, CTF::Tensor<F> &to, CTF::Tensor<F> const& from) override
{
const int Nv = this->sliceLength[0]
, No = this->sliceLength[1]
, a = this->rankMap.find({static_cast<size_t>(Atrip::rank), it});
;

const int
Nv = this->sliceLength[0],
No = this->sliceLength[1],
a = this->rankMap.find({static_cast<size_t>(Atrip::rank), it});

sliceIntoVector<F>(this->sources[it], this->sliceSize,
to, {0, 0, 0}, {Nv, No, No},
from, {a, 0, 0, 0}, {a+1, Nv, No, No});
sliceIntoVector<F>( this->sources[it]
, to, {0, 0, 0}, {Nv, No, No}
, from, {a, 0, 0, 0}, {a+1, Nv, No, No}
);

}

@@ -135,13 +118,14 @@ namespace atrip {
void sliceIntoBuffer(size_t it, CTF::Tensor<F> &to, CTF::Tensor<F> const& from) override
{

const int
No = this->sliceLength[0],
a = this->rankMap.find({static_cast<size_t>(Atrip::rank), it});
const int No = this->sliceLength[0]
, a = this->rankMap.find({static_cast<size_t>(Atrip::rank), it})
;

sliceIntoVector<F>(this->sources[it], this->sliceSize,
to, {0, 0, 0}, {No, No, No},
from, {0, 0, 0, a}, {No, No, No, a+1});
sliceIntoVector<F>( this->sources[it]
, to, {0, 0, 0}, {No, No, No}
, from, {0, 0, 0, a}, {No, No, No, a+1}
);

}
};

@@ -169,17 +153,18 @@ namespace atrip {

void sliceIntoBuffer(size_t it, CTF::Tensor<F> &to, CTF::Tensor<F> const& from) override {

const int
Nv = this->sliceLength[0],
No = this->sliceLength[1],
el = this->rankMap.find({static_cast<size_t>(Atrip::rank), it}),
a = el % Nv,
b = el / Nv;
const int Nv = this->sliceLength[0]
, No = this->sliceLength[1]
, el = this->rankMap.find({static_cast<size_t>(Atrip::rank), it})
, a = el % Nv
, b = el / Nv
;


sliceIntoVector<F>(this->sources[it], this->sliceSize,
to, {0, 0}, {Nv, No},
from, {a, b, 0, 0}, {a+1, b+1, Nv, No});
sliceIntoVector<F>( this->sources[it]
, to, {0, 0}, {Nv, No}
, from, {a, b, 0, 0}, {a+1, b+1, Nv, No}
);

}

@@ -206,17 +191,17 @@ namespace atrip {

void sliceIntoBuffer(size_t it, CTF::Tensor<F> &to, CTF::Tensor<F> const& from) override {

const int
Nv = from.lens[0],
No = this->sliceLength[1],
el = this->rankMap.find({static_cast<size_t>(Atrip::rank), it}),
a = el % Nv,
b = el / Nv;
const int Nv = from.lens[0]
, No = this->sliceLength[1]
, el = this->rankMap.find({static_cast<size_t>(Atrip::rank), it})
, a = el % Nv
, b = el / Nv
;


sliceIntoVector<F>(this->sources[it], this->sliceSize,
to, {0, 0}, {No, No},
from, {a, b, 0, 0}, {a+1, b+1, No, No});
sliceIntoVector<F>( this->sources[it]
, to, {0, 0}, {No, No}
, from, {a, b, 0, 0}, {a+1, b+1, No, No}
);


}
@@ -246,16 +231,17 @@ namespace atrip {
void sliceIntoBuffer(size_t it, CTF::Tensor<F> &to, CTF::Tensor<F> const& from) override {
// TODO: maybe generalize this with ABHH

const int
Nv = from.lens[0],
No = this->sliceLength[1],
el = this->rankMap.find({static_cast<size_t>(Atrip::rank), it}),
a = el % Nv,
b = el / Nv;
const int Nv = from.lens[0]
, No = this->sliceLength[1]
, el = this->rankMap.find({static_cast<size_t>(Atrip::rank), it})
, a = el % Nv
, b = el / Nv
;

sliceIntoVector<F>(this->sources[it], this->sliceSize,
to, {0, 0}, {No, No},
from, {a, b, 0, 0}, {a+1, b+1, No, No});
sliceIntoVector<F>( this->sources[it]
, to, {0, 0}, {No, No}
, from, {a, b, 0, 0}, {a+1, b+1, No, No}
);


}
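Several of the sliceIntoBuffer overloads above recover a pair (a, b) from the single index el returned by rankMap.find, via el % Nv and el / Nv. A small standalone sketch of that decoding, assuming the pair was flattened as a + b*Nv (the inverse of what the diff shows; sizes here are hypothetical):

```cpp
#include <cassert>

int main() {
  const int Nv = 7;        // hypothetical number of virtual orbitals
  const int a = 3, b = 5;  // pair to encode

  // flatten the pair into one index, the assumed layout behind rankMap.find
  const int el = a + b * Nv;

  // decode exactly as the overloads above do
  assert(el % Nv == a);
  assert(el / Nv == b);
  return 0;
}
```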
@@ -1,11 +1,3 @@
#+quicklisp
(eval-when (:compile-toplevel :load-toplevel :execute)
(ql:quickload '(vgplot fiveam)))

(defpackage :naive-tuples
(:use :cl :vgplot))
(in-package :naive-tuples)

(defun tuples-atrip (nv)
(declare (optimize (speed 3) (safety 0) (debug 0)))
(loop :for a :below nv
@@ -226,3 +218,58 @@
cheaper
(print (equal (nth i tuples)
cheaper)))))

(let* ((l 101)
(tuples (tuples-atrip l)))
(loop :for a below l
:do (print (let ((s (a-block-atrip a l))
(c (count-if (lambda (x) (eq (car x) a))
tuples)))
(list :a a
:size s
:real c
:? (eq c s))))))

(ql:quickload 'vgplot)
(import 'vgplot:plot)
(import 'vgplot:replot)

(let ((l 10))
(plot (mapcar (lambda (x) (getf x :size))
(loop :for a upto l
collect (list :a a :size (a-block a l))))
"penis"))

(let* ((l 50)
(tuples (tuples-half l)))
(loop :for a below l
:do (print (let ((s (a-block a l))
(c (count-if (lambda (x) (eq (car x) a))
tuples)))
(list :a a
:size s
:real c
:? (eq c s))))))

(defun range (from to) (loop for i :from from :to to collect i))

(defun half-again (i nv)
(let ((a-block-list (let ((ll (mapcar (lambda (i) (a-block i nv))
(range 0 (- nv 1)))))
(loop :for i :from 1 :to (length ll)
:collect
(reduce #'+
ll
:end i)))))
(loop :for blk :in a-block-list
:with a = 0
:with total-blk = 0
:if (eq 0 (floor i blk))
:do
(let ((i (mod i blk)))
(print (list i (- i total-blk) blk a))
(return))
:else
:do (progn
(incf a)
(setq total-blk blk)))))
@@ -646,9 +646,6 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {

// COMPUTE SINGLES %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% {{{1
OCD_Barrier(universe);
#if defined(ATRIP_ONLY_DGEMM)
if (false)
#endif
if (!isFakeTuple(i)) {
WITH_CHRONO("oneshot-unwrap",
WITH_CHRONO("unwrap",
@@ -681,9 +678,6 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {


// COMPUTE ENERGY %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% {{{1
#if defined(ATRIP_ONLY_DGEMM)
if (false)
#endif
if (!isFakeTuple(i)) {
double tupleEnergy(0.);
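Both hunks above use the same compile-time switch: when ATRIP_ONLY_DGEMM is defined, an `if (false)` is emitted in front of the singles/energy work, so the block still compiles but never executes and only the dgemm-heavy parts run. A minimal standalone sketch of the idiom (hypothetical work function, not atrip's):

```cpp
#include <cstdio>

void compute_singles() { std::puts("singles"); }

int main() {
  // When the macro is defined, the if (false) swallows the following block,
  // so it is type-checked by the compiler but skipped at run time.
#if defined(ATRIP_ONLY_DGEMM)
  if (false)
#endif
  {
    compute_singles();
  }
  return 0;
}
```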
@@ -4,10 +4,8 @@

namespace atrip {

#if defined(ATRIP_NAIVE_SLOW)
/*
* This function is really too slow, below are more performant
* functions to get tuples.
/* This function is really too slow, below are more performant
functions to get tuples.
*/
static
ABCTuples get_nth_naive_tuples(size_t Nv, size_t np, int64_t i) {
@@ -54,26 +52,33 @@ namespace atrip {
return result;

}
#endif

static
inline
size_t a_block_atrip(size_t a, size_t nv) {
return (nv - 1) * (nv - (a - 1))
- ((nv - 1) * nv) / 2
+ ((a - 1) * (a - 2)) / 2
- 1;
}

static
inline
size_t a_block_sum_atrip(int64_t T, int64_t nv) {
const int64_t nv_min_1 = nv - 1, t_plus_1 = T + 1;
return t_plus_1 * nv_min_1 * nv
+ nv_min_1 * t_plus_1
- (nv_min_1 * (T * t_plus_1) / 2)
- (t_plus_1 * (nv_min_1 * nv) / 2)
// do not simplify this expression, only the addition of both parts
// is a pair integer, prepare to endure the consequences of
// simplifying otherwise
+ (((T * t_plus_1 * (1 + 2 * T)) / 6) - 3 * ((T * t_plus_1) / 2)) / 2
int64_t nv1 = nv - 1, tplus1 = T + 1;
return tplus1 * nv1 * nv
+ nv1 * tplus1
- (nv1 * (T * (T + 1)) / 2)
- (tplus1 * (nv1 * nv) / 2)
+ (((T * (T + 1) * (1 + 2 * T)) / 6) - 3 * ((T * (T + 1)) / 2)) / 2
;
// + tplus1;
}

static
inline
int64_t b_block_sum_atrip (int64_t a, int64_t T, int64_t nv) {

return nv * ((T - a) + 1)
- (T * (T + 1) - a * (a - 1)) / 2
- 1;
@@ -89,6 +94,9 @@ namespace atrip {
a_sums.resize(nv);
for (size_t _i = 0; _i < nv; _i++) {
a_sums[_i] = a_block_sum_atrip(_i, nv);
/*
std::cout << Atrip::rank << ": " << _i << " " << a_sums[_i] << std::endl;
*/
}
}

@@ -106,6 +114,10 @@ namespace atrip {
std::vector<int64_t> b_sums(nv - a);
for (size_t t = a, i=0; t < nv; t++) {
b_sums[i++] = b_block_sum_atrip(a, t, nv);
/*
std::cout << Atrip::rank << ": b-sum " << i-1 << " "
<< ":a " << a << " :t " << t << " = " << b_sums[i-1] << std::endl;
*/
}
int64_t b = a - 1, block_b = block_a;
for (const auto& sum: b_sums) {
@@ -129,11 +141,6 @@ namespace atrip {
inline
ABCTuples nth_atrip_distributed(int64_t it, size_t nv, size_t np) {

// If we are getting the previous tuples in the first iteration,
// then just return an impossible tuple, different from the FAKE_TUPLE,
// because if FAKE_TUPLE is defined as {0,0,0} slices thereof
// are actually attainable.
//
if (it < 0) {
ABCTuples result(np, {nv, nv, nv});
return result;
@@ -153,6 +160,9 @@ namespace atrip {
for (size_t rank = 0; rank < np; rank++) {
const size_t
global_iteration = tuples_per_rank * rank + it;
/*
std::cout << Atrip::rank << ":" << "global_bit " << global_iteration << "\n";
*/
result[rank] = nth_atrip(global_iteration, nv);
}

@@ -238,25 +248,38 @@ namespace atrip {
using Database = typename Slice<F>::Database;
Database db;

#ifdef ATRIP_NAIVE_SLOW
#ifdef NAIVE_SLOW
WITH_CHRONO("db:comm:naive:tuples",
const auto tuples = get_nth_naive_tuples(nv,
np,
iteration);
const auto prev_tuples = get_nth_naive_tuples(nv,
np,
iteration - 1);
(int64_t)iteration - 1);
)
#else
WITH_CHRONO("db:comm:naive:tuples",
const auto tuples = nth_atrip_distributed(iteration,
const auto tuples = nth_atrip_distributed((int64_t)iteration,
nv,
np);
const auto prev_tuples = nth_atrip_distributed(iteration - 1,
const auto prev_tuples = nth_atrip_distributed((int64_t)iteration - 1,
nv,
np);
)

if (false)
for (size_t rank = 0; rank < np; rank++) {
std::cout << Atrip::rank << ":"
<< " :tuples< " << rank << ">" << iteration
<< " :abc " << tuples[rank][0]
<< ", " << tuples[rank][1]
<< ", " << tuples[rank][2] << "\n";
std::cout << Atrip::rank << ":"
<< " :prev-tuples< " << rank << ">" << iteration
<< " :abc-prev " << prev_tuples[rank][0]
<< ", " << prev_tuples[rank][1]
<< ", " << prev_tuples[rank][2] << "\n";
}
#endif

for (size_t rank = 0; rank < np; rank++) {
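nth_atrip_distributed above reconstructs, for a local iteration `it`, the global tuple index every rank is working on as `tuples_per_rank * rank + it`. A minimal standalone sketch of that index arithmetic with hypothetical sizes:

```cpp
#include <cstddef>
#include <cstdio>
#include <vector>

int main() {
  const size_t n_tuples = 12, np = 3;           // hypothetical sizes
  const size_t tuples_per_rank = n_tuples / np; // assume it divides evenly here

  const size_t it = 1;  // the same local iteration on every rank
  std::vector<size_t> global(np);
  for (size_t rank = 0; rank < np; rank++) {
    // block offset of the rank plus the local iteration, as in the hunk above
    global[rank] = tuples_per_rank * rank + it;
    std::printf("rank %zu works on global tuple %zu\n", rank, global[rank]);
  }
  return 0;
}
```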
@@ -151,11 +151,12 @@ namespace cuda {
KJI
};

/*
/*
* Please the c++ type checker and template creator
* in order to have an argument in the signature of
* the function that helps the compiler know which
* instantiation it should take.
*
*/
template <typename F, reordering_t R>
struct reorder_proxy {};
@@ -435,20 +436,22 @@ double getEnergySame
, DataFieldType<F>* Tijk_
) {

const size_t NoNo = No*No;
const size_t a = abc[0], b = abc[1], c = abc[2]
, NoNo = No*No
;

DataFieldType<F>* Tijk = (DataFieldType<F>*)Tijk_;

#if defined(ATRIP_USE_DGEMM)
#if defined(HAVE_CUDA)
#define REORDER(__II, __JJ, __KK) \
reorder<<<bs, ths>>>(reorder_proxy< \
DataFieldType<F>, \
__II ## __JJ ## __KK \
>{}, \
No, \
Tijk, \
_t_buffer)
#define REORDER(__II, __JJ, __KK) \
reorder<<<bs, ths>>>(reorder_proxy< \
DataFieldType<F>, \
__II ## __JJ ## __KK \
>{}, \
No, \
Tijk, \
_t_buffer);
#define DGEMM_PARTICLES(__A, __B) \
atrip::xgemm<F>("T", \
"N", \
@@ -478,18 +481,11 @@ double getEnergySame
_t_buffer, \
(int const*)&NoNo \
)
#define MAYBE_CONJ(_conj, _buffer) \
do { \
cuda::maybeConjugate<<< \
\
Atrip::kernelDimensions.ooo.blocks, \
\
Atrip::kernelDimensions.ooo.threads \
\
>>>((DataFieldType<F>*)_conj, \
(DataFieldType<F>*)_buffer, \
NoNoNo); \
} while (0)
#define MAYBE_CONJ(_conj, _buffer) \
cuda::maybeConjugate<<< \
Atrip::kernelDimensions.ooo.blocks, \
Atrip::kernelDimensions.ooo.threads \
>>>((DataFieldType<F>*)_conj, (DataFieldType<F>*)_buffer, NoNoNo);


// END CUDA ////////////////////////////////////////////////////////////////////
@@ -504,9 +500,7 @@ double getEnergySame
#define REORDER(__II, __JJ, __KK) \
reorder(reorder_proxy<DataFieldType<F>, \
__II ## __JJ ## __KK >{}, \
No, \
Tijk, \
_t_buffer)
No, Tijk, _t_buffer);
#define DGEMM_PARTICLES(__A, __B) \
atrip::xgemm<F>("T", \
"N", \
@@ -537,13 +531,9 @@ double getEnergySame
_t_buffer, \
(int const*)&NoNo \
)
#define MAYBE_CONJ(_conj, _buffer) \
do { \
for (size_t __i = 0; __i < NoNoNo; ++__i) { \
_conj[__i] \
= maybeConjugate<F>(_buffer[__i]); \
} \
} while (0)
#define MAYBE_CONJ(_conj, _buffer) \
for (size_t __i = 0; __i < NoNoNo; ++__i) \
_conj[__i] = maybeConjugate<F>(_buffer[__i]);
#endif

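The MAYBE_CONJ definitions above (CUDA and CPU variants alike) wrap their multi-statement bodies in `do { ... } while (0)`. A minimal standalone sketch of why function-like macros need that wrapper; the macros here are toys, not atrip's:

```cpp
#include <cstdio>

// Without the do/while(0) wrapper, the two statements fall apart when the
// macro is used as the single body of an if/else.
#define LOG_TWICE_BAD(msg)  std::puts(msg); std::puts(msg)
#define LOG_TWICE_OK(msg)   do { std::puts(msg); std::puts(msg); } while (0)

int main() {
  bool verbose = false;
  // if (verbose) LOG_TWICE_BAD("hi"); else ...  // would not compile: the
  // second puts() ends the if-body and leaves the else dangling.
  if (verbose) LOG_TWICE_OK("hi"); else std::puts("quiet");  // fine
  return 0;
}
```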
F one{1.0}, m_one{-1.0}, zero{0.0};
@@ -562,12 +552,8 @@ double getEnergySame
const size_t
bs = Atrip::kernelDimensions.ooo.blocks,
ths = Atrip::kernelDimensions.ooo.threads;

#if !defined(ATRIP_ONLY_DGEMM)
cuda::zeroing<<<bs, ths>>>((DataFieldType<F>*)_t_buffer, NoNoNo);
cuda::zeroing<<<bs, ths>>>((DataFieldType<F>*)_vhhh, NoNoNo);
#endif

#else
DataFieldType<F>* _t_buffer = (DataFieldType<F>*)malloc(NoNoNo * sizeof(F));
DataFieldType<F>* _vhhh = (DataFieldType<F>*)malloc(NoNoNo * sizeof(F));
@@ -579,7 +565,7 @@ double getEnergySame
#endif

// Set Tijk to zero
#if defined(HAVE_CUDA) && !defined(ATRIP_ONLY_DGEMM)
#ifdef HAVE_CUDA
WITH_CHRONO("double:reorder",
cuda::zeroing<<<bs, ths>>>((DataFieldType<F>*)Tijk,
NoNoNo);
@@ -591,51 +577,43 @@ double getEnergySame
})
#endif


#if defined(ATRIP_ONLY_DGEMM)
#undef MAYBE_CONJ
#undef REORDER
#define MAYBE_CONJ(a, b) do {} while(0)
#define REORDER(i, j, k) do {} while(0)
#endif

// HOLES
WITH_CHRONO("doubles:holes",
{
// VhhhC[i + k*No + L*NoNo] * TABhh[L + j*No]; H1
MAYBE_CONJ(_vhhh, VhhhC);
MAYBE_CONJ(_vhhh, VhhhC)
WITH_CHRONO("doubles:holes:1",
DGEMM_HOLES(_vhhh, TABhh, "N");
REORDER(I, K, J);
REORDER(I, K, J)
)
// VhhhC[j + k*No + L*NoNo] * TABhh[i + L*No]; H0
WITH_CHRONO("doubles:holes:2",
DGEMM_HOLES(_vhhh, TABhh, "T");
REORDER(J, K, I);
REORDER(J, K, I)
)

// VhhhB[i + j*No + L*NoNo] * TAChh[L + k*No]; H5
MAYBE_CONJ(_vhhh, VhhhB);
MAYBE_CONJ(_vhhh, VhhhB)
WITH_CHRONO("doubles:holes:3",
DGEMM_HOLES(_vhhh, TAChh, "N");
REORDER(I, J, K);
REORDER(I, J, K)
)
// VhhhB[k + j*No + L*NoNo] * TAChh[i + L*No]; H3
WITH_CHRONO("doubles:holes:4",
DGEMM_HOLES(_vhhh, TAChh, "T");
REORDER(K, J, I);
REORDER(K, J, I)
)

// VhhhA[j + i*No + L*NoNo] * TBChh[L + k*No]; H1
MAYBE_CONJ(_vhhh, VhhhA);
MAYBE_CONJ(_vhhh, VhhhA)
WITH_CHRONO("doubles:holes:5",
DGEMM_HOLES(_vhhh, TBChh, "N");
REORDER(J, I, K);
REORDER(J, I, K)
)
// VhhhA[k + i*No + L*NoNo] * TBChh[j + L*No]; H4
WITH_CHRONO("doubles:holes:6",
DGEMM_HOLES(_vhhh, TBChh, "T");
REORDER(K, I, J);
REORDER(K, I, J)
)
}
)
@@ -647,32 +625,32 @@ double getEnergySame
// TAphh[E + i*Nv + j*NoNv] * VBCph[E + k*Nv]; P0
WITH_CHRONO("doubles:particles:1",
DGEMM_PARTICLES(TAphh, VBCph);
REORDER(I, J, K);
REORDER(I, J, K)
)
// TAphh[E + i*Nv + k*NoNv] * VCBph[E + j*Nv]; P3
WITH_CHRONO("doubles:particles:2",
DGEMM_PARTICLES(TAphh, VCBph);
REORDER(I, K, J);
REORDER(I, K, J)
)
// TCphh[E + k*Nv + i*NoNv] * VABph[E + j*Nv]; P5
WITH_CHRONO("doubles:particles:3",
DGEMM_PARTICLES(TCphh, VABph);
REORDER(K, I, J);
REORDER(K, I, J)
)
// TCphh[E + k*Nv + j*NoNv] * VBAph[E + i*Nv]; P2
WITH_CHRONO("doubles:particles:4",
DGEMM_PARTICLES(TCphh, VBAph);
REORDER(K, J, I);
REORDER(K, J, I)
)
// TBphh[E + j*Nv + i*NoNv] * VACph[E + k*Nv]; P1
WITH_CHRONO("doubles:particles:5",
DGEMM_PARTICLES(TBphh, VACph);
REORDER(J, I, K);
REORDER(J, I, K)
)
// TBphh[E + j*Nv + k*NoNv] * VCAph[E + i*Nv]; P4
WITH_CHRONO("doubles:particles:6",
DGEMM_PARTICLES(TBphh, VCAph);
REORDER(J, K, I);
REORDER(J, K, I)
)
}
)
@@ -1,132 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2022 by Alejandro Gallo <aamsgallo@gmail.com>

set -eu

flags=("${@}")
PROJECTS=()

#
## Check root directory
#
root_project=$(git rev-parse --show-toplevel)
configure=$root_project/configure
if [[ $(basename $PWD) == $(basename $root_project) ]]; then
cat <<EOF

You are trying to build in the root directory, create a build folder
and then configure.

mkdir build
cd build
$(readlink -f $0)

EOF
exit 1
fi

[[ -f $configure ]] || {
cat <<EOF
No configure script at $configure create it with bootstrap.sh or

autoreconf -vif

EOF
exit 1
}

#
## Create configuration function
#

create_config () {
file=$1
name=$2
PROJECTS=(${PROJECTS[@]} "$name")
mkdir -p $name
cd $name
echo "> creating: $name"
cat <<SH > configure
#!/usr/bin/env bash
# created by $0 on $(date)

$root_project/configure $(cat $file | paste -s) \\
$(for word in "${flags[@]}"; do
printf " \"%s\"" "$word";
done)


exit 0
SH
chmod +x configure
cd - > /dev/null
}

#
## default configuration
#

tmp=`mktemp`
cat <<EOF > $tmp
--disable-slice
EOF

create_config $tmp default
rm $tmp

#
## only-dgemm configuration
#

tmp=`mktemp`
cat <<EOF > $tmp
--disable-slice
--enable-only-dgemm
EOF

create_config $tmp only-dgemm
rm $tmp

#
## Create makefile
#

cat <<MAKE > Makefile

all: configure do
do: configure

configure: ${PROJECTS[@]/%/\/Makefile}

%/Makefile: %/configure
cd \$* && ./configure

do: ${PROJECTS[@]/%/\/src\/libatrip.a}


%/src/libatrip.a:
cd \$* && \$(MAKE)


.PHONY: configure do all
MAKE

cat <<EOF

Now you can do

make all

or go into one of the directories
${PROJECTS[@]}
and do
./configure
make

EOF

## Emacs stuff
# Local Variables:
# eval: (outline-minor-mode)
# outline-regexp: "## "
# End: