Compare commits


No commits in common. "0fa24404e5b8df4e8bf3adc4d2a32600fb2f93d1" and "b11b53aca1ff60d9e5eff5bfb1a16d00791ff496" have entirely different histories.

26 changed files with 606 additions and 2087 deletions


@ -1,22 +0,0 @@
;;; Directory Local Variables
;;; For more information see (info "(emacs) Directory Variables")
((c++-mode . ((outline-regexp . "// \\[\\[file:")
(eval . (let
((root
(expand-file-name
(project-root
(project-current)))))
(setq-local flycheck-gcc-include-path
(list
(format "%s/vendor/include/" root)
(format "%s/include/" root)
(format "%s/" root)
(format "%s/bench/" root)
(format "%s/build/main/" root)))
(setq-local flycheck-clang-include-path
flycheck-gcc-include-path)))
(eval . (flycheck-mode))
(eval . (outline-minor-mode))
(indent-tabs-mode . nil)
(tab-width . 2))))


@ -2,6 +2,8 @@
name: CI
on:
push:
branches: [ master, cuda ]
pull_request:
branches: [ master, cuda ]

.gitignore

@ -25,6 +25,3 @@ config.mk
/atrip.html
/TAGS
/config.h.in
/result
/result-dev
/vendor/


@ -26,86 +26,3 @@ before the proper paper is released please contact me.
In the mean time the code has been used in
[[https://aip.scitation.org/doi/10.1063/5.0074936][this publication]] and can therefore be cited.
* Building
Atrip uses autotools as its build system.
Autotools works by first creating a =configure= script from
a =configure.ac= file.
Atrip should be built out of source; this means that
you have to create a build directory other than the root
directory, for instance the =build/tutorial= directory
#+begin_src sh :exports code
mkdir -p build/tutorial/
cd build/tutorial
#+end_src
First you have to build the =configure= script by doing
#+begin_src sh :dir build/tutorial :exports code :results raw drawer
../../bootstrap.sh
#+end_src
#+RESULTS:
:results:
Creating configure script
Now you can build by doing
mkdir build
cd build
../configure
make extern
make all
:end:
Then you can inspect the =configure= options
#+begin_src sh :dir build/tutorial :results raw drawer :eval no
../../configure --help
#+end_src
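A typical out-of-source configuration could then look like
(a sketch; adjust the compiler and the CTF path to your setup)
#+begin_src sh :eval no
mkdir -p build/tutorial
cd build/tutorial
../../configure CXX=g++ --with-ctf=/absolute/path/to/ctf
make all
#+end_src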
** Benches
The script =tools/configure-benches.sh= can be used to create
a couple of configurations for benches:
#+begin_src sh :exports results :results verbatim org :results verbatim drawer replace output
awk '/begin +doc/,/end +doc/ { print $0 }' tools/configure-benches.sh |
grep -v -e "begin \+doc" -e "end \+doc" |
sed "s/^# //; s/^# *$//; /^$/d"
#+end_src
#+RESULTS:
:results:
- default ::
This configuration uses a CPU code with dgemm
and without computing slices.
- only-dgemm ::
This only runs the computation part that involves dgemms.
- slices-on-gpu-only-dgemm ::
This configuration tests that slices reside completely on the gpu
and it should use a CUDA aware MPI implementation.
It also only uses the routines that involve dgemm.
:end:
In order to generate the benches just create a suitable directory for it
#+begin_src sh :eval no
mkdir -p build/benches
cd build/benches
../../tools/configure-benches.sh CXX=g++ ...
#+end_src
and you will get a Makefile together with several project folders.
You can either configure all projects with =make all= or
go into each folder individually.
Note that you can pass a path to CTF to all of them by doing
#+begin_src sh :eval no
../../tools/configure-benches.sh --with-ctf=/absolute/path/to/ctf
#+end_src


@ -1,42 +1,25 @@
AUTOMAKE_OPTIONS = subdir-objects
include $(top_srcdir)/atrip.mk
AM_CPPFLAGS = -I$(top_srcdir)/include/ -I$(top_srcdir) $(CTF_CPPFLAGS)
AM_CPPFLAGS = -I$(top_srcdir)/include/ $(CTF_CPPFLAGS)
AM_LDFLAGS = @LAPACK_LIBS@ @BLAS_LIBS@
bin_PROGRAMS = test_main
test_main_SOURCES = test_main.cxx
test_main_LDADD = \
$(top_builddir)/src/libatrip.a
if WITH_BUILD_CTF
ATRIP_CTF = $(CTF_BUILD_PATH)/lib/libctf.a
test_main_LDADD += $(CTF_BUILD_PATH)/lib/libctf.a
else
ATRIP_CTF = @LIBCTF_LD_LIBRARY_PATH@/libctf.a
test_main_LDADD += @LIBCTF_LD_LIBRARY_PATH@/libctf.a
endif
ATRIP_LIB = $(top_builddir)/src/libatrip.a $(ATRIP_CTF)
bin_PROGRAMS =
BENCHES_LDADD = $(ATRIP_LIB) $(ATRIP_CTF)
##
## main entry point and bench
##
bin_PROGRAMS += atrip
atrip_SOURCES = main.cxx
atrip_CPPFLAGS = $(AM_CPPFLAGS)
atrip_LDADD = $(BENCHES_LDADD)
if !WITH_CUDA
##
## tuples distribution
##
bin_PROGRAMS += tuples-distribution
tuples_distribution_LDADD = $(BENCHES_LDADD)
tuples_distribution_SOURCES = tuples-distribution.cxx
endif
if WITH_CUDA
AM_CPPFLAGS += $(CUDA_CXXFLAGS)
BENCHES_LDADD += $(CUDA_LDFLAGS)
test_main_CXXFLAGS = $(CUDA_CXXFLAGS)
test_main_LDADD += $(CUDA_LDFLAGS)
AM_CXXFLAGS = $(CUDA_CXXFLAGS)
AM_LDFLAGS += $(CUDA_LDFLAGS)


@ -1,443 +0,0 @@
#include <iostream>
#define ATRIP_DEBUG 2
#include <atrip/Atrip.hpp>
#include <atrip/Tuples.hpp>
#include <atrip/Unions.hpp>
#include <bench/CLI11.hpp>
#include <bench/utils.hpp>
using namespace atrip;
using F = double;
using Tr = CTF::Tensor<F>;
#define INIT_DRY(name, ...) \
do { \
std::vector<int64_t> lens = __VA_ARGS__; \
name.order = lens.size(); \
name.lens = (int64_t*)malloc(sizeof(int64_t) * lens.size()); \
name.sym = (int*)malloc(sizeof(int) * lens.size()); \
/* copy the lengths and mark every mode as non-symmetric */ \
for (size_t i = 0; i < lens.size(); i++) { \
name.lens[i] = lens[i]; \
name.sym[i] = NS; \
} \
} while (0)
#define DEINIT_DRY(name) \
do { \
name.order = 0; \
name.lens = NULL; \
name.sym = NULL; \
} while (0)
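// Note: INIT_DRY/DEINIT_DRY only fake a CTF tensor's metadata
// (order, lens, sym) without allocating any tensor data, so the
// slicing machinery below can be driven without real tensors.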
using LocalDatabase = typename Slice<F>::LocalDatabase;
using LocalDatabaseElement = typename Slice<F>::LocalDatabaseElement;
LocalDatabase buildLocalDatabase(SliceUnion<F> &u,
ABCTuple const& abc) {
LocalDatabase result;
auto const needed = u.neededSlices(abc);
// BUILD THE DATABASE
// we need to loop over all sliceTypes that this TensorUnion
// is representing and find out how we will get the corresponding
// slice for the abc we are considering right now.
for (auto const& pair: needed) {
auto const type = pair.first;
auto const tuple = pair.second;
auto const from = u.rankMap.find(abc, type);
{
// FIRST: look up if there is already a *Ready* slice matching what we
// need
auto const& it
= std::find_if(u.slices.begin(), u.slices.end(),
[&tuple, &type](Slice<F> const& other) {
return other.info.tuple == tuple
&& other.info.type == type
// we only want another slice when it
// has already ready-to-use data
&& other.isUnwrappable()
;
});
if (it != u.slices.end()) {
// if we find this slice, it means that we don't have to do anything
result.push_back({u.name, it->info});
continue;
}
}
//
// Try to find a recycling possibility, i.e. find a slice with the same
// tuple that has a valid data pointer.
//
auto const& recycleIt
= std::find_if(u.slices.begin(), u.slices.end(),
[&tuple, &type](Slice<F> const& other) {
return other.info.tuple == tuple
&& other.info.type != type
&& other.isRecyclable()
;
});
//
// if we find such a recycling candidate, then we look for a Blank slice
// (which should exist by construction :THINK)
//
if (recycleIt != u.slices.end()) {
auto& blank = Slice<F>::findOneByType(u.slices, Slice<F>::Blank);
// TODO: formalize this through a method to copy information
// from another slice
blank.data = recycleIt->data;
blank.info.type = type;
blank.info.tuple = tuple;
blank.info.state = Slice<F>::Recycled;
blank.info.from = from;
blank.info.recycling = recycleIt->info.type;
result.push_back({u.name, blank.info});
WITH_RANK << "__db__: RECYCLING: n" << u.name
<< " " << pretty_print(abc)
<< " get " << pretty_print(blank.info)
<< " from " << pretty_print(recycleIt->info)
<< " ptr " << recycleIt->data
<< "\n"
;
continue;
}
// in this case we have to create a new slice
// this means that we should have a blank slice at our disposal
// and also the freePointers should have some elements inside,
// so we pop a data pointer from the freePointers container
{
auto& blank = Slice<F>::findOneByType(u.slices, Slice<F>::Blank);
blank.info.type = type;
blank.info.tuple = tuple;
blank.info.from = from;
// Handle self sufficiency
blank.info.state = Atrip::rank == from.rank
? Slice<F>::SelfSufficient
: Slice<F>::Fetch
;
if (blank.info.state == Slice<F>::SelfSufficient) {
blank.data = (F*)0xBADA55;
} else {
blank.data = (F*)0xA55A55;
}
result.push_back({u.name, blank.info});
continue;
}
}
return result;
}
void clearUnusedSlicesForNext(SliceUnion<F> &u,
ABCTuple const& abc) {
auto const needed = u.neededSlices(abc);
// CLEAN UP SLICES, FREE THE ONES THAT ARE NOT NEEDED ANYMORE
for (auto& slice: u.slices) {
// if the slice is free, then it was not used anyways
if (slice.isFree()) continue;
// try to find the slice in the needed slices list
auto const found
= std::find_if(needed.begin(), needed.end(),
[&slice] (typename Slice<F>::Ty_x_Tu const& tytu) {
return slice.info.tuple == tytu.second
&& slice.info.type == tytu.first
;
});
// if we did not find slice in needed, then erase it
if (found == needed.end()) {
// allow to gc unwrapped and recycled, never Fetch,
// if we have a Fetch slice then something has gone very wrong.
if (!slice.isUnwrapped() && slice.info.state != Slice<F>::Recycled)
throw
std::domain_error(_FORMAT("Trying to garbage collect (%d, %d) "
" a non-unwrapped slice! ",
slice.info.type,
slice.info.state));
// it can be that our slice is ready, but it has some hanging
// references lying around in the form of a recycled slice.
// Of course if we need the recycled slice the next iteration
// this would be fatal, because we would then free the pointer
// of the slice and at some point in the future we would
// overwrite it. Therefore, we must check if slice has some
// references in slices and if so then
//
// - we should mark those references as the original (since the data
// pointer should be the same)
//
// - we should make sure that the data pointer of slice
// does not get freed.
//
if (slice.info.state == Slice<F>::Ready) {
WITH_OCD WITH_RANK
<< "__gc__:" << "checking for data recycled dependencies\n";
auto recycled
= Slice<F>::hasRecycledReferencingToIt(u.slices, slice.info);
if (recycled.size()) {
Slice<F>* newReady = recycled[0];
WITH_OCD WITH_RANK
<< "__gc__:" << "swaping recycled "
<< pretty_print(newReady->info)
<< " and "
<< pretty_print(slice.info)
<< "\n";
newReady->markReady();
for (size_t i = 1; i < recycled.size(); i++) {
auto newRecycled = recycled[i];
newRecycled->info.recycling = newReady->info.type;
WITH_OCD WITH_RANK
<< "__gc__:" << "updating recycled "
<< pretty_print(newRecycled->info)
<< "\n";
}
}
}
slice.free();
} // we did not find the slice
}
}
void unwrapSlice(Slice<F>::Type t, ABCTuple abc, SliceUnion<F> *u) {
auto& slice = Slice<F>::findByTypeAbc(u->slices, t, abc);
switch (slice.info.state) {
case Slice<F>::Dispatched:
slice.markReady();
break;
case Slice<F>::Recycled:
// unwrap the slice that this one recycles from, not itself again
unwrapSlice(slice.info.recycling, abc, u);
break;
default:
break;
}
}
#define PRINT_VARIABLE(v) \
do { \
if (!rank) std::cout << "# " << #v << ": " << v << std::endl; \
} while (0)
int main(int argc, char** argv) {
MPI_Init(&argc, &argv);
int no(10), nv(100);
std::string tuplesDistributionString = "naive";
CLI::App app{"Main bench for atrip"};
app.add_option("--no", no, "Occupied orbitals");
app.add_option("--nv", nv, "Virtual orbitals");
app.add_option("--dist", tuplesDistributionString, "Which distribution");
CLI11_PARSE(app, argc, argv);
CTF::World world(argc, argv);
auto kaun = world.comm;
int rank, np;
MPI_Comm_rank(kaun, &rank);
MPI_Comm_size(kaun, &np);
Atrip::init(world.comm);
atrip::ABCTuples tuplesList;
atrip::TuplesDistribution *dist;
{
using namespace atrip;
if (tuplesDistributionString == "naive") {
dist = new NaiveDistribution();
tuplesList = dist->getTuples(nv, world.comm);
} else if (tuplesDistributionString == "group") {
dist = new group_and_sort::Distribution();
tuplesList = dist->getTuples(nv, world.comm);
} else {
std::cout << "--dist should be either naive or group\n";
exit(1);
}
}
double tuplesListGb
= tuplesList.size() * sizeof(tuplesList[0])
/ 1024.0 / 1024.0 / 1024.0;
std::cout << "\n";
PRINT_VARIABLE(tuplesDistributionString);
PRINT_VARIABLE(np);
PRINT_VARIABLE(no);
PRINT_VARIABLE(nv);
PRINT_VARIABLE(tuplesList.size());
PRINT_VARIABLE(tuplesListGb);
// create a fake dry tensor
Tr t_abph, t_abhh, t_tabhh, t_taphh, t_hhha;
INIT_DRY(t_abph , {nv, nv, nv, no});
INIT_DRY(t_abhh , {nv, nv, no, no});
INIT_DRY(t_tabhh , {nv, nv, no, no});
INIT_DRY(t_taphh , {nv, nv, no, no});
INIT_DRY(t_hhha , {no, no, no, nv});
ABPH<F> abph(t_abph, (size_t)no, (size_t)nv, (size_t)np, kaun, kaun);
ABHH<F> abhh(t_abhh, (size_t)no, (size_t)nv, (size_t)np, kaun, kaun);
TABHH<F> tabhh(t_tabhh, (size_t)no, (size_t)nv, (size_t)np, kaun, kaun);
TAPHH<F> taphh(t_taphh, (size_t)no, (size_t)nv, (size_t)np, kaun, kaun);
HHHA<F> hhha(t_hhha, (size_t)no, (size_t)nv, (size_t)np, kaun, kaun);
std::vector< SliceUnion<F>* > unions = {&taphh, &hhha, &abph, &abhh, &tabhh};
using Database = typename Slice<F>::Database;
auto communicateDatabase
= [ &unions
, np
] (ABCTuple const& abc, MPI_Comm const& c) -> Database {
WITH_CHRONO("db:comm:type:do",
auto MPI_LDB_ELEMENT = Slice<F>::mpi::localDatabaseElement();
)
WITH_CHRONO("db:comm:ldb",
typename Slice<F>::LocalDatabase ldb;
for (auto const& tensor: unions) {
auto const& tensorDb = buildLocalDatabase(*tensor, abc);
ldb.insert(ldb.end(), tensorDb.begin(), tensorDb.end());
}
)
Database db(np * ldb.size(), ldb[0]);
WITH_CHRONO("oneshot-db:comm:allgather",
WITH_CHRONO("db:comm:allgather",
MPI_Allgather(ldb.data(),
/* ldb.size() * sizeof(typename
Slice<F>::LocalDatabaseElement) */
ldb.size(),
MPI_LDB_ELEMENT,
db.data(),
/* ldb.size() * sizeof(typename
Slice<F>::LocalDatabaseElement), */
ldb.size(),
MPI_LDB_ELEMENT,
c);
))
WITH_CHRONO("db:comm:type:free", MPI_Type_free(&MPI_LDB_ELEMENT);)
return db;
};
auto doIOPhase
= [&unions, &rank, &np] (Database const& db,
std::vector<LocalDatabaseElement> &to_send) {
const size_t localDBLength = db.size() / np;
size_t sendTag = 0
, recvTag = rank * localDBLength
;
{
// At this point, we have already sent to everyone that fits
auto const& begin = &db[rank * localDBLength]
, end = begin + localDBLength
;
for (auto it = begin; it != end; ++it) {
recvTag++;
auto const& el = *it;
auto& u = unionByName(unions, el.name);
auto& slice = Slice<F>::findByInfo(u.slices, el.info);
slice.markReady();
// u.receive(el.info, recvTag);
} // recv
}
// SEND PHASE =========================================================
for (size_t otherRank = 0; otherRank < np; otherRank++) {
auto const& begin = &db[otherRank * localDBLength]
, end = begin + localDBLength
;
for (auto it = begin; it != end; ++it) {
sendTag++;
typename Slice<F>::LocalDatabaseElement const& el = *it;
if (el.info.from.rank != rank) continue;
auto& u = unionByName(unions, el.name);
if (el.info.state == Slice<F>::Fetch) {
to_send.push_back(el);
}
// u.send(otherRank, el, sendTag);
} // send phase
} // otherRank
};
std::vector<LocalDatabaseElement>
to_send;
for (size_t it = 0; it < tuplesList.size(); it++) {
const ABCTuple abc = dist->tupleIsFake(tuplesList[it])
? tuplesList[tuplesList.size() - 1]
: tuplesList[it]
;
if (it > 0) {
for (auto const& u: unions) {
clearUnusedSlicesForNext(*u, abc);
}
}
const auto db = communicateDatabase(abc, kaun);
doIOPhase(db, to_send);
if (it % 1000 == 0)
std::cout << _FORMAT("%ld :it %ld %f %% ∷ %ld ∷ %f GB\n",
rank,
it,
100.0 * double(to_send.size()) / double(tuplesList.size()),
to_send.size(),
double(to_send.size()) * sizeof(to_send[0])
/ 1024.0 / 1024.0 / 1024.0);
for (auto const& u: unions) {
for (auto type: u->sliceTypes) {
unwrapSlice(type, abc, u);
}
}
}
std::cout << "=========================================================\n";
std::cout << "FINISHING, it will segfaulten, that's ok, don't even trip"
<< std::endl;
MPI_Barrier(kaun);
DEINIT_DRY(t_abph);
DEINIT_DRY(t_abhh);
DEINIT_DRY(t_tabhh);
DEINIT_DRY(t_taphh);
DEINIT_DRY(t_hhha);
MPI_Finalize();
return 0;
}


@ -1,12 +0,0 @@
#ifndef UTILS_HPP_
#define UTILS_HPP_
#include <cstdio>
#include <string>
#include <vector>
#define _FORMAT(_fmt, ...) \
([&] (void) -> std::string { \
int _sz = std::snprintf(nullptr, 0, _fmt, __VA_ARGS__); \
std::vector<char> _out(_sz + 1); \
std::snprintf(&_out[0], _out.size(), _fmt, __VA_ARGS__); \
return std::string(_out.data()); \
})()
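// Usage sketch (hypothetical values):
//
//   int rank = 3;
//   std::string msg = _FORMAT("rank %d of %d", rank, 16);
//   // msg == "rank 3 of 16"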
#endif


@ -21,6 +21,26 @@ AC_ARG_ENABLE(shared,
files (default=YES)]),
[], [enable_shared=yes])
AC_ARG_ENABLE(
[slice],
[AS_HELP_STRING(
[--disable-slice],
[Disable the step of slicing tensors for CTF, this is useful for example for benchmarking or testing.])],
[atrip_dont_slice=1
AC_DEFINE([ATRIP_DONT_SLICE],1,[Whether CTF will slice tensors or skip the step])
],
[atrip_dont_slice=0]
)
AC_ARG_ENABLE(
[atrip_dgemm],
[AS_HELP_STRING(
[--disable-dgemm],
[Disable using dgemm for the doubles equations])],
[],
[AC_DEFINE([ATRIP_USE_DGEMM],1,[Use dgemm for the doubles equations])]
)
AC_ARG_ENABLE([docs],
[AS_HELP_STRING([--enable-docs],
@ -54,53 +74,13 @@ AC_ARG_VAR([NVCC], [Path to the nvidia cuda compiler.])
AC_ARG_VAR([CUDA_LDFLAGS], [LDFLAGS to find libraries -lcuda, -lcudart, -lcublas.])
AC_ARG_VAR([CUDA_CXXFLAGS], [CXXFLAGS to find the CUDA headers])
dnl -----------------------------------------------------------------------
dnl ATRIP CPP DEFINES
dnl -----------------------------------------------------------------------
AC_ARG_WITH([atrip-debug],
[AS_HELP_STRING([--with-atrip-debug],
[Debug level for atrip, possible values:
1, 2, 3, 4])],
[Debug level for atrip, possible values: 1, 2, 3, 4])],
[AC_DEFINE([ATRIP_DEBUG],[atrip-debug],[Atrip debug level])],
[AC_DEFINE([ATRIP_DEBUG],[1],[Atrip debug level])])
AC_ARG_ENABLE([atrip_dgemm],
[AS_HELP_STRING([--disable-dgemm],
[Disable using dgemm for the doubles equations])],
[],
[AC_DEFINE([ATRIP_USE_DGEMM],
1,
[Use dgemm for the doubles equations])])
ATRIP_DEF([slice], [disable],
[ATRIP_DONT_SLICE],
[Disable the step of slicing tensors for CTF, this is useful
for example for benchmarking or testing.])
ATRIP_DEF([only-dgemm], [enable],
[ATRIP_ONLY_DGEMM],
[Run only the parts of atrip that involve dgemm calls, this
is useful for benchmarking and testing the code, it is
intended for developers of Atrip.])
ATRIP_DEF([naive-slow], [enable],
[ATRIP_NAIVE_SLOW],
[Run slow but correct code for the mapping of (iteration,
rank) to tuple of the naive tuple distribution.])
ATRIP_DEF([sources-in-gpu], [enable],
[ATRIP_SOURCES_IN_GPU],
[When using CUDA, activate storing all sources (slices of
the input tensors) in the GPU. This means that a lot of GPUs
will be needed.])
ATRIP_DEF([cuda-aware-mpi], [enable],
[ATRIP_CUDA_AWARE_MPI],
[When using MPI, assume support for CUDA aware mpi by the
given MPI implementation.])
[AC_DEFINE([ATRIP_DEBUG],[1],[Atrip debug level])]
)
dnl -----------------------------------------------------------------------
@ -164,7 +144,8 @@ AC_TYPE_SIZE_T
dnl -----------------------------------------------------------------------
dnl CHECK CTF
if test xYES = x${BUILD_CTF}; then
AC_MSG_WARN([You will have to do make ctf before building the project.])
AC_MSG_WARN([Sorry, building CTF is not supported yet; provide a build path
with --with-ctf=path/to/ctf/installation])
else
CPPFLAGS="$CPPFLAGS -I${LIBCTF_CPATH}"
LDFLAGS="$LDFLAGS -L${LIBCTF_LD_LIBRARY_PATH} -lctf"


@ -1,8 +0,0 @@
AC_DEFUN([ATRIP_DEF],
[AC_ARG_ENABLE([$1],
[AS_HELP_STRING([--$2-$1],
[$4])],
[AC_DEFINE([$3],
1,
[$4])])])
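dnl For instance, the call from configure.ac above,
dnl   ATRIP_DEF([only-dgemm], [enable], [ATRIP_ONLY_DGEMM], [help text])
dnl declares an --enable-only-dgemm option which, when given,
dnl defines ATRIP_ONLY_DGEMM to 1.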


@ -16,7 +16,7 @@ $(CTF_SRC_PATH)/configure:
# Here make sure that ctf does not build with CUDA support
# since it is broken anyway
#
#
# Also we patch the file kernel.h because it mostly
# doesn't work when we try to include ctf in code
# compiled with a CUDA compiler.


@ -20,8 +20,6 @@ in
{
pkg = myopenblas;
buildInputs = with pkgs; [
myopenblas
scalapack


@ -1,27 +0,0 @@
rec {
directory = "vendor";
src = ''
_add_vendor_cpath () {
export CPATH=$CPATH:$1
mkdir -p ${directory}/include
ln -frs $1/* ${directory}/include/
}
_add_vendor_lib () {
mkdir -p ${directory}/lib
ln -frs $1/* ${directory}/lib/
}
'';
cpath = path: ''
_add_vendor_cpath ${path}
'';
lib = path: ''
_add_vendor_lib ${path}
'';
}


@ -1,20 +0,0 @@
#pragma once
#include <atrip/Utils.hpp>
#include <atrip/Equations.hpp>
#include <atrip/SliceUnion.hpp>
#include <atrip/Unions.hpp>
namespace atrip {
template <typename F>
using Unions = std::vector<SliceUnion<F>*>;
template <typename F>
typename Slice<F>::Database
naiveDatabase(Unions<F> &unions,
size_t nv,
size_t np,
size_t iteration,
MPI_Comm const& c);
} // namespace atrip


@ -76,7 +76,7 @@
// [[file:~/cuda/atrip/atrip.org::*Macros][Macros:2]]
#ifndef LOG
#define LOG(level, name) if (atrip::Atrip::rank == 0) std::cout << name << ": "
#define LOG(level, name) if (Atrip::rank == 0) std::cout << name << ": "
#endif
// Macros:2 ends here


@ -18,12 +18,6 @@
#include <atrip/Slice.hpp>
#include <atrip/RankMap.hpp>
#if defined(ATRIP_SOURCES_IN_GPU)
# define SOURCES_DATA(s) (s)
#else
# define SOURCES_DATA(s) (s).data()
#endif
namespace atrip {
// Prolog:1 ends here
@ -201,7 +195,7 @@ template <typename F=double>
;
if (blank.info.state == Slice<F>::SelfSufficient) {
#if defined(HAVE_CUDA)
const size_t _size = sizeof(F) * sliceSize;
const size_t _size = sizeof(F) * sources[from.source].size();
// TODO: this is code duplication with downstairs
if (freePointers.size() == 0) {
std::stringstream stream;
@ -218,12 +212,12 @@ template <typename F=double>
WITH_CHRONO("cuda:memcpy:self-sufficient",
_CHECK_CUDA_SUCCESS("copying mpi data to device",
cuMemcpyHtoD(blank.data,
(void*)SOURCES_DATA(sources[from.source]),
sizeof(F) * sliceSize));
(void*)sources[from.source].data(),
sizeof(F) * sources[from.source].size()));
))
#else
blank.data = SOURCES_DATA(sources[from.source]);
blank.data = sources[from.source].data();
#endif
} else {
if (freePointers.size() == 0) {
@ -402,18 +396,15 @@ template <typename F=double>
, world(child_world)
, universe(global_world)
, sliceLength(sliceLength_)
, sliceSize(std::accumulate(sliceLength.begin(),
sliceLength.end(),
1UL, std::multiplies<size_t>()))
#if defined(ATRIP_SOURCES_IN_GPU)
, sources(rankMap.nSources())
#else
, sources(rankMap.nSources(),
std::vector<F>(sliceSize))
#endif
std::vector<F>
(std::accumulate(sliceLength.begin(),
sliceLength.end(),
1UL, std::multiplies<size_t>())))
, name(name_)
, sliceTypes(sliceTypes_)
, sliceBuffers(nSliceBuffers)
//, slices(2 * sliceTypes.size(), Slice<F>{ sources[0].size() })
{ // constructor begin
LOG(0,"Atrip") << "INIT SliceUnion: " << name << "\n";
@ -421,7 +412,7 @@ template <typename F=double>
for (auto& ptr: sliceBuffers) {
#if defined(HAVE_CUDA)
const CUresult error =
cuMemAlloc(&ptr, sizeof(F) * sliceSize);
cuMemAlloc(&ptr, sizeof(F) * sources[0].size());
if (ptr == 0UL) {
throw "UNSUFICCIENT MEMORY ON THE GRAPHIC CARD FOR FREE POINTERS";
}
@ -432,12 +423,12 @@ template <typename F=double>
throw s.str();
}
#else
ptr = (DataPtr<F>)malloc(sizeof(F) * sliceSize);
ptr = (DataPtr<F>)malloc(sizeof(F) * sources[0].size());
#endif
}
slices
= std::vector<Slice<F>>(2 * sliceTypes.size(), { sliceSize });
= std::vector<Slice<F>>(2 * sliceTypes.size(), { sources[0].size() });
// TODO: think exactly ^------------------- about this number
// initialize the freePointers with the pointers to the buffers
@ -450,12 +441,12 @@ template <typename F=double>
LOG(1,"Atrip") << "#slices " << slices.size() << "\n";
WITH_RANK << "#slices[0] " << slices[0].size << "\n";
LOG(1,"Atrip") << "#sources " << sources.size() << "\n";
WITH_RANK << "#sources[0] " << sliceSize << "\n";
WITH_RANK << "#sources[0] " << sources[0].size() << "\n";
WITH_RANK << "#freePointers " << freePointers.size() << "\n";
LOG(1,"Atrip") << "#sliceBuffers " << sliceBuffers.size() << "\n";
LOG(1,"Atrip") << "GB*" << np << " "
<< double(sources.size() + sliceBuffers.size())
* sliceSize
* sources[0].size()
* 8 * np
/ 1073741824.0
<< "\n";
@ -504,13 +495,14 @@ template <typename F=double>
if (otherRank == info.from.rank) sendData_p = false;
if (!sendData_p) return;
MPI_Isend((void*)SOURCES_DATA(sources[info.from.source]),
sliceSize,
traits::mpi::datatypeOf<F>(),
otherRank,
tag,
universe,
&request);
MPI_Isend( sources[info.from.source].data()
, sources[info.from.source].size()
, traits::mpi::datatypeOf<F>()
, otherRank
, tag
, universe
, &request
);
WITH_CRAZY_DEBUG
WITH_RANK << "sent to " << otherRank << "\n";
@ -524,26 +516,25 @@ template <typename F=double>
if (Atrip::rank == info.from.rank) return;
if (slice.info.state == Slice<F>::Fetch) { // if-1
if (slice.info.state == Slice<F>::Fetch) {
// TODO: do it through the slice class
slice.info.state = Slice<F>::Dispatched;
#if defined(HAVE_CUDA)
# if !defined(ATRIP_CUDA_AWARE_MPI) && defined(ATRIP_SOURCES_IN_GPU)
# error "You need CUDA aware MPI to have slices on the GPU"
# endif
slice.mpi_data = (F*)malloc(sizeof(F) * slice.size);
MPI_Irecv(slice.mpi_data,
MPI_Irecv( slice.mpi_data
#else
MPI_Irecv(slice.data,
MPI_Irecv( slice.data
#endif
slice.size,
traits::mpi::datatypeOf<F>(),
info.from.rank,
tag,
universe,
&slice.request);
} // if-1
} // receive
, slice.size
, traits::mpi::datatypeOf<F>()
, info.from.rank
, tag
, universe
, &slice.request
//, MPI_STATUS_IGNORE
);
}
}
void unwrapAll(ABCTuple const& abc) {
for (auto type: sliceTypes) unwrapSlice(type, abc);
@ -606,12 +597,7 @@ template <typename F=double>
const MPI_Comm world;
const MPI_Comm universe;
const std::vector<size_t> sliceLength;
const size_t sliceSize;
#if defined(ATRIP_SOURCES_IN_GPU)
std::vector< DataPtr<F> > sources;
#else
std::vector< std::vector<F> > sources;
#endif
std::vector< Slice<F> > slices;
typename Slice<F>::Name name;
const std::vector<typename Slice<F>::Type> sliceTypes;


@ -52,7 +52,43 @@ struct TuplesDistribution {
// Distributing the tuples:1 ends here
// [[file:~/cuda/atrip/atrip.org::*Node%20information][Node information:1]]
std::vector<std::string> getNodeNames(MPI_Comm comm);
std::vector<std::string> getNodeNames(MPI_Comm comm){
int rank, np;
MPI_Comm_rank(comm, &rank);
MPI_Comm_size(comm, &np);
std::vector<std::string> nodeList(np);
char nodeName[MPI_MAX_PROCESSOR_NAME];
char *nodeNames = (char*)malloc(np * MPI_MAX_PROCESSOR_NAME);
std::vector<int> nameLengths(np)
, off(np)
;
int nameLength;
MPI_Get_processor_name(nodeName, &nameLength);
MPI_Allgather(&nameLength,
1,
MPI_INT,
nameLengths.data(),
1,
MPI_INT,
comm);
for (int i(1); i < np; i++)
off[i] = off[i-1] + nameLengths[i-1];
MPI_Allgatherv(nodeName,
nameLengths[rank],
MPI_BYTE,
nodeNames,
nameLengths.data(),
off.data(),
MPI_BYTE,
comm);
for (int i(0); i < np; i++) {
std::string const s(&nodeNames[off[i]], nameLengths[i]);
nodeList[i] = s;
}
std::free(nodeNames);
return nodeList;
}
// Node information:1 ends here
// [[file:~/cuda/atrip/atrip.org::*Node%20information][Node information:2]]
@ -64,28 +100,118 @@ struct RankInfo {
const size_t ranksPerNode;
};
template <typename A>
A unique(A const &xs) {
auto result = xs;
std::sort(std::begin(result), std::end(result));
auto const& last = std::unique(std::begin(result), std::end(result));
result.erase(last, std::end(result));
return result;
}
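// e.g. unique(std::vector<int>{3, 1, 3, 2}) returns {1, 2, 3}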
std::vector<RankInfo>
getNodeInfos(std::vector<string> const& nodeNames);
getNodeInfos(std::vector<string> const& nodeNames) {
std::vector<RankInfo> result;
auto const uniqueNames = unique(nodeNames);
auto const index = [&uniqueNames](std::string const& s) {
auto const& it = std::find(uniqueNames.begin(), uniqueNames.end(), s);
return std::distance(uniqueNames.begin(), it);
};
std::vector<size_t> localRanks(uniqueNames.size(), 0);
size_t globalRank = 0;
for (auto const& name: nodeNames) {
const size_t nodeId = index(name);
result.push_back({name,
nodeId,
globalRank++,
localRanks[nodeId]++,
(size_t)
std::count(nodeNames.begin(),
nodeNames.end(),
name)
});
}
return result;
}
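// e.g. for nodeNames = {"n0", "n1", "n0"} the ranks get
// (nodeId, localRank, ranksPerNode) = (0,0,2), (1,0,1), (0,1,2)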
struct ClusterInfo {
const size_t nNodes, np, ranksPerNode;
const std::vector<RankInfo> rankInfos;
};
ClusterInfo getClusterInfo(MPI_Comm comm);
ClusterInfo
getClusterInfo(MPI_Comm comm) {
auto const names = getNodeNames(comm);
auto const rankInfos = getNodeInfos(names);
return ClusterInfo {
unique(names).size(),
names.size(),
rankInfos[0].ranksPerNode,
rankInfos
};
}
// Node information:2 ends here
// [[file:~/cuda/atrip/atrip.org::*Naive%20list][Naive list:1]]
ABCTuples getTuplesList(size_t Nv, size_t rank, size_t np);
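// Counting check: the triples with a <= b <= c over Nv values are the
// 3-multisets, of which there are Nv*(Nv+1)*(Nv+2)/6; removing the Nv
// diagonal tuples (a == b == c) gives the total n below.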
ABCTuples getTuplesList(size_t Nv, size_t rank, size_t np) {
const size_t
// total number of tuples for the problem
n = Nv * (Nv + 1) * (Nv + 2) / 6 - Nv
// all ranks should have the same number of tuples_per_rank
, tuples_per_rank = n / np + size_t(n % np != 0)
// start index for the global tuples list
, start = tuples_per_rank * rank
// end index for the global tuples list
, end = tuples_per_rank * (rank + 1)
;
LOG(1,"Atrip") << "tuples_per_rank = " << tuples_per_rank << "\n";
WITH_RANK << "start, end = " << start << ", " << end << "\n";
ABCTuples result(tuples_per_rank, FAKE_TUPLE);
for (size_t a(0), r(0), g(0); a < Nv; a++)
for (size_t b(a); b < Nv; b++)
for (size_t c(b); c < Nv; c++){
if ( a == b && b == c ) continue;
if ( start <= g && g < end) result[r++] = {a, b, c};
g++;
}
return result;
}
// Naive list:1 ends here
// [[file:~/cuda/atrip/atrip.org::*Naive%20list][Naive list:2]]
ABCTuples getAllTuplesList(const size_t Nv);
ABCTuples getAllTuplesList(const size_t Nv) {
const size_t n = Nv * (Nv + 1) * (Nv + 2) / 6 - Nv;
ABCTuples result(n);
for (size_t a(0), u(0); a < Nv; a++)
for (size_t b(a); b < Nv; b++)
for (size_t c(b); c < Nv; c++){
if ( a == b && b == c ) continue;
result[u++] = {a, b, c};
}
return result;
}
// Naive list:2 ends here
// [[file:~/cuda/atrip/atrip.org::*Naive%20list][Naive list:3]]
struct NaiveDistribution : public TuplesDistribution {
ABCTuples getTuples(size_t Nv, MPI_Comm universe) override;
ABCTuples getTuples(size_t Nv, MPI_Comm universe) override {
int rank, np;
MPI_Comm_rank(universe, &rank);
MPI_Comm_size(universe, &np);
return getTuplesList(Nv, (size_t)rank, (size_t)np);
}
};
// Naive list:3 ends here
@ -98,12 +224,19 @@ namespace group_and_sort {
// Right now we distribute the slices in a round robin fashion
// over the different nodes (NOTE: not mpi ranks but nodes)
inline
size_t isOnNode(size_t tuple, size_t nNodes);
size_t isOnNode(size_t tuple, size_t nNodes) { return tuple % nNodes; }
// return the node (or all nodes) where the elements of this
// tuple are located
std::vector<size_t> getTupleNodes(ABCTuple const& t, size_t nNodes);
std::vector<size_t> getTupleNodes(ABCTuple const& t, size_t nNodes) {
std::vector<size_t>
nTuple = { isOnNode(t[0], nNodes)
, isOnNode(t[1], nNodes)
, isOnNode(t[2], nNodes)
};
return unique(nTuple);
}
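// e.g. with nNodes = 4 the tuple {3, 5, 7} maps to nodes {3, 1, 3},
// so getTupleNodes returns the unique set {1, 3}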
struct Info {
size_t nNodes;
@ -112,16 +245,302 @@ struct Info {
// Utils:1 ends here
// [[file:~/cuda/atrip/atrip.org::*Distribution][Distribution:1]]
ABCTuples specialDistribution(Info const& info, ABCTuples const& allTuples);
ABCTuples specialDistribution(Info const& info, ABCTuples const& allTuples) {
ABCTuples nodeTuples;
size_t const nNodes(info.nNodes);
std::vector<ABCTuples>
container1d(nNodes)
, container2d(nNodes * nNodes)
, container3d(nNodes * nNodes * nNodes)
;
WITH_DBG if (info.nodeId == 0)
std::cout << "\tGoing through all "
<< allTuples.size()
<< " tuples in "
<< nNodes
<< " nodes\n";
// build container-n-d's
for (auto const& t: allTuples) {
// find out on which node(s) the tuple elements are located and
// put the tuple into the right container
auto const _nodes = getTupleNodes(t, nNodes);
switch (_nodes.size()) {
case 1:
container1d[_nodes[0]].push_back(t);
break;
case 2:
container2d[ _nodes[0]
+ _nodes[1] * nNodes
].push_back(t);
break;
case 3:
container3d[ _nodes[0]
+ _nodes[1] * nNodes
+ _nodes[2] * nNodes * nNodes
].push_back(t);
break;
}
}
WITH_DBG if (info.nodeId == 0)
std::cout << "\tBuilding 1-d containers\n";
// DISTRIBUTE 1-d containers
// every tuple which is only located at one node belongs to this node
{
auto const& _tuples = container1d[info.nodeId];
nodeTuples.resize(_tuples.size(), INVALID_TUPLE);
std::copy(_tuples.begin(), _tuples.end(), nodeTuples.begin());
}
WITH_DBG if (info.nodeId == 0)
std::cout << "\tBuilding 2-d containers\n";
// DISTRIBUTE 2-d containers
// the tuples which are located on two nodes are split half/half between these nodes
for (size_t yx = 0; yx < container2d.size(); yx++) {
auto const& _tuples = container2d[yx];
const
size_t idx = yx % nNodes
// remember: yx = idy * nNodes + idx
, idy = yx / nNodes
, n_half = _tuples.size() / 2
, size = nodeTuples.size()
;
size_t nbeg, nend;
if (info.nodeId == idx) {
nbeg = 0 * n_half;
nend = n_half;
} else if (info.nodeId == idy) {
nbeg = 1 * n_half;
nend = _tuples.size();
} else {
// neither idx nor idy is this node
continue;
}
size_t const nextra = nend - nbeg;
nodeTuples.resize(size + nextra, INVALID_TUPLE);
std::copy(_tuples.begin() + nbeg,
_tuples.begin() + nend,
nodeTuples.begin() + size);
}
WITH_DBG if (info.nodeId == 0)
std::cout << "\tBuilding 3-d containers\n";
// DISTRIBUTE 3-d containers
for (size_t zyx = 0; zyx < container3d.size(); zyx++) {
auto const& _tuples = container3d[zyx];
const
size_t idx = zyx % nNodes
, idy = (zyx / nNodes) % nNodes
// remember: zyx = idx + idy * nNodes + idz * nNodes^2
, idz = zyx / nNodes / nNodes
, n_third = _tuples.size() / 3
, size = nodeTuples.size()
;
size_t nbeg, nend;
if (info.nodeId == idx) {
nbeg = 0 * n_third;
nend = 1 * n_third;
} else if (info.nodeId == idy) {
nbeg = 1 * n_third;
nend = 2 * n_third;
} else if (info.nodeId == idz) {
nbeg = 2 * n_third;
nend = _tuples.size();
} else {
// neither idx, idy, nor idz is this node
continue;
}
size_t const nextra = nend - nbeg;
nodeTuples.resize(size + nextra, INVALID_TUPLE);
std::copy(_tuples.begin() + nbeg,
_tuples.begin() + nend,
nodeTuples.begin() + size);
}
WITH_DBG if (info.nodeId == 0) std::cout << "\tswapping tuples...\n";
/*
* sort part of the group-and-sort algorithm:
* every tuple on a given node is permuted so that the
* 'home elements' (those stored on this node) sit at the
* fastest (last) indices. Writing y/n for whether elements
* 1/2/3 live on this node, the eight cases are
* 1:yyy 2:yyn(x) 3:yny(x) 4:ynn(x) 5:nyy 6:nyn(x) 7:nny 8:nnn
* where (x) marks the cases that need a swap.
*/
for (auto &nt: nodeTuples){
if ( isOnNode(nt[0], nNodes) == info.nodeId ){ // 1234
if ( isOnNode(nt[2], nNodes) != info.nodeId ){ // 24
size_t const x(nt[0]);
nt[0] = nt[2]; // switch first and last
nt[2] = x;
}
else if ( isOnNode(nt[1], nNodes) != info.nodeId){ // 3
size_t const x(nt[0]);
nt[0] = nt[1]; // switch first two
nt[1] = x;
}
} else {
if ( isOnNode(nt[1], nNodes) == info.nodeId // 56
&& isOnNode(nt[2], nNodes) != info.nodeId
) { // 6
size_t const x(nt[1]);
nt[1] = nt[2]; // switch last two
nt[2] = x;
}
}
}
WITH_DBG if (info.nodeId == 0) std::cout << "\tsorting list of tuples...\n";
// now we sort the list of tuples
std::sort(nodeTuples.begin(), nodeTuples.end());
WITH_DBG if (info.nodeId == 0) std::cout << "\trestoring tuples...\n";
// we bring the tuples abc back in the order a<b<c
for (auto &t: nodeTuples) std::sort(t.begin(), t.end());
#if ATRIP_DEBUG > 1
WITH_DBG if (info.nodeId == 0)
std::cout << "checking for validity of " << nodeTuples.size() << std::endl;
const bool anyInvalid
= std::any_of(nodeTuples.begin(),
nodeTuples.end(),
[](ABCTuple const& t) { return t == INVALID_TUPLE; });
if (anyInvalid) throw "Some tuple is invalid in group-and-sort algorithm";
#endif
WITH_DBG if (info.nodeId == 0) std::cout << "\treturning tuples...\n";
return nodeTuples;
}
// Distribution:1 ends here
// [[file:~/cuda/atrip/atrip.org::*Main][Main:1]]
std::vector<ABCTuple> main(MPI_Comm universe, size_t Nv);
std::vector<ABCTuple> main(MPI_Comm universe, size_t Nv) {
int rank, np;
MPI_Comm_rank(universe, &rank);
MPI_Comm_size(universe, &np);
std::vector<ABCTuple> result;
auto const nodeNames(getNodeNames(universe));
size_t const nNodes = unique(nodeNames).size();
auto const nodeInfos = getNodeInfos(nodeNames);
// We want to construct a communicator which contains only one
// rank per node
bool const computeDistribution
= nodeInfos[rank].localRank == 0;
std::vector<ABCTuple>
nodeTuples
= computeDistribution
? specialDistribution(Info{nNodes, nodeInfos[rank].nodeId},
getAllTuplesList(Nv))
: std::vector<ABCTuple>()
;
LOG(1,"Atrip") << "got nodeTuples\n";
// now we have to send the data from **one** rank on each node
// to all other ranks of this node
const
int color = nodeInfos[rank].nodeId
, key = nodeInfos[rank].localRank
;
MPI_Comm INTRA_COMM;
MPI_Comm_split(universe, color, key, &INTRA_COMM);
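// color = nodeId puts all ranks of one node into the same INTRA_COMM,
// and key = localRank makes the node-local rank 0 its root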
// Main:1 ends here
// [[file:~/cuda/atrip/atrip.org::*Main][Main:2]]
size_t const
tuplesPerRankLocal
= nodeTuples.size() / nodeInfos[rank].ranksPerNode
+ size_t(nodeTuples.size() % nodeInfos[rank].ranksPerNode != 0)
;
size_t tuplesPerRankGlobal;
MPI_Reduce(&tuplesPerRankLocal,
&tuplesPerRankGlobal,
1,
MPI_UINT64_T,
MPI_MAX,
0,
universe);
MPI_Bcast(&tuplesPerRankGlobal,
1,
MPI_UINT64_T,
0,
universe);
LOG(1,"Atrip") << "Tuples per rank: " << tuplesPerRankGlobal << "\n";
LOG(1,"Atrip") << "ranks per node " << nodeInfos[rank].ranksPerNode << "\n";
LOG(1,"Atrip") << "#nodes " << nNodes << "\n";
// Main:2 ends here
// [[file:~/cuda/atrip/atrip.org::*Main][Main:3]]
size_t const totalTuples
= tuplesPerRankGlobal * nodeInfos[rank].ranksPerNode;
if (computeDistribution) {
// pad with FAKE_TUPLEs
nodeTuples.insert(nodeTuples.end(),
totalTuples - nodeTuples.size(),
FAKE_TUPLE);
}
// Main:3 ends here
// [[file:~/cuda/atrip/atrip.org::*Main][Main:4]]
{
// construct mpi type for abctuple
MPI_Datatype MPI_ABCTUPLE;
MPI_Type_vector(nodeTuples[0].size(), 1, 1, MPI_UINT64_T, &MPI_ABCTUPLE);
MPI_Type_commit(&MPI_ABCTUPLE);
LOG(1,"Atrip") << "scattering tuples \n";
result.resize(tuplesPerRankGlobal);
MPI_Scatter(nodeTuples.data(),
tuplesPerRankGlobal,
MPI_ABCTUPLE,
result.data(),
tuplesPerRankGlobal,
MPI_ABCTUPLE,
0,
INTRA_COMM);
MPI_Type_free(&MPI_ABCTUPLE);
}
// Main:4 ends here
// [[file:~/cuda/atrip/atrip.org::*Main][Main:5]]
return result;
}
// Main:5 ends here
// [[file:~/cuda/atrip/atrip.org::*Interface][Interface:1]]
struct Distribution : public TuplesDistribution {
ABCTuples getTuples(size_t Nv, MPI_Comm universe) override;
ABCTuples getTuples(size_t Nv, MPI_Comm universe) override {
return main(universe, Nv);
}
};
// Interface:1 ends here


@ -19,14 +19,8 @@
namespace atrip {
template <typename F=double>
static
void sliceIntoVector
#if defined(ATRIP_SOURCES_IN_GPU)
( DataPtr<F> &source
#else
( std::vector<F> &source
#endif
, size_t sliceSize
( std::vector<F> &v
, CTF::Tensor<F> &toSlice
, std::vector<int64_t> const low
, std::vector<int64_t> const up
@ -50,30 +44,18 @@ namespace atrip {
<< "\n";
#ifndef ATRIP_DONT_SLICE
toSlice.slice(toSlice_.low.data(),
toSlice_.up.data(),
0.0,
origin,
origin_.low.data(),
origin_.up.data(),
1.0);
toSlice.slice( toSlice_.low.data()
, toSlice_.up.data()
, 0.0
, origin
, origin_.low.data()
, origin_.up.data()
, 1.0);
memcpy(v.data(), toSlice.data, sizeof(F) * v.size());
#else
# pragma message("WARNING: COMPILING WITHOUT SLICING THE TENSORS")
#endif
#if defined(ATRIP_SOURCES_IN_GPU)
WITH_CHRONO("cuda:sources",
_CHECK_CUDA_SUCCESS("copying sources data to device",
cuMemcpyHtoD(source,
toSlice.data,
sliceSize));
)
#else
memcpy(source.data(),
toSlice.data,
sizeof(F) * sliceSize);
#endif
}
@ -98,15 +80,16 @@ namespace atrip {
void sliceIntoBuffer(size_t it, CTF::Tensor<F> &to, CTF::Tensor<F> const& from) override
{
const int Nv = this->sliceLength[0]
, No = this->sliceLength[1]
, a = this->rankMap.find({static_cast<size_t>(Atrip::rank), it});
;
const int
Nv = this->sliceLength[0],
No = this->sliceLength[1],
a = this->rankMap.find({static_cast<size_t>(Atrip::rank), it});
sliceIntoVector<F>(this->sources[it], this->sliceSize,
to, {0, 0, 0}, {Nv, No, No},
from, {a, 0, 0, 0}, {a+1, Nv, No, No});
sliceIntoVector<F>( this->sources[it]
, to, {0, 0, 0}, {Nv, No, No}
, from, {a, 0, 0, 0}, {a+1, Nv, No, No}
);
}
@ -135,13 +118,14 @@ namespace atrip {
void sliceIntoBuffer(size_t it, CTF::Tensor<F> &to, CTF::Tensor<F> const& from) override
{
const int
No = this->sliceLength[0],
a = this->rankMap.find({static_cast<size_t>(Atrip::rank), it});
const int No = this->sliceLength[0]
, a = this->rankMap.find({static_cast<size_t>(Atrip::rank), it})
;
sliceIntoVector<F>(this->sources[it], this->sliceSize,
to, {0, 0, 0}, {No, No, No},
from, {0, 0, 0, a}, {No, No, No, a+1});
sliceIntoVector<F>( this->sources[it]
, to, {0, 0, 0}, {No, No, No}
, from, {0, 0, 0, a}, {No, No, No, a+1}
);
}
};
@ -169,17 +153,18 @@ namespace atrip {
void sliceIntoBuffer(size_t it, CTF::Tensor<F> &to, CTF::Tensor<F> const& from) override {
const int
Nv = this->sliceLength[0],
No = this->sliceLength[1],
el = this->rankMap.find({static_cast<size_t>(Atrip::rank), it}),
a = el % Nv,
b = el / Nv;
const int Nv = this->sliceLength[0]
, No = this->sliceLength[1]
, el = this->rankMap.find({static_cast<size_t>(Atrip::rank), it})
, a = el % Nv
, b = el / Nv
;
sliceIntoVector<F>(this->sources[it], this->sliceSize,
to, {0, 0}, {Nv, No},
from, {a, b, 0, 0}, {a+1, b+1, Nv, No});
sliceIntoVector<F>( this->sources[it]
, to, {0, 0}, {Nv, No}
, from, {a, b, 0, 0}, {a+1, b+1, Nv, No}
);
}
@ -206,17 +191,17 @@ namespace atrip {
void sliceIntoBuffer(size_t it, CTF::Tensor<F> &to, CTF::Tensor<F> const& from) override {
const int
Nv = from.lens[0],
No = this->sliceLength[1],
el = this->rankMap.find({static_cast<size_t>(Atrip::rank), it}),
a = el % Nv,
b = el / Nv;
const int Nv = from.lens[0]
, No = this->sliceLength[1]
, el = this->rankMap.find({static_cast<size_t>(Atrip::rank), it})
, a = el % Nv
, b = el / Nv
;
sliceIntoVector<F>(this->sources[it], this->sliceSize,
to, {0, 0}, {No, No},
from, {a, b, 0, 0}, {a+1, b+1, No, No});
sliceIntoVector<F>( this->sources[it]
, to, {0, 0}, {No, No}
, from, {a, b, 0, 0}, {a+1, b+1, No, No}
);
}
@ -246,16 +231,17 @@ namespace atrip {
void sliceIntoBuffer(size_t it, CTF::Tensor<F> &to, CTF::Tensor<F> const& from) override {
// TODO: maybe generalize this with ABHH
const int
Nv = from.lens[0],
No = this->sliceLength[1],
el = this->rankMap.find({static_cast<size_t>(Atrip::rank), it}),
a = el % Nv,
b = el / Nv;
const int Nv = from.lens[0]
, No = this->sliceLength[1]
, el = this->rankMap.find({static_cast<size_t>(Atrip::rank), it})
, a = el % Nv
, b = el / Nv
;
sliceIntoVector<F>(this->sources[it], this->sliceSize,
to, {0, 0}, {No, No},
from, {a, b, 0, 0}, {a+1, b+1, No, No});
sliceIntoVector<F>( this->sources[it]
, to, {0, 0}, {No, No}
, from, {a, b, 0, 0}, {a+1, b+1, No, No}
);
}


@ -1,228 +0,0 @@
#+quicklisp
(eval-when (:compile-toplevel :load-toplevel :execute)
(ql:quickload '(vgplot fiveam)))
(defpackage :naive-tuples
(:use :cl :vgplot))
(in-package :naive-tuples)
(defun tuples-atrip (nv)
(declare (optimize (speed 3) (safety 0) (debug 0)))
(loop :for a :below nv
:append
(loop :for b :from a :below nv
:append
(loop :for c :from b :below nv
:unless (= a b c)
:collect (list a b c)))))
(defun tuples-half (nv)
(declare (optimize (speed 3) (safety 0) (debug 0)))
(loop :for a :below nv
:append
(loop :for b :from a :below nv
:append
(loop :for c :from b :below nv
:collect (list a b c)))))
(defun tuples-all (nv)
(declare (optimize (speed 3) (safety 0) (debug 0)))
(loop :for a :below nv
:append
(loop :for b :below nv
:append
(loop :for c :below nv
:collect (list a b c)))))
(defun tuples-all-nth (i nv)
(declare (optimize (speed 3) (safety 0) (debug 0)))
(list (floor i (* nv nv))
(mod (floor i nv) nv)
(mod i nv)))
(defparameter tups (tuples-all 10))
(defun compare-all (l)
(declare (optimize (speed 3) (safety 0) (debug 0)))
(let* ((tups (tuples-all l)))
(loop for i below (length tups)
do (let* ((good (nth i tups))
(bad (tuples-all-nth i l))
(eq? (equal good bad)))
(unless eq?
(print (list :|i| i
:good good
:bad bad)))))))
;; (defun a-half (i nv)
;; (let ((divisor t)
;; (j i)
;; (total-blk 0))
;; (loop :for a :below nv
;; :unless (eq divisor 0)
;; :do (let ((blk (a-block a nv)))
;; (multiple-value-bind (d r) (floor j blk)
;; (declare (ignore r))
;; (when (> d 0)
;; (incf total-blk blk))
;; (setq j (- j blk)
;; divisor d)))
;; :else
;; :return (values (- a 1)
;; i
;; total-blk))))
;; (defun b-half (i a nv a-block-sum)
;; "we have
;; \begin{equation}
;; i = \underbrace{B(a_0) +
;; \cdots +
;; B(a_{i-1})}_{\texttt{a-block-sum}}
;; + idx
;; \end{equation}
;; and with this we just have to divide.
;; "
;; (let ((bj (if (> a-block-sum 0)
;; (mod i a-block-sum)
;; i))
;; (total-blk 0))
;; (loop :for b :from a :below Nv
;; :with divisor = 1
;; :unless (eq divisor 0)
;; :do (let ((blk (+ (- nv a)
;; #|because|# 1)))
;; (incf total-blk blk)
;; (if (> blk 0)
;; (multiple-value-bind (d r) (floor bj blk)
;; (declare (ignore r))
;; (setq bj (- bj blk)
;; divisor d))
;; (setq divisor 0)))
;; :else
;; :return (values (- b 1)
;; bj
;; total-blk))))
(defun a-block (a nv)
(declare (optimize (speed 3) (safety 0) (debug 0)))
(- (* (- nv 1) (- nv (- a 1)))
(- (floor (* (- nv 1) nv)
2)
(floor (* (- a 1) (- a 2))
2))))
(defun a-block-sum (|t| nv)
(macrolet ((ssum (n) `(floor (* ,n (+ ,n 1))
2))
(qsum (n) `(floor (* ,n
(+ ,n 1)
(+ 1 (* 2 ,n)))
6)))
(let ((nv-1 (- nv 1))
(t+1 (+ |t| 1)))
(+ (* t+1 nv-1 nv)
(* nv-1 t+1)
(- (* nv-1
(ssum |t|)))
(- (* t+1
(ssum nv-1)))
(floor (- (qsum |t|)
(* 3 (ssum |t|)))
2)
t+1))))
(defun get-half (i nv &key from block)
(let ((divisor 1)
(j i)
(total-blk 0))
(loop :for α :from from :below nv
:unless (eq divisor 0)
:do (let ((blk (funcall block α nv)))
(multiple-value-bind (d r) (floor j blk)
(declare (ignore r))
(when (> d 0)
(incf total-blk blk)
(setq j (- j blk)))
(setq divisor d)))
:else
:return (values (- α 1)
j
total-blk))))
(defun tuples-half-nth (i nv)
(declare (optimize (speed 3) (safety 0) (debug 0)))
(flet ((bc-block (x %nv)
(+ 1 (- %nv x))))
(multiple-value-bind (a aj blks) (get-half i nv :from 0 :block #'a-block)
(declare (ignore blks))
(multiple-value-bind (b bj blks) (get-half aj nv
:from a
:block #'bc-block)
(declare (ignore blks))
(multiple-value-bind (c cj blks) (get-half bj nv
:from b
:block #'bc-block)
(declare (ignore blks))
(print (list :idxs aj bj cj))
(list a b c))))))
(defun a-block-atrip (a nv)
(declare (optimize (speed 3) (safety 0) (debug 0)))
(- (a-block a nv) 1))
(defun a-block-sum-atrip (|t| nv)
(declare (optimize (speed 3) (safety 0) (debug 0)))
(- (a-block-sum |t| nv) (+ |t| 1)))
(defun b-block-sum-atrip (a |t| nv)
(- (* nv
(1+ (- |t| a)))
(floor (- (* |t| (1+ |t|))
(* a (- a 1)))
2)
1))
(defun nth-atrip (i nv)
(let ((sums (mapcar (lambda (s) (a-block-sum-atrip s nv))
(loop :for j :below nv :collect j))))
(multiple-value-bind (a ablk)
(loop :for sum :in sums
:with a = -1
:with base = 0
:do (incf a)
:if (eq (floor i sum) 0)
:return (values a base)
:else
:do (setq base sum))
(multiple-value-bind (b bblk)
(let ((sums (mapcar (lambda (s)
(+ ablk
#+nil(- nv s 1)
(b-block-sum-atrip a s nv)))
(loop :for b :from a :below nv
:collect b))))
(loop :for sum :in sums
:with b = (- a 1)
:with base = ablk
:do (incf b)
:if (< i sum)
:return (values b base)
:else
:do (progn
;; (print sums)
(setq base sum))))
(list a b (+ b
(- i bblk)
(if (eq a b)
1
0)))))))
(defun atrip-test (i nv)
(let ((tuples (tuples-atrip nv))
(cheaper (nth-atrip i nv)))
(values (nth i tuples)
cheaper
(print (equal (nth i tuples)
cheaper)))))


@ -1,5 +1,5 @@
{ compiler ? "gcc"
, pkgs ? import <nixpkgs> {}
, pkgs ? import <nixpkgs> {}
, mkl ? false
, cuda ? false
, docs ? true
@ -11,8 +11,7 @@ let
config.allowUnfree = true;
};
openblas = import ./etc/nix/openblas.nix { inherit pkgs; };
vendor = import ./etc/nix/vendor-shell.nix;
openblas = import ./etc/nix/openblas.nix { inherit pkgs; };
mkl-pkg = import ./etc/nix/mkl.nix { pkgs = unfree-pkgs; };
cuda-pkg = if cuda then (import ./cuda.nix { pkgs = unfree-pkgs; }) else {};
@ -58,15 +57,14 @@ pkgs.mkShell rec {
buildInputs
= with pkgs; [
gdb
coreutils
git
vim
git vim
openmpi
llvmPackages.openmp
binutils
emacs
gfortran
gnumake
@ -86,15 +84,6 @@ pkgs.mkShell rec {
shellHook
=
''
${vendor.src}
${vendor.cpath "${pkgs.openmpi.out}/include"}
${vendor.cpath "${openblas.pkg.dev}/include"}
${vendor.lib "${pkgs.openmpi.out}/lib"}
${vendor.lib "${openblas.pkg.out}/lib"}
export OMPI_CXX=${CXX}
export OMPI_CC=${CC}
CXX=${CXX}


@ -7,7 +7,7 @@ AM_CPPFLAGS = $(CTF_CPPFLAGS)
lib_LIBRARIES = libatrip.a
libatrip_a_CPPFLAGS = -I$(top_srcdir)/include/
libatrip_a_SOURCES = ./atrip/Blas.cxx ./atrip/Tuples.cxx ./atrip/DatabaseCommunicator.cxx
libatrip_a_SOURCES = ./atrip/Blas.cxx
NVCC_FILES = ./atrip/Equations.cxx ./atrip/Complex.cxx ./atrip/Atrip.cxx
if WITH_CUDA


@ -21,7 +21,6 @@
#include <atrip/SliceUnion.hpp>
#include <atrip/Unions.hpp>
#include <atrip/Checkpoint.hpp>
#include <atrip/DatabaseCommunicator.hpp>
using namespace atrip;
#if defined(HAVE_CUDA)
@ -300,23 +299,9 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
using Database = typename Slice<F>::Database;
auto communicateDatabase
= [ &unions
, &in
, Nv
, np
] (ABCTuple const& abc, MPI_Comm const& c, size_t iteration) -> Database {
] (ABCTuple const& abc, MPI_Comm const& c) -> Database {
if (in.tuplesDistribution == Atrip::Input<F>::TuplesDistribution::NAIVE) {
WITH_CHRONO("db:comm:naive",
auto const& db = naiveDatabase<F>(unions,
Nv,
np,
iteration,
c);
)
return db;
} else {
WITH_CHRONO("db:comm:type:do",
auto MPI_LDB_ELEMENT = Slice<F>::mpi::localDatabaseElement();
)
@ -349,8 +334,6 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
WITH_CHRONO("db:comm:type:free", MPI_Type_free(&MPI_LDB_ELEMENT);)
return db;
}
};
auto doIOPhase
@ -454,7 +437,6 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
// START MAIN LOOP ======================================================{{{1
MPI_Barrier(universe);
double energy(0.);
size_t first_iteration = 0;
Checkpoint c;
@ -582,7 +564,7 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
// COMM FIRST DATABASE ================================================{{{1
if (i == first_iteration) {
WITH_RANK << "__first__:first database ............ \n";
const auto db = communicateDatabase(abc, universe, i);
const auto db = communicateDatabase(abc, universe);
WITH_RANK << "__first__:first database communicated \n";
WITH_RANK << "__first__:first database io phase \n";
doIOPhase(db);
@ -597,7 +579,7 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
if (abcNext) {
WITH_RANK << "__comm__:" << iteration << "th communicating database\n";
WITH_CHRONO("db:comm",
const auto db = communicateDatabase(*abcNext, universe, i);
const auto db = communicateDatabase(*abcNext, universe);
)
WITH_CHRONO("db:io",
doIOPhase(db);
@ -646,9 +628,6 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
// COMPUTE SINGLES %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% {{{1
OCD_Barrier(universe);
#if defined(ATRIP_ONLY_DGEMM)
if (false)
#endif
if (!isFakeTuple(i)) {
WITH_CHRONO("oneshot-unwrap",
WITH_CHRONO("unwrap",
@ -681,9 +660,6 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
// COMPUTE ENERGY %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% {{{1
#if defined(ATRIP_ONLY_DGEMM)
if (false)
#endif
if (!isFakeTuple(i)) {
double tupleEnergy(0.);


@ -1,303 +0,0 @@
#include <atrip/DatabaseCommunicator.hpp>
#include <atrip/Complex.hpp>
namespace atrip {
#if defined(ATRIP_NAIVE_SLOW)
/*
* This function is really too slow, below are more performant
* functions to get tuples.
*/
static
ABCTuples get_nth_naive_tuples(size_t Nv, size_t np, int64_t i) {
const size_t
// total number of tuples for the problem
n = Nv * (Nv + 1) * (Nv + 2) / 6 - Nv
// all ranks should have the same number of tuples_per_rank
, tuples_per_rank = n / np + size_t(n % np != 0)
;
ABCTuples result(np);
if (i < 0) return result;
std::vector<size_t>
rank_indices(np, 0);
for (size_t a(0), g(0); a < Nv; a++)
for (size_t b(a); b < Nv; b++)
for (size_t c(b); c < Nv; c++){
if ( a == b && b == c ) continue;
for (size_t rank = 0; rank < np; rank++) {
const size_t
// start index for the global tuples list
start = tuples_per_rank * rank
// end index for the global tuples list
, end = tuples_per_rank * (rank + 1)
;
if ( start <= g && g < end) {
if (rank_indices[rank] == i) {
result[rank] = {a, b, c};
}
rank_indices[rank] += 1;
}
}
g++;
}
return result;
}
#endif
static
inline
size_t a_block_sum_atrip(int64_t T, int64_t nv) {
const int64_t nv_min_1 = nv - 1, t_plus_1 = T + 1;
return t_plus_1 * nv_min_1 * nv
+ nv_min_1 * t_plus_1
- (nv_min_1 * (T * t_plus_1) / 2)
- (t_plus_1 * (nv_min_1 * nv) / 2)
// do not simplify this expression: only the sum of both parts
// is guaranteed to be an even integer, so simplifying breaks
// the integer divisions
+ (((T * t_plus_1 * (1 + 2 * T)) / 6) - 3 * ((T * t_plus_1) / 2)) / 2
;
}
static
inline
int64_t b_block_sum_atrip (int64_t a, int64_t T, int64_t nv) {
return nv * ((T - a) + 1)
- (T * (T + 1) - a * (a - 1)) / 2
- 1;
}
static std::vector<size_t> a_sums;
static
inline
ABCTuple nth_atrip(size_t it, size_t nv) {
// build the sums if necessary
if (!a_sums.size()) {
a_sums.resize(nv);
for (size_t _i = 0; _i < nv; _i++) {
a_sums[_i] = a_block_sum_atrip(_i, nv);
}
}
int64_t a = -1, block_a = 0;
for (const auto& sum: a_sums) {
++a;
if (sum > it) {
break;
} else {
block_a = sum;
}
}
// build the b_sums
std::vector<int64_t> b_sums(nv - a);
for (size_t t = a, i=0; t < nv; t++) {
b_sums[i++] = b_block_sum_atrip(a, t, nv);
}
int64_t b = a - 1, block_b = block_a;
for (const auto& sum: b_sums) {
++b;
if (sum + block_a > it) {
break;
} else {
block_b = sum + block_a;
}
}
const int64_t
c = b + it - block_b + (a == b);
return {(size_t)a, (size_t)b, (size_t)c};
}
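// Sanity sketch: for nv = 4 the enumeration a <= b <= c (minus the
// diagonal) starts {0,0,1}, {0,0,2}, {0,0,3}, {0,1,1}, ..., so
// nth_atrip(0, 4) should yield {0,0,1} and nth_atrip(3, 4) {0,1,1}.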
static
inline
ABCTuples nth_atrip_distributed(int64_t it, size_t nv, size_t np) {
// If we are getting the previous tuples in the first iteration,
// then just return an impossible tuple, different from the FAKE_TUPLE,
// because if FAKE_TUPLE is defined as {0,0,0} slices thereof
// are actually attainable.
//
if (it < 0) {
ABCTuples result(np, {nv, nv, nv});
return result;
}
ABCTuples result(np);
const size_t
// total number of tuples for the problem
n = nv * (nv + 1) * (nv + 2) / 6 - nv
// all ranks should have the same number of tuples_per_rank
, tuples_per_rank = n / np + size_t(n % np != 0)
;
for (size_t rank = 0; rank < np; rank++) {
const size_t
global_iteration = tuples_per_rank * rank + it;
result[rank] = nth_atrip(global_iteration, nv);
}
return result;
}
template <typename F>
static
typename Slice<F>::LocalDatabase
build_local_database_fake(ABCTuple const& abc_prev,
ABCTuple const& abc,
size_t rank,
SliceUnion<F>* u) {
typename Slice<F>::LocalDatabase result;
// vector of type x tuple
auto const needed = u->neededSlices(abc);
auto const needed_prev = u->neededSlices(abc_prev);
for (auto const& pair: needed) {
auto const type = pair.first;
auto const tuple = pair.second;
auto const from = u->rankMap.find(abc, type);
// Try to find in the previously needed slices
// one that exactly matches the tuple.
// Not necessarily has to match the type.
//
// If we find it, then it means that the fake rank
// will mark it as recycled. This covers
// the finding of Ready slices and Recycled slices.
{
auto const& it
= std::find_if(needed_prev.begin(), needed_prev.end(),
[&tuple, &type](typename Slice<F>::Ty_x_Tu const& o) {
return o.second == tuple;
});
if (it != needed_prev.end()) {
typename Slice<F>::Info info;
info.tuple = tuple;
info.type = type;
info.from = from;
info.state = Slice<F>::Recycled;
result.push_back({u->name, info});
continue;
}
}
{
typename Slice<F>::Info info;
info.type = type;
info.tuple = tuple;
info.from = from;
// Handle self sufficiency
info.state = rank == from.rank
? Slice<F>::SelfSufficient
: Slice<F>::Fetch
;
result.push_back({u->name, info});
continue;
}
}
return result;
}
template <typename F>
typename Slice<F>::Database
naiveDatabase(Unions<F> &unions,
size_t nv,
size_t np,
size_t iteration,
MPI_Comm const& c) {
using Database = typename Slice<F>::Database;
Database db;
#ifdef ATRIP_NAIVE_SLOW
WITH_CHRONO("db:comm:naive:tuples",
const auto tuples = get_nth_naive_tuples(nv,
np,
iteration);
const auto prev_tuples = get_nth_naive_tuples(nv,
np,
iteration - 1);
)
#else
WITH_CHRONO("db:comm:naive:tuples",
const auto tuples = nth_atrip_distributed(iteration,
nv,
np);
const auto prev_tuples = nth_atrip_distributed(iteration - 1,
nv,
np);
)
#endif
for (size_t rank = 0; rank < np; rank++) {
auto abc = tuples[rank];
typename Slice<F>::LocalDatabase ldb;
for (auto const& tensor: unions) {
if (rank == Atrip::rank) {
auto const& tensorDb = tensor->buildLocalDatabase(abc);
ldb.insert(ldb.end(), tensorDb.begin(), tensorDb.end());
} else {
auto const& tensorDb
= build_local_database_fake(prev_tuples[rank],
abc,
rank,
tensor);
ldb.insert(ldb.end(), tensorDb.begin(), tensorDb.end());
}
}
db.insert(db.end(), ldb.begin(), ldb.end());
}
return db;
}
template
typename Slice<double>::Database
naiveDatabase<double>(Unions<double> &unions,
size_t nv,
size_t np,
size_t iteration,
MPI_Comm const& c);
template
typename Slice<Complex>::Database
naiveDatabase<Complex>(Unions<Complex> &unions,
size_t nv,
size_t np,
size_t iteration,
MPI_Comm const& c);
} // namespace atrip


@ -151,11 +151,12 @@ namespace cuda {
KJI
};
/*
 * Appease the C++ type checker and template machinery: the extra
 * tag argument in the function signature lets the compiler deduce
 * which instantiation to take.
 *
 */
template <typename F, reordering_t R>
struct reorder_proxy {};
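// --- Aside: a minimal, self-contained sketch of the tag-dispatch
// trick described above (names here are hypothetical). The empty tag
// type carries the reordering as a compile-time argument, and overload
// resolution on the tag picks the right instantiation.

#include <cstdio>

enum reordering_t { IJK, KJI };

template <reordering_t R>
struct proxy {};

void reorder(proxy<IJK>) { std::printf("IJK path\n"); }
void reorder(proxy<KJI>) { std::printf("KJI path\n"); }

int main() {
  reorder(proxy<IJK>{});  // prints "IJK path"
  reorder(proxy<KJI>{});  // prints "KJI path"
}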
@@ -435,20 +436,22 @@ double getEnergySame
, DataFieldType<F>* Tijk_
) {
const size_t NoNo = No*No;
const size_t a = abc[0], b = abc[1], c = abc[2]
, NoNo = No*No
;
DataFieldType<F>* Tijk = (DataFieldType<F>*)Tijk_;
#if defined(ATRIP_USE_DGEMM)
#if defined(HAVE_CUDA)
#define REORDER(__II, __JJ, __KK) \
reorder<<<bs, ths>>>(reorder_proxy< \
DataFieldType<F>, \
__II ## __JJ ## __KK \
>{}, \
No, \
Tijk, \
_t_buffer)
#define REORDER(__II, __JJ, __KK) \
reorder<<<bs, ths>>>(reorder_proxy< \
DataFieldType<F>, \
__II ## __JJ ## __KK \
>{}, \
No, \
Tijk, \
_t_buffer);
#define DGEMM_PARTICLES(__A, __B) \
atrip::xgemm<F>("T", \
"N", \
@@ -478,18 +481,11 @@ double getEnergySame
_t_buffer, \
(int const*)&NoNo \
)
#define MAYBE_CONJ(_conj, _buffer) \
do { \
cuda::maybeConjugate<<< \
\
Atrip::kernelDimensions.ooo.blocks, \
\
Atrip::kernelDimensions.ooo.threads \
\
>>>((DataFieldType<F>*)_conj, \
(DataFieldType<F>*)_buffer, \
NoNoNo); \
} while (0)
#define MAYBE_CONJ(_conj, _buffer) \
cuda::maybeConjugate<<< \
Atrip::kernelDimensions.ooo.blocks, \
Atrip::kernelDimensions.ooo.threads \
>>>((DataFieldType<F>*)_conj, (DataFieldType<F>*)_buffer, NoNoNo);
// END CUDA ////////////////////////////////////////////////////////////////////
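// --- Aside: the two MAYBE_CONJ variants above differ in macro
// hygiene. Wrapping a multi-statement macro in do { ... } while (0)
// makes it behave as a single statement under an unbraced if/else;
// the bare form silently breaks that. A minimal sketch with
// hypothetical macro names:

#include <cstdio>

// Unsafe: expands to two independent statements.
#define KERNEL_BARE(x)                          \
  std::printf("kernel %d\n", (x));              \
  std::printf("sync\n");

// Safe: behaves as one statement, requires a trailing semicolon.
#define KERNEL_SAFE(x)                          \
  do {                                          \
    std::printf("kernel %d\n", (x));            \
    std::printf("sync\n");                      \
  } while (0)

int main() {
  bool run = false;
  if (run)
    KERNEL_SAFE(1);  // one statement: nothing prints
  if (run)
    KERNEL_BARE(1);  // "sync" prints unconditionally!
  return 0;
}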
@@ -504,9 +500,7 @@ double getEnergySame
#define REORDER(__II, __JJ, __KK) \
reorder(reorder_proxy<DataFieldType<F>, \
__II ## __JJ ## __KK >{}, \
No, \
Tijk, \
_t_buffer)
No, Tijk, _t_buffer);
#define DGEMM_PARTICLES(__A, __B) \
atrip::xgemm<F>("T", \
"N", \
@@ -537,13 +531,9 @@ double getEnergySame
_t_buffer, \
(int const*)&NoNo \
)
#define MAYBE_CONJ(_conj, _buffer) \
do { \
for (size_t __i = 0; __i < NoNoNo; ++__i) { \
_conj[__i] \
= maybeConjugate<F>(_buffer[__i]); \
} \
} while (0)
#define MAYBE_CONJ(_conj, _buffer) \
for (size_t __i = 0; __i < NoNoNo; ++__i) \
_conj[__i] = maybeConjugate<F>(_buffer[__i]);
#endif
F one{1.0}, m_one{-1.0}, zero{0.0};
@@ -562,12 +552,8 @@ double getEnergySame
const size_t
bs = Atrip::kernelDimensions.ooo.blocks,
ths = Atrip::kernelDimensions.ooo.threads;
#if !defined(ATRIP_ONLY_DGEMM)
cuda::zeroing<<<bs, ths>>>((DataFieldType<F>*)_t_buffer, NoNoNo);
cuda::zeroing<<<bs, ths>>>((DataFieldType<F>*)_vhhh, NoNoNo);
#endif
#else
DataFieldType<F>* _t_buffer = (DataFieldType<F>*)malloc(NoNoNo * sizeof(F));
DataFieldType<F>* _vhhh = (DataFieldType<F>*)malloc(NoNoNo * sizeof(F));
@@ -579,7 +565,7 @@ double getEnergySame
#endif
// Set Tijk to zero
#if defined(HAVE_CUDA) && !defined(ATRIP_ONLY_DGEMM)
#ifdef HAVE_CUDA
WITH_CHRONO("double:reorder",
cuda::zeroing<<<bs, ths>>>((DataFieldType<F>*)Tijk,
NoNoNo);
@@ -591,51 +577,43 @@ double getEnergySame
})
#endif
#if defined(ATRIP_ONLY_DGEMM)
#undef MAYBE_CONJ
#undef REORDER
#define MAYBE_CONJ(a, b) do {} while(0)
#define REORDER(i, j, k) do {} while(0)
#endif
// HOLES
WITH_CHRONO("doubles:holes",
{
// VhhhC[i + k*No + L*NoNo] * TABhh[L + j*No]; H1
MAYBE_CONJ(_vhhh, VhhhC);
MAYBE_CONJ(_vhhh, VhhhC)
WITH_CHRONO("doubles:holes:1",
DGEMM_HOLES(_vhhh, TABhh, "N");
REORDER(I, K, J);
REORDER(I, K, J)
)
// VhhhC[j + k*No + L*NoNo] * TABhh[i + L*No]; H0
WITH_CHRONO("doubles:holes:2",
DGEMM_HOLES(_vhhh, TABhh, "T");
REORDER(J, K, I);
REORDER(J, K, I)
)
// VhhhB[i + j*No + L*NoNo] * TAChh[L + k*No]; H5
MAYBE_CONJ(_vhhh, VhhhB);
MAYBE_CONJ(_vhhh, VhhhB)
WITH_CHRONO("doubles:holes:3",
DGEMM_HOLES(_vhhh, TAChh, "N");
REORDER(I, J, K);
REORDER(I, J, K)
)
// VhhhB[k + j*No + L*NoNo] * TAChh[i + L*No]; H3
WITH_CHRONO("doubles:holes:4",
DGEMM_HOLES(_vhhh, TAChh, "T");
REORDER(K, J, I);
REORDER(K, J, I)
)
// VhhhA[j + i*No + L*NoNo] * TBChh[L + k*No]; H1
MAYBE_CONJ(_vhhh, VhhhA);
MAYBE_CONJ(_vhhh, VhhhA)
WITH_CHRONO("doubles:holes:5",
DGEMM_HOLES(_vhhh, TBChh, "N");
REORDER(J, I, K);
REORDER(J, I, K)
)
// VhhhA[k + i*No + L*NoNo] * TBChh[j + L*No]; H4
WITH_CHRONO("doubles:holes:6",
DGEMM_HOLES(_vhhh, TBChh, "T");
REORDER(K, I, J);
REORDER(K, I, J)
)
}
)
@@ -647,32 +625,32 @@ double getEnergySame
// TAphh[E + i*Nv + j*NoNv] * VBCph[E + k*Nv]; P0
WITH_CHRONO("doubles:particles:1",
DGEMM_PARTICLES(TAphh, VBCph);
REORDER(I, J, K);
REORDER(I, J, K)
)
// TAphh[E + i*Nv + k*NoNv] * VCBph[E + j*Nv]; P3
WITH_CHRONO("doubles:particles:2",
DGEMM_PARTICLES(TAphh, VCBph);
REORDER(I, K, J);
REORDER(I, K, J)
)
// TCphh[E + k*Nv + i*NoNv] * VABph[E + j*Nv]; P5
WITH_CHRONO("doubles:particles:3",
DGEMM_PARTICLES(TCphh, VABph);
REORDER(K, I, J);
REORDER(K, I, J)
)
// TCphh[E + k*Nv + j*NoNv] * VBAph[E + i*Nv]; P2
WITH_CHRONO("doubles:particles:4",
DGEMM_PARTICLES(TCphh, VBAph);
REORDER(K, J, I);
REORDER(K, J, I)
)
// TBphh[E + j*Nv + i*NoNv] * VACph[E + k*Nv]; P1
WITH_CHRONO("doubles:particles:5",
DGEMM_PARTICLES(TBphh, VACph);
REORDER(J, I, K);
REORDER(J, I, K)
)
// TBphh[E + j*Nv + k*NoNv] * VCAph[E + i*Nv]; P4
WITH_CHRONO("doubles:particles:6",
DGEMM_PARTICLES(TBphh, VCAph);
REORDER(J, K, I);
REORDER(J, K, I)
)
}
)

View File

@@ -1,464 +0,0 @@
#include <atrip/Tuples.hpp>
#include <atrip/Atrip.hpp>
namespace atrip {
template <typename A>
static A unique(A const &xs) {
auto result = xs;
std::sort(std::begin(result), std::end(result));
auto const& last = std::unique(std::begin(result), std::end(result));
result.erase(last, std::end(result));
return result;
}
std::vector<std::string> getNodeNames(MPI_Comm comm){
int rank, np;
MPI_Comm_rank(comm, &rank);
MPI_Comm_size(comm, &np);
std::vector<std::string> nodeList(np);
char nodeName[MPI_MAX_PROCESSOR_NAME];
char *nodeNames = (char*)malloc(np * MPI_MAX_PROCESSOR_NAME);
std::vector<int> nameLengths(np)
, off(np)
;
int nameLength;
MPI_Get_processor_name(nodeName, &nameLength);
MPI_Allgather(&nameLength,
1,
MPI_INT,
nameLengths.data(),
1,
MPI_INT,
comm);
for (int i(1); i < np; i++)
off[i] = off[i-1] + nameLengths[i-1];
MPI_Allgatherv(nodeName,
nameLengths[rank],
MPI_BYTE,
nodeNames,
nameLengths.data(),
off.data(),
MPI_BYTE,
comm);
for (int i(0); i < np; i++) {
std::string const s(&nodeNames[off[i]], nameLengths[i]);
nodeList[i] = s;
}
std::free(nodeNames);
return nodeList;
}
std::vector<RankInfo>
getNodeInfos(std::vector<std::string> const& nodeNames) {
std::vector<RankInfo> result;
auto const uniqueNames = unique(nodeNames);
auto const index = [&uniqueNames](std::string const& s) {
auto const& it = std::find(uniqueNames.begin(), uniqueNames.end(), s);
return std::distance(uniqueNames.begin(), it);
};
std::vector<size_t> localRanks(uniqueNames.size(), 0);
size_t globalRank = 0;
for (auto const& name: nodeNames) {
const size_t nodeId = index(name);
result.push_back({name,
nodeId,
globalRank++,
localRanks[nodeId]++,
(size_t)
std::count(nodeNames.begin(),
nodeNames.end(),
name)
});
}
return result;
}
ClusterInfo
getClusterInfo(MPI_Comm comm) {
auto const names = getNodeNames(comm);
auto const rankInfos = getNodeInfos(names);
return ClusterInfo {
unique(names).size(),
names.size(),
rankInfos[0].ranksPerNode,
rankInfos
};
}
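// --- Illustrative usage sketch of the helpers above (assumes an MPI
// program; atrip/Tuples.hpp is assumed to declare getClusterInfo, and
// the ClusterInfo field names below are inferred from the aggregate
// initialization above and may differ in the real header):

#include <mpi.h>
#include <cstdio>
#include <atrip/Tuples.hpp>

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  // Collective: every rank ends up with the same cluster view.
  auto const cluster = atrip::getClusterInfo(MPI_COMM_WORLD);
  int rank;
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  if (rank == 0)
    std::printf("nodes=%zu ranks=%zu ranks/node=%zu\n",
                cluster.nNodes, cluster.np, cluster.ranksPerNode);
  MPI_Finalize();
}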
ABCTuples getTuplesList(size_t Nv, size_t rank, size_t np) {
const size_t
// total number of tuples for the problem
n = Nv * (Nv + 1) * (Nv + 2) / 6 - Nv
// all ranks should have the same number of tuples_per_rank
, tuples_per_rank = n / np + size_t(n % np != 0)
// start index for the global tuples list
, start = tuples_per_rank * rank
// end index for the global tuples list
, end = tuples_per_rank * (rank + 1)
;
LOG(1,"Atrip") << "tuples_per_rank = " << tuples_per_rank << "\n";
WITH_RANK << "start, end = " << start << ", " << end << "\n";
ABCTuples result(tuples_per_rank, FAKE_TUPLE);
for (size_t a(0), r(0), g(0); a < Nv; a++)
for (size_t b(a); b < Nv; b++)
for (size_t c(b); c < Nv; c++){
if ( a == b && b == c ) continue;
if ( start <= g && g < end) result[r++] = {a, b, c};
g++;
}
return result;
}
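// --- Sanity check: the closed-form count n = Nv(Nv+1)(Nv+2)/6 - Nv
// used above can be verified against the triple loop directly
// (standalone sketch):

#include <cstdio>
#include <cstddef>

int main() {
  for (size_t Nv = 2; Nv <= 20; Nv++) {
    size_t brute = 0;
    for (size_t a = 0; a < Nv; a++)
      for (size_t b = a; b < Nv; b++)
        for (size_t c = b; c < Nv; c++)
          if (!(a == b && b == c)) brute++;
    const size_t closed = Nv * (Nv + 1) * (Nv + 2) / 6 - Nv;
    std::printf("Nv=%2zu brute=%5zu closed=%5zu %s\n", Nv, brute, closed,
                brute == closed ? "ok" : "MISMATCH");
  }
}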
ABCTuples getAllTuplesList(const size_t Nv) {
const size_t n = Nv * (Nv + 1) * (Nv + 2) / 6 - Nv;
ABCTuples result(n);
for (size_t a(0), u(0); a < Nv; a++)
for (size_t b(a); b < Nv; b++)
for (size_t c(b); c < Nv; c++){
if ( a == b && b == c ) continue;
result[u++] = {a, b, c};
}
return result;
}
ABCTuples atrip::NaiveDistribution::getTuples(size_t Nv, MPI_Comm universe) {
int rank, np;
MPI_Comm_rank(universe, &rank);
MPI_Comm_size(universe, &np);
return getTuplesList(Nv, (size_t)rank, (size_t)np);
}
namespace group_and_sort {
inline
size_t isOnNode(size_t tuple, size_t nNodes) { return tuple % nNodes; }
std::vector<size_t> getTupleNodes(ABCTuple const& t, size_t nNodes) {
std::vector<size_t>
nTuple = { isOnNode(t[0], nNodes)
, isOnNode(t[1], nNodes)
, isOnNode(t[2], nNodes)
};
return unique(nTuple);
}
ABCTuples specialDistribution(Info const& info, ABCTuples const& allTuples) {
ABCTuples nodeTuples;
size_t const nNodes(info.nNodes);
std::vector<ABCTuples>
container1d(nNodes)
, container2d(nNodes * nNodes)
, container3d(nNodes * nNodes * nNodes)
;
WITH_DBG if (info.nodeId == 0)
std::cout << "\tGoing through all "
<< allTuples.size()
<< " tuples in "
<< nNodes
<< " nodes\n";
// build container-n-d's
for (auto const& t: allTuples) {
// on which node(s) are the tuple elements located...
// put them into the right container
auto const _nodes = getTupleNodes(t, nNodes);
switch (_nodes.size()) {
case 1:
container1d[_nodes[0]].push_back(t);
break;
case 2:
container2d[ _nodes[0]
+ _nodes[1] * nNodes
].push_back(t);
break;
case 3:
container3d[ _nodes[0]
+ _nodes[1] * nNodes
+ _nodes[2] * nNodes * nNodes
].push_back(t);
break;
}
}
WITH_DBG if (info.nodeId == 0)
std::cout << "\tBuilding 1-d containers\n";
// DISTRIBUTE 1-d containers
// every tuple located on only one node belongs to that node
{
auto const& _tuples = container1d[info.nodeId];
nodeTuples.resize(_tuples.size(), INVALID_TUPLE);
std::copy(_tuples.begin(), _tuples.end(), nodeTuples.begin());
}
WITH_DBG if (info.nodeId == 0)
std::cout << "\tBuilding 2-d containers\n";
// DISTRIBUTE 2-d containers
// tuples located on two nodes are split half/half between these nodes
for (size_t yx = 0; yx < container2d.size(); yx++) {
auto const& _tuples = container2d[yx];
const
size_t idx = yx % nNodes
// remember: yx = idy * nNodes + idx
, idy = yx / nNodes
, n_half = _tuples.size() / 2
, size = nodeTuples.size()
;
size_t nbeg, nend;
if (info.nodeId == idx) {
nbeg = 0 * n_half;
nend = n_half;
} else if (info.nodeId == idy) {
nbeg = 1 * n_half;
nend = _tuples.size();
} else {
// neither idx nor idy is my node
continue;
}
size_t const nextra = nend - nbeg;
nodeTuples.resize(size + nextra, INVALID_TUPLE);
std::copy(_tuples.begin() + nbeg,
_tuples.begin() + nend,
nodeTuples.begin() + size);
}
WITH_DBG if (info.nodeId == 0)
std::cout << "\tBuilding 3-d containers\n";
// DISTRIBUTE 3-d containers
for (size_t zyx = 0; zyx < container3d.size(); zyx++) {
auto const& _tuples = container3d[zyx];
const
size_t idx = zyx % nNodes
, idy = (zyx / nNodes) % nNodes
// remember: zyx = idx + idy * nNodes + idz * nNodes^2
, idz = zyx / nNodes / nNodes
, n_third = _tuples.size() / 3
, size = nodeTuples.size()
;
size_t nbeg, nend;
if (info.nodeId == idx) {
nbeg = 0 * n_third;
nend = 1 * n_third;
} else if (info.nodeId == idy) {
nbeg = 1 * n_third;
nend = 2 * n_third;
} else if (info.nodeId == idz) {
nbeg = 2 * n_third;
nend = _tuples.size();
} else {
// neither idx, idy, nor idz is my node
continue;
}
size_t const nextra = nend - nbeg;
nodeTuples.resize(size + nextra, INVALID_TUPLE);
std::copy(_tuples.begin() + nbeg,
_tuples.begin() + nend,
nodeTuples.begin() + size);
}
WITH_DBG if (info.nodeId == 0) std::cout << "\tswapping tuples...\n";
/*
* sort part of group-and-sort algorithm
* every tuple on a given node is sorted in a way that
* the 'home elements' are the fastest index.
* 1:yyy 2:yyn(x) 3:yny(x) 4:ynn(x) 5:nyy 6:nyn(x) 7:nny 8:nnn
*/
for (auto &nt: nodeTuples){
if ( isOnNode(nt[0], nNodes) == info.nodeId ){ // 1234
if ( isOnNode(nt[2], nNodes) != info.nodeId ){ // 24
size_t const x(nt[0]);
nt[0] = nt[2]; // switch first and last
nt[2] = x;
}
else if ( isOnNode(nt[1], nNodes) != info.nodeId){ // 3
size_t const x(nt[0]);
nt[0] = nt[1]; // switch first two
nt[1] = x;
}
} else {
if ( isOnNode(nt[1], nNodes) == info.nodeId // 56
&& isOnNode(nt[2], nNodes) != info.nodeId
) { // 6
size_t const x(nt[1]);
nt[1] = nt[2]; // switch last two
nt[2] = x;
}
}
}
WITH_DBG if (info.nodeId == 0) std::cout << "\tsorting list of tuples...\n";
//now we sort the list of tuples
std::sort(nodeTuples.begin(), nodeTuples.end());
WITH_DBG if (info.nodeId == 0) std::cout << "\trestoring tuples...\n";
// we bring the tuples abc back in the order a<b<c
for (auto &t: nodeTuples) std::sort(t.begin(), t.end());
#if ATRIP_DEBUG > 1
WITH_DBG if (info.nodeId == 0)
std::cout << "checking for validity of " << nodeTuples.size() << std::endl;
const bool anyInvalid
= std::any_of(nodeTuples.begin(),
nodeTuples.end(),
[](ABCTuple const& t) { return t == INVALID_TUPLE; });
if (anyInvalid) throw "Some tuple is invalid in group-and-sort algorithm";
#endif
WITH_DBG if (info.nodeId == 0) std::cout << "\treturning tuples...\n";
return nodeTuples;
}
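// --- Illustrative sketch of the swap rules above (y = element's slice
// lives on this node, n = it does not): at most one swap is performed,
// moving a non-home element to the front so the subsequent std::sort
// groups tuples by the slice that must be fetched. Stand-in names, not
// the library code.

#include <array>
#include <cstddef>
#include <cstdio>
#include <utility>

using ABCTuple = std::array<size_t, 3>;

// Stand-in for the modulo-based node assignment used above.
static size_t is_on_node(size_t element, size_t n_nodes) {
  return element % n_nodes;
}

// Same case analysis as above: home elements end up as fastest indices.
static void put_home_last(ABCTuple& nt, size_t n_nodes, size_t node_id) {
  const bool h0 = is_on_node(nt[0], n_nodes) == node_id,
             h1 = is_on_node(nt[1], n_nodes) == node_id,
             h2 = is_on_node(nt[2], n_nodes) == node_id;
  if (h0) {
    if (!h2)      std::swap(nt[0], nt[2]);  // cases yyn, ynn
    else if (!h1) std::swap(nt[0], nt[1]);  // case yny
  } else if (h1 && !h2) {
    std::swap(nt[1], nt[2]);                // case nyn
  }
}

int main() {
  ABCTuple t = {0, 2, 3};  // with 2 nodes: 0 and 2 live on node 0
  put_home_last(t, 2, 0);  // node 0 sees pattern yyn -> swap first/last
  std::printf("%zu %zu %zu\n", t[0], t[1], t[2]);  // prints: 3 2 0
}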
std::vector<ABCTuple> main(MPI_Comm universe, size_t Nv) {
int rank, np;
MPI_Comm_rank(universe, &rank);
MPI_Comm_size(universe, &np);
std::vector<ABCTuple> result;
auto const nodeNames(getNodeNames(universe));
size_t const nNodes = unique(nodeNames).size();
auto const nodeInfos = getNodeInfos(nodeNames);
// We want to construct a communicator that contains only
// one rank per node
bool const computeDistribution
= nodeInfos[rank].localRank == 0;
std::vector<ABCTuple>
nodeTuples
= computeDistribution
? specialDistribution(Info{nNodes, nodeInfos[rank].nodeId},
getAllTuplesList(Nv))
: std::vector<ABCTuple>()
;
LOG(1,"Atrip") << "got nodeTuples\n";
// now we have to send the data from **one** rank on each node
// to all other ranks of this node
const
int color = nodeInfos[rank].nodeId,
key = nodeInfos[rank].localRank
;
MPI_Comm INTRA_COMM;
MPI_Comm_split(universe, color, key, &INTRA_COMM);
// Main:1 ends here
// [[file:~/cuda/atrip/atrip.org::*Main][Main:2]]
size_t const
tuplesPerRankLocal
= nodeTuples.size() / nodeInfos[rank].ranksPerNode
+ size_t(nodeTuples.size() % nodeInfos[rank].ranksPerNode != 0)
;
size_t tuplesPerRankGlobal;
MPI_Reduce(&tuplesPerRankLocal,
&tuplesPerRankGlobal,
1,
MPI_UINT64_T,
MPI_MAX,
0,
universe);
MPI_Bcast(&tuplesPerRankGlobal,
1,
MPI_UINT64_T,
0,
universe);
LOG(1,"Atrip") << "Tuples per rank: " << tuplesPerRankGlobal << "\n";
LOG(1,"Atrip") << "ranks per node " << nodeInfos[rank].ranksPerNode << "\n";
LOG(1,"Atrip") << "#nodes " << nNodes << "\n";
// Main:2 ends here
// [[file:~/cuda/atrip/atrip.org::*Main][Main:3]]
size_t const totalTuples
= tuplesPerRankGlobal * nodeInfos[rank].ranksPerNode;
if (computeDistribution) {
// pad with FAKE_TUPLEs
nodeTuples.insert(nodeTuples.end(),
totalTuples - nodeTuples.size(),
FAKE_TUPLE);
}
// Main:3 ends here
// [[file:~/cuda/atrip/atrip.org::*Main][Main:4]]
{
// construct mpi type for abctuple
MPI_Datatype MPI_ABCTUPLE;
// nodeTuples may be empty on ranks that did not compute the
// distribution, so avoid reading the size from nodeTuples[0];
// an ABCTuple is always three uint64 indices {a, b, c}.
MPI_Type_contiguous(3, MPI_UINT64_T, &MPI_ABCTUPLE);
MPI_Type_commit(&MPI_ABCTUPLE);
LOG(1,"Atrip") << "scattering tuples \n";
result.resize(tuplesPerRankGlobal);
MPI_Scatter(nodeTuples.data(),
tuplesPerRankGlobal,
MPI_ABCTUPLE,
result.data(),
tuplesPerRankGlobal,
MPI_ABCTUPLE,
0,
INTRA_COMM);
MPI_Type_free(&MPI_ABCTUPLE);
}
return result;
}
ABCTuples Distribution::getTuples(size_t Nv, MPI_Comm universe) {
return main(universe, Nv);
}
} // namespace group_and_sort
} // namespace atrip

View File

@@ -1,166 +0,0 @@
#!/usr/bin/env bash
# Copyright (C) 2022 by Alejandro Gallo <aamsgallo@gmail.com>
set -eu
flags=("${@}")
PROJECTS=()
############################################################
#
## Check root directory
#
root_project=$(git rev-parse --show-toplevel)
configure=$root_project/configure
if [[ $(basename $PWD) == $(basename $root_project) ]]; then
cat <<EOF
You are trying to build in the root directory; create a build folder
and configure from there.
mkdir build
cd build
$(readlink -f $0)
EOF
exit 1
fi
[[ -f $configure ]] || {
cat <<EOF
No configure script at $configure; create it with bootstrap.sh or
autoreconf -vif
EOF
exit 1
}
############################################################
#
## Create configuration function
#
create_config () {
file=$1
name=$2
PROJECTS=(${PROJECTS[@]} "$name")
mkdir -p $name
cd $name
echo "> creating: $name"
cat <<SH > configure
#!/usr/bin/env bash
# creator: $0
# date: $(date)
$root_project/configure $(cat $file | paste -s) \\
$(for word in "${flags[@]}"; do
printf " \"%s\"" "$word";
done)
exit 0
SH
chmod +x configure
cd - > /dev/null
}
############################################################
# begin doc
#
# - default ::
# This configuration uses a CPU code with dgemm
# and without computing slices.
#
# end doc
tmp=`mktemp`
cat <<EOF > $tmp
--disable-slice
EOF
create_config $tmp default
rm $tmp
# begin doc
#
# - only-dgemm ::
# This only runs the computation part that involves dgemms.
#
# end doc
tmp=`mktemp`
cat <<EOF > $tmp
--disable-slice
--enable-only-dgemm
EOF
create_config $tmp only-dgemm
rm $tmp
#
# begin doc
#
# - slices-on-gpu-only-dgemm ::
# This configuration tests that slices reside completely on the gpu
# and it should use a CUDA aware MPI implementation.
# It also only uses the routines that involve dgemm.
#
# end doc
tmp=`mktemp`
cat <<EOF > $tmp
--enable-cuda
--enable-sources-in-gpu
--enable-cuda-aware-mpi
--enable-only-dgemm
--disable-slice
EOF
create_config $tmp sources-in-gpu
rm $tmp
############################################################
#
## Create makefile
#
cat <<MAKE > Makefile
all: configure do
do: configure
configure: ${PROJECTS[@]/%/\/Makefile}
%/Makefile: %/configure
cd \$* && ./configure
do: ${PROJECTS[@]/%/\/src\/libatrip.a}
%/src/libatrip.a:
cd \$* && \$(MAKE)
.PHONY: configure do all
MAKE
cat <<EOF
Now you can do
make all
or go into one of the directories
${PROJECTS[@]}
and do
./configure
make
EOF
## Emacs stuff
# Local Variables:
# eval: (outline-minor-mode)
# outline-regexp: "############################################################"
# End: