Compare commits
45 Commits
cuda-test- ... cuda
| SHA1 |
|---|
| 122329eca7 |
| 58c0bf078e |
| 3fe15e5e5c |
| 0d223e6ed9 |
| c8bdc4239f |
| be96e4bf8c |
| 9003c218a3 |
| 4af47a0bb7 |
| 9a5a2487be |
| c4ec227185 |
| 1ceb4cf0d6 |
| 34a4e79db0 |
| 249f1c0b51 |
| 1d96800d45 |
| 9087e3af19 |
| 418fd9d389 |
| 895cd02778 |
| 8efa3d911e |
| 0fa24404e5 |
| 8f7d05efda |
| ad542fe856 |
| 658397ebd7 |
| 26e2f2d109 |
| 871471aae3 |
| 65a64f3f8c |
| 4f9f09e965 |
| 6dc943e10a |
| ed347ab0d9 |
| 8c5c47e208 |
| 6871372cac |
| 452c0fe001 |
| b636b89a64 |
| e59d298a01 |
| 6143d1ae73 |
| 3addd86826 |
| 3ddd507c17 |
| ae6736fc21 |
| d5cfe31b12 |
| ddb4574380 |
| 118df09128 |
| 1e391e3749 |
| 7734efeb97 |
| fa1a29c583 |
| 2cbff5c8c9 |
| 50896e3cd0 |
.dir-locals.el (new file, 22 lines)
@@ -0,0 +1,22 @@
;;; Directory Local Variables
;;; For more information see (info "(emacs) Directory Variables")

((c++-mode . ((outline-regexp . "// \\[\\[file:")
              (eval . (let
                          ((root
                            (expand-file-name
                             (project-root
                              (project-current)))))
                        (setq-local flycheck-gcc-include-path
                                    (list
                                     (format "%s/vendor/include/" root)
                                     (format "%s/include/" root)
                                     (format "%s/" root)
                                     (format "%s/bench/" root)
                                     (format "%s/build/main/" root)))
                        (setq-local flycheck-clang-include-path
                                    flycheck-gcc-include-path)))
              (eval . (flycheck-mode))
              (eval . (outline-minor-mode))
              (indent-tabs-mode . nil)
              (tab-width . 2))))
.gitignore (vendored, 3 added lines)
@@ -25,3 +25,6 @@ config.mk
 /atrip.html
 /TAGS
 /config.h.in
+/result
+/result-dev
+/vendor/
README.org (107 changed lines)
@@ -26,3 +26,110 @@ before the proper paper is released please contact me.
In the mean time the code has been used in
[[https://aip.scitation.org/doi/10.1063/5.0074936][this publication]] and can therefore be cited.

* Building

Atrip uses autotools to build the system.
Autotools works by first creating a =configure= script from
a =configure.ac= file.

Atrip should be built out of source; this means that
you have to create a build directory other than the root
directory, for instance the =build/tutorial= directory

#+begin_src sh :exports code
mkdir -p build/tutorial/
cd build/tutorial
#+end_src

First you have to build the =configure= script by doing

#+begin_src sh :dir build/tutorial :exports code :results raw drawer
../../bootstrap.sh
#+end_src

#+RESULTS:
:results:

Creating configure script

Now you can build by doing

mkdir build
cd build
../configure
make extern
make all

:end:

And then you can see the =configure= options
#+begin_src sh :dir build/tutorial :results raw drawer :eval no
../../configure --help
#+end_src

** Benches

The script =tools/configure-benches.sh= can be used to create
a couple of configurations for benches:

#+begin_src sh :exports results :results verbatim org :results verbatim drawer replace output
awk '/begin +doc/,/end +doc/ { print $NL }' tools/configure-benches.sh |
    grep -v -e "begin \+doc" -e "end \+doc" |
    sed "s/^# //; s/^# *$//; /^$/d"
#+end_src

#+RESULTS:
:results:
- default ::
  This configuration uses a CPU code with dgemm
  and without computing slices.
- only-dgemm ::
  This only runs the computation part that involves dgemms.
- cuda-only-dgemm ::
  This is the naive CUDA implementation compiling only the dgemm parts
  of the compute.
- cuda-slices-on-gpu-only-dgemm ::
  This configuration tests that slices reside completely on the GPU,
  and it should use a CUDA-aware MPI implementation.
  It also only uses the routines that involve dgemm.
:end:

In order to generate the benches, just create a suitable directory for it

#+begin_src sh :eval no
mkdir -p build/benches
cd build/benches
../../tools/configure-benches.sh CXX=g++ ...
#+end_src

and you will get a Makefile together with several project folders.
You can either configure all projects with =make all= or
go into each folder individually.

Notice that you can give a path to CTF for all of them by doing
#+begin_src sh :eval no
../../tools/configure-benches.sh --with-ctf=/absolute/path/to/ctf
#+end_src

* Running benches

** Main benchmark

The main benchmark gets built in =bench/atrip= and is used to run
atrip with random tensors.

A common run of this script will be the following

#+begin_src sh
bench/atrip \
    --no 100 \
    --nv 1000 \
    --mod 1 \
    --% 0 \
    --dist group \
    --nocheckpoint \
    --max-iterations 1000
#+end_src
@@ -1,25 +1,42 @@
 AUTOMAKE_OPTIONS = subdir-objects
 include $(top_srcdir)/atrip.mk

-AM_CPPFLAGS = -I$(top_srcdir)/include/ $(CTF_CPPFLAGS)
+AM_CPPFLAGS = -I$(top_srcdir)/include/ -I$(top_srcdir) $(CTF_CPPFLAGS)
 AM_LDFLAGS = @LAPACK_LIBS@ @BLAS_LIBS@

-bin_PROGRAMS = test_main
-test_main_SOURCES = test_main.cxx
-
-test_main_LDADD = \
-  $(top_builddir)/src/libatrip.a
-
 if WITH_BUILD_CTF
-test_main_LDADD += $(CTF_BUILD_PATH)/lib/libctf.a
+ATRIP_CTF = $(CTF_BUILD_PATH)/lib/libctf.a
 else
-test_main_LDADD += @LIBCTF_LD_LIBRARY_PATH@/libctf.a
+ATRIP_CTF = @LIBCTF_LD_LIBRARY_PATH@/libctf.a
+endif
+
+ATRIP_LIB = $(top_builddir)/src/libatrip.a $(ATRIP_CTF)
+
+bin_PROGRAMS =
+BENCHES_LDADD = $(ATRIP_LIB) $(ATRIP_CTF)
+
+##
+## main entry point and bench
+##
+bin_PROGRAMS += atrip
+atrip_SOURCES = main.cxx
+atrip_CPPFLAGS = $(AM_CPPFLAGS)
+atrip_LDADD = $(BENCHES_LDADD)
+
+
+if !WITH_CUDA
+##
+## tuples distribution
+##
+bin_PROGRAMS += tuples-distribution
+tuples_distribution_LDADD = $(BENCHES_LDADD)
+tuples_distribution_SOURCES = tuples-distribution.cxx
+endif
 endif


 if WITH_CUDA
-test_main_CXXFLAGS = $(CUDA_CXXFLAGS)
-test_main_LDADD += $(CUDA_LDFLAGS)
+AM_CPPFLAGS += $(CUDA_CXXFLAGS)
+BENCHES_LDADD += $(CUDA_LDFLAGS)

 AM_CXXFLAGS = $(CUDA_CXXFLAGS)
 AM_LDFLAGS += $(CUDA_LDFLAGS)
@@ -5,18 +5,20 @@
 #include <CLI11.hpp>

 #define _print_size(what, size)                 \
+  do {                                          \
   if (rank == 0) {                              \
     std::cout << #what                          \
               << " => "                         \
               << (double)size * elem_to_gb      \
               << "GB"                           \
               << std::endl;                     \
-  }
+  }                                             \
+  } while (0)

 int main(int argc, char** argv) {
   MPI_Init(&argc, &argv);

-  size_t checkpoint_it;
+  size_t checkpoint_it, max_iterations;
   int no(10), nv(100), itMod(-1), percentageMod(10);
   float checkpoint_percentage;
   bool
@@ -30,6 +32,9 @@ int main(int argc, char** argv) {
   app.add_option("--no", no, "Occupied orbitals");
   app.add_option("--nv", nv, "Virtual orbitals");
   app.add_option("--mod", itMod, "Iteration modifier");
+  app.add_option("--max-iterations",
+                 max_iterations,
+                 "Maximum number of iterations to run");
   app.add_flag("--keep-vppph", keepVppph, "Do not delete Vppph");
   app.add_flag("--nochrono", nochrono, "Do not print chrono");
   app.add_flag("--rank-round-robin", rankRoundRobin, "Do rank round robin");
@@ -45,6 +50,19 @@ int main(int argc, char** argv) {
                  checkpoint_percentage,
                  "Percentage for checkpoints");

+  // Optional tensor files
+  std::string
+    ei_path, ea_path,
+    Tph_path, Tpphh_path,
+    Vpphh_path, Vhhhp_path, Vppph_path;
+  app.add_option("--ei", ei_path, "Path for ei");
+  app.add_option("--ea", ea_path, "Path for ea");
+  app.add_option("--Tpphh", Tpphh_path, "Path for Tpphh");
+  app.add_option("--Tph", Tph_path, "Path for Tph");
+  app.add_option("--Vpphh", Vpphh_path, "Path for Vpphh");
+  app.add_option("--Vhhhp", Vhhhp_path, "Path for Vhhhp");
+  app.add_option("--Vppph", Vppph_path, "Path for Vppph");
+
 #if defined(HAVE_CUDA)
   size_t ooo_threads = 0, ooo_blocks = 0;
   app.add_option("--ooo-blocks",
@@ -148,37 +166,64 @@ int main(int argc, char** argv) {
   }

-  std::vector<int> symmetries(4, NS)
-    , vo({nv, no})
-    , vvoo({nv, nv, no, no})
-    , ooov({no, no, no, nv})
-    , vvvo({nv, nv, nv, no})
-    ;
+  std::vector<int>
+    symmetries(4, NS),
+    vo({nv, no}),
+    vvoo({nv, nv, no, no}),
+    ooov({no, no, no, nv}),
+    vvvo({nv, nv, nv, no});

   CTF::Tensor<double>
-    ei(1, ooov.data(), symmetries.data(), world)
-    , ea(1, vo.data(), symmetries.data(), world)
-    , Tph(2, vo.data(), symmetries.data(), world)
-    , Tpphh(4, vvoo.data(), symmetries.data(), world)
-    , Vpphh(4, vvoo.data(), symmetries.data(), world)
-    , Vhhhp(4, ooov.data(), symmetries.data(), world)
-    ;
+    ei(1, ooov.data(), symmetries.data(), world),
+    ea(1, vo.data(), symmetries.data(), world),
+    Tph(2, vo.data(), symmetries.data(), world),
+    Tpphh(4, vvoo.data(), symmetries.data(), world),
+    Vpphh(4, vvoo.data(), symmetries.data(), world),
+    Vhhhp(4, ooov.data(), symmetries.data(), world);

   // initialize deletable tensors in heap
   auto Vppph
     = new CTF::Tensor<double>(4, vvvo.data(), symmetries.data(), world);

-  _print_size(Vabci, no*nv*nv*nv)
-  _print_size(Vabij, no*no*nv*nv)
-  _print_size(Vijka, no*no*no*nv)
+  _print_size(Vabci, no*nv*nv*nv);
+  _print_size(Vabij, no*no*nv*nv);
+  _print_size(Vijka, no*no*no*nv);

+  if (ei_path.size()) {
+    ei.read_dense_from_file(ei_path.c_str());
+  } else {
     ei.fill_random(-40.0, -2);
+  }
+  if (ea_path.size()) {
+    ea.read_dense_from_file(ea_path.c_str());
+  } else {
     ea.fill_random(2, 50);
+  }
+  if (Tpphh_path.size()) {
+    Tpphh.read_dense_from_file(Tpphh_path.c_str());
+  } else {
     Tpphh.fill_random(0, 1);
+  }
+  if (Tph_path.size()) {
+    Tph.read_dense_from_file(Tph_path.c_str());
+  } else {
     Tph.fill_random(0, 1);
+  }
+  if (Vpphh_path.size()) {
+    Vpphh.read_dense_from_file(Vpphh_path.c_str());
+  } else {
     Vpphh.fill_random(0, 1);
+  }
+  if (Vhhhp_path.size()) {
+    Vhhhp.read_dense_from_file(Vhhhp_path.c_str());
+  } else {
     Vhhhp.fill_random(0, 1);
+  }
+  if (Vppph_path.size()) {
+    Vppph->read_dense_from_file(Vppph_path.c_str());
+  } else {
     Vppph->fill_random(0, 1);
+  }

   atrip::Atrip::init(MPI_COMM_WORLD);
   const auto in
@@ -199,6 +244,7 @@ int main(int argc, char** argv) {
     .with_iterationMod(itMod)
     .with_percentageMod(percentageMod)
     .with_tuplesDistribution(tuplesDistribution)
+    .with_maxIterations(max_iterations)
     // checkpoint options
     .with_checkpointAtEveryIteration(checkpoint_it)
     .with_checkpointAtPercentage(checkpoint_percentage)
bench/tuples-distribution.cxx (new file, 443 lines)
@@ -0,0 +1,443 @@
#include <iostream>
#define ATRIP_DEBUG 2
#include <atrip/Atrip.hpp>
#include <atrip/Tuples.hpp>
#include <atrip/Unions.hpp>
#include <bench/CLI11.hpp>
#include <bench/utils.hpp>

using namespace atrip;

using F = double;
using Tr = CTF::Tensor<F>;

#define INIT_DRY(name, ...)                                           \
  do {                                                                \
    std::vector<int64_t> lens = __VA_ARGS__;                          \
    int i = -1;                                                       \
    name.order = lens.size();                                         \
    name.lens = (int64_t*)malloc(sizeof(int64_t) * lens.size());      \
    name.sym = (int*)malloc(sizeof(int) * lens.size());               \
    name.lens[++i] = lens[i]; name.lens[++i] = lens[i];               \
    name.lens[++i] = lens[i]; name.lens[++i] = lens[i];               \
    i = 0;                                                            \
    name.sym[i++] = NS; name.sym[i++] = NS;                           \
    name.sym[i++] = NS; name.sym[i++] = NS;                           \
  } while (0)

#define DEINIT_DRY(name)                        \
  do {                                          \
    name.order = 0;                             \
    name.lens = NULL;                           \
    name.sym = NULL;                            \
  } while (0)

using LocalDatabase = typename Slice<F>::LocalDatabase;
using LocalDatabaseElement = typename Slice<F>::LocalDatabaseElement;

LocalDatabase buildLocalDatabase(SliceUnion<F> &u,
                                 ABCTuple const& abc) {
  LocalDatabase result;

  auto const needed = u.neededSlices(abc);

  // BUILD THE DATABASE
  // we need to loop over all sliceTypes that this TensorUnion
  // is representing and find out how we will get the corresponding
  // slice for the abc we are considering right now.
  for (auto const& pair: needed) {
    auto const type = pair.first;
    auto const tuple = pair.second;
    auto const from = u.rankMap.find(abc, type);

    {
      // FIRST: look up if there is already a *Ready* slice matching what we
      // need
      auto const& it
        = std::find_if(u.slices.begin(), u.slices.end(),
                       [&tuple, &type](Slice<F> const& other) {
                         return other.info.tuple == tuple
                             && other.info.type == type
                             // we only want another slice when it
                             // has already ready-to-use data
                             && other.isUnwrappable()
                             ;
                       });
      if (it != u.slices.end()) {
        // if we find this slice, it means that we don't have to do anything
        result.push_back({u.name, it->info});
        continue;
      }
    }

    //
    // Try to find a recyling possibility ie. find a slice with the same
    // tuple and that has a valid data pointer.
    //
    auto const& recycleIt
      = std::find_if(u.slices.begin(), u.slices.end(),
                     [&tuple, &type](Slice<F> const& other) {
                       return other.info.tuple == tuple
                           && other.info.type != type
                           && other.isRecyclable()
                           ;
                     });

    //
    // if we find this recylce, then we find a Blank slice
    // (which should exist by construction :THINK)
    //
    if (recycleIt != u.slices.end()) {
      auto& blank = Slice<F>::findOneByType(u.slices, Slice<F>::Blank);
      // TODO: formalize this through a method to copy information
      // from another slice
      blank.data = recycleIt->data;
      blank.info.type = type;
      blank.info.tuple = tuple;
      blank.info.state = Slice<F>::Recycled;
      blank.info.from = from;
      blank.info.recycling = recycleIt->info.type;
      result.push_back({u.name, blank.info});
      WITH_RANK << "__db__: RECYCLING: n" << u.name
                << " " << pretty_print(abc)
                << " get " << pretty_print(blank.info)
                << " from " << pretty_print(recycleIt->info)
                << " ptr " << recycleIt->data
                << "\n"
                ;
      continue;
    }

    // in this case we have to create a new slice
    // this means that we should have a blank slice at our disposal
    // and also the freePointers should have some elements inside,
    // so we pop a data pointer from the freePointers container
    {
      auto& blank = Slice<F>::findOneByType(u.slices, Slice<F>::Blank);
      blank.info.type = type;
      blank.info.tuple = tuple;
      blank.info.from = from;

      // Handle self sufficiency
      blank.info.state = Atrip::rank == from.rank
                       ? Slice<F>::SelfSufficient
                       : Slice<F>::Fetch
                       ;
      if (blank.info.state == Slice<F>::SelfSufficient) {
        blank.data = (F*)0xBADA55;
      } else {
        blank.data = (F*)0xA55A55;
      }

      result.push_back({u.name, blank.info});
      continue;
    }

  }

  return result;

}

void clearUnusedSlicesForNext(SliceUnion<F> &u,
                              ABCTuple const& abc) {
  auto const needed = u.neededSlices(abc);

  // CLEAN UP SLICES, FREE THE ONES THAT ARE NOT NEEDED ANYMORE
  for (auto& slice: u.slices) {
    // if the slice is free, then it was not used anyways
    if (slice.isFree()) continue;

    // try to find the slice in the needed slices list
    auto const found
      = std::find_if(needed.begin(), needed.end(),
                     [&slice] (typename Slice<F>::Ty_x_Tu const& tytu) {
                       return slice.info.tuple == tytu.second
                           && slice.info.type == tytu.first
                           ;
                     });

    // if we did not find slice in needed, then erase it
    if (found == needed.end()) {

      // allow to gc unwrapped and recycled, never Fetch,
      // if we have a Fetch slice then something has gone very wrong.
      if (!slice.isUnwrapped() && slice.info.state != Slice<F>::Recycled)
        throw
          std::domain_error(_FORMAT("Trying to garbage collect (%d, %d) "
                                    " a non-unwrapped slice! ",
                                    slice.info.type,
                                    slice.info.state));

      // it can be that our slice is ready, but it has some hanging
      // references lying around in the form of a recycled slice.
      // Of course if we need the recycled slice the next iteration
      // this would be fatal, because we would then free the pointer
      // of the slice and at some point in the future we would
      // overwrite it. Therefore, we must check if slice has some
      // references in slices and if so then
      //
      //  - we should mark those references as the original (since the data
      //    pointer should be the same)
      //
      //  - we should make sure that the data pointer of slice
      //    does not get freed.
      //
      if (slice.info.state == Slice<F>::Ready) {
        WITH_OCD WITH_RANK
          << "__gc__:" << "checking for data recycled dependencies\n";
        auto recycled
          = Slice<F>::hasRecycledReferencingToIt(u.slices, slice.info);
        if (recycled.size()) {
          Slice<F>* newReady = recycled[0];
          WITH_OCD WITH_RANK
            << "__gc__:" << "swaping recycled "
            << pretty_print(newReady->info)
            << " and "
            << pretty_print(slice.info)
            << "\n";
          newReady->markReady();

          for (size_t i = 1; i < recycled.size(); i++) {
            auto newRecyled = recycled[i];
            newRecyled->info.recycling = newReady->info.type;
            WITH_OCD WITH_RANK
              << "__gc__:" << "updating recycled "
              << pretty_print(newRecyled->info)
              << "\n";
          }

        }
      }

      slice.free();
    } // we did not find the slice

  }
}


void unwrapSlice(Slice<F>::Type t, ABCTuple abc, SliceUnion<F> *u) {
  auto& slice = Slice<F>::findByTypeAbc(u->slices, t, abc);
  switch (slice.info.state) {
  case Slice<F>::Dispatched:
    slice.markReady();
    break;
  case Slice<F>::Recycled:
    unwrapSlice(t, abc, u);
    break;
  }
}

#define PRINT_VARIABLE(v)                                              \
  do {                                                                 \
    if (!rank) std::cout << "# " << #v << ": " << v << std::endl;      \
  } while (0)

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);

  int no(10), nv(100);
  std::string tuplesDistributionString = "naive";

  CLI::App app{"Main bench for atrip"};
  app.add_option("--no", no, "Occupied orbitals");
  app.add_option("--nv", nv, "Virtual orbitals");
  app.add_option("--dist", tuplesDistributionString, "Which distribution");
  CLI11_PARSE(app, argc, argv);

  CTF::World world(argc, argv);
  auto kaun = world.comm;
  int rank, np;
  MPI_Comm_rank(kaun, &rank);
  MPI_Comm_size(kaun, &np);
  Atrip::init(world.comm);

  atrip::ABCTuples tuplesList;
  atrip::TuplesDistribution *dist;
  {
    using namespace atrip;
    if (tuplesDistributionString == "naive") {
      dist = new NaiveDistribution();
      tuplesList = dist->getTuples(nv, world.comm);
    } else if (tuplesDistributionString == "group") {
      dist = new group_and_sort::Distribution();
      tuplesList = dist->getTuples(nv, world.comm);
    } else {
      std::cout << "--dist should be either naive or group\n";
      exit(1);
    }
  }

  double tuplesListGb
    = tuplesList.size() * sizeof(tuplesList[0])
    / 1024.0 / 1024.0 / 1024.0;

  std::cout << "\n";
  PRINT_VARIABLE(tuplesDistributionString);
  PRINT_VARIABLE(np);
  PRINT_VARIABLE(no);
  PRINT_VARIABLE(nv);
  PRINT_VARIABLE(tuplesList.size());
  PRINT_VARIABLE(tuplesListGb);

  // create a fake dry tensor
  Tr t_abph, t_abhh, t_tabhh, t_taphh, t_hhha;
  INIT_DRY(t_abph  , {nv, nv, nv, no});
  INIT_DRY(t_abhh  , {nv, nv, no, no});
  INIT_DRY(t_tabhh , {nv, nv, no, no});
  INIT_DRY(t_taphh , {nv, nv, no, no});
  INIT_DRY(t_hhha  , {no, no, no, nv});

  ABPH<F> abph(t_abph, (size_t)no, (size_t)nv, (size_t)np, kaun, kaun);
  ABHH<F> abhh(t_abhh, (size_t)no, (size_t)nv, (size_t)np, kaun, kaun);
  TABHH<F> tabhh(t_tabhh, (size_t)no, (size_t)nv, (size_t)np, kaun, kaun);
  TAPHH<F> taphh(t_taphh, (size_t)no, (size_t)nv, (size_t)np, kaun, kaun);
  HHHA<F> hhha(t_hhha, (size_t)no, (size_t)nv, (size_t)np, kaun, kaun);
  std::vector< SliceUnion<F>* > unions = {&taphh, &hhha, &abph, &abhh, &tabhh};

  using Database = typename Slice<F>::Database;
  auto communicateDatabase
    = [ &unions
      , np
      ] (ABCTuple const& abc, MPI_Comm const& c) -> Database {

      WITH_CHRONO("db:comm:type:do",
        auto MPI_LDB_ELEMENT = Slice<F>::mpi::localDatabaseElement();
      )

      WITH_CHRONO("db:comm:ldb",
        typename Slice<F>::LocalDatabase ldb;
        for (auto const& tensor: unions) {
          auto const& tensorDb = buildLocalDatabase(*tensor, abc);
          ldb.insert(ldb.end(), tensorDb.begin(), tensorDb.end());
        }
      )

      Database db(np * ldb.size(), ldb[0]);

      WITH_CHRONO("oneshot-db:comm:allgather",
      WITH_CHRONO("db:comm:allgather",
        MPI_Allgather(ldb.data(),
                      /* ldb.size() * sizeof(typename
                         Slice<F>::LocalDatabaseElement) */
                      ldb.size(),
                      MPI_LDB_ELEMENT,
                      db.data(),
                      /* ldb.size() * sizeof(typename
                         Slice<F>::LocalDatabaseElement), */
                      ldb.size(),
                      MPI_LDB_ELEMENT,
                      c);
      ))

      WITH_CHRONO("db:comm:type:free", MPI_Type_free(&MPI_LDB_ELEMENT);)

      return db;
    };

  auto doIOPhase
    = [&unions, &rank, &np] (Database const& db,
                             std::vector<LocalDatabaseElement> &to_send) {

    const size_t localDBLength = db.size() / np;

    size_t sendTag = 0
         , recvTag = rank * localDBLength
         ;

    {
      // At this point, we have already send to everyone that fits
      auto const& begin = &db[rank * localDBLength]
                , end = begin + localDBLength
                ;
      for (auto it = begin; it != end; ++it) {
        recvTag++;
        auto const& el = *it;
        auto& u = unionByName(unions, el.name);
        auto& slice = Slice<F>::findByInfo(u.slices, el.info);
        slice.markReady();
        // u.receive(el.info, recvTag);

      } // recv
    }

    // SEND PHASE =========================================================
    for (size_t otherRank = 0; otherRank < np; otherRank++) {
      auto const& begin = &db[otherRank * localDBLength]
                , end = begin + localDBLength
                ;
      for (auto it = begin; it != end; ++it) {
        sendTag++;
        typename Slice<F>::LocalDatabaseElement const& el = *it;
        if (el.info.from.rank != rank) continue;
        auto& u = unionByName(unions, el.name);
        if (el.info.state == Slice<F>::Fetch) {
          to_send.push_back(el);
        }
        // u.send(otherRank, el, sendTag);

      } // send phase

    } // otherRank

  };

  std::vector<LocalDatabaseElement>
    to_send;

  for (size_t it = 0; it < tuplesList.size(); it++) {

    const ABCTuple abc = dist->tupleIsFake(tuplesList[it])
                       ? tuplesList[tuplesList.size() - 1]
                       : tuplesList[it]
                       ;

    if (it > 0) {
      for (auto const& u: unions) {
        clearUnusedSlicesForNext(*u, abc);
      }
    }

    const auto db = communicateDatabase(abc, kaun);
    doIOPhase(db, to_send);

    if (it % 1000 == 0)
      std::cout << _FORMAT("%ld :it %ld %f %% ∷ %ld ∷ %f GB\n",
                           rank,
                           it,
                           100.0 * double(to_send.size()) / double(tuplesList.size()),
                           to_send.size(),
                           double(to_send.size()) * sizeof(to_send[0])
                             / 1024.0 / 1024.0 / 1024.0);

    for (auto const& u: unions) {
      for (auto type: u->sliceTypes) {
        unwrapSlice(type, abc, u);
      }
    }

  }

  std::cout << "=========================================================\n";
  std::cout << "FINISHING, it will segfaulten, that's ok, don't even trip"
            << std::endl;
  MPI_Barrier(kaun);
  DEINIT_DRY(t_abph);
  DEINIT_DRY(t_abhh);
  DEINIT_DRY(t_tabhh);
  DEINIT_DRY(t_taphh);
  DEINIT_DRY(t_hhha);

  MPI_Finalize();
  return 0;
}
bench/utils.hpp (new file, 12 lines)
@@ -0,0 +1,12 @@
#ifndef UTILS_HPP_
#define UTILS_HPP_

#define _FORMAT(_fmt, ...)                                              \
  ([&] (void) -> std::string {                                          \
    int _sz = std::snprintf(nullptr, 0, _fmt, __VA_ARGS__);             \
    std::vector<char> _out(_sz + 1);                                    \
    std::snprintf(&_out[0], _out.size(), _fmt, __VA_ARGS__);            \
    return std::string(_out.data());                                    \
  })()

#endif
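A minimal usage sketch of the =_FORMAT= helper above (the caller function and its arguments are illustrative, not from the original file; the include path =<bench/utils.hpp>= matches how the benches include it):

#+begin_src c++
#include <cstdio>
#include <string>
#include <vector>
#include <bench/utils.hpp>

// _FORMAT expands to an immediately invoked lambda that first sizes the
// buffer with snprintf and then returns the formatted std::string.
std::string describe(int rank, int np) {
  return _FORMAT("rank %d of %d", rank, np);
}
#+end_src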
configure.ac (69 changed lines)
@@ -21,26 +21,6 @@ AC_ARG_ENABLE(shared,
                 files (default=YES)]),
   [], [enable_shared=yes])

-AC_ARG_ENABLE(
-  [slice],
-  [AS_HELP_STRING(
-    [--disable-slice],
-    [Disable the step of slicing tensors for CTF, this is useful for example for benchmarking or testing.])],
-  [atrip_dont_slice=1
-   AC_DEFINE([ATRIP_DONT_SLICE],1,[Wether CTF will slice tensors or skip the step])
-  ],
-  [atrip_dont_slice=0]
-)
-
-AC_ARG_ENABLE(
-  [atrip_dgemm],
-  [AS_HELP_STRING(
-    [--disable-dgemm],
-    [Disable using dgemm for the doubles equations])],
-  [],
-  [AC_DEFINE([ATRIP_USE_DGEMM],1,[Use dgemm for the doubles equations])]
-)
-
-
 AC_ARG_ENABLE([docs],
   [AS_HELP_STRING([--enable-docs],
@@ -74,13 +54,53 @@ AC_ARG_VAR([NVCC], [Path to the nvidia cuda compiler.])
 AC_ARG_VAR([CUDA_LDFLAGS], [LDFLAGS to find libraries -lcuda, -lcudart, -lcublas.])
 AC_ARG_VAR([CUDA_CXXFLAGS], [CXXFLAGS to find the CUDA headers])
+
+dnl -----------------------------------------------------------------------
+dnl ATRIP CPP DEFINES
+dnl -----------------------------------------------------------------------
+
 AC_ARG_WITH([atrip-debug],
             [AS_HELP_STRING([--with-atrip-debug],
-                            [Debug level for atrip, possible values: 1, 2, 3, 4])],
+                            [Debug level for atrip, possible values:
+                             1, 2, 3, 4])],
             [AC_DEFINE([ATRIP_DEBUG],[atrip-debug],[Atrip debug level])],
-            [AC_DEFINE([ATRIP_DEBUG],[1],[Atrip debug level])]
-)
+            [AC_DEFINE([ATRIP_DEBUG],[1],[Atrip debug level])])
+
+AC_ARG_ENABLE([atrip_dgemm],
+              [AS_HELP_STRING([--disable-dgemm],
+                              [Disable using dgemm for the doubles equations])],
+              [],
+              [AC_DEFINE([ATRIP_USE_DGEMM],
+                         1,
+                         [Use dgemm for the doubles equations])])
+
+ATRIP_DEF([slice], [disable],
+          [ATRIP_DONT_SLICE],
+          [Disable the step of slicing tensors for CTF, this is useful
+           for example for benchmarking or testing.])
+
+ATRIP_DEF([only-dgemm], [enable],
+          [ATRIP_ONLY_DGEMM],
+          [Run only the parts of atrip that involve dgemm calls, this
+           is useful for benchmarking and testing the code, it is
+           intended for developers of Atrip.])
+
+ATRIP_DEF([naive-slow], [enable],
+          [ATRIP_NAIVE_SLOW],
+          [Run slow but correct code for the mapping of (iteration,
+           rank) to tuple of the naive tuple distribution.])
+
+ATRIP_DEF([sources-in-gpu], [enable],
+          [ATRIP_SOURCES_IN_GPU],
+          [When using CUDA, activate storing all sources (slices of
+           the input tensors) in the GPU. This means that a lot of GPUs
+           will be needed.])
+
+ATRIP_DEF([cuda-aware-mpi], [enable],
+          [ATRIP_CUDA_AWARE_MPI],
+          [When using MPI, assume support for CUDA aware mpi by the
+           given MPI implementation.])
+
+
 dnl -----------------------------------------------------------------------

@@ -144,8 +164,7 @@ AC_TYPE_SIZE_T
 dnl -----------------------------------------------------------------------
 dnl CHECK CTF
 if test xYES = x${BUILD_CTF}; then
-  AC_MSG_WARN([Sorry, building CTF not supported yet provide a build path
-               with --with-ctf=path/to/ctf/installation])
+  AC_MSG_WARN([You will have to do make ctf before building the project.])
 else
   CPPFLAGS="$CPPFLAGS -I${LIBCTF_CPATH}"
   LDFLAGS="$LDFLAGS -L${LIBCTF_LD_LIBRARY_PATH} -lctf"
etc/env/raven/cuda (new file, vendored, 56 lines)
@@ -0,0 +1,56 @@
mods=(
    cuda/11.6
    intel/19.1.2
    mkl/2020.4
    impi/2019.8
    autoconf/2.69
    automake/1.15
    libtool/2.4.6
)


module purge
module load ${mods[@]}
LIB_PATH="${CUDA_HOME}/lib64"
export CUDA_ROOT=${CUDA_HOME}
export CUDA_LDFLAGS="-L${LIB_PATH} -lcuda -L${LIB_PATH} -lcudart -L${LIB_PATH} -lcublas"
export CUDA_CXXFLAGS="-I${CUDA_HOME}/include"

export LD_LIBRARY_PATH="${MKL_HOME}/lib/intel64_lin:${LD_LIBRARY_PATH}"

BLAS_STATIC_PATH="$MKL_HOME/lib/intel64/libmkl_intel_lp64.a"

ls ${LIB_PATH}/libcublas.so
ls ${LIB_PATH}/libcudart.so

cat <<EOF

////////////////////////////////////////////////////////////////////////////////
   info
////////////////////////////////////////////////////////////////////////////////


MKL_HOME = $MKL_HOME
BLAS_STATIC_PATH = $BLAS_STATIC_PATH

CUDA_ROOT = ${CUDA_HOME}
CUDA_LDFLAGS = "-L${LIB_PATH} -lcuda -L${LIB_PATH} -lcudart -L${LIB_PATH} -lcublas"
CUDA_CXXFLAGS = "-I${CUDA_HOME}/include"



Consider now runnng the following

  ../configure \\
      --enable-cuda \\
      --disable-slice \\
      --with-blas="-L\$MKL_HOME/lib/intel64/ -lmkl_intel_lp64 -mkl" \\
      CXX=mpiicpc \\
      CC=mpiicc \\
      MPICXX=mpiicpc


EOF


return
etc/m4/atrip-def.m4 (new file, 8 lines)
@@ -0,0 +1,8 @@
AC_DEFUN([ATRIP_DEF],
         [AC_ARG_ENABLE([$1],
                        [AS_HELP_STRING([--$2-$1],
                                        [$4])],
                        [AC_DEFINE([$3],
                                   1,
                                   [$4])])])

@@ -20,6 +20,8 @@ in
 {

+  pkg = myopenblas;
+
   buildInputs = with pkgs; [
     myopenblas
     scalapack
etc/nix/vendor-shell.nix (new file, 27 lines)
@@ -0,0 +1,27 @@
rec {

  directory = "vendor";
  src = ''

    _add_vendor_cpath () {
      export CPATH=$CPATH:$1
      mkdir -p ${directory}/include
      ln -frs $1/* ${directory}/include/
    }

    _add_vendor_lib () {
      mkdir -p ${directory}/lib
      ln -frs $1/* ${directory}/lib/
    }

  '';

  cpath = path: ''
    _add_vendor_cpath ${path}
  '';

  lib = path: ''
    _add_vendor_lib ${path}
  '';

}
@@ -86,7 +86,7 @@ namespace atrip {
   ADD_ATTRIBUTE(bool, rankRoundRobin, false)
   ADD_ATTRIBUTE(bool, chrono, false)
   ADD_ATTRIBUTE(bool, barrier, false)
-  ADD_ATTRIBUTE(int, maxIterations, 0)
+  ADD_ATTRIBUTE(size_t, maxIterations, 0)
   ADD_ATTRIBUTE(int, iterationMod, -1)
   ADD_ATTRIBUTE(int, percentageMod, -1)
   ADD_ATTRIBUTE(TuplesDistribution, tuplesDistribution, NAIVE)
@@ -11,11 +11,22 @@
 #if defined(HAVE_CUDA) && defined(__CUDACC__)
 #  define __MAYBE_GLOBAL__ __global__
 #  define __MAYBE_DEVICE__ __device__
+#  define __MAYBE_HOST__ __host__
+#  define __INLINE__ __inline__
 #else
 #  define __MAYBE_GLOBAL__
 #  define __MAYBE_DEVICE__
+#  define __MAYBE_HOST__
+#  define __INLINE__ inline
 #endif

+#if defined(HAVE_CUDA)
+#define ACC_FUNCALL(fname, i, j, ...) fname<<<(i), (j)>>>(__VA_ARGS__)
+#else
+#define ACC_FUNCALL(fname, i, j, ...) fname(__VA_ARGS__)
+#endif /* defined(HAVE_CUDA) */
+
+
 #define _CHECK_CUDA_SUCCESS(message, ...)       \
   do {                                          \
     CUresult result = __VA_ARGS__;              \
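The new =ACC_FUNCALL= wrapper above switches between a CUDA kernel launch and a plain host call. A minimal sketch of how it can be used, assuming the kernel =atrip::acc::zeroing= from the new Operations.hpp further down and purely illustrative block/thread counts:

#+begin_src c++
#include <cstddef>
#include <atrip/Operations.hpp>  // provides atrip::acc::zeroing and the ACC macros

// Zero a buffer of n elements; with CUDA this expands to a kernel launch
// zeroing<F><<<blocks, threads>>>(buffer, n), otherwise to a direct call
// where blocks/threads are simply ignored.
template <typename F>
void zero_buffer(F *buffer, size_t n, size_t blocks, size_t threads) {
  ACC_FUNCALL(atrip::acc::zeroing<F>, blocks, threads, buffer, n);
}
#+end_src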
include/atrip/DatabaseCommunicator.hpp (new file, 20 lines)
@@ -0,0 +1,20 @@
#pragma once
#include <atrip/Utils.hpp>
#include <atrip/Equations.hpp>
#include <atrip/SliceUnion.hpp>
#include <atrip/Unions.hpp>

namespace atrip {

  template <typename F>
  using Unions = std::vector<SliceUnion<F>*>;

  template <typename F>
  typename Slice<F>::Database
  naiveDatabase(Unions<F> &unions,
                size_t nv,
                size_t np,
                size_t iteration,
                MPI_Comm const& c);

} // namespace atrip
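=naiveDatabase= is only declared here; a hedged sketch of a possible call site follows, with the wrapper function name and all surrounding variables assumed to be prepared the way the benches prepare them:

#+begin_src c++
#include <mpi.h>
#include <atrip/DatabaseCommunicator.hpp>

// Build the slice database for one iteration of the naive distribution.
template <typename F>
typename atrip::Slice<F>::Database
databaseForIteration(atrip::Unions<F> &unions,
                     size_t nv, size_t np, size_t iteration,
                     MPI_Comm const &comm) {
  return atrip::naiveDatabase<F>(unions, nv, np, iteration, comm);
}
#+end_src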
@@ -76,7 +76,7 @@
 // [[file:~/cuda/atrip/atrip.org::*Macros][Macros:2]]
 #ifndef LOG
-#define LOG(level, name) if (Atrip::rank == 0) std::cout << name << ": "
+#define LOG(level, name) if (atrip::Atrip::rank == 0) std::cout << name << ": "
 #endif
 // Macros:2 ends here
@@ -23,6 +23,8 @@
 #include<thrust/device_vector.h>
 #endif

+#include<atrip/CUDA.hpp>
+
 namespace atrip {
 using ABCTuple = std::array<size_t, 3>;
@@ -32,21 +34,25 @@ using ABCTuples = std::vector<ABCTuple>;

 // [[file:~/cuda/atrip/atrip.org::*Energy][Energy:1]]
 template <typename F=double>
-double getEnergyDistinct
+__MAYBE_GLOBAL__
+void getEnergyDistinct
   ( F const epsabc
   , size_t const No
   , F* const epsi
   , F* const Tijk
   , F* const Zijk
+  , double* energy
   );

 template <typename F=double>
-double getEnergySame
+__MAYBE_GLOBAL__
+void getEnergySame
   ( F const epsabc
   , size_t const No
   , F* const epsi
   , F* const Tijk
   , F* const Zijk
+  , double* energy
   );
 // Energy:1 ends here

@@ -97,6 +103,11 @@ void singlesContribution
   // -- TIJK
   // , DataPtr<F> Tijk
   , DataFieldType<F>* Tijk_
+#if defined(HAVE_CUDA)
+  // -- tmp buffers
+  , DataFieldType<F>* _t_buffer
+  , DataFieldType<F>* _vhhh
+#endif
   );
 // Doubles contribution:1 ends here
include/atrip/Operations.hpp (new file, 171 lines)
@@ -0,0 +1,171 @@
// Copyright 2022 Alejandro Gallo
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#ifndef OPERATIONS_HPP_
#define OPERATIONS_HPP_

#include <atrip/CUDA.hpp>
#include <atrip/Types.hpp>
#include <atrip/Complex.hpp>

namespace atrip {
namespace acc {

// cuda kernels

template <typename F>
__MAYBE_GLOBAL__
void zeroing(F* a, size_t n) {
  F zero = {0};
  for (size_t i = 0; i < n; i++) {
    a[i] = zero;
  }
}

////
template <typename F>
__MAYBE_DEVICE__ __MAYBE_HOST__ __INLINE__
F maybeConjugateScalar(const F &a) { return a; }

#if defined(HAVE_CUDA)
template <>
__MAYBE_DEVICE__ __MAYBE_HOST__ __INLINE__
cuDoubleComplex maybeConjugateScalar(const cuDoubleComplex &a) {
  return {a.x, -a.y};
}
#endif /* defined(HAVE_CUDA) */

template <typename F>
__MAYBE_GLOBAL__
void maybeConjugate(F* to, F* from, size_t n) {
  for (size_t i = 0; i < n; ++i) {
    to[i] = maybeConjugateScalar<F>(from[i]);
  }
}


template <typename F>
__MAYBE_DEVICE__ __MAYBE_HOST__
void reorder(F* to, F* from, size_t size, size_t I, size_t J, size_t K) {
  size_t idx = 0;
  const size_t IDX = I + J*size + K*size*size;
  for (size_t k = 0; k < size; k++)
  for (size_t j = 0; j < size; j++)
  for (size_t i = 0; i < size; i++, idx++)
    to[idx] += from[IDX];
}

// Multiplication operation
//////////////////////////////////////////////////////////////////////////////

template <typename F>
__MAYBE_DEVICE__ __MAYBE_HOST__ __INLINE__
F prod(const F &a, const F &b) { return a * b; }

#if defined(HAVE_CUDA)
template <>
__MAYBE_DEVICE__ __MAYBE_HOST__ __INLINE__
cuDoubleComplex prod(const cuDoubleComplex &a, const cuDoubleComplex &b) {
  return cuCmul(a, b);
}
#endif /* defined(HAVE_CUDA) */

// Division operation
//////////////////////////////////////////////////////////////////////////////

template <typename F>
__MAYBE_DEVICE__ __MAYBE_HOST__ __INLINE__
F div(const F &a, const F &b) { return a / b; }

#if defined(HAVE_CUDA)
template <>
__MAYBE_DEVICE__ __MAYBE_HOST__ __INLINE__
cuDoubleComplex div(const cuDoubleComplex &a, const cuDoubleComplex &b) {
  return cuCdiv(a, b);
}
#endif /* defined(HAVE_CUDA) */

// Real part
//////////////////////////////////////////////////////////////////////////////

template <typename F>
__MAYBE_HOST__ __INLINE__
double real(F &a) { return std::real(a); }

template <>
__MAYBE_DEVICE__ __MAYBE_HOST__ __INLINE__
double real(double &a) {
  return a;
}

#if defined(HAVE_CUDA)
template <>
__MAYBE_DEVICE__ __MAYBE_HOST__ __INLINE__
double real(cuDoubleComplex &a) {
  return cuCreal(a);
}
#endif /* defined(HAVE_CUDA) */

// Substraction operator
//////////////////////////////////////////////////////////////////////////////

template <typename F>
__MAYBE_DEVICE__ __MAYBE_HOST__ __INLINE__
F sub(const F &a, const F &b) { return a - b; }

#if defined(HAVE_CUDA)
template <>
__MAYBE_DEVICE__ __MAYBE_HOST__ __INLINE__
cuDoubleComplex sub(const cuDoubleComplex &a,
                    const cuDoubleComplex &b) {
  return cuCsub(a, b);
}
#endif /* defined(HAVE_CUDA) */

// Addition operator
//////////////////////////////////////////////////////////////////////////////

template <typename F>
__MAYBE_DEVICE__ __MAYBE_HOST__ __INLINE__
F add(const F &a, const F &b) { return a + b; }

#if defined(HAVE_CUDA)
template <>
__MAYBE_DEVICE__ __MAYBE_HOST__ __INLINE__
cuDoubleComplex add(const cuDoubleComplex &a, const cuDoubleComplex &b) {
  return cuCadd(a, b);
}
#endif /* defined(HAVE_CUDA) */

// Sum in place operator
//////////////////////////////////////////////////////////////////////////////

template <typename F>
__MAYBE_DEVICE__ __MAYBE_HOST__
void sum_in_place(F* to, const F* from) { *to += *from; }

#if defined(HAVE_CUDA)
template <>
__MAYBE_DEVICE__ __MAYBE_HOST__
void sum_in_place(cuDoubleComplex* to, const cuDoubleComplex* from) {
  to->x += from->x;
  to->y += from->y;
}
#endif /* defined(HAVE_CUDA) */


} // namespace acc
} // namespace atrip

#endif
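A small host-side sketch of the generic wrappers above (the function name is illustrative; on the CPU path the wrappers reduce to the plain C++ operators, while under CUDA the =cuDoubleComplex= specialisations route through =cuCmul=/=cuCadd=):

#+begin_src c++
#include <atrip/Operations.hpp>

// a*x + y spelled through the generic wrappers, usable both on host and,
// for supported F, inside device code.
template <typename F>
__MAYBE_DEVICE__ __MAYBE_HOST__
F axpy_like(const F &a, const F &x, const F &y) {
  return atrip::acc::add(atrip::acc::prod(a, x), y);
}
#+end_src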
@@ -352,7 +352,7 @@ Info info;

 // [[file:~/cuda/atrip/atrip.org::*Attributes][Attributes:2]]
   DataPtr<F> data;
-#if defined(HAVE_CUDA)
+#if defined(HAVE_CUDA) && !defined (ATRIP_SOURCES_IN_GPU)
   F* mpi_data;
 #endif
 // Attributes:2 ends here
@@ -456,7 +456,7 @@ void unwrapAndMarkReady() {
     if (errorCode != MPI_SUCCESS)
       throw "Atrip: Unexpected error MPI ERROR";

-#if defined(HAVE_CUDA)
+#if defined(HAVE_CUDA) && !defined(ATRIP_SOURCES_IN_GPU)
     // copy the retrieved mpi data to the device
     WITH_CHRONO("cuda:memcpy",
       _CHECK_CUDA_SUCCESS("copying mpi data to device",
@@ -488,7 +488,7 @@ void unwrapAndMarkReady() {
   Slice(size_t size_)
     : info({})
     , data(DataNullPtr)
-#if defined(HAVE_CUDA)
+#if defined(HAVE_CUDA) && !defined(ATRIP_SOURCES_IN_GPU)
     , mpi_data(nullptr)
 #endif
     , size(size_)
@ -18,6 +18,12 @@
|
|||||||
#include <atrip/Slice.hpp>
|
#include <atrip/Slice.hpp>
|
||||||
#include <atrip/RankMap.hpp>
|
#include <atrip/RankMap.hpp>
|
||||||
|
|
||||||
|
#if defined(ATRIP_SOURCES_IN_GPU)
|
||||||
|
# define SOURCES_DATA(s) (s)
|
||||||
|
#else
|
||||||
|
# define SOURCES_DATA(s) (s).data()
|
||||||
|
#endif
|
||||||
|
|
||||||
namespace atrip {
|
namespace atrip {
|
||||||
// Prolog:1 ends here
|
// Prolog:1 ends here
|
||||||
|
|
||||||
@ -195,7 +201,7 @@ template <typename F=double>
|
|||||||
;
|
;
|
||||||
if (blank.info.state == Slice<F>::SelfSufficient) {
|
if (blank.info.state == Slice<F>::SelfSufficient) {
|
||||||
#if defined(HAVE_CUDA)
|
#if defined(HAVE_CUDA)
|
||||||
const size_t _size = sizeof(F) * sources[from.source].size();
|
const size_t _size = sizeof(F) * sliceSize;
|
||||||
// TODO: this is code duplication with downstairs
|
// TODO: this is code duplication with downstairs
|
||||||
if (freePointers.size() == 0) {
|
if (freePointers.size() == 0) {
|
||||||
std::stringstream stream;
|
std::stringstream stream;
|
||||||
@ -212,12 +218,12 @@ template <typename F=double>
|
|||||||
WITH_CHRONO("cuda:memcpy:self-sufficient",
|
WITH_CHRONO("cuda:memcpy:self-sufficient",
|
||||||
_CHECK_CUDA_SUCCESS("copying mpi data to device",
|
_CHECK_CUDA_SUCCESS("copying mpi data to device",
|
||||||
cuMemcpyHtoD(blank.data,
|
cuMemcpyHtoD(blank.data,
|
||||||
(void*)sources[from.source].data(),
|
(void*)SOURCES_DATA(sources[from.source]),
|
||||||
sizeof(F) * sources[from.source].size()));
|
sizeof(F) * sliceSize));
|
||||||
))
|
))
|
||||||
|
|
||||||
#else
|
#else
|
||||||
blank.data = sources[from.source].data();
|
blank.data = SOURCES_DATA(sources[from.source]);
|
||||||
#endif
|
#endif
|
||||||
} else {
|
} else {
|
||||||
if (freePointers.size() == 0) {
|
if (freePointers.size() == 0) {
|
||||||
@@ -396,23 +402,44 @@ template <typename F=double>
     , world(child_world)
     , universe(global_world)
     , sliceLength(sliceLength_)
-    , sources(rankMap.nSources(),
-              std::vector<F>
-              (std::accumulate(sliceLength.begin(),
-                               sliceLength.end(),
-                               1UL, std::multiplies<size_t>())))
+    , sliceSize(std::accumulate(sliceLength.begin(),
+                                sliceLength.end(),
+                                1UL, std::multiplies<size_t>()))
+#if defined(ATRIP_SOURCES_IN_GPU)
+    , sources(rankMap.nSources())
+#else
+    , sources(rankMap.nSources(),
+              std::vector<F>(sliceSize))
+#endif
     , name(name_)
     , sliceTypes(sliceTypes_)
     , sliceBuffers(nSliceBuffers)
-    //, slices(2 * sliceTypes.size(), Slice<F>{ sources[0].size() })
   { // constructor begin

     LOG(0,"Atrip") << "INIT SliceUnion: " << name << "\n";
+    printf("sliceSize %d, number of slices %d\n\n\n", sliceSize, sources.size());
+
+#if defined(ATRIP_SOURCES_IN_GPU)
+    for (auto& ptr: sources) {
+      const CUresult sourceError =
+        cuMemAlloc(&ptr, sizeof(F) * sliceSize);
+      if (ptr == 0UL) {
+        throw "UNSUFICCIENT MEMORY ON THE GRAPHIC CARD FOR SOURCES";
+      }
+      if (sourceError != CUDA_SUCCESS) {
+        std::stringstream s;
+        s << "Error allocating memory for sources "
+          << "code " << sourceError << "\n";
+        throw s.str();
+      }
+    }
+#endif

     for (auto& ptr: sliceBuffers) {
 #if defined(HAVE_CUDA)
       const CUresult error =
-        cuMemAlloc(&ptr, sizeof(F) * sources[0].size());
+        cuMemAlloc(&ptr, sizeof(F) * sliceSize);
       if (ptr == 0UL) {
         throw "UNSUFICCIENT MEMORY ON THE GRAPHIC CARD FOR FREE POINTERS";
       }
@@ -423,12 +450,12 @@ template <typename F=double>
         throw s.str();
       }
 #else
-      ptr = (DataPtr<F>)malloc(sizeof(F) * sources[0].size());
+      ptr = (DataPtr<F>)malloc(sizeof(F) * sliceSize);
 #endif
     }

     slices
-      = std::vector<Slice<F>>(2 * sliceTypes.size(), { sources[0].size() });
+      = std::vector<Slice<F>>(2 * sliceTypes.size(), { sliceSize });
     // TODO: think exactly ^------------------- about this number

     // initialize the freePointers with the pointers to the buffers
@@ -436,17 +463,45 @@ template <typename F=double>
                    std::inserter(freePointers, freePointers.begin()),
                    [](DataPtr<F> ptr) { return ptr; });

+#if defined(HAVE_CUDA)
+    LOG(1,"Atrip") << "warming communication up " << slices.size() << "\n";
+    WITH_CHRONO("cuda:warmup",
+      int nRanks=Atrip::np, requestCount=0;
+      int nSends=sliceBuffers.size()*nRanks;
+      MPI_Request *requests = (MPI_Request*) malloc(nSends*2 * sizeof(MPI_Request));
+      MPI_Status *statuses = (MPI_Status*) malloc(nSends*2 * sizeof(MPI_Status));
+      for (int sliceId=0; sliceId<sliceBuffers.size(); sliceId++){
+        for (int rankId=0; rankId<nRanks; rankId++){
+          MPI_Isend((void*)SOURCES_DATA(sources[0]),
+                    sliceSize,
+                    traits::mpi::datatypeOf<F>(),
+                    rankId,
+                    100,
+                    universe,
+                    &requests[requestCount++]);
+          MPI_Irecv((void*)sliceBuffers[sliceId],
+                    sliceSize,
+                    traits::mpi::datatypeOf<F>(),
+                    rankId,
+                    100,
+                    universe,
+                    &requests[requestCount++]);
+        }
+      }
+      MPI_Waitall(nSends*2, requests, statuses);
+    )
+#endif

     LOG(1,"Atrip") << "#slices " << slices.size() << "\n";
     WITH_RANK << "#slices[0] " << slices[0].size << "\n";
     LOG(1,"Atrip") << "#sources " << sources.size() << "\n";
-    WITH_RANK << "#sources[0] " << sources[0].size() << "\n";
+    WITH_RANK << "#sources[0] " << sliceSize << "\n";
     WITH_RANK << "#freePointers " << freePointers.size() << "\n";
     LOG(1,"Atrip") << "#sliceBuffers " << sliceBuffers.size() << "\n";
     LOG(1,"Atrip") << "GB*" << np << " "
                    << double(sources.size() + sliceBuffers.size())
-                   * sources[0].size()
+                   * sliceSize
                    * 8 * np
                    / 1073741824.0
                    << "\n";
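The warm-up block added above exchanges one dummy slice per (buffer, rank) pair so that the CUDA-aware MPI library touches and registers every freshly allocated device buffer before the timed iterations start. A standalone sketch of the same idea, with assumed names that are not part of the patch:

// Hypothetical helper (names assumed): post one Isend/Irecv pair per
// (slice buffer, rank) so that MPI registers every device buffer once up front.
#include <mpi.h>
#include <vector>

void warmup_buffers(MPI_Comm comm, void *source, std::vector<void*> const &buffers, int count) {
  int np;
  MPI_Comm_size(comm, &np);
  std::vector<MPI_Request> requests(2 * buffers.size() * np);
  size_t r = 0;
  for (size_t b = 0; b < buffers.size(); ++b)
    for (int rank = 0; rank < np; ++rank) {
      MPI_Isend(source,     count, MPI_DOUBLE, rank, 100, comm, &requests[r++]);
      MPI_Irecv(buffers[b], count, MPI_DOUBLE, rank, 100, comm, &requests[r++]);
    }
  MPI_Waitall((int)requests.size(), requests.data(), MPI_STATUSES_IGNORE);
}

Every rank has to call this collectively with the same number of buffers, otherwise the sends and receives do not match up.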
@@ -495,14 +550,13 @@ template <typename F=double>
       if (otherRank == info.from.rank) sendData_p = false;
       if (!sendData_p) return;

-      MPI_Isend( sources[info.from.source].data()
-               , sources[info.from.source].size()
-               , traits::mpi::datatypeOf<F>()
-               , otherRank
-               , tag
-               , universe
-               , &request
-               );
+      MPI_Isend((void*)SOURCES_DATA(sources[info.from.source]),
+                sliceSize,
+                traits::mpi::datatypeOf<F>(),
+                otherRank,
+                tag,
+                universe,
+                &request);
       WITH_CRAZY_DEBUG
       WITH_RANK << "sent to " << otherRank << "\n";
@@ -516,25 +570,25 @@ template <typename F=double>

       if (Atrip::rank == info.from.rank) return;

-      if (slice.info.state == Slice<F>::Fetch) {
+      if (slice.info.state == Slice<F>::Fetch) { // if-1
         // TODO: do it through the slice class
         slice.info.state = Slice<F>::Dispatched;
-#if defined(HAVE_CUDA)
-        slice.mpi_data = (F*)malloc(sizeof(F) * slice.size);
-        MPI_Irecv( slice.mpi_data
+#if defined(HAVE_CUDA) && defined(ATRIP_SOURCES_IN_GPU)
+#  if !defined(ATRIP_CUDA_AWARE_MPI)
+#    error "You need CUDA aware MPI to have slices on the GPU"
+#  endif
+        MPI_Irecv((void*)slice.data,
 #else
-        MPI_Irecv( slice.data
+        MPI_Irecv(slice.data,
 #endif
-                 , slice.size
-                 , traits::mpi::datatypeOf<F>()
-                 , info.from.rank
-                 , tag
-                 , universe
-                 , &slice.request
-                 //, MPI_STATUS_IGNORE
-                 );
-      }
-    }
+                  slice.size,
+                  traits::mpi::datatypeOf<F>(),
+                  info.from.rank,
+                  tag,
+                  universe,
+                  &slice.request);
+      } // if-1
+    } // receive

     void unwrapAll(ABCTuple const& abc) {
       for (auto type: sliceTypes) unwrapSlice(type, abc);
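The new #error enforces at compile time that device-resident slices are only combined with a CUDA-aware MPI. If the MPI implementation happens to be Open MPI, the same property can also be probed at run time; this is a hedged, Open MPI-specific sketch and not part of the patch:

// Hypothetical runtime check (Open MPI only); the patch itself relies on the
// ATRIP_CUDA_AWARE_MPI compile-time switch instead.
#include <mpi.h>
#if defined(OPEN_MPI)
#  include <mpi-ext.h>
#endif

bool mpi_is_cuda_aware() {
#if defined(MPIX_CUDA_AWARE_SUPPORT) && MPIX_CUDA_AWARE_SUPPORT
  return MPIX_Query_cuda_support() == 1;
#else
  return false;
#endif
}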
@@ -597,7 +651,12 @@ template <typename F=double>
     const MPI_Comm world;
     const MPI_Comm universe;
     const std::vector<size_t> sliceLength;
+    const size_t sliceSize;
+#if defined(ATRIP_SOURCES_IN_GPU)
+    std::vector< DataPtr<F> > sources;
+#else
     std::vector< std::vector<F> > sources;
+#endif
     std::vector< Slice<F> > slices;
     typename Slice<F>::Name name;
     const std::vector<typename Slice<F>::Type> sliceTypes;
@@ -52,43 +52,7 @@ struct TuplesDistribution {
 // Distributing the tuples:1 ends here

 // [[file:~/cuda/atrip/atrip.org::*Node%20information][Node information:1]]
-std::vector<std::string> getNodeNames(MPI_Comm comm){
-  int rank, np;
-  MPI_Comm_rank(comm, &rank);
-  MPI_Comm_size(comm, &np);
-
-  std::vector<std::string> nodeList(np);
-  char nodeName[MPI_MAX_PROCESSOR_NAME];
-  char *nodeNames = (char*)malloc(np * MPI_MAX_PROCESSOR_NAME);
-  std::vector<int> nameLengths(np)
-                 , off(np)
-                 ;
-  int nameLength;
-  MPI_Get_processor_name(nodeName, &nameLength);
-  MPI_Allgather(&nameLength,
-                1,
-                MPI_INT,
-                nameLengths.data(),
-                1,
-                MPI_INT,
-                comm);
-  for (int i(1); i < np; i++)
-    off[i] = off[i-1] + nameLengths[i-1];
-  MPI_Allgatherv(nodeName,
-                 nameLengths[rank],
-                 MPI_BYTE,
-                 nodeNames,
-                 nameLengths.data(),
-                 off.data(),
-                 MPI_BYTE,
-                 comm);
-  for (int i(0); i < np; i++) {
-    std::string const s(&nodeNames[off[i]], nameLengths[i]);
-    nodeList[i] = s;
-  }
-  std::free(nodeNames);
-  return nodeList;
-}
+std::vector<std::string> getNodeNames(MPI_Comm comm);
 // Node information:1 ends here

 // [[file:~/cuda/atrip/atrip.org::*Node%20information][Node information:2]]
@@ -100,118 +64,28 @@ struct RankInfo {
   const size_t ranksPerNode;
 };

-template <typename A>
-A unique(A const &xs) {
-  auto result = xs;
-  std::sort(std::begin(result), std::end(result));
-  auto const& last = std::unique(std::begin(result), std::end(result));
-  result.erase(last, std::end(result));
-  return result;
-}
-
 std::vector<RankInfo>
-getNodeInfos(std::vector<string> const& nodeNames) {
-  std::vector<RankInfo> result;
-  auto const uniqueNames = unique(nodeNames);
-  auto const index = [&uniqueNames](std::string const& s) {
-    auto const& it = std::find(uniqueNames.begin(), uniqueNames.end(), s);
-    return std::distance(uniqueNames.begin(), it);
-  };
-  std::vector<size_t> localRanks(uniqueNames.size(), 0);
-  size_t globalRank = 0;
-  for (auto const& name: nodeNames) {
-    const size_t nodeId = index(name);
-    result.push_back({name,
-                      nodeId,
-                      globalRank++,
-                      localRanks[nodeId]++,
-                      (size_t)
-                      std::count(nodeNames.begin(),
-                                 nodeNames.end(),
-                                 name)
-                      });
-  }
-  return result;
-}
+getNodeInfos(std::vector<string> const& nodeNames);

 struct ClusterInfo {
   const size_t nNodes, np, ranksPerNode;
   const std::vector<RankInfo> rankInfos;
 };

-ClusterInfo
-getClusterInfo(MPI_Comm comm) {
-  auto const names = getNodeNames(comm);
-  auto const rankInfos = getNodeInfos(names);
-
-  return ClusterInfo {
-    unique(names).size(),
-    names.size(),
-    rankInfos[0].ranksPerNode,
-    rankInfos
-  };
-
-}
+ClusterInfo getClusterInfo(MPI_Comm comm);
 // Node information:2 ends here

 // [[file:~/cuda/atrip/atrip.org::*Naive%20list][Naive list:1]]
-ABCTuples getTuplesList(size_t Nv, size_t rank, size_t np) {
-
-  const size_t
-    // total number of tuples for the problem
-    n = Nv * (Nv + 1) * (Nv + 2) / 6 - Nv
-
-    // all ranks should have the same number of tuples_per_rank
-    , tuples_per_rank = n / np + size_t(n % np != 0)
-
-    // start index for the global tuples list
-    , start = tuples_per_rank * rank
-
-    // end index for the global tuples list
-    , end = tuples_per_rank * (rank + 1)
-    ;
-
-  LOG(1,"Atrip") << "tuples_per_rank = " << tuples_per_rank << "\n";
-  WITH_RANK << "start, end = " << start << ", " << end << "\n";
-  ABCTuples result(tuples_per_rank, FAKE_TUPLE);
-
-  for (size_t a(0), r(0), g(0); a < Nv; a++)
-    for (size_t b(a); b < Nv; b++)
-      for (size_t c(b); c < Nv; c++){
-        if ( a == b && b == c ) continue;
-        if ( start <= g && g < end) result[r++] = {a, b, c};
-        g++;
-      }
-
-  return result;
-
-}
+ABCTuples getTuplesList(size_t Nv, size_t rank, size_t np);
 // Naive list:1 ends here

 // [[file:~/cuda/atrip/atrip.org::*Naive%20list][Naive list:2]]
-ABCTuples getAllTuplesList(const size_t Nv) {
-  const size_t n = Nv * (Nv + 1) * (Nv + 2) / 6 - Nv;
-  ABCTuples result(n);
-
-  for (size_t a(0), u(0); a < Nv; a++)
-    for (size_t b(a); b < Nv; b++)
-      for (size_t c(b); c < Nv; c++){
-        if ( a == b && b == c ) continue;
-        result[u++] = {a, b, c};
-      }
-
-  return result;
-}
+ABCTuples getAllTuplesList(const size_t Nv);
 // Naive list:2 ends here

 // [[file:~/cuda/atrip/atrip.org::*Naive%20list][Naive list:3]]
 struct NaiveDistribution : public TuplesDistribution {
-  ABCTuples getTuples(size_t Nv, MPI_Comm universe) override {
-    int rank, np;
-    MPI_Comm_rank(universe, &rank);
-    MPI_Comm_size(universe, &np);
-    return getTuplesList(Nv, (size_t)rank, (size_t)np);
-  }
+  ABCTuples getTuples(size_t Nv, MPI_Comm universe) override;
 };
 // Naive list:3 ends here

@@ -224,19 +98,12 @@ namespace group_and_sort {
 // Right now we distribute the slices in a round robin fashion
 // over the different nodes (NOTE: not mpi ranks but nodes)
 inline
-size_t isOnNode(size_t tuple, size_t nNodes) { return tuple % nNodes; }
-
+size_t isOnNode(size_t tuple, size_t nNodes);

 // return the node (or all nodes) where the elements of this
 // tuple are located
-std::vector<size_t> getTupleNodes(ABCTuple const& t, size_t nNodes) {
-  std::vector<size_t>
-    nTuple = { isOnNode(t[0], nNodes)
-             , isOnNode(t[1], nNodes)
-             , isOnNode(t[2], nNodes)
-             };
-  return unique(nTuple);
-}
+std::vector<size_t> getTupleNodes(ABCTuple const& t, size_t nNodes);

 struct Info {
   size_t nNodes;
@@ -245,302 +112,16 @@ struct Info {
 // Utils:1 ends here

 // [[file:~/cuda/atrip/atrip.org::*Distribution][Distribution:1]]
-ABCTuples specialDistribution(Info const& info, ABCTuples const& allTuples) {
-
-  ABCTuples nodeTuples;
-  size_t const nNodes(info.nNodes);
-
-  std::vector<ABCTuples>
-    container1d(nNodes)
-    , container2d(nNodes * nNodes)
-    , container3d(nNodes * nNodes * nNodes)
-    ;
-
-  WITH_DBG if (info.nodeId == 0)
-    std::cout << "\tGoing through all "
-              << allTuples.size()
-              << " tuples in "
-              << nNodes
-              << " nodes\n";
-
-  // build container-n-d's
-  for (auto const& t: allTuples) {
-    // one which node(s) are the tuple elements located...
-    // put them into the right container
-    auto const _nodes = getTupleNodes(t, nNodes);
-
-    switch (_nodes.size()) {
-      case 1:
-        container1d[_nodes[0]].push_back(t);
-        break;
-      case 2:
-        container2d[ _nodes[0]
-                   + _nodes[1] * nNodes
-                   ].push_back(t);
-        break;
-      case 3:
-        container3d[ _nodes[0]
-                   + _nodes[1] * nNodes
-                   + _nodes[2] * nNodes * nNodes
-                   ].push_back(t);
-        break;
-    }
-
-  }
-
-  WITH_DBG if (info.nodeId == 0)
-    std::cout << "\tBuilding 1-d containers\n";
-  // DISTRIBUTE 1-d containers
-  // every tuple which is only located at one node belongs to this node
-  {
-    auto const& _tuples = container1d[info.nodeId];
-    nodeTuples.resize(_tuples.size(), INVALID_TUPLE);
-    std::copy(_tuples.begin(), _tuples.end(), nodeTuples.begin());
-  }
-
-  WITH_DBG if (info.nodeId == 0)
-    std::cout << "\tBuilding 2-d containers\n";
-  // DISTRIBUTE 2-d containers
-  //the tuples which are located at two nodes are half/half given to these nodes
-  for (size_t yx = 0; yx < container2d.size(); yx++) {
-
-    auto const& _tuples = container2d[yx];
-    const
-    size_t idx = yx % nNodes
-         // remeber: yx = idy * nNodes + idx
-         , idy = yx / nNodes
-         , n_half = _tuples.size() / 2
-         , size = nodeTuples.size()
-         ;
-
-    size_t nbeg, nend;
-    if (info.nodeId == idx) {
-      nbeg = 0 * n_half;
-      nend = n_half;
-    } else if (info.nodeId == idy) {
-      nbeg = 1 * n_half;
-      nend = _tuples.size();
-    } else {
-      // either idx or idy is my node
-      continue;
-    }
-
-    size_t const nextra = nend - nbeg;
-    nodeTuples.resize(size + nextra, INVALID_TUPLE);
-    std::copy(_tuples.begin() + nbeg,
-              _tuples.begin() + nend,
-              nodeTuples.begin() + size);
-
-  }
-
-  WITH_DBG if (info.nodeId == 0)
-    std::cout << "\tBuilding 3-d containers\n";
-  // DISTRIBUTE 3-d containers
-  for (size_t zyx = 0; zyx < container3d.size(); zyx++) {
-    auto const& _tuples = container3d[zyx];
-
-    const
-    size_t idx = zyx % nNodes
-         , idy = (zyx / nNodes) % nNodes
-         // remember: zyx = idx + idy * nNodes + idz * nNodes^2
-         , idz = zyx / nNodes / nNodes
-         , n_third = _tuples.size() / 3
-         , size = nodeTuples.size()
-         ;
-
-    size_t nbeg, nend;
-    if (info.nodeId == idx) {
-      nbeg = 0 * n_third;
-      nend = 1 * n_third;
-    } else if (info.nodeId == idy) {
-      nbeg = 1 * n_third;
-      nend = 2 * n_third;
-    } else if (info.nodeId == idz) {
-      nbeg = 2 * n_third;
-      nend = _tuples.size();
-    } else {
-      // either idx or idy or idz is my node
-      continue;
-    }
-
-    size_t const nextra = nend - nbeg;
-    nodeTuples.resize(size + nextra, INVALID_TUPLE);
-    std::copy(_tuples.begin() + nbeg,
-              _tuples.begin() + nend,
-              nodeTuples.begin() + size);
-
-  }
-
-
-  WITH_DBG if (info.nodeId == 0) std::cout << "\tswapping tuples...\n";
-  /*
-   * sort part of group-and-sort algorithm
-   * every tuple on a given node is sorted in a way that
-   * the 'home elements' are the fastest index.
-   * 1:yyy 2:yyn(x) 3:yny(x) 4:ynn(x) 5:nyy 6:nyn(x) 7:nny 8:nnn
-   */
-  for (auto &nt: nodeTuples){
-    if ( isOnNode(nt[0], nNodes) == info.nodeId ){ // 1234
-      if ( isOnNode(nt[2], nNodes) != info.nodeId ){ // 24
-        size_t const x(nt[0]);
-        nt[0] = nt[2]; // switch first and last
-        nt[2] = x;
-      }
-      else if ( isOnNode(nt[1], nNodes) != info.nodeId){ // 3
-        size_t const x(nt[0]);
-        nt[0] = nt[1]; // switch first two
-        nt[1] = x;
-      }
-    } else {
-      if ( isOnNode(nt[1], nNodes) == info.nodeId // 56
-           && isOnNode(nt[2], nNodes) != info.nodeId
-         ) { // 6
-        size_t const x(nt[1]);
-        nt[1] = nt[2]; // switch last two
-        nt[2] = x;
-      }
-    }
-  }
-
-  WITH_DBG if (info.nodeId == 0) std::cout << "\tsorting list of tuples...\n";
-  //now we sort the list of tuples
-  std::sort(nodeTuples.begin(), nodeTuples.end());
-
-  WITH_DBG if (info.nodeId == 0) std::cout << "\trestoring tuples...\n";
-  // we bring the tuples abc back in the order a<b<c
-  for (auto &t: nodeTuples) std::sort(t.begin(), t.end());
-
-#if ATRIP_DEBUG > 1
-  WITH_DBG if (info.nodeId == 0)
-    std::cout << "checking for validity of " << nodeTuples.size() << std::endl;
-  const bool anyInvalid
-    = std::any_of(nodeTuples.begin(),
-                  nodeTuples.end(),
-                  [](ABCTuple const& t) { return t == INVALID_TUPLE; });
-  if (anyInvalid) throw "Some tuple is invalid in group-and-sort algorithm";
-#endif
-
-  WITH_DBG if (info.nodeId == 0) std::cout << "\treturning tuples...\n";
-  return nodeTuples;
-
-}
+ABCTuples specialDistribution(Info const& info, ABCTuples const& allTuples);
 // Distribution:1 ends here

 // [[file:~/cuda/atrip/atrip.org::*Main][Main:1]]
-std::vector<ABCTuple> main(MPI_Comm universe, size_t Nv) {
-
-  int rank, np;
-  MPI_Comm_rank(universe, &rank);
-  MPI_Comm_size(universe, &np);
-
-  std::vector<ABCTuple> result;
-
-  auto const nodeNames(getNodeNames(universe));
-  size_t const nNodes = unique(nodeNames).size();
-  auto const nodeInfos = getNodeInfos(nodeNames);
-
-  // We want to construct a communicator which only contains of one
-  // element per node
-  bool const computeDistribution
-    = nodeInfos[rank].localRank == 0;
-
-  std::vector<ABCTuple>
-    nodeTuples
-    = computeDistribution
-    ? specialDistribution(Info{nNodes, nodeInfos[rank].nodeId},
-                          getAllTuplesList(Nv))
-    : std::vector<ABCTuple>()
-    ;
-
-  LOG(1,"Atrip") << "got nodeTuples\n";
-
-  // now we have to send the data from **one** rank on each node
-  // to all others ranks of this node
-  const
-  int color = nodeInfos[rank].nodeId
-    , key = nodeInfos[rank].localRank
-    ;
-
-  MPI_Comm INTRA_COMM;
-  MPI_Comm_split(universe, color, key, &INTRA_COMM);
-  // Main:1 ends here
-
-  // [[file:~/cuda/atrip/atrip.org::*Main][Main:2]]
-  size_t const
-    tuplesPerRankLocal
-    = nodeTuples.size() / nodeInfos[rank].ranksPerNode
-    + size_t(nodeTuples.size() % nodeInfos[rank].ranksPerNode != 0)
-    ;
-
-  size_t tuplesPerRankGlobal;
-
-  MPI_Reduce(&tuplesPerRankLocal,
-             &tuplesPerRankGlobal,
-             1,
-             MPI_UINT64_T,
-             MPI_MAX,
-             0,
-             universe);
-
-  MPI_Bcast(&tuplesPerRankGlobal,
-            1,
-            MPI_UINT64_T,
-            0,
-            universe);
-
-  LOG(1,"Atrip") << "Tuples per rank: " << tuplesPerRankGlobal << "\n";
-  LOG(1,"Atrip") << "ranks per node " << nodeInfos[rank].ranksPerNode << "\n";
-  LOG(1,"Atrip") << "#nodes " << nNodes << "\n";
-  // Main:2 ends here
-
-  // [[file:~/cuda/atrip/atrip.org::*Main][Main:3]]
-  size_t const totalTuples
-    = tuplesPerRankGlobal * nodeInfos[rank].ranksPerNode;
-
-  if (computeDistribution) {
-    // pad with FAKE_TUPLEs
-    nodeTuples.insert(nodeTuples.end(),
-                      totalTuples - nodeTuples.size(),
-                      FAKE_TUPLE);
-  }
-  // Main:3 ends here
-
-  // [[file:~/cuda/atrip/atrip.org::*Main][Main:4]]
-  {
-    // construct mpi type for abctuple
-    MPI_Datatype MPI_ABCTUPLE;
-    MPI_Type_vector(nodeTuples[0].size(), 1, 1, MPI_UINT64_T, &MPI_ABCTUPLE);
-    MPI_Type_commit(&MPI_ABCTUPLE);
-
-    LOG(1,"Atrip") << "scattering tuples \n";
-
-    result.resize(tuplesPerRankGlobal);
-    MPI_Scatter(nodeTuples.data(),
-                tuplesPerRankGlobal,
-                MPI_ABCTUPLE,
-                result.data(),
-                tuplesPerRankGlobal,
-                MPI_ABCTUPLE,
-                0,
-                INTRA_COMM);
-
-    MPI_Type_free(&MPI_ABCTUPLE);
-
-  }
-  // Main:4 ends here
-
-  // [[file:~/cuda/atrip/atrip.org::*Main][Main:5]]
-  return result;
-
-}
+std::vector<ABCTuple> main(MPI_Comm universe, size_t Nv);
 // Main:5 ends here

 // [[file:~/cuda/atrip/atrip.org::*Interface][Interface:1]]
 struct Distribution : public TuplesDistribution {
-  ABCTuples getTuples(size_t Nv, MPI_Comm universe) override {
-    return main(universe, Nv);
-  }
+  ABCTuples getTuples(size_t Nv, MPI_Comm universe) override;
 };
 // Interface:1 ends here

@@ -19,8 +19,14 @@
 namespace atrip {

   template <typename F=double>
+  static
   void sliceIntoVector
-  ( std::vector<F> &v
+#if defined(ATRIP_SOURCES_IN_GPU)
+  ( DataPtr<F> &source
+#else
+  ( std::vector<F> &source
+#endif
+  , size_t sliceSize
   , CTF::Tensor<F> &toSlice
   , std::vector<int64_t> const low
   , std::vector<int64_t> const up
@@ -44,18 +50,30 @@ namespace atrip {
       << "\n";

 #ifndef ATRIP_DONT_SLICE
-    toSlice.slice( toSlice_.low.data()
-                 , toSlice_.up.data()
-                 , 0.0
-                 , origin
-                 , origin_.low.data()
-                 , origin_.up.data()
-                 , 1.0);
-    memcpy(v.data(), toSlice.data, sizeof(F) * v.size());
+    toSlice.slice(toSlice_.low.data(),
+                  toSlice_.up.data(),
+                  0.0,
+                  origin,
+                  origin_.low.data(),
+                  origin_.up.data(),
+                  1.0);
 #else
 #  pragma message("WARNING: COMPILING WITHOUT SLICING THE TENSORS")
 #endif

+#if defined(ATRIP_SOURCES_IN_GPU)
+    WITH_CHRONO("cuda:sources",
+      _CHECK_CUDA_SUCCESS("copying sources data to device",
+                          cuMemcpyHtoD(source,
+                                       toSlice.data,
+                                       sliceSize));
+    )
+#else
+    memcpy(source.data(),
+           toSlice.data,
+           sizeof(F) * sliceSize);
+#endif

   }

@@ -80,16 +98,15 @@ namespace atrip {

     void sliceIntoBuffer(size_t it, CTF::Tensor<F> &to, CTF::Tensor<F> const& from) override
     {
-      const int Nv = this->sliceLength[0]
-              , No = this->sliceLength[1]
-              , a = this->rankMap.find({static_cast<size_t>(Atrip::rank), it});
-              ;
-
-      sliceIntoVector<F>( this->sources[it]
-                        , to, {0, 0, 0}, {Nv, No, No}
-                        , from, {a, 0, 0, 0}, {a+1, Nv, No, No}
-                        );
+      const int
+        Nv = this->sliceLength[0],
+        No = this->sliceLength[1],
+        a = this->rankMap.find({static_cast<size_t>(Atrip::rank), it});

+      sliceIntoVector<F>(this->sources[it], this->sliceSize,
+                         to, {0, 0, 0}, {Nv, No, No},
+                         from, {a, 0, 0, 0}, {a+1, Nv, No, No});
     }

@@ -118,14 +135,13 @@ namespace atrip {
     void sliceIntoBuffer(size_t it, CTF::Tensor<F> &to, CTF::Tensor<F> const& from) override
     {
-      const int No = this->sliceLength[0]
-              , a = this->rankMap.find({static_cast<size_t>(Atrip::rank), it})
-              ;
-
-      sliceIntoVector<F>( this->sources[it]
-                        , to, {0, 0, 0}, {No, No, No}
-                        , from, {0, 0, 0, a}, {No, No, No, a+1}
-                        );
+      const int
+        No = this->sliceLength[0],
+        a = this->rankMap.find({static_cast<size_t>(Atrip::rank), it});

+      sliceIntoVector<F>(this->sources[it], this->sliceSize,
+                         to, {0, 0, 0}, {No, No, No},
+                         from, {0, 0, 0, a}, {No, No, No, a+1});
     }
   };
@@ -153,18 +169,17 @@ namespace atrip {

     void sliceIntoBuffer(size_t it, CTF::Tensor<F> &to, CTF::Tensor<F> const& from) override {

-      const int Nv = this->sliceLength[0]
-              , No = this->sliceLength[1]
-              , el = this->rankMap.find({static_cast<size_t>(Atrip::rank), it})
-              , a = el % Nv
-              , b = el / Nv
-              ;
-
-      sliceIntoVector<F>( this->sources[it]
-                        , to, {0, 0}, {Nv, No}
-                        , from, {a, b, 0, 0}, {a+1, b+1, Nv, No}
-                        );
+      const int
+        Nv = this->sliceLength[0],
+        No = this->sliceLength[1],
+        el = this->rankMap.find({static_cast<size_t>(Atrip::rank), it}),
+        a = el % Nv,
+        b = el / Nv;

+      sliceIntoVector<F>(this->sources[it], this->sliceSize,
+                         to, {0, 0}, {Nv, No},
+                         from, {a, b, 0, 0}, {a+1, b+1, Nv, No});
     }

@@ -191,17 +206,17 @@ namespace atrip {

     void sliceIntoBuffer(size_t it, CTF::Tensor<F> &to, CTF::Tensor<F> const& from) override {

-      const int Nv = from.lens[0]
-              , No = this->sliceLength[1]
-              , el = this->rankMap.find({static_cast<size_t>(Atrip::rank), it})
-              , a = el % Nv
-              , b = el / Nv
-              ;
-
-      sliceIntoVector<F>( this->sources[it]
-                        , to, {0, 0}, {No, No}
-                        , from, {a, b, 0, 0}, {a+1, b+1, No, No}
-                        );
+      const int
+        Nv = from.lens[0],
+        No = this->sliceLength[1],
+        el = this->rankMap.find({static_cast<size_t>(Atrip::rank), it}),
+        a = el % Nv,
+        b = el / Nv;

+      sliceIntoVector<F>(this->sources[it], this->sliceSize,
+                         to, {0, 0}, {No, No},
+                         from, {a, b, 0, 0}, {a+1, b+1, No, No});
     }

@@ -231,17 +246,16 @@ namespace atrip {
     void sliceIntoBuffer(size_t it, CTF::Tensor<F> &to, CTF::Tensor<F> const& from) override {
       // TODO: maybe generalize this with ABHH

-      const int Nv = from.lens[0]
-              , No = this->sliceLength[1]
-              , el = this->rankMap.find({static_cast<size_t>(Atrip::rank), it})
-              , a = el % Nv
-              , b = el / Nv
-              ;
-
-      sliceIntoVector<F>( this->sources[it]
-                        , to, {0, 0}, {No, No}
-                        , from, {a, b, 0, 0}, {a+1, b+1, No, No}
-                        );
+      const int
+        Nv = from.lens[0],
+        No = this->sliceLength[1],
+        el = this->rankMap.find({static_cast<size_t>(Atrip::rank), it}),
+        a = el % Nv,
+        b = el / Nv;

+      sliceIntoVector<F>(this->sources[it], this->sliceSize,
+                         to, {0, 0}, {No, No},
+                         from, {a, b, 0, 0}, {a+1, b+1, No, No});
     }
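After slicing, sliceIntoVector now copies the data either into a host vector with memcpy or straight into the device-resident source with cuMemcpyHtoD. A compact sketch of the two copy paths as one helper, with assumed names and the byte count written out explicitly:

// Hypothetical helper mirroring the two copy paths of sliceIntoVector.
#include <cstring>
#if defined(HAVE_CUDA)
#  include <cuda.h>
#endif

template <typename F>
void copy_slice(F *destination, F const *sliced_host_data, size_t n_elements) {
#if defined(ATRIP_SOURCES_IN_GPU)
  // destination is a device pointer: go through the CUDA driver API
  cuMemcpyHtoD((CUdeviceptr)destination, sliced_host_data, sizeof(F) * n_elements);
#else
  // destination is ordinary host memory
  std::memcpy(destination, sliced_host_data, sizeof(F) * n_elements);
#endif
}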
|
|||||||
misc/naive-tuples.lisp | 228 (new file)
@@ -0,0 +1,228 @@
#+quicklisp
(eval-when (:compile-toplevel :load-toplevel :execute)
  (ql:quickload '(vgplot fiveam)))

(defpackage :naive-tuples
  (:use :cl :vgplot))
(in-package :naive-tuples)

(defun tuples-atrip (nv)
  (declare (optimize (speed 3) (safety 0) (debug 0)))
  (loop :for a :below nv
        :append
        (loop :for b :from a :below nv
              :append
              (loop :for c :from b :below nv
                    :unless (= a b c)
                    :collect (list a b c)))))

(defun tuples-half (nv)
  (declare (optimize (speed 3) (safety 0) (debug 0)))
  (loop :for a :below nv
        :append
        (loop :for b :from a :below nv
              :append
              (loop :for c :from b :below nv
                    :collect (list a b c)))))

(defun tuples-all (nv)
  (declare (optimize (speed 3) (safety 0) (debug 0)))
  (loop :for a :below nv
        :append
        (loop :for b :below nv
              :append
              (loop :for c :below nv
                    :collect (list a b c)))))

(defun tuples-all-nth (i nv)
  (declare (optimize (speed 3) (safety 0) (debug 0)))
  (list (floor i (* nv nv))
        (mod (floor i nv) nv)
        (mod i nv)))


(defparameter tups (tuples-all 10))

(defun compare-all (l)
  (declare (optimize (speed 3) (safety 0) (debug 0)))
  (let* ((tups (tuples-all l)))
    (loop for i below (length tups)
          do (let* ((good (nth i tups))
                    (bad (tuples-all-nth i l))
                    (eq? (equal good bad)))
               (unless eq?
                 (print (list :|i| i
                              :good good
                              :bad bad)))))))


;; (defun a-half (i nv)
;;   (let ((divisor t)
;;         (j i)
;;         (total-blk 0))
;;     (loop :for a :below nv
;;           :unless (eq divisor 0)
;;           :do (let ((blk (a-block a nv)))
;;                 (multiple-value-bind (d r) (floor j blk)
;;                   (declare (ignore r))
;;                   (when (> d 0)
;;                     (incf total-blk blk))
;;                   (setq j (- j blk)
;;                         divisor d)))
;;           :else
;;           :return (values (- a 1)
;;                           i
;;                           total-blk))))

;; (defun b-half (i a nv a-block-sum)
;;   "we have
;;    \begin{equation}
;;      i = \underbrace{B(a_0) +
;;                      \cdots +
;;                      B(a_{i-1})}_{\texttt{a-block-sum}}
;;          + idx
;;    \end{equation}
;;    and with this we just have to divide.
;;    "
;;   (let ((bj (if (> a-block-sum 0)
;;                 (mod i a-block-sum)
;;                 i))
;;         (total-blk 0))
;;     (loop :for b :from a :below Nv
;;           :with divisor = 1
;;           :unless (eq divisor 0)
;;           :do (let ((blk (+ (- nv a)
;;                             #|because|# 1)))
;;                 (incf total-blk blk)
;;                 (if (> blk 0)
;;                     (multiple-value-bind (d r) (floor bj blk)
;;                       (declare (ignore r))
;;                       (setq bj (- bj blk)
;;                             divisor d))
;;                     (setq divisor 0)))
;;           :else
;;           :return (values (- b 1)
;;                           bj
;;                           total-blk))))

(defun a-block (a nv)
  (declare (optimize (speed 3) (safety 0) (debug 0)))
  (- (* (- nv 1) (- nv (- a 1)))
     (- (floor (* (- nv 1) nv)
               2)
        (floor (* (- a 1) (- a 2))
               2))))

(defun a-block-sum (|t| nv)
  (macrolet ((ssum (n) `(floor (* ,n (+ ,n 1))
                               2))
             (qsum (n) `(floor (* ,n
                                  (+ ,n 1)
                                  (+ 1 (* 2 ,n)))
                               6)))
    (let ((nv-1 (- nv 1))
          (t+1 (+ |t| 1)))
      (+ (* t+1 nv-1 nv)
         (* nv-1 t+1)
         (- (* nv-1
               (ssum |t|)))
         (- (* t+1
               (ssum nv-1)))
         (floor (- (qsum |t|)
                   (* 3 (ssum |t|)))
                2)
         t+1))))

(defun get-half (i nv &key from block)
  (let ((divisor 1)
        (j i)
        (total-blk 0))
    (loop :for α :from from :below nv
          :unless (eq divisor 0)
          :do (let ((blk (funcall block α nv)))
                (multiple-value-bind (d r) (floor j blk)
                  (declare (ignore r))
                  (when (> d 0)
                    (incf total-blk blk)
                    (setq j (- j blk)))
                  (setq divisor d)))
          :else
          :return (values (- α 1)
                          j
                          total-blk))))

(defun tuples-half-nth (i nv)
  (declare (optimize (speed 3) (safety 0) (debug 0)))
  (flet ((bc-block (x %nv)
           (+ 1 (- %nv x))))
    (multiple-value-bind (a aj blks) (get-half i nv :from 0 :block #'a-block)
      (declare (ignore blks))
      (multiple-value-bind (b bj blks) (get-half aj nv
                                                 :from a
                                                 :block #'bc-block)
        (declare (ignore blks))
        (multiple-value-bind (c cj blks) (get-half bj nv
                                                   :from b
                                                   :block #'bc-block)
          (declare (ignore cj blks))
          (print (list :idxs aj bj cj))
          (list a b c))))))

(defun a-block-atrip (a nv)
  (declare (optimize (speed 3) (safety 0) (debug 0)))
  (- (a-block a nv) 1))

(defun a-block-sum-atrip (|t| nv)
  (declare (optimize (speed 3) (safety 0) (debug 0)))
  (- (a-block-sum |t| nv) (+ |t| 1)))

(defun b-block-sum-atrip (a |t| nv)
  (- (* nv
        (1+ (- |t| a)))
     (floor (- (* |t| (1+ |t|))
               (* a (- a 1)))
            2)
     1))

(defun nth-atrip (i nv)
  (let ((sums (mapcar (lambda (s) (a-block-sum-atrip s nv))
                      (loop :for j :below nv :collect j))))
    (multiple-value-bind (a ablk)
        (loop :for sum :in sums
              :with a = -1
              :with base = 0
              :do (incf a)
              :if (eq (floor i sum) 0)
              :return (values a base)
              :else
              :do (setq base sum))
      (multiple-value-bind (b bblk)
          (let ((sums (mapcar (lambda (s)
                                (+ ablk
                                   #+nil(- nv s 1)
                                   (b-block-sum-atrip a s nv)))
                              (loop :for b :from a :below nv
                                    :collect b))))
            (loop :for sum :in sums
                  :with b = (- a 1)
                  :with base = ablk
                  :do (incf b)
                  :if (< i sum)
                  :return (values b base)
                  :else
                  :do (progn
                        ;; (print sums)
                        (setq base sum))))
        (list a b (+ b
                     (- i bblk)
                     (if (eq a b)
                         1
                         0)))))))

(defun atrip-test (i nv)
  (let ((tuples (tuples-atrip nv))
        (cheaper (nth-atrip i nv)))
    (values (nth i tuples)
            cheaper
            (print (equal (nth i tuples)
                          cheaper)))))
||||||
shell.nix | 15
@@ -12,6 +12,7 @@ let
   };

   openblas = import ./etc/nix/openblas.nix { inherit pkgs; };
+  vendor = import ./etc/nix/vendor-shell.nix;

   mkl-pkg = import ./etc/nix/mkl.nix { pkgs = unfree-pkgs; };
   cuda-pkg = if cuda then (import ./cuda.nix { pkgs = unfree-pkgs; }) else {};
@@ -57,14 +58,15 @@ pkgs.mkShell rec {
   buildInputs
     = with pkgs; [

+      gdb
       coreutils
-      git vim
+      git
+      vim

       openmpi
       llvmPackages.openmp

       binutils
-      emacs
       gfortran

       gnumake
@@ -84,6 +86,15 @@ pkgs.mkShell rec {
   shellHook
     =
     ''
+    ${vendor.src}

+    ${vendor.cpath "${pkgs.openmpi.out}/include"}
+    ${vendor.cpath "${openblas.pkg.dev}/include"}
+
+    ${vendor.lib "${pkgs.openmpi.out}/lib"}
+    ${vendor.lib "${openblas.pkg.out}/lib"}
+
     export OMPI_CXX=${CXX}
     export OMPI_CC=${CC}
     CXX=${CXX}

@@ -7,7 +7,7 @@ AM_CPPFLAGS = $(CTF_CPPFLAGS)
 lib_LIBRARIES = libatrip.a

 libatrip_a_CPPFLAGS = -I$(top_srcdir)/include/
-libatrip_a_SOURCES = ./atrip/Blas.cxx
+libatrip_a_SOURCES = ./atrip/Blas.cxx ./atrip/Tuples.cxx ./atrip/DatabaseCommunicator.cxx
 NVCC_FILES = ./atrip/Equations.cxx ./atrip/Complex.cxx ./atrip/Atrip.cxx

 if WITH_CUDA

@@ -21,6 +21,7 @@
 #include <atrip/SliceUnion.hpp>
 #include <atrip/Unions.hpp>
 #include <atrip/Checkpoint.hpp>
+#include <atrip/DatabaseCommunicator.hpp>

 using namespace atrip;
 #if defined(HAVE_CUDA)
@@ -201,7 +202,7 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
   _CHECK_CUDA_SUCCESS("Zijk",
                       cuMemAlloc(&Zijk, sizeof(F) * No * No * No));
 #else
-  std::vector<F> &Tai = _Tai, &epsi = _epsi, &epsa = _epsa;
+  DataPtr<F> Tai = _Tai.data(), epsi = _epsi.data(), epsa = _epsa.data();
   Zijk = (DataFieldType<F>*)malloc(No*No*No * sizeof(DataFieldType<F>));
   Tijk = (DataFieldType<F>*)malloc(No*No*No * sizeof(DataFieldType<F>));
 #endif
@@ -257,6 +258,25 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
   // all tensors
   std::vector< SliceUnion<F>* > unions = {&taphh, &hhha, &abph, &abhh, &tabhh};

+#ifdef HAVE_CUDA
+  // TODO: free buffers
+  DataFieldType<F>* _t_buffer;
+  DataFieldType<F>* _vhhh;
+  WITH_CHRONO("double:cuda:alloc",
+    _CHECK_CUDA_SUCCESS("Allocating _t_buffer",
+                        cuMemAlloc((CUdeviceptr*)&_t_buffer,
+                                   No*No*No * sizeof(DataFieldType<F>)));
+    _CHECK_CUDA_SUCCESS("Allocating _vhhh",
+                        cuMemAlloc((CUdeviceptr*)&_vhhh,
+                                   No*No*No * sizeof(DataFieldType<F>)));
+  )
+  //const size_t
+  //  bs = Atrip::kernelDimensions.ooo.blocks,
+  //  ths = Atrip::kernelDimensions.ooo.threads;
+  //cuda::zeroing<<<bs, ths>>>((DataFieldType<F>*)_t_buffer, NoNoNo);
+  //cuda::zeroing<<<bs, ths>>>((DataFieldType<F>*)_vhhh, NoNoNo);
+#endif
+
   // get tuples for the current rank
   TuplesDistribution *distribution;

@@ -299,9 +319,23 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
   using Database = typename Slice<F>::Database;
   auto communicateDatabase
     = [ &unions
+      , &in
+      , Nv
       , np
-      ] (ABCTuple const& abc, MPI_Comm const& c) -> Database {
+      ] (ABCTuple const& abc, MPI_Comm const& c, size_t iteration) -> Database {
+
+      if (in.tuplesDistribution == Atrip::Input<F>::TuplesDistribution::NAIVE) {
+
+        WITH_CHRONO("db:comm:naive",
+          auto const& db = naiveDatabase<F>(unions,
+                                            Nv,
+                                            np,
+                                            iteration,
+                                            c);
+        )
+        return db;
+
+      } else {
       WITH_CHRONO("db:comm:type:do",
         auto MPI_LDB_ELEMENT = Slice<F>::mpi::localDatabaseElement();
       )
@@ -334,6 +368,8 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
       WITH_CHRONO("db:comm:type:free", MPI_Type_free(&MPI_LDB_ELEMENT);)

       return db;
+      }
+
     };

   auto doIOPhase
@@ -437,6 +473,7 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {

   // START MAIN LOOP ======================================================{{{1

+  MPI_Barrier(universe);
   double energy(0.);
   size_t first_iteration = 0;
   Checkpoint c;
@@ -564,7 +601,7 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
     // COMM FIRST DATABASE ================================================{{{1
     if (i == first_iteration) {
       WITH_RANK << "__first__:first database ............ \n";
-      const auto db = communicateDatabase(abc, universe);
+      const auto db = communicateDatabase(abc, universe, i);
       WITH_RANK << "__first__:first database communicated \n";
       WITH_RANK << "__first__:first database io phase \n";
       doIOPhase(db);
@@ -579,7 +616,7 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
     if (abcNext) {
       WITH_RANK << "__comm__:" << iteration << "th communicating database\n";
       WITH_CHRONO("db:comm",
-        const auto db = communicateDatabase(*abcNext, universe);
+        const auto db = communicateDatabase(*abcNext, universe, i);
       )
       WITH_CHRONO("db:io",
         doIOPhase(db);
@@ -621,14 +658,23 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
                 tabhh.unwrapSlice(Slice<F>::AC, abc),
                 tabhh.unwrapSlice(Slice<F>::BC, abc),
                 // -- TIJK
-                (DataFieldType<F>*)Tijk);
+                (DataFieldType<F>*)Tijk
+#if defined(HAVE_CUDA)
+                // -- tmp buffers
+                ,(DataFieldType<F>*)_t_buffer
+                ,(DataFieldType<F>*)_vhhh
+#endif
+                );

         WITH_RANK << iteration << "-th doubles done\n";
       ))
     }

     // COMPUTE SINGLES %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% {{{1
     OCD_Barrier(universe);
+#if defined(ATRIP_ONLY_DGEMM)
     if (false)
+#endif
     if (!isFakeTuple(i)) {
       WITH_CHRONO("oneshot-unwrap",
       WITH_CHRONO("unwrap",
@@ -647,7 +693,7 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
                              (DataFieldType<F>*)Tai,
 #else
       singlesContribution<F>(No, Nv, abc[0], abc[1], abc[2],
-                             Tai.data(),
+                             Tai,
 #endif
                              (DataFieldType<F>*)abhh.unwrapSlice(Slice<F>::AB,
                                                                  abc),
@@ -661,30 +707,73 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {


     // COMPUTE ENERGY %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% {{{1
+#if defined(ATRIP_ONLY_DGEMM)
+    if (false)
+#endif /* defined(ATRIP_ONLY_DGEMM) */
     if (!isFakeTuple(i)) {
-      double tupleEnergy(0.);
+#if defined(HAVE_CUDA)
+      double *tupleEnergy;
+      cuMemAlloc((DataPtr<double>*)&tupleEnergy, sizeof(double));
+#else
+      double _tupleEnergy(0.);
+      double *tupleEnergy = &_tupleEnergy;
+#endif /* defined(HAVE_CUDA) */

       int distinct(0);
       if (abc[0] == abc[1]) distinct++;
       if (abc[1] == abc[2]) distinct--;
-      const F epsabc(_epsa[abc[0]] + _epsa[abc[1]] + _epsa[abc[2]]);
+      const double
+        epsabc = std::real(_epsa[abc[0]] + _epsa[abc[1]] + _epsa[abc[2]]);
+      DataFieldType<F> _epsabc{epsabc};

-      // LOG(0, "AtripCUDA") << "doing energy " << i << "distinct " << distinct << "\n";
       WITH_CHRONO("energy",
-        /*
-        TODO: think about how to do this on the GPU in the best way possible
-        if ( distinct == 0)
-          tupleEnergy = getEnergyDistinct<F>(epsabc, No, (F*)epsi, (F*)Tijk, (F*)Zijk);
-        else
-          tupleEnergy = getEnergySame<F>(epsabc, No, (F*)epsi, (F*)Tijk, (F*)Zijk);
-        */
-      )
+        if ( distinct == 0) {
+          ACC_FUNCALL(getEnergyDistinct<DataFieldType<F>>,
+                      1, 1, // for cuda
+                      _epsabc,
+                      No,
+#if defined(HAVE_CUDA)
+                      (DataFieldType<F>*)epsi,
+                      (DataFieldType<F>*)Tijk,
+                      (DataFieldType<F>*)Zijk,
+#else
+                      epsi,
+                      Tijk,
+                      Zijk,
+#endif
+                      tupleEnergy);
+        } else {
+          ACC_FUNCALL(getEnergySame<DataFieldType<F>>,
+                      1, 1, // for cuda
+                      _epsabc,
+                      No,
+#if defined(HAVE_CUDA)
+                      (DataFieldType<F>*)epsi,
+                      (DataFieldType<F>*)Tijk,
+                      (DataFieldType<F>*)Zijk,
+#else
+                      epsi,
+                      Tijk,
+                      Zijk,
+#endif
+                      tupleEnergy);
+        })
+
+#if defined(HAVE_CUDA)
+      double host_tuple_energy;
+      cuMemcpyDtoH((void*)&host_tuple_energy,
+                   (DataPtr<double>)tupleEnergy,
+                   sizeof(double));
+#else
+      double host_tuple_energy = *tupleEnergy;
+#endif /* defined(HAVE_CUDA) */

 #if defined(HAVE_OCD) || defined(ATRIP_PRINT_TUPLES)
-      tupleEnergies[abc] = tupleEnergy;
+      tupleEnergies[abc] = host_tuple_energy;
 #endif

-      energy += tupleEnergy;
+      energy += host_tuple_energy;

     }

@@ -750,6 +839,8 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
     Atrip::chrono["iterations"].stop();
     // ITERATION END %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%{{{1

+    if (in.maxIterations != 0 && i >= in.maxIterations) break;

   }
   // END OF MAIN LOOP
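The energy kernels above are launched through ACC_FUNCALL, whose definition is not part of this diff; presumably it dispatches either to a CUDA kernel launch or to a plain host call. A sketch of such a macro, stated as an assumption rather than the actual atrip definition:

// Hypothetical ACC_FUNCALL-style dispatch macro (assumed, not taken from atrip).
#if defined(HAVE_CUDA)
#  define ACC_FUNCALL(fn, blocks, threads, ...) fn<<<(blocks), (threads)>>>(__VA_ARGS__)
#else
#  define ACC_FUNCALL(fn, blocks, threads, ...) fn(__VA_ARGS__)
#endif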
|||||||
303
src/atrip/DatabaseCommunicator.cxx
Normal file
303
src/atrip/DatabaseCommunicator.cxx
Normal file
@ -0,0 +1,303 @@
|
|||||||
|
#include <atrip/DatabaseCommunicator.hpp>
|
||||||
|
#include <atrip/Complex.hpp>
|
||||||
|
|
||||||
|
|
||||||
|
namespace atrip {
|
||||||
|
|
||||||
|
#if defined(ATRIP_NAIVE_SLOW)
|
||||||
|
/*
|
||||||
|
* This function is really too slow, below are more performant
|
||||||
|
* functions to get tuples.
|
||||||
|
*/
|
||||||
|
static
|
||||||
|
ABCTuples get_nth_naive_tuples(size_t Nv, size_t np, int64_t i) {
|
||||||
|
|
||||||
|
const size_t
|
||||||
|
// total number of tuples for the problem
|
||||||
|
n = Nv * (Nv + 1) * (Nv + 2) / 6 - Nv
|
||||||
|
|
||||||
|
// all ranks should have the same number of tuples_per_rank
|
||||||
|
, tuples_per_rank = n / np + size_t(n % np != 0)
|
||||||
|
;
|
||||||
|
|
||||||
|
|
||||||
|
ABCTuples result(np);
|
||||||
|
if (i < 0) return result;
|
||||||
|
std::vector<size_t>
|
||||||
|
rank_indices(np, 0);
|
||||||
|
|
||||||
|
for (size_t a(0), g(0); a < Nv; a++)
|
||||||
|
for (size_t b(a); b < Nv; b++)
|
||||||
|
for (size_t c(b); c < Nv; c++){
|
||||||
|
if ( a == b && b == c ) continue;
|
||||||
|
for (size_t rank = 0; rank < np; rank++) {
|
||||||
|
|
||||||
|
const size_t
|
||||||
|
// start index for the global tuples list
|
||||||
|
start = tuples_per_rank * rank
|
||||||
|
|
||||||
|
// end index for the global tuples list
|
||||||
|
, end = tuples_per_rank * (rank + 1)
|
||||||
|
;
|
||||||
|
|
||||||
|
if ( start <= g && g < end) {
|
||||||
|
if (rank_indices[rank] == i) {
|
||||||
|
result[rank] = {a, b, c};
|
||||||
|
}
|
||||||
|
rank_indices[rank] += 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
g++;
|
||||||
|
}
|
||||||
|
|
||||||
|
return result;
|
||||||
|
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
|
static
|
||||||
|
inline
|
||||||
|
size_t a_block_sum_atrip(int64_t T, int64_t nv) {
|
||||||
|
const int64_t nv_min_1 = nv - 1, t_plus_1 = T + 1;
|
||||||
|
return t_plus_1 * nv_min_1 * nv
|
||||||
|
+ nv_min_1 * t_plus_1
|
||||||
|
- (nv_min_1 * (T * t_plus_1) / 2)
|
||||||
|
- (t_plus_1 * (nv_min_1 * nv) / 2)
|
||||||
|
// do not simplify this expression, only the addition of both parts
|
||||||
|
// is a pair integer, prepare to endure the consequences of
|
||||||
|
// simplifying otherwise
|
||||||
|
+ (((T * t_plus_1 * (1 + 2 * T)) / 6) - 3 * ((T * t_plus_1) / 2)) / 2
|
||||||
|
;
|
||||||
|
}
|
||||||
|
|
||||||
|
static
|
||||||
|
inline
|
||||||
|
int64_t b_block_sum_atrip (int64_t a, int64_t T, int64_t nv) {
|
||||||
|
return nv * ((T - a) + 1)
|
||||||
|
- (T * (T + 1) - a * (a - 1)) / 2
|
||||||
|
- 1;
|
||||||
|
}
|
||||||
|
|
||||||
|
static std::vector<size_t> a_sums;
|
||||||
|
static
|
||||||
|
inline
|
||||||
|
ABCTuple nth_atrip(size_t it, size_t nv) {
|
||||||
|
|
||||||
|
// build the sums if necessary
|
||||||
|
if (!a_sums.size()) {
|
||||||
|
a_sums.resize(nv);
|
||||||
|
for (size_t _i = 0; _i < nv; _i++) {
|
||||||
|
a_sums[_i] = a_block_sum_atrip(_i, nv);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
int64_t a = -1, block_a = 0;
|
||||||
|
for (const auto& sum: a_sums) {
|
||||||
|
++a;
|
||||||
|
if (sum > it) {
|
||||||
|
break;
|
||||||
|
} else {
|
||||||
|
block_a = sum;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// build the b_sums
|
||||||
|
std::vector<int64_t> b_sums(nv - a);
|
||||||
|
for (size_t t = a, i=0; t < nv; t++) {
|
||||||
|
b_sums[i++] = b_block_sum_atrip(a, t, nv);
|
||||||
|
}
|
||||||
|
int64_t b = a - 1, block_b = block_a;
|
||||||
|
for (const auto& sum: b_sums) {
|
||||||
|
++b;
|
||||||
|
if (sum + block_a > it) {
|
||||||
|
break;
|
||||||
|
} else {
|
||||||
|
block_b = sum + block_a;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
const int64_t
|
||||||
|
c = b + it - block_b + (a == b);
|
||||||
|
|
||||||
|
return {(size_t)a, (size_t)b, (size_t)c};
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
static
|
||||||
|
inline
|
||||||
|
ABCTuples nth_atrip_distributed(int64_t it, size_t nv, size_t np) {
|
||||||
|
|
||||||
|
// If we are getting the previous tuples in the first iteration,
|
||||||
|
// then just return an impossible tuple, different from the FAKE_TUPLE,
|
||||||
|
// because if FAKE_TUPLE is defined as {0,0,0} slices thereof
|
||||||
|
// are actually attainable.
|
||||||
|
//
|
||||||
|
if (it < 0) {
|
||||||
|
ABCTuples result(np, {nv, nv, nv});
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
ABCTuples result(np);
|
||||||
|
|
||||||
|
const size_t
|
||||||
|
// total number of tuples for the problem
|
||||||
|
n = nv * (nv + 1) * (nv + 2) / 6 - nv
|
||||||
|
|
||||||
|
// all ranks should have the same number of tuples_per_rank
|
||||||
|
, tuples_per_rank = n / np + size_t(n % np != 0)
|
||||||
|
;
|
||||||
|
|
||||||
|
|
||||||
|
for (size_t rank = 0; rank < np; rank++) {
|
||||||
|
const size_t
|
||||||
|
global_iteration = tuples_per_rank * rank + it;
|
||||||
|
result[rank] = nth_atrip(global_iteration, nv);
|
||||||
|
}
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||

template <typename F>
static
typename Slice<F>::LocalDatabase
build_local_database_fake(ABCTuple const& abc_prev,
                          ABCTuple const& abc,
                          size_t rank,
                          SliceUnion<F>* u) {

  typename Slice<F>::LocalDatabase result;

  // vector of type x tuple
  auto const needed = u->neededSlices(abc);
  auto const needed_prev = u->neededSlices(abc_prev);

  for (auto const& pair: needed) {
    auto const type = pair.first;
    auto const tuple = pair.second;
    auto const from = u->rankMap.find(abc, type);

    // Try to find in the previously needed slices
    // one that exactly matches the tuple.
    // It does not necessarily have to match the type.
    //
    // If we find it, then it means that the fake rank
    // will mark it as recycled. This covers
    // the finding of Ready slices and Recycled slices.
    {
      auto const& it
        = std::find_if(needed_prev.begin(), needed_prev.end(),
                       [&tuple, &type](typename Slice<F>::Ty_x_Tu const& o) {
                         return o.second == tuple;
                       });

      if (it != needed_prev.end()) {
        typename Slice<F>::Info info;
        info.tuple = tuple;
        info.type = type;
        info.from = from;
        info.state = Slice<F>::Recycled;
        result.push_back({u->name, info});
        continue;
      }
    }

    {
      typename Slice<F>::Info info;
      info.type = type;
      info.tuple = tuple;
      info.from = from;

      // Handle self sufficiency
      info.state = rank == from.rank
                 ? Slice<F>::SelfSufficient
                 : Slice<F>::Fetch
                 ;
      result.push_back({u->name, info});
      continue;
    }

  }

  return result;

}

template <typename F>
typename Slice<F>::Database
naiveDatabase(Unions<F> &unions,
              size_t nv,
              size_t np,
              size_t iteration,
              MPI_Comm const& c) {

  using Database = typename Slice<F>::Database;
  Database db;

#ifdef ATRIP_NAIVE_SLOW
  WITH_CHRONO("db:comm:naive:tuples",
              const auto tuples = get_nth_naive_tuples(nv,
                                                       np,
                                                       iteration);
              const auto prev_tuples = get_nth_naive_tuples(nv,
                                                            np,
                                                            iteration - 1);
              )
#else
  WITH_CHRONO("db:comm:naive:tuples",
              const auto tuples = nth_atrip_distributed(iteration,
                                                        nv,
                                                        np);
              const auto prev_tuples = nth_atrip_distributed(iteration - 1,
                                                             nv,
                                                             np);
              )
#endif

  for (size_t rank = 0; rank < np; rank++) {
    auto abc = tuples[rank];
    typename Slice<F>::LocalDatabase ldb;

    for (auto const& tensor: unions) {
      if (rank == Atrip::rank) {
        auto const& tensorDb = tensor->buildLocalDatabase(abc);
        ldb.insert(ldb.end(), tensorDb.begin(), tensorDb.end());
      } else {
        auto const& tensorDb
          = build_local_database_fake(prev_tuples[rank],
                                      abc,
                                      rank,
                                      tensor);
        ldb.insert(ldb.end(), tensorDb.begin(), tensorDb.end());
      }
    }

    db.insert(db.end(), ldb.begin(), ldb.end());

  }

  return db;
}

template
typename Slice<double>::Database
naiveDatabase<double>(Unions<double> &unions,
                      size_t nv,
                      size_t np,
                      size_t iteration,
                      MPI_Comm const& c);

template
typename Slice<Complex>::Database
naiveDatabase<Complex>(Unions<Complex> &unions,
                       size_t nv,
                       size_t np,
                       size_t iteration,
                       MPI_Comm const& c);

} // namespace atrip
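// Note that naiveDatabase never uses its communicator argument: since any
// rank can recompute which tuple every other rank handles at a given
// iteration (nth_atrip_distributed) and which slices that rank would
// declare (build_local_database_fake), the global database is assembled
// locally instead of being gathered over MPI.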
@ -16,96 +16,13 @@
|
|||||||
#include<atrip/Equations.hpp>
|
#include<atrip/Equations.hpp>
|
||||||
|
|
||||||
#include<atrip/CUDA.hpp>
|
#include<atrip/CUDA.hpp>
|
||||||
|
#include<atrip/Operations.hpp>
|
||||||
|
|
||||||
namespace atrip {
|
namespace atrip {
|
||||||
// Prolog:2 ends here
|
// Prolog:2 ends here
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
#ifdef HAVE_CUDA
|
|
||||||
namespace cuda {
|
|
||||||
|
|
||||||
// cuda kernels
|
|
||||||
|
|
||||||
template <typename F>
|
|
||||||
__global__
|
|
||||||
void zeroing(F* a, size_t n) {
|
|
||||||
F zero = {0};
|
|
||||||
for (size_t i = 0; i < n; i++) {
|
|
||||||
a[i] = zero;
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
////
|
|
||||||
template <typename F>
|
|
||||||
__device__
|
|
||||||
F maybeConjugateScalar(const F a);
|
|
||||||
|
|
||||||
template <>
|
|
||||||
__device__
|
|
||||||
double maybeConjugateScalar(const double a) { return a; }
|
|
||||||
|
|
||||||
template <>
|
|
||||||
__device__
|
|
||||||
cuDoubleComplex
|
|
||||||
maybeConjugateScalar(const cuDoubleComplex a) {
|
|
||||||
return {a.x, -a.y};
|
|
||||||
}
|
|
||||||
|
|
||||||
template <typename F>
|
|
||||||
__global__
|
|
||||||
void maybeConjugate(F* to, F* from, size_t n) {
|
|
||||||
for (size_t i = 0; i < n; ++i) {
|
|
||||||
to[i] = maybeConjugateScalar<F>(from[i]);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
template <typename F>
|
|
||||||
__global__
|
|
||||||
void reorder(F* to, F* from, size_t size, size_t I, size_t J, size_t K) {
|
|
||||||
size_t idx = 0;
|
|
||||||
const size_t IDX = I + J*size + K*size*size;
|
|
||||||
for (size_t k = 0; k < size; k++)
|
|
||||||
for (size_t j = 0; j < size; j++)
|
|
||||||
for (size_t i = 0; i < size; i++, idx++)
|
|
||||||
to[idx] += from[IDX];
|
|
||||||
}
|
|
||||||
|
|
||||||
// I mean, really CUDA... really!?
|
|
||||||
template <typename F>
|
|
||||||
__device__
|
|
||||||
F multiply(const F &a, const F &b);
|
|
||||||
template <>
|
|
||||||
__device__
|
|
||||||
double multiply(const double &a, const double &b) { return a * b; }
|
|
||||||
|
|
||||||
template <>
|
|
||||||
__device__
|
|
||||||
cuDoubleComplex multiply(const cuDoubleComplex &a, const cuDoubleComplex &b) {
|
|
||||||
return
|
|
||||||
{a.x * b.x - a.y * b.y,
|
|
||||||
a.x * b.y + a.y * b.x};
|
|
||||||
}
|
|
||||||
|
|
||||||
template <typename F>
|
|
||||||
__device__
|
|
||||||
void sum_in_place(F* to, const F* from);
|
|
||||||
|
|
||||||
template <>
|
|
||||||
__device__
|
|
||||||
void sum_in_place(double* to, const double *from) { *to += *from; }
|
|
||||||
|
|
||||||
template <>
|
|
||||||
__device__
|
|
||||||
void sum_in_place(cuDoubleComplex* to, const cuDoubleComplex* from) {
|
|
||||||
to->x += from->x;
|
|
||||||
to->y += from->y;
|
|
||||||
}
|
|
||||||
|
|
||||||
};
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#if defined(HAVE_CUDA)
|
#if defined(HAVE_CUDA)
|
||||||
#define FOR_K() \
|
#define FOR_K() \
|
||||||
for (size_t kmin = blockIdx.x * blockDim.x + threadIdx.x, \
|
for (size_t kmin = blockIdx.x * blockDim.x + threadIdx.x, \
|
||||||
@ -133,7 +50,7 @@ namespace cuda {
|
|||||||
_REORDER_BODY_(__VA_ARGS__) \
|
_REORDER_BODY_(__VA_ARGS__) \
|
||||||
}
|
}
|
||||||
#if defined(HAVE_CUDA)
|
#if defined(HAVE_CUDA)
|
||||||
#define GO(__TO, __FROM) cuda::sum_in_place<F>(&__TO, &__FROM);
|
#define GO(__TO, __FROM) acc::sum_in_place<F>(&__TO, &__FROM);
|
||||||
#else
|
#else
|
||||||
#define GO(__TO, __FROM) __TO += __FROM;
|
#define GO(__TO, __FROM) __TO += __FROM;
|
||||||
#endif
|
#endif
|
||||||
@ -156,7 +73,6 @@ namespace cuda {
|
|||||||
* in order to have an argument in the signature of
|
* in order to have an argument in the signature of
|
||||||
* the function that helps the compiler know which
|
* the function that helps the compiler know which
|
||||||
* instantiation it should take.
|
* instantiation it should take.
|
||||||
*
|
|
||||||
*/
|
*/
|
||||||
template <typename F, reordering_t R>
|
template <typename F, reordering_t R>
|
||||||
struct reorder_proxy {};
|
struct reorder_proxy {};
|
||||||
@ -180,162 +96,205 @@ namespace cuda {
|
|||||||
#undef _IJK_
|
#undef _IJK_
|
||||||
#undef GO
|
#undef GO
|
||||||
|
|
||||||
|
#if defined(HAVE_CUDA)
|
||||||
|
# define MIN(a, b) min((a), (b))
|
||||||
|
#else
|
||||||
|
# define MIN(a, b) std::min((a), (b))
|
||||||
|
#endif
|
||||||
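// MIN dispatches to the device intrinsic min() when building with CUDA and
// to std::min on the host, so the blocked loops in the energy routines
// below stay identical in both builds.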
|
|
||||||
|
|
||||||
// [[file:~/cuda/atrip/atrip.org::*Energy][Energy:2]]
|
// [[file:~/cuda/atrip/atrip.org::*Energy][Energy:2]]
|
||||||
template <typename F>
|
template <typename F>
|
||||||
double getEnergyDistinct
|
__MAYBE_GLOBAL__
|
||||||
|
void getEnergyDistinct
|
||||||
( F const epsabc
|
( F const epsabc
|
||||||
, size_t const No
|
, size_t const No
|
||||||
, F* const epsi
|
, F* const epsi
|
||||||
, F* const Tijk
|
, F* const Tijk
|
||||||
, F* const Zijk
|
, F* const Zijk
|
||||||
|
, double* energy
|
||||||
) {
|
) {
|
||||||
constexpr size_t blockSize=16;
|
constexpr size_t blockSize=16;
|
||||||
F energy(0.);
|
F _energy = {0.};
|
||||||
for (size_t kk=0; kk<No; kk+=blockSize){
|
for (size_t kk=0; kk<No; kk+=blockSize){
|
||||||
const size_t kend( std::min(No, kk+blockSize) );
|
const size_t kend( MIN(No, kk+blockSize) );
|
||||||
for (size_t jj(kk); jj<No; jj+=blockSize){
|
for (size_t jj(kk); jj<No; jj+=blockSize){
|
||||||
const size_t jend( std::min( No, jj+blockSize) );
|
const size_t jend( MIN( No, jj+blockSize) );
|
||||||
for (size_t ii(jj); ii<No; ii+=blockSize){
|
for (size_t ii(jj); ii<No; ii+=blockSize){
|
||||||
const size_t iend( std::min( No, ii+blockSize) );
|
const size_t iend( MIN( No, ii+blockSize) );
|
||||||
for (size_t k(kk); k < kend; k++){
|
for (size_t k(kk); k < kend; k++){
|
||||||
const F ek(epsi[k]);
|
const F ek(epsi[k]);
|
||||||
const size_t jstart = jj > k ? jj : k;
|
const size_t jstart = jj > k ? jj : k;
|
||||||
for (size_t j(jstart); j < jend; j++){
|
for (size_t j(jstart); j < jend; j++){
|
||||||
F const ej(epsi[j]);
|
F const ej(epsi[j]);
|
||||||
F const facjk = j == k ? F(0.5) : F(1.0);
|
F const facjk = j == k ? F{0.5} : F{1.0};
|
||||||
size_t istart = ii > j ? ii : j;
|
size_t istart = ii > j ? ii : j;
|
||||||
for (size_t i(istart); i < iend; i++){
|
for (size_t i(istart); i < iend; i++){
|
||||||
const F
|
const F
|
||||||
ei(epsi[i])
|
ei(epsi[i])
|
||||||
, facij = i == j ? F(0.5) : F(1.0)
|
, facij = i == j ? F{0.5} : F{1.0}
|
||||||
, denominator(epsabc - ei - ej - ek)
|
, eijk(acc::add(acc::add(ei, ej), ek))
|
||||||
|
, denominator(acc::sub(epsabc, eijk))
|
||||||
, U(Zijk[i + No*j + No*No*k])
|
, U(Zijk[i + No*j + No*No*k])
|
||||||
, V(Zijk[i + No*k + No*No*j])
|
, V(Zijk[i + No*k + No*No*j])
|
||||||
, W(Zijk[j + No*i + No*No*k])
|
, W(Zijk[j + No*i + No*No*k])
|
||||||
, X(Zijk[j + No*k + No*No*i])
|
, X(Zijk[j + No*k + No*No*i])
|
||||||
, Y(Zijk[k + No*i + No*No*j])
|
, Y(Zijk[k + No*i + No*No*j])
|
||||||
, Z(Zijk[k + No*j + No*No*i])
|
, Z(Zijk[k + No*j + No*No*i])
|
||||||
, A(maybeConjugate<F>(Tijk[i + No*j + No*No*k]))
|
, A(acc::maybeConjugateScalar(Tijk[i + No*j + No*No*k]))
|
||||||
, B(maybeConjugate<F>(Tijk[i + No*k + No*No*j]))
|
, B(acc::maybeConjugateScalar(Tijk[i + No*k + No*No*j]))
|
||||||
, C(maybeConjugate<F>(Tijk[j + No*i + No*No*k]))
|
, C(acc::maybeConjugateScalar(Tijk[j + No*i + No*No*k]))
|
||||||
, D(maybeConjugate<F>(Tijk[j + No*k + No*No*i]))
|
, D(acc::maybeConjugateScalar(Tijk[j + No*k + No*No*i]))
|
||||||
, E(maybeConjugate<F>(Tijk[k + No*i + No*No*j]))
|
, E(acc::maybeConjugateScalar(Tijk[k + No*i + No*No*j]))
|
||||||
, _F(maybeConjugate<F>(Tijk[k + No*j + No*No*i]))
|
, _F(acc::maybeConjugateScalar(Tijk[k + No*j + No*No*i]))
|
||||||
, value
|
, AU = acc::prod(A, U)
|
||||||
= 3.0 * ( A * U
|
, BV = acc::prod(B, V)
|
||||||
+ B * V
|
, CW = acc::prod(C, W)
|
||||||
+ C * W
|
, DX = acc::prod(D, X)
|
||||||
+ D * X
|
, EY = acc::prod(E, Y)
|
||||||
+ E * Y
|
, FZ = acc::prod(_F, Z)
|
||||||
+ _F * Z )
|
, UXY = acc::add(U, acc::add(X, Y))
|
||||||
+ ( ( U + X + Y )
|
, VWZ = acc::add(V, acc::add(W, Z))
|
||||||
- 2.0 * ( V + W + Z )
|
, ADE = acc::add(A, acc::add(D, E))
|
||||||
) * ( A + D + E )
|
, BCF = acc::add(B, acc::add(C, _F))
|
||||||
+ ( ( V + W + Z )
|
// I just might as well write this in CL
|
||||||
- 2.0 * ( U + X + Y )
|
, _first = acc::add(AU,
|
||||||
) * ( B + C + _F )
|
acc::add(BV,
|
||||||
|
acc::add(CW,
|
||||||
|
acc::add(DX,
|
||||||
|
acc::add(EY, FZ)))))
|
||||||
|
, _second = acc::prod(acc::sub(UXY,
|
||||||
|
acc::prod(F{-2.0}, VWZ)),
|
||||||
|
ADE)
|
||||||
|
, _third = acc::prod(acc::sub(VWZ,
|
||||||
|
acc::prod(F{-2.0}, UXY)),
|
||||||
|
BCF)
|
||||||
|
, value = acc::add(acc::prod(F{3.0}, _first),
|
||||||
|
acc::add(_second,
|
||||||
|
_third))
|
||||||
|
, _loop_energy = acc::prod(acc::prod(F{2.0}, value),
|
||||||
|
acc::div(acc::prod(facjk, facij),
|
||||||
|
denominator))
|
||||||
;
|
;
|
||||||
energy += 2.0 * value / denominator * facjk * facij;
|
acc::sum_in_place(&_energy, &_loop_energy);
|
||||||
} // i
|
} // i
|
||||||
} // j
|
} // j
|
||||||
} // k
|
} // k
|
||||||
} // ii
|
} // ii
|
||||||
} // jj
|
} // jj
|
||||||
} // kk
|
} // kk
|
||||||
return std::real(energy);
|
const double real_part = acc::real(_energy);
|
||||||
|
acc::sum_in_place(energy, &real_part);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
template <typename F>
|
template <typename F>
|
||||||
double getEnergySame
|
__MAYBE_GLOBAL__
|
||||||
|
void getEnergySame
|
||||||
( F const epsabc
|
( F const epsabc
|
||||||
, size_t const No
|
, size_t const No
|
||||||
, F* const epsi
|
, F* const epsi
|
||||||
, F* const Tijk
|
, F* const Tijk
|
||||||
, F* const Zijk
|
, F* const Zijk
|
||||||
|
, double* energy
|
||||||
) {
|
) {
|
||||||
constexpr size_t blockSize = 16;
|
constexpr size_t blockSize = 16;
|
||||||
F energy = F(0.);
|
F _energy = F{0.};
|
||||||
for (size_t kk=0; kk<No; kk+=blockSize){
|
for (size_t kk=0; kk<No; kk+=blockSize){
|
||||||
const size_t kend( std::min( kk+blockSize, No) );
|
const size_t kend( MIN( kk+blockSize, No) );
|
||||||
for (size_t jj(kk); jj<No; jj+=blockSize){
|
for (size_t jj(kk); jj<No; jj+=blockSize){
|
||||||
const size_t jend( std::min( jj+blockSize, No) );
|
const size_t jend( MIN( jj+blockSize, No) );
|
||||||
for (size_t ii(jj); ii<No; ii+=blockSize){
|
for (size_t ii(jj); ii<No; ii+=blockSize){
|
||||||
const size_t iend( std::min( ii+blockSize, No) );
|
const size_t iend( MIN( ii+blockSize, No) );
|
||||||
for (size_t k(kk); k < kend; k++){
|
for (size_t k(kk); k < kend; k++){
|
||||||
const F ek(epsi[k]);
|
const F ek(epsi[k]);
|
||||||
const size_t jstart = jj > k ? jj : k;
|
const size_t jstart = jj > k ? jj : k;
|
||||||
for(size_t j(jstart); j < jend; j++){
|
for(size_t j(jstart); j < jend; j++){
|
||||||
const F facjk( j == k ? F(0.5) : F(1.0));
|
const F facjk( j == k ? F{0.5} : F{1.0});
|
||||||
const F ej(epsi[j]);
|
const F ej(epsi[j]);
|
||||||
const size_t istart = ii > j ? ii : j;
|
const size_t istart = ii > j ? ii : j;
|
||||||
for(size_t i(istart); i < iend; i++){
|
for(size_t i(istart); i < iend; i++){
|
||||||
const F
|
const F
|
||||||
ei(epsi[i])
|
ei(epsi[i])
|
||||||
, facij ( i==j ? F(0.5) : F(1.0))
|
, facij ( i==j ? F{0.5} : F{1.0})
|
||||||
, denominator(epsabc - ei - ej - ek)
|
, eijk(acc::add(acc::add(ei, ej), ek))
|
||||||
|
, denominator(acc::sub(epsabc, eijk))
|
||||||
, U(Zijk[i + No*j + No*No*k])
|
, U(Zijk[i + No*j + No*No*k])
|
||||||
, V(Zijk[j + No*k + No*No*i])
|
, V(Zijk[j + No*k + No*No*i])
|
||||||
, W(Zijk[k + No*i + No*No*j])
|
, W(Zijk[k + No*i + No*No*j])
|
||||||
, A(maybeConjugate<F>(Tijk[i + No*j + No*No*k]))
|
, A(acc::maybeConjugateScalar(Tijk[i + No*j + No*No*k]))
|
||||||
, B(maybeConjugate<F>(Tijk[j + No*k + No*No*i]))
|
, B(acc::maybeConjugateScalar(Tijk[j + No*k + No*No*i]))
|
||||||
, C(maybeConjugate<F>(Tijk[k + No*i + No*No*j]))
|
, C(acc::maybeConjugateScalar(Tijk[k + No*i + No*No*j]))
|
||||||
, value
|
, ABC = acc::add(A, acc::add(B, C))
|
||||||
= F(3.0) * ( A * U
|
, UVW = acc::add(U, acc::add(V, W))
|
||||||
+ B * V
|
, AU = acc::prod(A, U)
|
||||||
+ C * W
|
, BV = acc::prod(B, V)
|
||||||
)
|
, CW = acc::prod(C, W)
|
||||||
- ( A + B + C ) * ( U + V + W )
|
, AU_and_BV_and_CW = acc::add(acc::add(AU, BV), CW)
|
||||||
|
, value = acc::sub(acc::prod(F{3.0}, AU_and_BV_and_CW),
|
||||||
|
acc::prod(ABC, UVW))
|
||||||
|
, _loop_energy = acc::prod(acc::prod(F{2.0}, value),
|
||||||
|
acc::div(acc::prod(facjk, facij),
|
||||||
|
denominator))
|
||||||
;
|
;
|
||||||
energy += F(2.0) * value / denominator * facjk * facij;
|
|
||||||
|
acc::sum_in_place(&_energy, &_loop_energy);
|
||||||
} // i
|
} // i
|
||||||
} // j
|
} // j
|
||||||
} // k
|
} // k
|
||||||
} // ii
|
} // ii
|
||||||
} // jj
|
} // jj
|
||||||
} // kk
|
} // kk
|
||||||
return std::real(energy);
|
const double real_part = acc::real(_energy);
|
||||||
|
acc::sum_in_place(energy, &real_part);
|
||||||
}
|
}
|
||||||
// Energy:2 ends here
|
// Energy:2 ends here
|
||||||
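// The kernel bodies above spell out the arithmetic through acc::add,
// acc::sub, acc::prod, acc::div, acc::real and acc::sum_in_place rather
// than plain operators, so that the very same source also compiles as a
// CUDA kernel acting on cuDoubleComplex, which has no operator overloads
// in device code.  The real definitions live in atrip/Operations.hpp; a
// minimal sketch of what the double overloads presumably look like
// (assumption, for illustration only; device qualifiers omitted):
//
//   namespace acc {
//     inline double add (double a, double b) { return a + b; }
//     inline double sub (double a, double b) { return a - b; }
//     inline double prod(double a, double b) { return a * b; }
//     inline double div (double a, double b) { return a / b; }
//     inline double real(double a)           { return a; }
//     inline void   sum_in_place(double* to, const double* from) { *to += *from; }
//   }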
|
|
||||||
// [[file:~/cuda/atrip/atrip.org::*Energy][Energy:3]]
|
// [[file:~/cuda/atrip/atrip.org::*Energy][Energy:3]]
|
||||||
// instantiate double
|
// instantiate double
|
||||||
template
|
template
|
||||||
double getEnergyDistinct
|
__MAYBE_GLOBAL__
|
||||||
( double const epsabc
|
void getEnergyDistinct
|
||||||
|
( DataFieldType<double> const epsabc
|
||||||
, size_t const No
|
, size_t const No
|
||||||
, double* const epsi
|
, DataFieldType<double>* const epsi
|
||||||
, double* const Tijk
|
, DataFieldType<double>* const Tijk
|
||||||
, double* const Zijk
|
, DataFieldType<double>* const Zijk
|
||||||
|
, DataFieldType<double>* energy
|
||||||
);
|
);
|
||||||
|
|
||||||
template
|
template
|
||||||
double getEnergySame
|
__MAYBE_GLOBAL__
|
||||||
( double const epsabc
|
void getEnergySame
|
||||||
|
( DataFieldType<double> const epsabc
|
||||||
, size_t const No
|
, size_t const No
|
||||||
, double* const epsi
|
, DataFieldType<double>* const epsi
|
||||||
, double* const Tijk
|
, DataFieldType<double>* const Tijk
|
||||||
, double* const Zijk
|
, DataFieldType<double>* const Zijk
|
||||||
|
, DataFieldType<double>* energy
|
||||||
);
|
);
|
||||||
|
|
||||||
// instantiate Complex
|
// instantiate Complex
|
||||||
template
|
template
|
||||||
double getEnergyDistinct
|
__MAYBE_GLOBAL__
|
||||||
( Complex const epsabc
|
void getEnergyDistinct
|
||||||
|
( DataFieldType<Complex> const epsabc
|
||||||
, size_t const No
|
, size_t const No
|
||||||
, Complex* const epsi
|
, DataFieldType<Complex>* const epsi
|
||||||
, Complex* const Tijk
|
, DataFieldType<Complex>* const Tijk
|
||||||
, Complex* const Zijk
|
, DataFieldType<Complex>* const Zijk
|
||||||
|
, DataFieldType<double>* energy
|
||||||
);
|
);
|
||||||
|
|
||||||
template
|
template
|
||||||
double getEnergySame
|
__MAYBE_GLOBAL__
|
||||||
( Complex const epsabc
|
void getEnergySame
|
||||||
|
( DataFieldType<Complex> const epsabc
|
||||||
, size_t const No
|
, size_t const No
|
||||||
, Complex* const epsi
|
, DataFieldType<Complex>* const epsi
|
||||||
, Complex* const Tijk
|
, DataFieldType<Complex>* const Tijk
|
||||||
, Complex* const Zijk
|
, DataFieldType<Complex>* const Zijk
|
||||||
|
, DataFieldType<double>* energy
|
||||||
);
|
);
|
||||||
// Energy:3 ends here
|
// Energy:3 ends here
|
||||||
|
|
||||||
@ -361,18 +320,26 @@ double getEnergySame
|
|||||||
const size_t ijk = i + j*No + k*NoNo;
|
const size_t ijk = i + j*No + k*NoNo;
|
||||||
|
|
||||||
#ifdef HAVE_CUDA
|
#ifdef HAVE_CUDA
|
||||||
# define GO(__TPH, __VABIJ) \
|
|
||||||
{ \
|
#define GO(__TPH, __VABIJ) \
|
||||||
const DataFieldType<F> product \
|
do { \
|
||||||
= cuda::multiply<DataFieldType<F>>((__TPH), (__VABIJ)); \
|
const DataFieldType<F> \
|
||||||
cuda::sum_in_place<DataFieldType<F>>(&Zijk[ijk], &product); \
|
product = acc::prod<DataFieldType<F>>((__TPH), \
|
||||||
}
|
(__VABIJ)); \
|
||||||
|
acc::sum_in_place<DataFieldType<F>>(&Zijk[ijk], \
|
||||||
|
&product); \
|
||||||
|
} while (0)
|
||||||
|
|
||||||
#else
|
#else
|
||||||
# define GO(__TPH, __VABIJ) Zijk[ijk] += (__TPH) * (__VABIJ);
|
|
||||||
|
#define GO(__TPH, __VABIJ) Zijk[ijk] += (__TPH) * (__VABIJ)
|
||||||
|
|
||||||
#endif
|
#endif
|
||||||
GO(Tph[ a + i * Nv ], VBCij[ j + k * No ])
|
|
||||||
GO(Tph[ b + j * Nv ], VACij[ i + k * No ])
|
GO(Tph[ a + i * Nv ], VBCij[ j + k * No ]);
|
||||||
GO(Tph[ c + k * Nv ], VABij[ i + j * No ])
|
GO(Tph[ b + j * Nv ], VACij[ i + k * No ]);
|
||||||
|
GO(Tph[ c + k * Nv ], VABij[ i + j * No ]);
|
||||||
|
|
||||||
#undef GO
|
#undef GO
|
||||||
} // for loop j
|
} // for loop j
|
||||||
}
|
}
|
||||||
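// The CUDA variant of GO above is wrapped in do { ... } while (0) so that
// the multi-statement macro expands to exactly one statement and can be
// followed by a ';' anywhere a statement is allowed.  The usual pitfall it
// avoids, in a nutshell:
//
//   #define BAD(x)  stmt_a(x); stmt_b(x)
//   #define GOOD(x) do { stmt_a(x); stmt_b(x); } while (0)
//
//   if (cond) BAD(x);  else other();   // stmt_b escapes the if, the else breaks
//   if (cond) GOOD(x); else other();   // expands to a single statement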
@ -434,8 +401,12 @@ double getEnergySame
|
|||||||
// -- TIJK
|
// -- TIJK
|
||||||
// , DataPtr<F> Tijk_
|
// , DataPtr<F> Tijk_
|
||||||
, DataFieldType<F>* Tijk_
|
, DataFieldType<F>* Tijk_
|
||||||
|
#if defined(HAVE_CUDA)
|
||||||
|
// -- tmp buffers
|
||||||
|
, DataFieldType<F>* _t_buffer
|
||||||
|
, DataFieldType<F>* _vhhh
|
||||||
|
#endif
|
||||||
) {
|
) {
|
||||||
|
|
||||||
const size_t a = abc[0], b = abc[1], c = abc[2]
|
const size_t a = abc[0], b = abc[1], c = abc[2]
|
||||||
, NoNo = No*No
|
, NoNo = No*No
|
||||||
;
|
;
|
||||||
@ -451,7 +422,7 @@ double getEnergySame
|
|||||||
>{}, \
|
>{}, \
|
||||||
No, \
|
No, \
|
||||||
Tijk, \
|
Tijk, \
|
||||||
_t_buffer);
|
_t_buffer)
|
||||||
#define DGEMM_PARTICLES(__A, __B) \
|
#define DGEMM_PARTICLES(__A, __B) \
|
||||||
atrip::xgemm<F>("T", \
|
atrip::xgemm<F>("T", \
|
||||||
"N", \
|
"N", \
|
||||||
@ -482,10 +453,17 @@ double getEnergySame
|
|||||||
(int const*)&NoNo \
|
(int const*)&NoNo \
|
||||||
)
|
)
|
||||||
#define MAYBE_CONJ(_conj, _buffer) \
|
#define MAYBE_CONJ(_conj, _buffer) \
|
||||||
cuda::maybeConjugate<<< \
|
do { \
|
||||||
|
acc::maybeConjugate<<< \
|
||||||
|
\
|
||||||
Atrip::kernelDimensions.ooo.blocks, \
|
Atrip::kernelDimensions.ooo.blocks, \
|
||||||
|
\
|
||||||
Atrip::kernelDimensions.ooo.threads \
|
Atrip::kernelDimensions.ooo.threads \
|
||||||
>>>((DataFieldType<F>*)_conj, (DataFieldType<F>*)_buffer, NoNoNo);
|
\
|
||||||
|
>>>((DataFieldType<F>*)_conj, \
|
||||||
|
(DataFieldType<F>*)_buffer, \
|
||||||
|
NoNoNo); \
|
||||||
|
} while (0)
|
||||||
|
|
||||||
|
|
||||||
// END CUDA ////////////////////////////////////////////////////////////////////
|
// END CUDA ////////////////////////////////////////////////////////////////////
|
||||||
@ -500,7 +478,9 @@ double getEnergySame
|
|||||||
#define REORDER(__II, __JJ, __KK) \
|
#define REORDER(__II, __JJ, __KK) \
|
||||||
reorder(reorder_proxy<DataFieldType<F>, \
|
reorder(reorder_proxy<DataFieldType<F>, \
|
||||||
__II ## __JJ ## __KK >{}, \
|
__II ## __JJ ## __KK >{}, \
|
||||||
No, Tijk, _t_buffer);
|
No, \
|
||||||
|
Tijk, \
|
||||||
|
_t_buffer)
|
||||||
#define DGEMM_PARTICLES(__A, __B) \
|
#define DGEMM_PARTICLES(__A, __B) \
|
||||||
atrip::xgemm<F>("T", \
|
atrip::xgemm<F>("T", \
|
||||||
"N", \
|
"N", \
|
||||||
@ -532,28 +512,36 @@ double getEnergySame
|
|||||||
(int const*)&NoNo \
|
(int const*)&NoNo \
|
||||||
)
|
)
|
||||||
#define MAYBE_CONJ(_conj, _buffer) \
|
#define MAYBE_CONJ(_conj, _buffer) \
|
||||||
for (size_t __i = 0; __i < NoNoNo; ++__i) \
|
do { \
|
||||||
_conj[__i] = maybeConjugate<F>(_buffer[__i]);
|
for (size_t __i = 0; __i < NoNoNo; ++__i) { \
|
||||||
|
_conj[__i] \
|
||||||
|
= maybeConjugate<F>(_buffer[__i]); \
|
||||||
|
} \
|
||||||
|
} while (0)
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
F one{1.0}, m_one{-1.0}, zero{0.0};
|
F one{1.0}, m_one{-1.0}, zero{0.0};
|
||||||
const size_t NoNoNo = No*NoNo;
|
const size_t NoNoNo = No*NoNo;
|
||||||
#ifdef HAVE_CUDA
|
#ifdef HAVE_CUDA
|
||||||
DataFieldType<F>* _t_buffer;
|
// DataFieldType<F>* _t_buffer;
|
||||||
DataFieldType<F>* _vhhh;
|
// DataFieldType<F>* _vhhh;
|
||||||
WITH_CHRONO("double:cuda:alloc",
|
// WITH_CHRONO("double:cuda:alloc",
|
||||||
_CHECK_CUDA_SUCCESS("Allocating _t_buffer",
|
// _CHECK_CUDA_SUCCESS("Allocating _t_buffer",
|
||||||
cuMemAlloc((CUdeviceptr*)&_t_buffer,
|
// cuMemAlloc((CUdeviceptr*)&_t_buffer,
|
||||||
NoNoNo * sizeof(DataFieldType<F>)));
|
// NoNoNo * sizeof(DataFieldType<F>)));
|
||||||
_CHECK_CUDA_SUCCESS("Allocating _vhhh",
|
// _CHECK_CUDA_SUCCESS("Allocating _vhhh",
|
||||||
cuMemAlloc((CUdeviceptr*)&_vhhh,
|
// cuMemAlloc((CUdeviceptr*)&_vhhh,
|
||||||
NoNoNo * sizeof(DataFieldType<F>)));
|
// NoNoNo * sizeof(DataFieldType<F>)));
|
||||||
)
|
// )
|
||||||
|
#if !defined(ATRIP_ONLY_DGEMM)
|
||||||
|
// we still have to zero this
|
||||||
const size_t
|
const size_t
|
||||||
bs = Atrip::kernelDimensions.ooo.blocks,
|
bs = Atrip::kernelDimensions.ooo.blocks,
|
||||||
ths = Atrip::kernelDimensions.ooo.threads;
|
ths = Atrip::kernelDimensions.ooo.threads;
|
||||||
//cuda::zeroing<<<bs, ths>>>((DataFieldType<F>*)_t_buffer, NoNoNo);
|
acc::zeroing<<<bs, ths>>>((DataFieldType<F>*)_t_buffer, NoNoNo);
|
||||||
//cuda::zeroing<<<bs, ths>>>((DataFieldType<F>*)_vhhh, NoNoNo);
|
acc::zeroing<<<bs, ths>>>((DataFieldType<F>*)_vhhh, NoNoNo);
|
||||||
|
#endif
|
||||||
|
|
||||||
#else
|
#else
|
||||||
DataFieldType<F>* _t_buffer = (DataFieldType<F>*)malloc(NoNoNo * sizeof(F));
|
DataFieldType<F>* _t_buffer = (DataFieldType<F>*)malloc(NoNoNo * sizeof(F));
|
||||||
DataFieldType<F>* _vhhh = (DataFieldType<F>*)malloc(NoNoNo * sizeof(F));
|
DataFieldType<F>* _vhhh = (DataFieldType<F>*)malloc(NoNoNo * sizeof(F));
|
||||||
@ -565,55 +553,65 @@ double getEnergySame
|
|||||||
#endif
|
#endif
|
||||||
|
|
||||||
// Set Tijk to zero
|
// Set Tijk to zero
|
||||||
#ifdef HAVE_CUDA
|
#if defined(HAVE_CUDA) && !defined(ATRIP_ONLY_DGEMM)
|
||||||
WITH_CHRONO("double:reorder",
|
WITH_CHRONO("double:reorder",
|
||||||
//cuda::zeroing<<<bs, ths>>>((DataFieldType<F>*)Tijk,
|
acc::zeroing<<<bs, ths>>>((DataFieldType<F>*)Tijk,
|
||||||
//NoNoNo);
|
NoNoNo);
|
||||||
)
|
)
|
||||||
#else
|
#endif
|
||||||
|
|
||||||
|
#if !defined(HAVE_CUDA)
|
||||||
WITH_CHRONO("double:reorder",
|
WITH_CHRONO("double:reorder",
|
||||||
for (size_t k = 0; k < NoNoNo; k++) {
|
for (size_t k = 0; k < NoNoNo; k++) {
|
||||||
Tijk[k] = DataFieldType<F>{0.0};
|
Tijk[k] = DataFieldType<F>{0.0};
|
||||||
})
|
})
|
||||||
#endif
|
#endif /* !defined(HAVE_CUDA) */
|
||||||
|
|
||||||
|
|
||||||
|
#if defined(ATRIP_ONLY_DGEMM)
|
||||||
|
#undef MAYBE_CONJ
|
||||||
|
#undef REORDER
|
||||||
|
#define MAYBE_CONJ(a, b) do {} while(0)
|
||||||
|
#define REORDER(i, j, k) do {} while(0)
|
||||||
|
#endif /* defined(ATRIP_ONLY_DGEMM) */
|
||||||
|
|
||||||
// HOLES
|
// HOLES
|
||||||
WITH_CHRONO("doubles:holes",
|
WITH_CHRONO("doubles:holes",
|
||||||
{
|
{
|
||||||
// VhhhC[i + k*No + L*NoNo] * TABhh[L + j*No]; H1
|
// VhhhC[i + k*No + L*NoNo] * TABhh[L + j*No]; H1
|
||||||
//MAYBE_CONJ(_vhhh, VhhhC)
|
MAYBE_CONJ(_vhhh, VhhhC);
|
||||||
WITH_CHRONO("doubles:holes:1",
|
WITH_CHRONO("doubles:holes:1",
|
||||||
DGEMM_HOLES(_vhhh, TABhh, "N");
|
DGEMM_HOLES(_vhhh, TABhh, "N");
|
||||||
//REORDER(I, K, J)
|
REORDER(I, K, J);
|
||||||
)
|
)
|
||||||
// VhhhC[j + k*No + L*NoNo] * TABhh[i + L*No]; H0
|
// VhhhC[j + k*No + L*NoNo] * TABhh[i + L*No]; H0
|
||||||
WITH_CHRONO("doubles:holes:2",
|
WITH_CHRONO("doubles:holes:2",
|
||||||
DGEMM_HOLES(_vhhh, TABhh, "T");
|
DGEMM_HOLES(_vhhh, TABhh, "T");
|
||||||
//REORDER(J, K, I)
|
REORDER(J, K, I);
|
||||||
)
|
)
|
||||||
|
|
||||||
// VhhhB[i + j*No + L*NoNo] * TAChh[L + k*No]; H5
|
// VhhhB[i + j*No + L*NoNo] * TAChh[L + k*No]; H5
|
||||||
//MAYBE_CONJ(_vhhh, VhhhB)
|
MAYBE_CONJ(_vhhh, VhhhB);
|
||||||
WITH_CHRONO("doubles:holes:3",
|
WITH_CHRONO("doubles:holes:3",
|
||||||
DGEMM_HOLES(_vhhh, TAChh, "N");
|
DGEMM_HOLES(_vhhh, TAChh, "N");
|
||||||
//REORDER(I, J, K)
|
REORDER(I, J, K);
|
||||||
)
|
)
|
||||||
// VhhhB[k + j*No + L*NoNo] * TAChh[i + L*No]; H3
|
// VhhhB[k + j*No + L*NoNo] * TAChh[i + L*No]; H3
|
||||||
WITH_CHRONO("doubles:holes:4",
|
WITH_CHRONO("doubles:holes:4",
|
||||||
DGEMM_HOLES(_vhhh, TAChh, "T");
|
DGEMM_HOLES(_vhhh, TAChh, "T");
|
||||||
//REORDER(K, J, I)
|
REORDER(K, J, I);
|
||||||
)
|
)
|
||||||
|
|
||||||
// VhhhA[j + i*No + L*NoNo] * TBChh[L + k*No]; H1
|
// VhhhA[j + i*No + L*NoNo] * TBChh[L + k*No]; H1
|
||||||
//MAYBE_CONJ(_vhhh, VhhhA)
|
MAYBE_CONJ(_vhhh, VhhhA);
|
||||||
WITH_CHRONO("doubles:holes:5",
|
WITH_CHRONO("doubles:holes:5",
|
||||||
DGEMM_HOLES(_vhhh, TBChh, "N");
|
DGEMM_HOLES(_vhhh, TBChh, "N");
|
||||||
//REORDER(J, I, K)
|
REORDER(J, I, K);
|
||||||
)
|
)
|
||||||
// VhhhA[k + i*No + L*NoNo] * TBChh[j + L*No]; H4
|
// VhhhA[k + i*No + L*NoNo] * TBChh[j + L*No]; H4
|
||||||
WITH_CHRONO("doubles:holes:6",
|
WITH_CHRONO("doubles:holes:6",
|
||||||
DGEMM_HOLES(_vhhh, TBChh, "T");
|
DGEMM_HOLES(_vhhh, TBChh, "T");
|
||||||
//REORDER(K, I, J)
|
REORDER(K, I, J);
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
@ -625,32 +623,32 @@ double getEnergySame
|
|||||||
// TAphh[E + i*Nv + j*NoNv] * VBCph[E + k*Nv]; P0
|
// TAphh[E + i*Nv + j*NoNv] * VBCph[E + k*Nv]; P0
|
||||||
WITH_CHRONO("doubles:particles:1",
|
WITH_CHRONO("doubles:particles:1",
|
||||||
DGEMM_PARTICLES(TAphh, VBCph);
|
DGEMM_PARTICLES(TAphh, VBCph);
|
||||||
//REORDER(I, J, K)
|
REORDER(I, J, K);
|
||||||
)
|
)
|
||||||
// TAphh[E + i*Nv + k*NoNv] * VCBph[E + j*Nv]; P3
|
// TAphh[E + i*Nv + k*NoNv] * VCBph[E + j*Nv]; P3
|
||||||
WITH_CHRONO("doubles:particles:2",
|
WITH_CHRONO("doubles:particles:2",
|
||||||
DGEMM_PARTICLES(TAphh, VCBph);
|
DGEMM_PARTICLES(TAphh, VCBph);
|
||||||
//REORDER(I, K, J)
|
REORDER(I, K, J);
|
||||||
)
|
)
|
||||||
// TCphh[E + k*Nv + i*NoNv] * VABph[E + j*Nv]; P5
|
// TCphh[E + k*Nv + i*NoNv] * VABph[E + j*Nv]; P5
|
||||||
WITH_CHRONO("doubles:particles:3",
|
WITH_CHRONO("doubles:particles:3",
|
||||||
DGEMM_PARTICLES(TCphh, VABph);
|
DGEMM_PARTICLES(TCphh, VABph);
|
||||||
//REORDER(K, I, J)
|
REORDER(K, I, J);
|
||||||
)
|
)
|
||||||
// TCphh[E + k*Nv + j*NoNv] * VBAph[E + i*Nv]; P2
|
// TCphh[E + k*Nv + j*NoNv] * VBAph[E + i*Nv]; P2
|
||||||
WITH_CHRONO("doubles:particles:4",
|
WITH_CHRONO("doubles:particles:4",
|
||||||
DGEMM_PARTICLES(TCphh, VBAph);
|
DGEMM_PARTICLES(TCphh, VBAph);
|
||||||
//REORDER(K, J, I)
|
REORDER(K, J, I);
|
||||||
)
|
)
|
||||||
// TBphh[E + j*Nv + i*NoNv] * VACph[E + k*Nv]; P1
|
// TBphh[E + j*Nv + i*NoNv] * VACph[E + k*Nv]; P1
|
||||||
WITH_CHRONO("doubles:particles:5",
|
WITH_CHRONO("doubles:particles:5",
|
||||||
DGEMM_PARTICLES(TBphh, VACph);
|
DGEMM_PARTICLES(TBphh, VACph);
|
||||||
//REORDER(J, I, K)
|
REORDER(J, I, K);
|
||||||
)
|
)
|
||||||
// TBphh[E + j*Nv + k*NoNv] * VCAph[E + i*Nv]; P4
|
// TBphh[E + j*Nv + k*NoNv] * VCAph[E + i*Nv]; P4
|
||||||
WITH_CHRONO("doubles:particles:6",
|
WITH_CHRONO("doubles:particles:6",
|
||||||
DGEMM_PARTICLES(TBphh, VCAph);
|
DGEMM_PARTICLES(TBphh, VCAph);
|
||||||
//REORDER(J, K, I)
|
REORDER(J, K, I);
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
)
|
)
|
||||||
@ -659,16 +657,16 @@ double getEnergySame
|
|||||||
#ifdef HAVE_CUDA
|
#ifdef HAVE_CUDA
|
||||||
// we need to synchronize here since we need
|
// we need to synchronize here since we need
|
||||||
// the Tijk for next process in the pipeline
|
// the Tijk for next process in the pipeline
|
||||||
_CHECK_CUDA_SUCCESS("Synchronizing",
|
//_CHECK_CUDA_SUCCESS("Synchronizing",
|
||||||
cuCtxSynchronize());
|
// cuCtxSynchronize());
|
||||||
_CHECK_CUDA_SUCCESS("Freeing _vhhh",
|
//_CHECK_CUDA_SUCCESS("Freeing _vhhh",
|
||||||
cuMemFree((CUdeviceptr)_vhhh));
|
// cuMemFree((CUdeviceptr)_vhhh));
|
||||||
_CHECK_CUDA_SUCCESS("Freeing _t_buffer",
|
//_CHECK_CUDA_SUCCESS("Freeing _t_buffer",
|
||||||
cuMemFree((CUdeviceptr)_t_buffer));
|
// cuMemFree((CUdeviceptr)_t_buffer));
|
||||||
#else
|
#else
|
||||||
free(_vhhh);
|
free(_vhhh);
|
||||||
free(_t_buffer);
|
free(_t_buffer);
|
||||||
#endif
|
#endif /* defined(HAVE_CUDA) */
|
||||||
}
|
}
|
||||||
|
|
||||||
#undef REORDER
|
#undef REORDER
|
||||||
@ -719,7 +717,7 @@ double getEnergySame
|
|||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
#endif
|
#endif /* defined(ATRIP_USE_DGEMM) */
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@ -751,6 +749,12 @@ double getEnergySame
|
|||||||
, DataPtr<double> const TBChh
|
, DataPtr<double> const TBChh
|
||||||
// -- TIJK
|
// -- TIJK
|
||||||
, DataFieldType<double>* Tijk
|
, DataFieldType<double>* Tijk
|
||||||
|
#if defined(HAVE_CUDA)
|
||||||
|
// -- tmp buffers
|
||||||
|
, DataFieldType<double>* _t_buffer
|
||||||
|
, DataFieldType<double>* _vhhh
|
||||||
|
#endif
|
||||||
|
|
||||||
);
|
);
|
||||||
|
|
||||||
template
|
template
|
||||||
@ -779,6 +783,12 @@ double getEnergySame
|
|||||||
, DataPtr<Complex> const TBChh
|
, DataPtr<Complex> const TBChh
|
||||||
// -- TIJK
|
// -- TIJK
|
||||||
, DataFieldType<Complex>* Tijk
|
, DataFieldType<Complex>* Tijk
|
||||||
|
#if defined(HAVE_CUDA)
|
||||||
|
// -- tmp buffers
|
||||||
|
, DataFieldType<Complex>* _t_buffer
|
||||||
|
, DataFieldType<Complex>* _vhhh
|
||||||
|
#endif
|
||||||
|
|
||||||
);
|
);
|
||||||
// Doubles contribution:2 ends here
|
// Doubles contribution:2 ends here
|
||||||
|
|
||||||
|
src/atrip/Tuples.cxx (new file)
@@ -0,0 +1,464 @@
|
#include <atrip/Tuples.hpp>
|
||||||
|
#include <atrip/Atrip.hpp>
|
||||||
|
|
||||||
|
namespace atrip {
|
||||||
|
|
||||||
|
template <typename A>
|
||||||
|
static A unique(A const &xs) {
|
||||||
|
auto result = xs;
|
||||||
|
std::sort(std::begin(result), std::end(result));
|
||||||
|
auto const& last = std::unique(std::begin(result), std::end(result));
|
||||||
|
result.erase(last, std::end(result));
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
std::vector<std::string> getNodeNames(MPI_Comm comm){
|
||||||
|
int rank, np;
|
||||||
|
MPI_Comm_rank(comm, &rank);
|
||||||
|
MPI_Comm_size(comm, &np);
|
||||||
|
|
||||||
|
std::vector<std::string> nodeList(np);
|
||||||
|
char nodeName[MPI_MAX_PROCESSOR_NAME];
|
||||||
|
char *nodeNames = (char*)malloc(np * MPI_MAX_PROCESSOR_NAME);
|
||||||
|
std::vector<int> nameLengths(np)
|
||||||
|
, off(np)
|
||||||
|
;
|
||||||
|
int nameLength;
|
||||||
|
MPI_Get_processor_name(nodeName, &nameLength);
|
||||||
|
MPI_Allgather(&nameLength,
|
||||||
|
1,
|
||||||
|
MPI_INT,
|
||||||
|
nameLengths.data(),
|
||||||
|
1,
|
||||||
|
MPI_INT,
|
||||||
|
comm);
|
||||||
|
for (int i(1); i < np; i++)
|
||||||
|
off[i] = off[i-1] + nameLengths[i-1];
|
||||||
|
MPI_Allgatherv(nodeName,
|
||||||
|
nameLengths[rank],
|
||||||
|
MPI_BYTE,
|
||||||
|
nodeNames,
|
||||||
|
nameLengths.data(),
|
||||||
|
off.data(),
|
||||||
|
MPI_BYTE,
|
||||||
|
comm);
|
||||||
|
for (int i(0); i < np; i++) {
|
||||||
|
std::string const s(&nodeNames[off[i]], nameLengths[i]);
|
||||||
|
nodeList[i] = s;
|
||||||
|
}
|
||||||
|
std::free(nodeNames);
|
||||||
|
return nodeList;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
std::vector<RankInfo>
|
||||||
|
getNodeInfos(std::vector<string> const& nodeNames) {
|
||||||
|
std::vector<RankInfo> result;
|
||||||
|
auto const uniqueNames = unique(nodeNames);
|
||||||
|
auto const index = [&uniqueNames](std::string const& s) {
|
||||||
|
auto const& it = std::find(uniqueNames.begin(), uniqueNames.end(), s);
|
||||||
|
return std::distance(uniqueNames.begin(), it);
|
||||||
|
};
|
||||||
|
std::vector<size_t> localRanks(uniqueNames.size(), 0);
|
||||||
|
size_t globalRank = 0;
|
||||||
|
for (auto const& name: nodeNames) {
|
||||||
|
const size_t nodeId = index(name);
|
||||||
|
result.push_back({name,
|
||||||
|
nodeId,
|
||||||
|
globalRank++,
|
||||||
|
localRanks[nodeId]++,
|
||||||
|
(size_t)
|
||||||
|
std::count(nodeNames.begin(),
|
||||||
|
nodeNames.end(),
|
||||||
|
name)
|
||||||
|
});
|
||||||
|
}
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
ClusterInfo
|
||||||
|
getClusterInfo(MPI_Comm comm) {
|
||||||
|
auto const names = getNodeNames(comm);
|
||||||
|
auto const rankInfos = getNodeInfos(names);
|
||||||
|
|
||||||
|
return ClusterInfo {
|
||||||
|
unique(names).size(),
|
||||||
|
names.size(),
|
||||||
|
rankInfos[0].ranksPerNode,
|
||||||
|
rankInfos
|
||||||
|
};
|
||||||
|
|
||||||
|
}
|
||||||
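// Worked example (illustration only): four ranks on two nodes whose
// processor names come back as {"node0", "node0", "node1", "node1"}.
// getNodeInfos then yields one entry per rank, in the push_back order
// (name, nodeId, globalRank, localRank, ranksPerNode):
//
//   {"node0", 0, 0, 0, 2}
//   {"node0", 0, 1, 1, 2}
//   {"node1", 1, 2, 0, 2}
//   {"node1", 1, 3, 1, 2}
//
// and getClusterInfo reports 2 nodes, 4 ranks and 2 ranks per node.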
|
|
||||||
|
|
||||||
|
|
||||||
|
ABCTuples getTuplesList(size_t Nv, size_t rank, size_t np) {
|
||||||
|
|
||||||
|
const size_t
|
||||||
|
// total number of tuples for the problem
|
||||||
|
n = Nv * (Nv + 1) * (Nv + 2) / 6 - Nv
|
||||||
|
|
||||||
|
// all ranks should have the same number of tuples_per_rank
|
||||||
|
, tuples_per_rank = n / np + size_t(n % np != 0)
|
||||||
|
|
||||||
|
// start index for the global tuples list
|
||||||
|
, start = tuples_per_rank * rank
|
||||||
|
|
||||||
|
// end index for the global tuples list
|
||||||
|
, end = tuples_per_rank * (rank + 1)
|
||||||
|
;
|
||||||
|
|
||||||
|
LOG(1,"Atrip") << "tuples_per_rank = " << tuples_per_rank << "\n";
|
||||||
|
WITH_RANK << "start, end = " << start << ", " << end << "\n";
|
||||||
|
ABCTuples result(tuples_per_rank, FAKE_TUPLE);
|
||||||
|
|
||||||
|
for (size_t a(0), r(0), g(0); a < Nv; a++)
|
||||||
|
for (size_t b(a); b < Nv; b++)
|
||||||
|
for (size_t c(b); c < Nv; c++){
|
||||||
|
if ( a == b && b == c ) continue;
|
||||||
|
if ( start <= g && g < end) result[r++] = {a, b, c};
|
||||||
|
g++;
|
||||||
|
}
|
||||||
|
|
||||||
|
return result;
|
||||||
|
|
||||||
|
}
|
||||||
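// The count n = Nv*(Nv + 1)*(Nv + 2)/6 - Nv is the number of multisets
// a <= b <= c drawn from Nv virtual orbitals minus the Nv diagonal tuples
// a == b == c, which both getTuplesList and getAllTuplesList skip.  For
// example, Nv = 4 gives 4*5*6/6 - 4 = 16 tuples.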
|
|
||||||
|
|
||||||
|
ABCTuples getAllTuplesList(const size_t Nv) {
|
||||||
|
const size_t n = Nv * (Nv + 1) * (Nv + 2) / 6 - Nv;
|
||||||
|
ABCTuples result(n);
|
||||||
|
|
||||||
|
for (size_t a(0), u(0); a < Nv; a++)
|
||||||
|
for (size_t b(a); b < Nv; b++)
|
||||||
|
for (size_t c(b); c < Nv; c++){
|
||||||
|
if ( a == b && b == c ) continue;
|
||||||
|
result[u++] = {a, b, c};
|
||||||
|
}
|
||||||
|
|
||||||
|
return result;
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
ABCTuples atrip::NaiveDistribution::getTuples(size_t Nv, MPI_Comm universe) {
|
||||||
|
int rank, np;
|
||||||
|
MPI_Comm_rank(universe, &rank);
|
||||||
|
MPI_Comm_size(universe, &np);
|
||||||
|
return getTuplesList(Nv, (size_t)rank, (size_t)np);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
|
namespace group_and_sort {
|
||||||
|
|
||||||
|
inline
|
||||||
|
size_t isOnNode(size_t tuple, size_t nNodes) { return tuple % nNodes; }
|
||||||
|
|
||||||
|
std::vector<size_t> getTupleNodes(ABCTuple const& t, size_t nNodes) {
|
||||||
|
std::vector<size_t>
|
||||||
|
nTuple = { isOnNode(t[0], nNodes)
|
||||||
|
, isOnNode(t[1], nNodes)
|
||||||
|
, isOnNode(t[2], nNodes)
|
||||||
|
};
|
||||||
|
return unique(nTuple);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
ABCTuples specialDistribution(Info const& info, ABCTuples const& allTuples) {
|
||||||
|
|
||||||
|
ABCTuples nodeTuples;
|
||||||
|
size_t const nNodes(info.nNodes);
|
||||||
|
|
||||||
|
std::vector<ABCTuples>
|
||||||
|
container1d(nNodes)
|
||||||
|
, container2d(nNodes * nNodes)
|
||||||
|
, container3d(nNodes * nNodes * nNodes)
|
||||||
|
;
|
||||||
|
|
||||||
|
WITH_DBG if (info.nodeId == 0)
|
||||||
|
std::cout << "\tGoing through all "
|
||||||
|
<< allTuples.size()
|
||||||
|
<< " tuples in "
|
||||||
|
<< nNodes
|
||||||
|
<< " nodes\n";
|
||||||
|
|
||||||
|
// build container-n-d's
|
||||||
|
for (auto const& t: allTuples) {
|
||||||
|
// on which node(s) the tuple elements are located...
|
||||||
|
// put them into the right container
|
||||||
|
auto const _nodes = getTupleNodes(t, nNodes);
|
||||||
|
|
||||||
|
switch (_nodes.size()) {
|
||||||
|
case 1:
|
||||||
|
container1d[_nodes[0]].push_back(t);
|
||||||
|
break;
|
||||||
|
case 2:
|
||||||
|
container2d[ _nodes[0]
|
||||||
|
+ _nodes[1] * nNodes
|
||||||
|
].push_back(t);
|
||||||
|
break;
|
||||||
|
case 3:
|
||||||
|
container3d[ _nodes[0]
|
||||||
|
+ _nodes[1] * nNodes
|
||||||
|
+ _nodes[2] * nNodes * nNodes
|
||||||
|
].push_back(t);
|
||||||
|
break;
|
||||||
|
}
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
WITH_DBG if (info.nodeId == 0)
|
||||||
|
std::cout << "\tBuilding 1-d containers\n";
|
||||||
|
// DISTRIBUTE 1-d containers
|
||||||
|
// every tuple which is only located at one node belongs to this node
|
||||||
|
{
|
||||||
|
auto const& _tuples = container1d[info.nodeId];
|
||||||
|
nodeTuples.resize(_tuples.size(), INVALID_TUPLE);
|
||||||
|
std::copy(_tuples.begin(), _tuples.end(), nodeTuples.begin());
|
||||||
|
}
|
||||||
|
|
||||||
|
WITH_DBG if (info.nodeId == 0)
|
||||||
|
std::cout << "\tBuilding 2-d containers\n";
|
||||||
|
// DISTRIBUTE 2-d containers
|
||||||
|
//the tuples which are located at two nodes are half/half given to these nodes
|
||||||
|
for (size_t yx = 0; yx < container2d.size(); yx++) {
|
||||||
|
|
||||||
|
auto const& _tuples = container2d[yx];
|
||||||
|
const
|
||||||
|
size_t idx = yx % nNodes
|
||||||
|
// remember: yx = idy * nNodes + idx
|
||||||
|
, idy = yx / nNodes
|
||||||
|
, n_half = _tuples.size() / 2
|
||||||
|
, size = nodeTuples.size()
|
||||||
|
;
|
||||||
|
|
||||||
|
size_t nbeg, nend;
|
||||||
|
if (info.nodeId == idx) {
|
||||||
|
nbeg = 0 * n_half;
|
||||||
|
nend = n_half;
|
||||||
|
} else if (info.nodeId == idy) {
|
||||||
|
nbeg = 1 * n_half;
|
||||||
|
nend = _tuples.size();
|
||||||
|
} else {
|
||||||
|
// neither idx nor idy is my node
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t const nextra = nend - nbeg;
|
||||||
|
nodeTuples.resize(size + nextra, INVALID_TUPLE);
|
||||||
|
std::copy(_tuples.begin() + nbeg,
|
||||||
|
_tuples.begin() + nend,
|
||||||
|
nodeTuples.begin() + size);
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
WITH_DBG if (info.nodeId == 0)
|
||||||
|
std::cout << "\tBuilding 3-d containers\n";
|
||||||
|
// DISTRIBUTE 3-d containers
|
||||||
|
for (size_t zyx = 0; zyx < container3d.size(); zyx++) {
|
||||||
|
auto const& _tuples = container3d[zyx];
|
||||||
|
|
||||||
|
const
|
||||||
|
size_t idx = zyx % nNodes
|
||||||
|
, idy = (zyx / nNodes) % nNodes
|
||||||
|
// remember: zyx = idx + idy * nNodes + idz * nNodes^2
|
||||||
|
, idz = zyx / nNodes / nNodes
|
||||||
|
, n_third = _tuples.size() / 3
|
||||||
|
, size = nodeTuples.size()
|
||||||
|
;
|
||||||
|
|
||||||
|
size_t nbeg, nend;
|
||||||
|
if (info.nodeId == idx) {
|
||||||
|
nbeg = 0 * n_third;
|
||||||
|
nend = 1 * n_third;
|
||||||
|
} else if (info.nodeId == idy) {
|
||||||
|
nbeg = 1 * n_third;
|
||||||
|
nend = 2 * n_third;
|
||||||
|
} else if (info.nodeId == idz) {
|
||||||
|
nbeg = 2 * n_third;
|
||||||
|
nend = _tuples.size();
|
||||||
|
} else {
|
||||||
|
// neither idx, idy nor idz is my node
|
||||||
|
continue;
|
||||||
|
}
|
||||||
|
|
||||||
|
size_t const nextra = nend - nbeg;
|
||||||
|
nodeTuples.resize(size + nextra, INVALID_TUPLE);
|
||||||
|
std::copy(_tuples.begin() + nbeg,
|
||||||
|
_tuples.begin() + nend,
|
||||||
|
nodeTuples.begin() + size);
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
WITH_DBG if (info.nodeId == 0) std::cout << "\tswapping tuples...\n";
|
||||||
|
/*
|
||||||
|
* sort part of group-and-sort algorithm
|
||||||
|
* every tuple on a given node is sorted in a way that
|
||||||
|
* the 'home elements' are the fastest index.
|
||||||
|
* 1:yyy 2:yyn(x) 3:yny(x) 4:ynn(x) 5:nyy 6:nyn(x) 7:nny 8:nnn
|
||||||
|
*/
|
||||||
|
for (auto &nt: nodeTuples){
|
||||||
|
if ( isOnNode(nt[0], nNodes) == info.nodeId ){ // 1234
|
||||||
|
if ( isOnNode(nt[2], nNodes) != info.nodeId ){ // 24
|
||||||
|
size_t const x(nt[0]);
|
||||||
|
nt[0] = nt[2]; // switch first and last
|
||||||
|
nt[2] = x;
|
||||||
|
}
|
||||||
|
else if ( isOnNode(nt[1], nNodes) != info.nodeId){ // 3
|
||||||
|
size_t const x(nt[0]);
|
||||||
|
nt[0] = nt[1]; // switch first two
|
||||||
|
nt[1] = x;
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
if ( isOnNode(nt[1], nNodes) == info.nodeId // 56
|
||||||
|
&& isOnNode(nt[2], nNodes) != info.nodeId
|
||||||
|
) { // 6
|
||||||
|
size_t const x(nt[1]);
|
||||||
|
nt[1] = nt[2]; // switch last two
|
||||||
|
nt[2] = x;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
WITH_DBG if (info.nodeId == 0) std::cout << "\tsorting list of tuples...\n";
|
||||||
|
//now we sort the list of tuples
|
||||||
|
std::sort(nodeTuples.begin(), nodeTuples.end());
|
||||||
|
|
||||||
|
WITH_DBG if (info.nodeId == 0) std::cout << "\trestoring tuples...\n";
|
||||||
|
// we bring the tuples abc back in the order a<b<c
|
||||||
|
for (auto &t: nodeTuples) std::sort(t.begin(), t.end());
|
||||||
|
|
||||||
|
#if ATRIP_DEBUG > 1
|
||||||
|
WITH_DBG if (info.nodeId == 0)
|
||||||
|
std::cout << "checking for validity of " << nodeTuples.size() << std::endl;
|
||||||
|
const bool anyInvalid
|
||||||
|
= std::any_of(nodeTuples.begin(),
|
||||||
|
nodeTuples.end(),
|
||||||
|
[](ABCTuple const& t) { return t == INVALID_TUPLE; });
|
||||||
|
if (anyInvalid) throw "Some tuple is invalid in group-and-sort algorithm";
|
||||||
|
#endif
|
||||||
|
|
||||||
|
WITH_DBG if (info.nodeId == 0) std::cout << "\treturning tuples...\n";
|
||||||
|
return nodeTuples;
|
||||||
|
|
||||||
|
}
|
||||||
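// A small illustration of the grouping above for nNodes = 3 (illustration
// only, not part of the source), with isOnNode(t) = t % nNodes:
//
//   {0, 3, 6} -> home nodes {0}        -> container1d[0]
//   {1, 4, 5} -> home nodes {1, 2}     -> container2d[1 + 2*3], later split
//                                         half/half between nodes 1 and 2
//   {0, 1, 2} -> home nodes {0, 1, 2}  -> container3d[0 + 1*3 + 2*9], later
//                                         split in thirds among the nodes
//
// The element swaps afterwards only reorder a, b, c within a tuple so that
// the indices owned by the receiving node sit at the end and therefore
// vary fastest once the per-node list is sorted; the final per-tuple
// std::sort restores a <= b <= c before the lists are scattered.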
|
|
||||||
|
|
||||||
|
std::vector<ABCTuple> main(MPI_Comm universe, size_t Nv) {
|
||||||
|
|
||||||
|
int rank, np;
|
||||||
|
MPI_Comm_rank(universe, &rank);
|
||||||
|
MPI_Comm_size(universe, &np);
|
||||||
|
|
||||||
|
std::vector<ABCTuple> result;
|
||||||
|
|
||||||
|
auto const nodeNames(getNodeNames(universe));
|
||||||
|
size_t const nNodes = unique(nodeNames).size();
|
||||||
|
auto const nodeInfos = getNodeInfos(nodeNames);
|
||||||
|
|
||||||
|
// We want to construct a communicator which only contains one
|
||||||
|
// element per node
|
||||||
|
bool const computeDistribution
|
||||||
|
= nodeInfos[rank].localRank == 0;
|
||||||
|
|
||||||
|
std::vector<ABCTuple>
|
||||||
|
nodeTuples
|
||||||
|
= computeDistribution
|
||||||
|
? specialDistribution(Info{nNodes, nodeInfos[rank].nodeId},
|
||||||
|
getAllTuplesList(Nv))
|
||||||
|
: std::vector<ABCTuple>()
|
||||||
|
;
|
||||||
|
|
||||||
|
LOG(1,"Atrip") << "got nodeTuples\n";
|
||||||
|
|
||||||
|
// now we have to send the data from **one** rank on each node
|
||||||
|
// to all others ranks of this node
|
||||||
|
const
|
||||||
|
int color = nodeInfos[rank].nodeId,
|
||||||
|
key = nodeInfos[rank].localRank
|
||||||
|
;
|
||||||
|
|
||||||
|
|
||||||
|
MPI_Comm INTRA_COMM;
|
||||||
|
MPI_Comm_split(universe, color, key, &INTRA_COMM);
|
||||||
|
// Main:1 ends here
|
||||||
|
|
||||||
|
// [[file:~/cuda/atrip/atrip.org::*Main][Main:2]]
|
||||||
|
size_t const
|
||||||
|
tuplesPerRankLocal
|
||||||
|
= nodeTuples.size() / nodeInfos[rank].ranksPerNode
|
||||||
|
+ size_t(nodeTuples.size() % nodeInfos[rank].ranksPerNode != 0)
|
||||||
|
;
|
||||||
|
|
||||||
|
size_t tuplesPerRankGlobal;
|
||||||
|
|
||||||
|
MPI_Reduce(&tuplesPerRankLocal,
|
||||||
|
&tuplesPerRankGlobal,
|
||||||
|
1,
|
||||||
|
MPI_UINT64_T,
|
||||||
|
MPI_MAX,
|
||||||
|
0,
|
||||||
|
universe);
|
||||||
|
|
||||||
|
MPI_Bcast(&tuplesPerRankGlobal,
|
||||||
|
1,
|
||||||
|
MPI_UINT64_T,
|
||||||
|
0,
|
||||||
|
universe);
|
||||||
|
|
||||||
|
LOG(1,"Atrip") << "Tuples per rank: " << tuplesPerRankGlobal << "\n";
|
||||||
|
LOG(1,"Atrip") << "ranks per node " << nodeInfos[rank].ranksPerNode << "\n";
|
||||||
|
LOG(1,"Atrip") << "#nodes " << nNodes << "\n";
|
||||||
|
// Main:2 ends here
|
||||||
|
|
||||||
|
// [[file:~/cuda/atrip/atrip.org::*Main][Main:3]]
|
||||||
|
size_t const totalTuples
|
||||||
|
= tuplesPerRankGlobal * nodeInfos[rank].ranksPerNode;
|
||||||
|
|
||||||
|
if (computeDistribution) {
|
||||||
|
// pad with FAKE_TUPLEs
|
||||||
|
nodeTuples.insert(nodeTuples.end(),
|
||||||
|
totalTuples - nodeTuples.size(),
|
||||||
|
FAKE_TUPLE);
|
||||||
|
}
|
||||||
|
// Main:3 ends here
|
||||||
|
|
||||||
|
// [[file:~/cuda/atrip/atrip.org::*Main][Main:4]]
|
||||||
|
{
|
||||||
|
// construct mpi type for abctuple
|
||||||
|
MPI_Datatype MPI_ABCTUPLE;
|
||||||
|
MPI_Type_vector(nodeTuples[0].size(), 1, 1, MPI_UINT64_T, &MPI_ABCTUPLE);
|
||||||
|
MPI_Type_commit(&MPI_ABCTUPLE);
|
||||||
|
|
||||||
|
LOG(1,"Atrip") << "scattering tuples \n";
|
||||||
|
|
||||||
|
result.resize(tuplesPerRankGlobal);
|
||||||
|
MPI_Scatter(nodeTuples.data(),
|
||||||
|
tuplesPerRankGlobal,
|
||||||
|
MPI_ABCTUPLE,
|
||||||
|
result.data(),
|
||||||
|
tuplesPerRankGlobal,
|
||||||
|
MPI_ABCTUPLE,
|
||||||
|
0,
|
||||||
|
INTRA_COMM);
|
||||||
|
|
||||||
|
MPI_Type_free(&MPI_ABCTUPLE);
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
return result;
|
||||||
|
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
ABCTuples Distribution::getTuples(size_t Nv, MPI_Comm universe) {
|
||||||
|
return main(universe, Nv);
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
} // namespace group_and_sort
|
||||||
|
} // namespace atrip
|
||||||
tools/configure-benches.sh (new executable file)
@@ -0,0 +1,183 @@
|
#!/usr/bin/env bash
|
||||||
|
# Copyright (C) 2022 by Alejandro Gallo <aamsgallo@gmail.com>
|
||||||
|
|
||||||
|
set -eu
|
||||||
|
|
||||||
|
flags=("${@}")
|
||||||
|
PROJECTS=()
|
||||||
|
|
||||||
|
############################################################
|
||||||
|
#
|
||||||
|
## Check root directory
|
||||||
|
#
|
||||||
|
root_project=$(git rev-parse --show-toplevel)
|
||||||
|
configure=$root_project/configure
|
||||||
|
if [[ $(basename $PWD) == $(basename $root_project) ]]; then
|
||||||
|
cat <<EOF
|
||||||
|
|
||||||
|
You are trying to build in the root directory, create a build folder
|
||||||
|
and then configure.
|
||||||
|
|
||||||
|
mkdir build
|
||||||
|
cd build
|
||||||
|
$(readlink -f $0)
|
||||||
|
|
||||||
|
EOF
|
||||||
|
exit 1
|
||||||
|
fi
|
||||||
|
|
||||||
|
[[ -f $configure ]] || {
|
||||||
|
cat <<EOF
|
||||||
|
No configure script at $configure create it with bootstrap.sh or
|
||||||
|
|
||||||
|
autoreconf -vif
|
||||||
|
|
||||||
|
EOF
|
||||||
|
exit 1
|
||||||
|
}
|
||||||
|
|
||||||
|
############################################################
|
||||||
|
#
|
||||||
|
## Create configuration function
|
||||||
|
#
|
||||||
|
|
||||||
|
create_config () {
|
||||||
|
file=$1
|
||||||
|
name=$2
|
||||||
|
PROJECTS=(${PROJECTS[@]} "$name")
|
||||||
|
mkdir -p $name
|
||||||
|
cd $name
|
||||||
|
echo "> creating: $name"
|
||||||
|
cat <<SH > configure
|
||||||
|
#!/usr/bin/env bash
|
||||||
|
# creator: $0
|
||||||
|
# date: $(date)
|
||||||
|
|
||||||
|
$root_project/configure $(cat $file | paste -s) \\
|
||||||
|
$(for word in "${flags[@]}"; do
|
||||||
|
printf " \"%s\"" "$word";
|
||||||
|
done)
|
||||||
|
|
||||||
|
|
||||||
|
exit 0
|
||||||
|
SH
|
||||||
|
chmod +x configure
|
||||||
|
cd - > /dev/null
|
||||||
|
}
|
||||||
|
|
||||||
|
############################################################
|
||||||
|
# begin doc
|
||||||
|
#
|
||||||
|
# - default ::
|
||||||
|
# This configuration uses a CPU code with dgemm
|
||||||
|
# and without computing slices.
|
||||||
|
#
|
||||||
|
# end doc
|
||||||
|
|
||||||
|
tmp=`mktemp`
|
||||||
|
cat <<EOF > $tmp
|
||||||
|
--disable-slice
|
||||||
|
EOF
|
||||||
|
|
||||||
|
create_config $tmp default
|
||||||
|
rm $tmp
|
||||||
|
|
||||||
|
# begin doc
|
||||||
|
#
|
||||||
|
# - only-dgemm ::
|
||||||
|
# This only runs the computation part that involves dgemms.
|
||||||
|
#
|
||||||
|
# end doc
|
||||||
|
|
||||||
|
tmp=`mktemp`
|
||||||
|
cat <<EOF > $tmp
|
||||||
|
--disable-slice
|
||||||
|
--enable-only-dgemm
|
||||||
|
EOF
|
||||||
|
|
||||||
|
create_config $tmp only-dgemm
|
||||||
|
rm $tmp
|
||||||
|
|
||||||
|
# begin doc
|
||||||
|
#
|
||||||
|
# - cuda-only-dgemm ::
|
||||||
|
# This is the naive CUDA implementation compiling only the dgemm parts
|
||||||
|
# of the compute.
|
||||||
|
#
|
||||||
|
# end doc
|
||||||
|
|
||||||
|
tmp=`mktemp`
|
||||||
|
cat <<EOF > $tmp
|
||||||
|
--enable-cuda
|
||||||
|
--enable-only-dgemm
|
||||||
|
--disable-slice
|
||||||
|
EOF
|
||||||
|
|
||||||
|
create_config $tmp cuda-only-dgemm
|
||||||
|
rm $tmp
|
||||||
|
|
||||||
|
# begin doc
|
||||||
|
#
|
||||||
|
# - cuda-slices-on-gpu-only-dgemm ::
|
||||||
|
# This configuration tests that slices reside completely on the gpu
|
||||||
|
# and it should use a CUDA aware MPI implementation.
|
||||||
|
# It also only uses the routines that involve dgemm.
|
||||||
|
#
|
||||||
|
# end doc
|
||||||
|
|
||||||
|
tmp=`mktemp`
|
||||||
|
cat <<EOF > $tmp
|
||||||
|
--enable-cuda
|
||||||
|
--enable-sources-in-gpu
|
||||||
|
--enable-cuda-aware-mpi
|
||||||
|
--enable-only-dgemm
|
||||||
|
--disable-slice
|
||||||
|
EOF
|
||||||
|
|
||||||
|
create_config $tmp cuda-slices-on-gpu-only-dgemm
|
||||||
|
rm $tmp
|
||||||
|
|
||||||
|
############################################################
|
||||||
|
#
|
||||||
|
## Create makefile
|
||||||
|
#
|
||||||
|
|
||||||
|
cat <<MAKE > Makefile
|
||||||
|
|
||||||
|
all: configure do
|
||||||
|
do: configure
|
||||||
|
|
||||||
|
configure: ${PROJECTS[@]/%/\/Makefile}
|
||||||
|
|
||||||
|
%/Makefile: %/configure
|
||||||
|
cd \$* && ./configure
|
||||||
|
|
||||||
|
do: ${PROJECTS[@]/%/\/src\/libatrip.a}
|
||||||
|
|
||||||
|
|
||||||
|
%/src/libatrip.a:
|
||||||
|
cd \$* && \$(MAKE)
|
||||||
|
|
||||||
|
|
||||||
|
.PHONY: configure do all
|
||||||
|
MAKE
|
||||||
|
|
||||||
|
cat <<EOF
|
||||||
|
|
||||||
|
Now you can do
|
||||||
|
|
||||||
|
make all
|
||||||
|
|
||||||
|
or go into one of the directories
|
||||||
|
${PROJECTS[@]}
|
||||||
|
and do
|
||||||
|
./configure
|
||||||
|
make
|
||||||
|
|
||||||
|
EOF
|
||||||
|
|
||||||
|
## Emacs stuff
|
||||||
|
# Local Variables:
|
||||||
|
# eval: (outline-minor-mode)
|
||||||
|
# outline-regexp: "############################################################"
|
||||||
|
# End:
|
||||||