Compare commits
No commits in common. "cuda" and "newtuples" have entirely different histories.
.github/workflows/main.yml
@@ -2,8 +2,6 @@
 name: CI
 
 on:
-  push:
-    branches: [ master, cuda ]
   pull_request:
     branches: [ master, cuda ]
 
README.org
@@ -69,10 +69,10 @@ And then you can see the =configure= options
 ../../configure --help
 #+end_src
 
-** Benches
+** Benchmarks
 
 The script =tools/configure-benches.sh= can be used to create
-a couple of configurations for benches:
+a couple of configurations for benchmarks:
 
 #+begin_src sh :exports results :results verbatim org :results verbatim drawer replace output
 awk '/begin +doc/,/end +doc/ { print $NL }' tools/configure-benches.sh |
@@ -87,49 +87,8 @@ sed "s/^# //; s/^# *$//; /^$/d"
 and without computing slices.
 - only-dgemm ::
   This only runs the computation part that involves dgemms.
-- cuda-only-dgemm ::
-  This is the naive CUDA implementation compiling only the dgemm parts
-  of the compute.
-- cuda-slices-on-gpu-only-dgemm ::
+- slices-on-gpu-only-dgemm ::
   This configuration tests that slices reside completely on the gpu
   and it should use a CUDA aware MPI implementation.
   It also only uses the routines that involve dgemm.
 :end:
-
-In order to generate the benches just create a suitable directory for it
-
-#+begin_src sh :eval no
-mkdir -p build/benches
-cd buid/benches
-../../tools/configure-benches.sh CXX=g++ ...
-#+end_src
-
-and you will get a Makefile together with several project folders.
-You can either configure all projects with =make all= or
-then go in each folder.
-
-Notice that you can give a path for ctf for all of them by doing
-#+begin_src sh :eval no
-../../tools/configure-benches.sh --with-ctf=/absolute/path/to/ctf
-#+end_src
-
-* Running benches
-
-** Main benchmark
-
-The main benchmark gets built in =bench/atrip= and is used to run an
-atrip run with random tensors.
-
-A common run of this script will be the following
-
-#+begin_src sh
-bench/atrip \
-  --no 100 \
-  --nv 1000 \
-  --mod 1 \
-  --% 0 \
-  --dist group \
-  --nocheckpoint \
-  --max-iterations 1000
-#+end_src
-
@@ -5,20 +5,18 @@
 #include <CLI11.hpp>
 
 #define _print_size(what, size) \
-  do { \
   if (rank == 0) { \
     std::cout << #what \
               << " => " \
               << (double)size * elem_to_gb \
               << "GB" \
               << std::endl; \
-  } \
-  } while (0)
+  }
 
 int main(int argc, char** argv) {
   MPI_Init(&argc, &argv);
 
-  size_t checkpoint_it, max_iterations;
+  size_t checkpoint_it;
   int no(10), nv(100), itMod(-1), percentageMod(10);
   float checkpoint_percentage;
   bool
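The hunk above also drops the do { ... } while (0) wrapper around _print_size, leaving a bare if-block. A minimal, generic illustration of what that wrapper buys a multi-statement macro (not code from this repository; the macro names here are made up):

#+begin_src c++ :eval no
#include <iostream>

// A multi-statement macro without a do { ... } while (0) wrapper does not
// behave like a single statement: only the first statement is guarded by
// the `if` below, the second one always executes.
#define PRINT_TWICE_BAD(x) std::cout << (x) << "\n"; std::cout << (x) << "\n"
// Wrapped version: the whole body is one statement and composes with if/else.
#define PRINT_TWICE_OK(x) do { std::cout << (x) << "\n"; std::cout << (x) << "\n"; } while (0)

int main() {
  bool verbose = false;
  if (verbose)
    PRINT_TWICE_BAD(1); // still prints "1" once even though verbose is false

  if (verbose)
    PRINT_TWICE_OK(2);  // prints nothing, as intended
  else
    std::cout << "quiet\n";
  return 0;
}
#+end_src

The new _print_size is only ever used as a standalone statement, so the simpler form works there, but it would no longer nest safely under an if/else at a call site.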
@@ -32,9 +30,6 @@ int main(int argc, char** argv) {
   app.add_option("--no", no, "Occupied orbitals");
   app.add_option("--nv", nv, "Virtual orbitals");
   app.add_option("--mod", itMod, "Iteration modifier");
-  app.add_option("--max-iterations",
-                 max_iterations,
-                 "Maximum number of iterations to run");
   app.add_flag("--keep-vppph", keepVppph, "Do not delete Vppph");
   app.add_flag("--nochrono", nochrono, "Do not print chrono");
   app.add_flag("--rank-round-robin", rankRoundRobin, "Do rank round robin");
@@ -50,19 +45,6 @@ int main(int argc, char** argv) {
                  checkpoint_percentage,
                  "Percentage for checkpoints");
 
-  // Optional tensor files
-  std::string
-    ei_path, ea_path,
-    Tph_path, Tpphh_path,
-    Vpphh_path, Vhhhp_path, Vppph_path;
-  app.add_option("--ei", ei_path, "Path for ei");
-  app.add_option("--ea", ea_path, "Path for ea");
-  app.add_option("--Tpphh", Tpphh_path, "Path for Tpphh");
-  app.add_option("--Tph", Tph_path, "Path for Tph");
-  app.add_option("--Vpphh", Vpphh_path, "Path for Vpphh");
-  app.add_option("--Vhhhp", Vhhhp_path, "Path for Vhhhp");
-  app.add_option("--Vppph", Vppph_path, "Path for Vppph");
-
 #if defined(HAVE_CUDA)
   size_t ooo_threads = 0, ooo_blocks = 0;
   app.add_option("--ooo-blocks",
@@ -166,64 +148,37 @@ int main(int argc, char** argv) {
   }
 
 
-  std::vector<int>
-    symmetries(4, NS),
-    vo({nv, no}),
-    vvoo({nv, nv, no, no}),
-    ooov({no, no, no, nv}),
-    vvvo({nv, nv, nv, no});
+  std::vector<int> symmetries(4, NS)
+    , vo({nv, no})
+    , vvoo({nv, nv, no, no})
+    , ooov({no, no, no, nv})
+    , vvvo({nv, nv, nv, no})
+    ;
 
   CTF::Tensor<double>
-    ei(1, ooov.data(), symmetries.data(), world),
-    ea(1, vo.data(), symmetries.data(), world),
-    Tph(2, vo.data(), symmetries.data(), world),
-    Tpphh(4, vvoo.data(), symmetries.data(), world),
-    Vpphh(4, vvoo.data(), symmetries.data(), world),
-    Vhhhp(4, ooov.data(), symmetries.data(), world);
+    ei(1, ooov.data(), symmetries.data(), world)
+    , ea(1, vo.data(), symmetries.data(), world)
+    , Tph(2, vo.data(), symmetries.data(), world)
+    , Tpphh(4, vvoo.data(), symmetries.data(), world)
+    , Vpphh(4, vvoo.data(), symmetries.data(), world)
+    , Vhhhp(4, ooov.data(), symmetries.data(), world)
+    ;
 
   // initialize deletable tensors in heap
   auto Vppph
     = new CTF::Tensor<double>(4, vvvo.data(), symmetries.data(), world);
 
-  _print_size(Vabci, no*nv*nv*nv);
-  _print_size(Vabij, no*no*nv*nv);
-  _print_size(Vijka, no*no*no*nv);
+  _print_size(Vabci, no*nv*nv*nv)
+  _print_size(Vabij, no*no*nv*nv)
+  _print_size(Vijka, no*no*no*nv)
 
-  if (ei_path.size()) {
-    ei.read_dense_from_file(ei_path.c_str());
-  } else {
   ei.fill_random(-40.0, -2);
-  }
-  if (ea_path.size()) {
-    ea.read_dense_from_file(ea_path.c_str());
-  } else {
   ea.fill_random(2, 50);
-  }
-  if (Tpphh_path.size()) {
-    Tpphh.read_dense_from_file(Tpphh_path.c_str());
-  } else {
   Tpphh.fill_random(0, 1);
-  }
-  if (Tph_path.size()) {
-    Tph.read_dense_from_file(Tph_path.c_str());
-  } else {
   Tph.fill_random(0, 1);
-  }
-  if (Vpphh_path.size()) {
-    Vpphh.read_dense_from_file(Vpphh_path.c_str());
-  } else {
   Vpphh.fill_random(0, 1);
-  }
-  if (Vhhhp_path.size()) {
-    Vhhhp.read_dense_from_file(Vhhhp_path.c_str());
-  } else {
   Vhhhp.fill_random(0, 1);
-  }
-  if (Vppph_path.size()) {
-    Vppph->read_dense_from_file(Vppph_path.c_str());
-  } else {
   Vppph->fill_random(0, 1);
-  }
 
   atrip::Atrip::init(MPI_COMM_WORLD);
   const auto in
@@ -244,7 +199,6 @@ int main(int argc, char** argv) {
     .with_iterationMod(itMod)
     .with_percentageMod(percentageMod)
     .with_tuplesDistribution(tuplesDistribution)
-    .with_maxIterations(max_iterations)
     // checkpoint options
     .with_checkpointAtEveryIteration(checkpoint_it)
     .with_checkpointAtPercentage(checkpoint_percentage)
@@ -164,7 +164,8 @@ AC_TYPE_SIZE_T
 dnl -----------------------------------------------------------------------
 dnl CHECK CTF
 if test xYES = x${BUILD_CTF}; then
-  AC_MSG_WARN([You will have to do make ctf before building the project.])
+  AC_MSG_WARN([Sorry, building CTF not supported yet provide a build path
+               with --with-ctf=path/to/ctf/installation])
 else
   CPPFLAGS="$CPPFLAGS -I${LIBCTF_CPATH}"
   LDFLAGS="$LDFLAGS -L${LIBCTF_LD_LIBRARY_PATH} -lctf"
etc/env/raven/cuda
@@ -1,56 +0,0 @@
-mods=(
-  cuda/11.6
-  intel/19.1.2
-  mkl/2020.4
-  impi/2019.8
-  autoconf/2.69
-  automake/1.15
-  libtool/2.4.6
-)
-
-
-module purge
-module load ${mods[@]}
-LIB_PATH="${CUDA_HOME}/lib64"
-export CUDA_ROOT=${CUDA_HOME}
-export CUDA_LDFLAGS="-L${LIB_PATH} -lcuda -L${LIB_PATH} -lcudart -L${LIB_PATH} -lcublas"
-export CUDA_CXXFLAGS="-I${CUDA_HOME}/include"
-
-export LD_LIBRARY_PATH="${MKL_HOME}/lib/intel64_lin:${LD_LIBRARY_PATH}"
-
-BLAS_STATIC_PATH="$MKL_HOME/lib/intel64/libmkl_intel_lp64.a"
-
-ls ${LIB_PATH}/libcublas.so
-ls ${LIB_PATH}/libcudart.so
-
-cat <<EOF
-
-////////////////////////////////////////////////////////////////////////////////
-info
-////////////////////////////////////////////////////////////////////////////////
-
-
-MKL_HOME = $MKL_HOME
-BLAS_STATIC_PATH = $BLAS_STATIC_PATH
-
-CUDA_ROOT = ${CUDA_HOME}
-CUDA_LDFLAGS = "-L${LIB_PATH} -lcuda -L${LIB_PATH} -lcudart -L${LIB_PATH} -lcublas"
-CUDA_CXXFLAGS = "-I${CUDA_HOME}/include"
-
-
-
-Consider now runnng the following
-
-../configure \\
-  --enable-cuda \\
-  --disable-slice \\
-  --with-blas="-L\$MKL_HOME/lib/intel64/ -lmkl_intel_lp64 -mkl" \\
-  CXX=mpiicpc \\
-  CC=mpiicc \\
-  MPICXX=mpiicpc
-
-
-EOF
-
-
-return
@@ -86,7 +86,7 @@ namespace atrip {
   ADD_ATTRIBUTE(bool, rankRoundRobin, false)
   ADD_ATTRIBUTE(bool, chrono, false)
   ADD_ATTRIBUTE(bool, barrier, false)
-  ADD_ATTRIBUTE(size_t, maxIterations, 0)
+  ADD_ATTRIBUTE(int, maxIterations, 0)
   ADD_ATTRIBUTE(int, iterationMod, -1)
   ADD_ATTRIBUTE(int, percentageMod, -1)
   ADD_ATTRIBUTE(TuplesDistribution, tuplesDistribution, NAIVE)
@@ -11,22 +11,11 @@
 #if defined(HAVE_CUDA) && defined(__CUDACC__)
 #  define __MAYBE_GLOBAL__ __global__
 #  define __MAYBE_DEVICE__ __device__
-#  define __MAYBE_HOST__ __host__
-#  define __INLINE__ __inline__
 #else
 #  define __MAYBE_GLOBAL__
 #  define __MAYBE_DEVICE__
-#  define __MAYBE_HOST__
-#  define __INLINE__ inline
 #endif
 
-#if defined(HAVE_CUDA)
-#define ACC_FUNCALL(fname, i, j, ...) fname<<<(i), (j)>>>(__VA_ARGS__)
-#else
-#define ACC_FUNCALL(fname, i, j, ...) fname(__VA_ARGS__)
-#endif /* defined(HAVE_CUDA) */
-
-
 #define _CHECK_CUDA_SUCCESS(message, ...) \
   do { \
     CUresult result = __VA_ARGS__; \
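For orientation: the ACC_FUNCALL macro removed above is a thin dispatch layer, expanding to a kernel launch with a grid/block configuration under CUDA and to a plain function call otherwise. A minimal sketch of the same pattern, compilable as ordinary C++ when HAVE_CUDA is not defined (the kernel name and sizes are made up, not atrip API):

#+begin_src c++ :eval no
#include <cstddef>
#include <cstdio>

// Same dispatch idea as the removed ACC_FUNCALL: one call site, two backends.
#if defined(HAVE_CUDA)
#  define ACC_FUNCALL(fname, blocks, threads, ...) fname<<<(blocks), (threads)>>>(__VA_ARGS__)
#else
#  define ACC_FUNCALL(fname, blocks, threads, ...) fname(__VA_ARGS__)
#endif

// Hypothetical kernel-like routine; on the CPU path it is an ordinary loop.
void scale(double* data, size_t n, double factor) {
  for (size_t i = 0; i < n; ++i) data[i] *= factor;
}

int main() {
  double values[4] = {1.0, 2.0, 3.0, 4.0};
  // The grid/block arguments are ignored on the CPU path, used only under CUDA.
  ACC_FUNCALL(scale, 1, 4, values, 4, 2.0);
  std::printf("%f %f %f %f\n", values[0], values[1], values[2], values[3]);
  return 0;
}
#+end_src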
@@ -23,8 +23,6 @@
 #include<thrust/device_vector.h>
 #endif
 
-#include<atrip/CUDA.hpp>
-
 namespace atrip {
 using ABCTuple = std::array<size_t, 3>;
@@ -34,25 +32,21 @@ using ABCTuples = std::vector<ABCTuple>;
 
 // [[file:~/cuda/atrip/atrip.org::*Energy][Energy:1]]
 template <typename F=double>
-__MAYBE_GLOBAL__
-void getEnergyDistinct
+double getEnergyDistinct
   ( F const epsabc
   , size_t const No
   , F* const epsi
   , F* const Tijk
   , F* const Zijk
-  , double* energy
   );
 
 template <typename F=double>
-__MAYBE_GLOBAL__
-void getEnergySame
+double getEnergySame
   ( F const epsabc
   , size_t const No
   , F* const epsi
   , F* const Tijk
   , F* const Zijk
-  , double* energy
   );
 // Energy:1 ends here
 
@@ -103,11 +97,6 @@ void singlesContribution
   // -- TIJK
   // , DataPtr<F> Tijk
   , DataFieldType<F>* Tijk_
-#if defined(HAVE_CUDA)
-  // -- tmp buffers
-  , DataFieldType<F>* _t_buffer
-  , DataFieldType<F>* _vhhh
-#endif
   );
 // Doubles contribution:1 ends here
 
@@ -1,171 +0,0 @@
-// Copyright 2022 Alejandro Gallo
-//
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-//     http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-#ifndef OPERATIONS_HPP_
-#define OPERATIONS_HPP_
-
-#include <atrip/CUDA.hpp>
-#include <atrip/Types.hpp>
-#include <atrip/Complex.hpp>
-
-namespace atrip {
-namespace acc {
-
-// cuda kernels
-
-template <typename F>
-__MAYBE_GLOBAL__
-void zeroing(F* a, size_t n) {
-  F zero = {0};
-  for (size_t i = 0; i < n; i++) {
-    a[i] = zero;
-  }
-}
-
-////
-template <typename F>
-__MAYBE_DEVICE__ __MAYBE_HOST__ __INLINE__
-F maybeConjugateScalar(const F &a) { return a; }
-
-#if defined(HAVE_CUDA)
-template <>
-__MAYBE_DEVICE__ __MAYBE_HOST__ __INLINE__
-cuDoubleComplex maybeConjugateScalar(const cuDoubleComplex &a) {
-  return {a.x, -a.y};
-}
-#endif /* defined(HAVE_CUDA) */
-
-template <typename F>
-__MAYBE_GLOBAL__
-void maybeConjugate(F* to, F* from, size_t n) {
-  for (size_t i = 0; i < n; ++i) {
-    to[i] = maybeConjugateScalar<F>(from[i]);
-  }
-}
-
-
-template <typename F>
-__MAYBE_DEVICE__ __MAYBE_HOST__
-void reorder(F* to, F* from, size_t size, size_t I, size_t J, size_t K) {
-  size_t idx = 0;
-  const size_t IDX = I + J*size + K*size*size;
-  for (size_t k = 0; k < size; k++)
-    for (size_t j = 0; j < size; j++)
-      for (size_t i = 0; i < size; i++, idx++)
-        to[idx] += from[IDX];
-}
-
-// Multiplication operation
-//////////////////////////////////////////////////////////////////////////////
-
-template <typename F>
-__MAYBE_DEVICE__ __MAYBE_HOST__ __INLINE__
-F prod(const F &a, const F &b) { return a * b; }
-
-#if defined(HAVE_CUDA)
-template <>
-__MAYBE_DEVICE__ __MAYBE_HOST__ __INLINE__
-cuDoubleComplex prod(const cuDoubleComplex &a, const cuDoubleComplex &b) {
-  return cuCmul(a, b);
-}
-#endif /* defined(HAVE_CUDA) */
-
-// Division operation
-//////////////////////////////////////////////////////////////////////////////
-
-template <typename F>
-__MAYBE_DEVICE__ __MAYBE_HOST__ __INLINE__
-F div(const F &a, const F &b) { return a / b; }
-
-#if defined(HAVE_CUDA)
-template <>
-__MAYBE_DEVICE__ __MAYBE_HOST__ __INLINE__
-cuDoubleComplex div(const cuDoubleComplex &a, const cuDoubleComplex &b) {
-  return cuCdiv(a, b);
-}
-#endif /* defined(HAVE_CUDA) */
-
-// Real part
-//////////////////////////////////////////////////////////////////////////////
-
-template <typename F>
-__MAYBE_HOST__ __INLINE__
-double real(F &a) { return std::real(a); }
-
-template <>
-__MAYBE_DEVICE__ __MAYBE_HOST__ __INLINE__
-double real(double &a) {
-  return a;
-}
-
-#if defined(HAVE_CUDA)
-template <>
-__MAYBE_DEVICE__ __MAYBE_HOST__ __INLINE__
-double real(cuDoubleComplex &a) {
-  return cuCreal(a);
-}
-#endif /* defined(HAVE_CUDA) */
-
-// Substraction operator
-//////////////////////////////////////////////////////////////////////////////
-
-template <typename F>
-__MAYBE_DEVICE__ __MAYBE_HOST__ __INLINE__
-F sub(const F &a, const F &b) { return a - b; }
-
-#if defined(HAVE_CUDA)
-template <>
-__MAYBE_DEVICE__ __MAYBE_HOST__ __INLINE__
-cuDoubleComplex sub(const cuDoubleComplex &a,
-                    const cuDoubleComplex &b) {
-  return cuCsub(a, b);
-}
-#endif /* defined(HAVE_CUDA) */
-
-// Addition operator
-//////////////////////////////////////////////////////////////////////////////
-
-template <typename F>
-__MAYBE_DEVICE__ __MAYBE_HOST__ __INLINE__
-F add(const F &a, const F &b) { return a + b; }
-
-#if defined(HAVE_CUDA)
-template <>
-__MAYBE_DEVICE__ __MAYBE_HOST__ __INLINE__
-cuDoubleComplex add(const cuDoubleComplex &a, const cuDoubleComplex &b) {
-  return cuCadd(a, b);
-}
-#endif /* defined(HAVE_CUDA) */
-
-// Sum in place operator
-//////////////////////////////////////////////////////////////////////////////
-
-template <typename F>
-__MAYBE_DEVICE__ __MAYBE_HOST__
-void sum_in_place(F* to, const F* from) { *to += *from; }
-
-#if defined(HAVE_CUDA)
-template <>
-__MAYBE_DEVICE__ __MAYBE_HOST__
-void sum_in_place(cuDoubleComplex* to, const cuDoubleComplex* from) {
-  to->x += from->x;
-  to->y += from->y;
-}
-#endif /* defined(HAVE_CUDA) */
-
-
-} // namespace acc
-} // namespace atrip
-
-#endif
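The deleted header above wraps elementary arithmetic (prod, add, sub, div, real, maybeConjugateScalar, sum_in_place) behind free functions with specializations for cuDoubleComplex, which has no arithmetic operators usable in device code. A minimal host-only sketch of that idea (illustrative, not the removed file; the complex type here is a stand-in):

#+begin_src c++ :eval no
#include <cstddef>
#include <cstdio>

// Stand-in for cuDoubleComplex: a plain struct without operator overloads.
struct Cplx { double x, y; };

// Generic arithmetic wrapper for types that do have operator*.
template <typename F>
F prod(const F &a, const F &b) { return a * b; }

// Specialization for the operator-less complex struct, mirroring cuCmul.
template <>
Cplx prod(const Cplx &a, const Cplx &b) {
  return {a.x * b.x - a.y * b.y, a.x * b.y + a.y * b.x};
}

// One generic loop body can now be written once for double and Cplx alike
// (in the library this role is played by templated kernels).
template <typename F>
void multiply_arrays(F *out, const F *a, const F *b, size_t n) {
  for (size_t i = 0; i < n; ++i) out[i] = prod(a[i], b[i]);
}

int main() {
  double a[2] = {2.0, 3.0}, b[2] = {4.0, 5.0}, out[2];
  multiply_arrays(out, a, b, 2);
  Cplx ca[1] = {{1.0, 2.0}}, cb[1] = {{3.0, -1.0}}, cr[1];
  multiply_arrays(cr, ca, cb, 1);
  std::printf("%f %f  (%f, %f)\n", out[0], out[1], cr[0].x, cr[0].y);
  return 0;
}
#+end_src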
@@ -352,7 +352,7 @@ Info info;
 
 // [[file:~/cuda/atrip/atrip.org::*Attributes][Attributes:2]]
   DataPtr<F> data;
-#if defined(HAVE_CUDA) && !defined (ATRIP_SOURCES_IN_GPU)
+#if defined(HAVE_CUDA)
   F* mpi_data;
 #endif
 // Attributes:2 ends here
@@ -456,7 +456,7 @@ void unwrapAndMarkReady() {
     if (errorCode != MPI_SUCCESS)
       throw "Atrip: Unexpected error MPI ERROR";
 
-#if defined(HAVE_CUDA) && !defined(ATRIP_SOURCES_IN_GPU)
+#if defined(HAVE_CUDA)
     // copy the retrieved mpi data to the device
     WITH_CHRONO("cuda:memcpy",
       _CHECK_CUDA_SUCCESS("copying mpi data to device",
@@ -488,7 +488,7 @@ void unwrapAndMarkReady() {
   Slice(size_t size_)
     : info({})
     , data(DataNullPtr)
-#if defined(HAVE_CUDA) && !defined(ATRIP_SOURCES_IN_GPU)
+#if defined(HAVE_CUDA)
     , mpi_data(nullptr)
 #endif
     , size(size_)
@@ -405,7 +405,6 @@ template <typename F=double>
     , sliceSize(std::accumulate(sliceLength.begin(),
                                 sliceLength.end(),
                                 1UL, std::multiplies<size_t>()))
-
 #if defined(ATRIP_SOURCES_IN_GPU)
     , sources(rankMap.nSources())
 #else
@@ -418,23 +417,6 @@ template <typename F=double>
   { // constructor begin
 
     LOG(0,"Atrip") << "INIT SliceUnion: " << name << "\n";
-    printf("sliceSize %d, number of slices %d\n\n\n", sliceSize, sources.size());
-
-#if defined(ATRIP_SOURCES_IN_GPU)
-    for (auto& ptr: sources) {
-      const CUresult sourceError =
-        cuMemAlloc(&ptr, sizeof(F) * sliceSize);
-      if (ptr == 0UL) {
-        throw "UNSUFICCIENT MEMORY ON THE GRAPHIC CARD FOR SOURCES";
-      }
-      if (sourceError != CUDA_SUCCESS) {
-        std::stringstream s;
-        s << "Error allocating memory for sources "
-          << "code " << sourceError << "\n";
-        throw s.str();
-      }
-    }
-#endif
 
     for (auto& ptr: sliceBuffers) {
 #if defined(HAVE_CUDA)
@@ -463,34 +445,6 @@ template <typename F=double>
                    std::inserter(freePointers, freePointers.begin()),
                    [](DataPtr<F> ptr) { return ptr; });
 
-#if defined(HAVE_CUDA)
-    LOG(1,"Atrip") << "warming communication up " << slices.size() << "\n";
-    WITH_CHRONO("cuda:warmup",
-      int nRanks=Atrip::np, requestCount=0;
-      int nSends=sliceBuffers.size()*nRanks;
-      MPI_Request *requests = (MPI_Request*) malloc(nSends*2 * sizeof(MPI_Request));
-      MPI_Status *statuses = (MPI_Status*) malloc(nSends*2 * sizeof(MPI_Status));
-      for (int sliceId=0; sliceId<sliceBuffers.size(); sliceId++){
-        for (int rankId=0; rankId<nRanks; rankId++){
-          MPI_Isend((void*)SOURCES_DATA(sources[0]),
-                    sliceSize,
-                    traits::mpi::datatypeOf<F>(),
-                    rankId,
-                    100,
-                    universe,
-                    &requests[requestCount++]);
-          MPI_Irecv((void*)sliceBuffers[sliceId],
-                    sliceSize,
-                    traits::mpi::datatypeOf<F>(),
-                    rankId,
-                    100,
-                    universe,
-                    &requests[requestCount++]);
-        }
-      }
-      MPI_Waitall(nSends*2, requests, statuses);
-    )
-#endif
-
 
     LOG(1,"Atrip") << "#slices " << slices.size() << "\n";
@@ -573,11 +527,12 @@ template <typename F=double>
       if (slice.info.state == Slice<F>::Fetch) { // if-1
         // TODO: do it through the slice class
         slice.info.state = Slice<F>::Dispatched;
-#if defined(HAVE_CUDA) && defined(ATRIP_SOURCES_IN_GPU)
-#  if !defined(ATRIP_CUDA_AWARE_MPI)
+#if defined(HAVE_CUDA)
+#  if !defined(ATRIP_CUDA_AWARE_MPI) && defined(ATRIP_SOURCES_IN_GPU)
 #    error "You need CUDA aware MPI to have slices on the GPU"
 #  endif
-        MPI_Irecv((void*)slice.data,
+        slice.mpi_data = (F*)malloc(sizeof(F) * slice.size);
+        MPI_Irecv(slice.mpi_data,
 #else
         MPI_Irecv(slice.data,
 #endif
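The hunk above changes the CUDA receive path to post MPI_Irecv into a freshly malloc'ed host staging buffer (slice.mpi_data) instead of directly into device memory, which only works with a CUDA-aware MPI; a later step ("copying mpi data to device") moves the data onto the GPU. A minimal sketch of that staging pattern, under the assumption that the MPI library is not CUDA-aware (function and parameter names here are illustrative, not the atrip API):

#+begin_src c++ :eval no
#include <cstdlib>
#include <mpi.h>
#include <cuda.h>

// Receive one slice into a host staging buffer, then copy it to the device
// once the message has arrived.  This is the fallback when MPI cannot post
// receives directly on GPU memory.
void receive_slice_staged(size_t n_elements, int source_rank, int tag,
                          MPI_Comm comm, CUdeviceptr device_slice) {
  const size_t bytes = n_elements * sizeof(double);
  double *staging = static_cast<double *>(std::malloc(bytes)); // host buffer

  MPI_Request request;
  MPI_Irecv(staging, static_cast<int>(n_elements), MPI_DOUBLE,
            source_rank, tag, comm, &request);

  // ... other work can overlap with the transfer here ...

  MPI_Wait(&request, MPI_STATUS_IGNORE);      // message is now in host memory
  cuMemcpyHtoD(device_slice, staging, bytes); // stage it onto the GPU
  std::free(staging);
}
#+end_src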
@@ -202,7 +202,7 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
   _CHECK_CUDA_SUCCESS("Zijk",
                       cuMemAlloc(&Zijk, sizeof(F) * No * No * No));
 #else
-  DataPtr<F> Tai = _Tai.data(), epsi = _epsi.data(), epsa = _epsa.data();
+  std::vector<F> &Tai = _Tai, &epsi = _epsi, &epsa = _epsa;
   Zijk = (DataFieldType<F>*)malloc(No*No*No * sizeof(DataFieldType<F>));
   Tijk = (DataFieldType<F>*)malloc(No*No*No * sizeof(DataFieldType<F>));
 #endif
@@ -258,25 +258,6 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
   // all tensors
   std::vector< SliceUnion<F>* > unions = {&taphh, &hhha, &abph, &abhh, &tabhh};
 
-#ifdef HAVE_CUDA
-  // TODO: free buffers
-  DataFieldType<F>* _t_buffer;
-  DataFieldType<F>* _vhhh;
-  WITH_CHRONO("double:cuda:alloc",
-    _CHECK_CUDA_SUCCESS("Allocating _t_buffer",
-                        cuMemAlloc((CUdeviceptr*)&_t_buffer,
-                                   No*No*No * sizeof(DataFieldType<F>)));
-    _CHECK_CUDA_SUCCESS("Allocating _vhhh",
-                        cuMemAlloc((CUdeviceptr*)&_vhhh,
-                                   No*No*No * sizeof(DataFieldType<F>)));
-  )
-  //const size_t
-  //  bs = Atrip::kernelDimensions.ooo.blocks,
-  //ths = Atrip::kernelDimensions.ooo.threads;
-  //cuda::zeroing<<<bs, ths>>>((DataFieldType<F>*)_t_buffer, NoNoNo);
-  //cuda::zeroing<<<bs, ths>>>((DataFieldType<F>*)_vhhh, NoNoNo);
-#endif
-
   // get tuples for the current rank
   TuplesDistribution *distribution;
 
@@ -658,14 +639,7 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
                 tabhh.unwrapSlice(Slice<F>::AC, abc),
                 tabhh.unwrapSlice(Slice<F>::BC, abc),
                 // -- TIJK
-                (DataFieldType<F>*)Tijk
-#if defined(HAVE_CUDA)
-                // -- tmp buffers
-                ,(DataFieldType<F>*)_t_buffer
-                ,(DataFieldType<F>*)_vhhh
-#endif
-                );
-
+                (DataFieldType<F>*)Tijk);
             WITH_RANK << iteration << "-th doubles done\n";
           ))
         }
@@ -693,7 +667,7 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
                                  (DataFieldType<F>*)Tai,
 #else
           singlesContribution<F>(No, Nv, abc[0], abc[1], abc[2],
-                                 Tai,
+                                 Tai.data(),
 #endif
                                  (DataFieldType<F>*)abhh.unwrapSlice(Slice<F>::AB,
                                                                      abc),
@@ -709,71 +683,31 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
       // COMPUTE ENERGY %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% {{{1
 #if defined(ATRIP_ONLY_DGEMM)
       if (false)
-#endif /* defined(ATRIP_ONLY_DGEMM) */
+#endif
       if (!isFakeTuple(i)) {
-#if defined(HAVE_CUDA)
-        double *tupleEnergy;
-        cuMemAlloc((DataPtr<double>*)&tupleEnergy, sizeof(double));
-#else
-        double _tupleEnergy(0.);
-        double *tupleEnergy = &_tupleEnergy;
-#endif /* defined(HAVE_CUDA) */
-
+        double tupleEnergy(0.);
         int distinct(0);
         if (abc[0] == abc[1]) distinct++;
         if (abc[1] == abc[2]) distinct--;
-        const double
-          epsabc = std::real(_epsa[abc[0]] + _epsa[abc[1]] + _epsa[abc[2]]);
-
-        DataFieldType<F> _epsabc{epsabc};
-
+        const F epsabc(_epsa[abc[0]] + _epsa[abc[1]] + _epsa[abc[2]]);
+        // LOG(0, "AtripCUDA") << "doing energy " << i << "distinct " << distinct << "\n";
         WITH_CHRONO("energy",
-          if ( distinct == 0) {
-            ACC_FUNCALL(getEnergyDistinct<DataFieldType<F>>,
-                        1, 1, // for cuda
-                        _epsabc,
-                        No,
-#if defined(HAVE_CUDA)
-                        (DataFieldType<F>*)epsi,
-                        (DataFieldType<F>*)Tijk,
-                        (DataFieldType<F>*)Zijk,
-#else
-                        epsi,
-                        Tijk,
-                        Zijk,
-#endif
-                        tupleEnergy);
-          } else {
-            ACC_FUNCALL(getEnergySame<DataFieldType<F>>,
-                        1, 1, // for cuda
-                        _epsabc,
-                        No,
-#if defined(HAVE_CUDA)
-                        (DataFieldType<F>*)epsi,
-                        (DataFieldType<F>*)Tijk,
-                        (DataFieldType<F>*)Zijk,
-#else
-                        epsi,
-                        Tijk,
-                        Zijk,
-#endif
-                        tupleEnergy);
-        })
-
-#if defined(HAVE_CUDA)
-        double host_tuple_energy;
-        cuMemcpyDtoH((void*)&host_tuple_energy,
-                     (DataPtr<double>)tupleEnergy,
-                     sizeof(double));
-#else
-        double host_tuple_energy = *tupleEnergy;
-#endif /* defined(HAVE_CUDA) */
-
+          /*
+            TODO: think about how to do this on the GPU in the best way possible
+            if ( distinct == 0)
+              tupleEnergy = getEnergyDistinct<F>(epsabc, No, (F*)epsi, (F*)Tijk, (F*)Zijk);
+            else
+              tupleEnergy = getEnergySame<F>(epsabc, No, (F*)epsi, (F*)Tijk, (F*)Zijk);
+          */
+        )
 #if defined(HAVE_OCD) || defined(ATRIP_PRINT_TUPLES)
-        tupleEnergies[abc] = host_tuple_energy;
+        tupleEnergies[abc] = tupleEnergy;
 #endif
 
-        energy += host_tuple_energy;
+        energy += tupleEnergy;
 
       }
@@ -839,8 +773,6 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
     Atrip::chrono["iterations"].stop();
     // ITERATION END %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%{{{1
 
-    if (in.maxIterations != 0 && i >= in.maxIterations) break;
-
   }
   // END OF MAIN LOOP
 
@@ -16,13 +16,96 @@
 #include<atrip/Equations.hpp>
 
 #include<atrip/CUDA.hpp>
-#include<atrip/Operations.hpp>
 
 namespace atrip {
 // Prolog:2 ends here
 
 
 
+#ifdef HAVE_CUDA
+namespace cuda {
+
+// cuda kernels
+
+template <typename F>
+__global__
+void zeroing(F* a, size_t n) {
+  F zero = {0};
+  for (size_t i = 0; i < n; i++) {
+    a[i] = zero;
+  }
+}
+
+////
+template <typename F>
+__device__
+F maybeConjugateScalar(const F a);
+
+template <>
+__device__
+double maybeConjugateScalar(const double a) { return a; }
+
+template <>
+__device__
+cuDoubleComplex
+maybeConjugateScalar(const cuDoubleComplex a) {
+  return {a.x, -a.y};
+}
+
+template <typename F>
+__global__
+void maybeConjugate(F* to, F* from, size_t n) {
+  for (size_t i = 0; i < n; ++i) {
+    to[i] = maybeConjugateScalar<F>(from[i]);
+  }
+}
+
+
+template <typename F>
+__global__
+void reorder(F* to, F* from, size_t size, size_t I, size_t J, size_t K) {
+  size_t idx = 0;
+  const size_t IDX = I + J*size + K*size*size;
+  for (size_t k = 0; k < size; k++)
+    for (size_t j = 0; j < size; j++)
+      for (size_t i = 0; i < size; i++, idx++)
+        to[idx] += from[IDX];
+}
+
+// I mean, really CUDA... really!?
+template <typename F>
+__device__
+F multiply(const F &a, const F &b);
+template <>
+__device__
+double multiply(const double &a, const double &b) { return a * b; }
+
+template <>
+__device__
+cuDoubleComplex multiply(const cuDoubleComplex &a, const cuDoubleComplex &b) {
+  return
+    {a.x * b.x - a.y * b.y,
+     a.x * b.y + a.y * b.x};
+}
+
+template <typename F>
+__device__
+void sum_in_place(F* to, const F* from);
+
+template <>
+__device__
+void sum_in_place(double* to, const double *from) { *to += *from; }
+
+template <>
+__device__
+void sum_in_place(cuDoubleComplex* to, const cuDoubleComplex* from) {
+  to->x += from->x;
+  to->y += from->y;
+}
+
+};
+#endif
+
 #if defined(HAVE_CUDA)
 #define FOR_K() \
   for (size_t kmin = blockIdx.x * blockDim.x + threadIdx.x, \
@@ -50,7 +133,7 @@ namespace atrip {
     _REORDER_BODY_(__VA_ARGS__) \
   }
 #if defined(HAVE_CUDA)
-#define GO(__TO, __FROM) acc::sum_in_place<F>(&__TO, &__FROM);
+#define GO(__TO, __FROM) cuda::sum_in_place<F>(&__TO, &__FROM);
 #else
 #define GO(__TO, __FROM) __TO += __FROM;
 #endif
@@ -96,205 +179,162 @@ namespace atrip {
 #undef _IJK_
 #undef GO
 
-#if defined(HAVE_CUDA)
-#  define MIN(a, b) min((a), (b))
-#else
-#  define MIN(a, b) std::min((a), (b))
-#endif
-
-
 // [[file:~/cuda/atrip/atrip.org::*Energy][Energy:2]]
 template <typename F>
-__MAYBE_GLOBAL__
-void getEnergyDistinct
+double getEnergyDistinct
   ( F const epsabc
   , size_t const No
   , F* const epsi
   , F* const Tijk
   , F* const Zijk
-  , double* energy
   ) {
   constexpr size_t blockSize=16;
-  F _energy = {0.};
+  F energy(0.);
   for (size_t kk=0; kk<No; kk+=blockSize){
-    const size_t kend( MIN(No, kk+blockSize) );
+    const size_t kend( std::min(No, kk+blockSize) );
     for (size_t jj(kk); jj<No; jj+=blockSize){
-      const size_t jend( MIN( No, jj+blockSize) );
+      const size_t jend( std::min( No, jj+blockSize) );
       for (size_t ii(jj); ii<No; ii+=blockSize){
-        const size_t iend( MIN( No, ii+blockSize) );
+        const size_t iend( std::min( No, ii+blockSize) );
         for (size_t k(kk); k < kend; k++){
           const F ek(epsi[k]);
           const size_t jstart = jj > k ? jj : k;
           for (size_t j(jstart); j < jend; j++){
             F const ej(epsi[j]);
-            F const facjk = j == k ? F{0.5} : F{1.0};
+            F const facjk = j == k ? F(0.5) : F(1.0);
             size_t istart = ii > j ? ii : j;
             for (size_t i(istart); i < iend; i++){
               const F
                   ei(epsi[i])
-                , facij = i == j ? F{0.5} : F{1.0}
-                , eijk(acc::add(acc::add(ei, ej), ek))
-                , denominator(acc::sub(epsabc, eijk))
+                , facij = i == j ? F(0.5) : F(1.0)
+                , denominator(epsabc - ei - ej - ek)
                 , U(Zijk[i + No*j + No*No*k])
                 , V(Zijk[i + No*k + No*No*j])
                 , W(Zijk[j + No*i + No*No*k])
                 , X(Zijk[j + No*k + No*No*i])
                 , Y(Zijk[k + No*i + No*No*j])
                 , Z(Zijk[k + No*j + No*No*i])
-                , A(acc::maybeConjugateScalar(Tijk[i + No*j + No*No*k]))
-                , B(acc::maybeConjugateScalar(Tijk[i + No*k + No*No*j]))
-                , C(acc::maybeConjugateScalar(Tijk[j + No*i + No*No*k]))
-                , D(acc::maybeConjugateScalar(Tijk[j + No*k + No*No*i]))
-                , E(acc::maybeConjugateScalar(Tijk[k + No*i + No*No*j]))
-                , _F(acc::maybeConjugateScalar(Tijk[k + No*j + No*No*i]))
-                , AU = acc::prod(A, U)
-                , BV = acc::prod(B, V)
-                , CW = acc::prod(C, W)
-                , DX = acc::prod(D, X)
-                , EY = acc::prod(E, Y)
-                , FZ = acc::prod(_F, Z)
-                , UXY = acc::add(U, acc::add(X, Y))
-                , VWZ = acc::add(V, acc::add(W, Z))
-                , ADE = acc::add(A, acc::add(D, E))
-                , BCF = acc::add(B, acc::add(C, _F))
-                // I just might as well write this in CL
-                , _first = acc::add(AU,
-                                    acc::add(BV,
-                                             acc::add(CW,
-                                                      acc::add(DX,
-                                                               acc::add(EY, FZ)))))
-                , _second = acc::prod(acc::sub(UXY,
-                                               acc::prod(F{-2.0}, VWZ)),
-                                      ADE)
-                , _third = acc::prod(acc::sub(VWZ,
-                                              acc::prod(F{-2.0}, UXY)),
-                                     BCF)
-                , value = acc::add(acc::prod(F{3.0}, _first),
-                                   acc::add(_second,
-                                            _third))
-                , _loop_energy = acc::prod(acc::prod(F{2.0}, value),
-                                           acc::div(acc::prod(facjk, facij),
-                                                    denominator))
+                , A(maybeConjugate<F>(Tijk[i + No*j + No*No*k]))
+                , B(maybeConjugate<F>(Tijk[i + No*k + No*No*j]))
+                , C(maybeConjugate<F>(Tijk[j + No*i + No*No*k]))
+                , D(maybeConjugate<F>(Tijk[j + No*k + No*No*i]))
+                , E(maybeConjugate<F>(Tijk[k + No*i + No*No*j]))
+                , _F(maybeConjugate<F>(Tijk[k + No*j + No*No*i]))
+                , value
+                  = 3.0 * ( A * U
+                          + B * V
+                          + C * W
+                          + D * X
+                          + E * Y
+                          + _F * Z )
+                  + ( ( U + X + Y )
+                    - 2.0 * ( V + W + Z )
+                    ) * ( A + D + E )
+                  + ( ( V + W + Z )
+                    - 2.0 * ( U + X + Y )
+                    ) * ( B + C + _F )
                 ;
-              acc::sum_in_place(&_energy, &_loop_energy);
+              energy += 2.0 * value / denominator * facjk * facij;
             } // i
           } // j
         } // k
       } // ii
     } // jj
   } // kk
-  const double real_part = acc::real(_energy);
-  acc::sum_in_place(energy, &real_part);
+  return std::real(energy);
 }
 
 
 template <typename F>
-__MAYBE_GLOBAL__
-void getEnergySame
+double getEnergySame
   ( F const epsabc
   , size_t const No
   , F* const epsi
   , F* const Tijk
   , F* const Zijk
-  , double* energy
   ) {
   constexpr size_t blockSize = 16;
-  F _energy = F{0.};
+  F energy = F(0.);
   for (size_t kk=0; kk<No; kk+=blockSize){
-    const size_t kend( MIN( kk+blockSize, No) );
+    const size_t kend( std::min( kk+blockSize, No) );
     for (size_t jj(kk); jj<No; jj+=blockSize){
-      const size_t jend( MIN( jj+blockSize, No) );
+      const size_t jend( std::min( jj+blockSize, No) );
       for (size_t ii(jj); ii<No; ii+=blockSize){
-        const size_t iend( MIN( ii+blockSize, No) );
+        const size_t iend( std::min( ii+blockSize, No) );
        for (size_t k(kk); k < kend; k++){
          const F ek(epsi[k]);
          const size_t jstart = jj > k ? jj : k;
          for(size_t j(jstart); j < jend; j++){
-            const F facjk( j == k ? F{0.5} : F{1.0});
+            const F facjk( j == k ? F(0.5) : F(1.0));
            const F ej(epsi[j]);
            const size_t istart = ii > j ? ii : j;
            for(size_t i(istart); i < iend; i++){
              const F
                  ei(epsi[i])
-                , facij ( i==j ? F{0.5} : F{1.0})
-                , eijk(acc::add(acc::add(ei, ej), ek))
-                , denominator(acc::sub(epsabc, eijk))
+                , facij ( i==j ? F(0.5) : F(1.0))
+                , denominator(epsabc - ei - ej - ek)
                , U(Zijk[i + No*j + No*No*k])
                , V(Zijk[j + No*k + No*No*i])
                , W(Zijk[k + No*i + No*No*j])
-                , A(acc::maybeConjugateScalar(Tijk[i + No*j + No*No*k]))
-                , B(acc::maybeConjugateScalar(Tijk[j + No*k + No*No*i]))
-                , C(acc::maybeConjugateScalar(Tijk[k + No*i + No*No*j]))
-                , ABC = acc::add(A, acc::add(B, C))
-                , UVW = acc::add(U, acc::add(V, W))
-                , AU = acc::prod(A, U)
-                , BV = acc::prod(B, V)
-                , CW = acc::prod(C, W)
-                , AU_and_BV_and_CW = acc::add(acc::add(AU, BV), CW)
-                , value = acc::sub(acc::prod(F{3.0}, AU_and_BV_and_CW),
-                                   acc::prod(ABC, UVW))
-                , _loop_energy = acc::prod(acc::prod(F{2.0}, value),
-                                           acc::div(acc::prod(facjk, facij),
-                                                    denominator))
+                , A(maybeConjugate<F>(Tijk[i + No*j + No*No*k]))
+                , B(maybeConjugate<F>(Tijk[j + No*k + No*No*i]))
+                , C(maybeConjugate<F>(Tijk[k + No*i + No*No*j]))
+                , value
+                  = F(3.0) * ( A * U
+                             + B * V
+                             + C * W
+                             )
+                  - ( A + B + C ) * ( U + V + W )
                ;
-              acc::sum_in_place(&_energy, &_loop_energy);
+              energy += F(2.0) * value / denominator * facjk * facij;
            } // i
          } // j
        } // k
      } // ii
    } // jj
  } // kk
-  const double real_part = acc::real(_energy);
-  acc::sum_in_place(energy, &real_part);
+  return std::real(energy);
 }
 // Energy:2 ends here
 
 // [[file:~/cuda/atrip/atrip.org::*Energy][Energy:3]]
 // instantiate double
 template
-__MAYBE_GLOBAL__
-void getEnergyDistinct
-  ( DataFieldType<double> const epsabc
+double getEnergyDistinct
+  ( double const epsabc
   , size_t const No
-  , DataFieldType<double>* const epsi
-  , DataFieldType<double>* const Tijk
-  , DataFieldType<double>* const Zijk
-  , DataFieldType<double>* energy
+  , double* const epsi
+  , double* const Tijk
+  , double* const Zijk
   );
 
 template
-__MAYBE_GLOBAL__
-void getEnergySame
-  ( DataFieldType<double> const epsabc
+double getEnergySame
+  ( double const epsabc
   , size_t const No
-  , DataFieldType<double>* const epsi
-  , DataFieldType<double>* const Tijk
-  , DataFieldType<double>* const Zijk
-  , DataFieldType<double>* energy
+  , double* const epsi
+  , double* const Tijk
+  , double* const Zijk
   );
 
 // instantiate Complex
 template
-__MAYBE_GLOBAL__
-void getEnergyDistinct
-  ( DataFieldType<Complex> const epsabc
+double getEnergyDistinct
+  ( Complex const epsabc
   , size_t const No
-  , DataFieldType<Complex>* const epsi
-  , DataFieldType<Complex>* const Tijk
-  , DataFieldType<Complex>* const Zijk
-  , DataFieldType<double>* energy
+  , Complex* const epsi
+  , Complex* const Tijk
+  , Complex* const Zijk
   );
 
 template
-__MAYBE_GLOBAL__
-void getEnergySame
-  ( DataFieldType<Complex> const epsabc
+double getEnergySame
+  ( Complex const epsabc
   , size_t const No
-  , DataFieldType<Complex>* const epsi
-  , DataFieldType<Complex>* const Tijk
-  , DataFieldType<Complex>* const Zijk
-  , DataFieldType<double>* energy
+  , Complex* const epsi
+  , Complex* const Tijk
+  , Complex* const Zijk
   );
 // Energy:3 ends here
 
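In the notation of the plain-C++ getEnergyDistinct on the right-hand side of this hunk (U…Z are permuted Zijk entries, A…F — written _F in the code — the conjugated Tijk entries, and facij, facjk the 1/2 factors for repeated indices), the per-triple contribution it accumulates is, as a transcription of that loop body rather than an independent derivation:

\[
E \mathrel{+}= \sum_{i \le j \le k}
\frac{2\,\mathrm{facij}\,\mathrm{facjk}}{\epsilon_{abc}-\epsilon_i-\epsilon_j-\epsilon_k}
\Big[\, 3\,(AU+BV+CW+DX+EY+FZ)
+ \big((U{+}X{+}Y)-2(V{+}W{+}Z)\big)(A{+}D{+}E)
+ \big((V{+}W{+}Z)-2(U{+}X{+}Y)\big)(B{+}C{+}F) \,\Big]
\]

getEnergySame uses the same prefactor with the bracket replaced by 3(AU+BV+CW) − (A+B+C)(U+V+W). Note that the removed acc:: version on the left multiplies VWZ and UXY by F{-2.0} inside a subtraction, which flips the sign of those terms relative to the expression above; whether that difference is intentional is not clear from this diff alone.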
@@ -320,26 +360,18 @@ void getEnergySame
       const size_t ijk = i + j*No + k*NoNo;
 
 #ifdef HAVE_CUDA
-
 #  define GO(__TPH, __VABIJ) \
-  do { \
-    const DataFieldType<F> \
-      product = acc::prod<DataFieldType<F>>((__TPH), \
-                                            (__VABIJ)); \
-    acc::sum_in_place<DataFieldType<F>>(&Zijk[ijk], \
-                                        &product); \
-  } while (0)
-
+  { \
+    const DataFieldType<F> product \
+      = cuda::multiply<DataFieldType<F>>((__TPH), (__VABIJ)); \
+    cuda::sum_in_place<DataFieldType<F>>(&Zijk[ijk], &product); \
+  }
 #else
-
-#define GO(__TPH, __VABIJ) Zijk[ijk] += (__TPH) * (__VABIJ)
-
+# define GO(__TPH, __VABIJ) Zijk[ijk] += (__TPH) * (__VABIJ);
 #endif
-      GO(Tph[ a + i * Nv ], VBCij[ j + k * No ]);
-      GO(Tph[ b + j * Nv ], VACij[ i + k * No ]);
-      GO(Tph[ c + k * Nv ], VABij[ i + j * No ]);
+      GO(Tph[ a + i * Nv ], VBCij[ j + k * No ])
+      GO(Tph[ b + j * Nv ], VACij[ i + k * No ])
+      GO(Tph[ c + k * Nv ], VABij[ i + j * No ])
 
 #undef GO
     } // for loop j
   }
@@ -401,15 +433,9 @@ void getEnergySame
   // -- TIJK
   // , DataPtr<F> Tijk_
   , DataFieldType<F>* Tijk_
-#if defined(HAVE_CUDA)
-  // -- tmp buffers
-  , DataFieldType<F>* _t_buffer
-  , DataFieldType<F>* _vhhh
-#endif
   ) {
-  const size_t a = abc[0], b = abc[1], c = abc[2]
-             , NoNo = No*No
-             ;
+  const size_t NoNo = No*No;
 
   DataFieldType<F>* Tijk = (DataFieldType<F>*)Tijk_;
 
@@ -454,7 +480,7 @@ void getEnergySame
   )
 #define MAYBE_CONJ(_conj, _buffer) \
   do { \
-    acc::maybeConjugate<<< \
+    cuda::maybeConjugate<<< \
       \
       Atrip::kernelDimensions.ooo.blocks, \
       \
@@ -523,23 +549,23 @@ void getEnergySame
   F one{1.0}, m_one{-1.0}, zero{0.0};
   const size_t NoNoNo = No*NoNo;
 #ifdef HAVE_CUDA
-  // DataFieldType<F>* _t_buffer;
-  // DataFieldType<F>* _vhhh;
-  // WITH_CHRONO("double:cuda:alloc",
-  //   _CHECK_CUDA_SUCCESS("Allocating _t_buffer",
-  //                       cuMemAlloc((CUdeviceptr*)&_t_buffer,
-  //                                  NoNoNo * sizeof(DataFieldType<F>)));
-  //   _CHECK_CUDA_SUCCESS("Allocating _vhhh",
-  //                       cuMemAlloc((CUdeviceptr*)&_vhhh,
-  //                                  NoNoNo * sizeof(DataFieldType<F>)));
-  // )
-#if !defined(ATRIP_ONLY_DGEMM)
-  // we still have to zero this
+  DataFieldType<F>* _t_buffer;
+  DataFieldType<F>* _vhhh;
+  WITH_CHRONO("double:cuda:alloc",
+    _CHECK_CUDA_SUCCESS("Allocating _t_buffer",
+                        cuMemAlloc((CUdeviceptr*)&_t_buffer,
+                                   NoNoNo * sizeof(DataFieldType<F>)));
+    _CHECK_CUDA_SUCCESS("Allocating _vhhh",
+                        cuMemAlloc((CUdeviceptr*)&_vhhh,
+                                   NoNoNo * sizeof(DataFieldType<F>)));
+  )
   const size_t
     bs = Atrip::kernelDimensions.ooo.blocks,
     ths = Atrip::kernelDimensions.ooo.threads;
-  acc::zeroing<<<bs, ths>>>((DataFieldType<F>*)_t_buffer, NoNoNo);
-  acc::zeroing<<<bs, ths>>>((DataFieldType<F>*)_vhhh, NoNoNo);
+#if !defined(ATRIP_ONLY_DGEMM)
+  cuda::zeroing<<<bs, ths>>>((DataFieldType<F>*)_t_buffer, NoNoNo);
+  cuda::zeroing<<<bs, ths>>>((DataFieldType<F>*)_vhhh, NoNoNo);
 #endif
 
 #else
@@ -555,17 +581,15 @@ void getEnergySame
   // Set Tijk to zero
 #if defined(HAVE_CUDA) && !defined(ATRIP_ONLY_DGEMM)
   WITH_CHRONO("double:reorder",
-    acc::zeroing<<<bs, ths>>>((DataFieldType<F>*)Tijk,
+    cuda::zeroing<<<bs, ths>>>((DataFieldType<F>*)Tijk,
                                NoNoNo);
   )
-#endif
-
-#if !defined(HAVE_CUDA)
+#else
   WITH_CHRONO("double:reorder",
     for (size_t k = 0; k < NoNoNo; k++) {
       Tijk[k] = DataFieldType<F>{0.0};
     })
-#endif /* !defined(HAVE_CUDA) */
+#endif
 
 
 #if defined(ATRIP_ONLY_DGEMM)
@@ -573,7 +597,7 @@ void getEnergySame
 #undef REORDER
 #define MAYBE_CONJ(a, b) do {} while(0)
 #define REORDER(i, j, k) do {} while(0)
-#endif /* defined(ATRIP_ONLY_DGEMM) */
+#endif
 
   // HOLES
   WITH_CHRONO("doubles:holes",
@@ -657,16 +681,16 @@ void getEnergySame
 #ifdef HAVE_CUDA
   // we need to synchronize here since we need
   // the Tijk for next process in the pipeline
-  //_CHECK_CUDA_SUCCESS("Synchronizing",
-  //                    cuCtxSynchronize());
-  //_CHECK_CUDA_SUCCESS("Freeing _vhhh",
-  //                    cuMemFree((CUdeviceptr)_vhhh));
-  //_CHECK_CUDA_SUCCESS("Freeing _t_buffer",
-  //                    cuMemFree((CUdeviceptr)_t_buffer));
+  _CHECK_CUDA_SUCCESS("Synchronizing",
+                      cuCtxSynchronize());
+  _CHECK_CUDA_SUCCESS("Freeing _vhhh",
+                      cuMemFree((CUdeviceptr)_vhhh));
+  _CHECK_CUDA_SUCCESS("Freeing _t_buffer",
+                      cuMemFree((CUdeviceptr)_t_buffer));
 #else
   free(_vhhh);
   free(_t_buffer);
-#endif /* defined(HAVE_CUDA) */
+#endif
 }
 
 #undef REORDER
|
|||||||
}
|
}
|
||||||
|
|
||||||
}
|
}
|
||||||
#endif /* defined(ATRIP_USE_DGEMM) */
|
#endif
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
@@ -749,12 +773,6 @@ void getEnergySame
   , DataPtr<double> const TBChh
   // -- TIJK
   , DataFieldType<double>* Tijk
-#if defined(HAVE_CUDA)
-  // -- tmp buffers
-  , DataFieldType<double>* _t_buffer
-  , DataFieldType<double>* _vhhh
-#endif
-
   );
 
 template
@@ -783,12 +801,6 @@ void getEnergySame
   , DataPtr<Complex> const TBChh
   // -- TIJK
   , DataFieldType<Complex>* Tijk
-#if defined(HAVE_CUDA)
-  // -- tmp buffers
-  , DataFieldType<Complex>* _t_buffer
-  , DataFieldType<Complex>* _vhhh
-#endif
-
   );
 // Doubles contribution:2 ends here
 
@@ -98,27 +98,10 @@ EOF
 create_config $tmp only-dgemm
 rm $tmp
 
+#
 # begin doc
 #
-# - cuda-only-dgemm ::
-#   This is the naive CUDA implementation compiling only the dgemm parts
-#   of the compute.
-#
-# end doc
-
-tmp=`mktemp`
-cat <<EOF > $tmp
---enable-cuda
---enable-only-dgemm
---disable-slice
-EOF
-
-create_config $tmp cuda-only-dgemm
-rm $tmp
-
-# begin doc
-#
-# - cuda-slices-on-gpu-only-dgemm ::
+# - slices-on-gpu-only-dgemm ::
 #   This configuration tests that slices reside completely on the gpu
 #   and it should use a CUDA aware MPI implementation.
 #   It also only uses the routines that involve dgemm.
@@ -134,7 +117,7 @@ cat <<EOF > $tmp
 --disable-slice
 EOF
 
-create_config $tmp cuda-slices-on-gpu-only-dgemm
+create_config $tmp sources-in-gpu
 rm $tmp
 
 ############################################################