Compare commits: ad542fe856...openacc

10 Commits

| SHA1 |
|---|
| 017cf43381 |
| 77e1aaabeb |
| 249f1c0b51 |
| 1d96800d45 |
| 9087e3af19 |
| 418fd9d389 |
| 895cd02778 |
| 8efa3d911e |
| 0fa24404e5 |
| 8f7d05efda |
.github/workflows/main.yml (vendored, 4 changed lines)
@@ -2,6 +2,8 @@
name: CI

on:
  push:
    branches: [ master, cuda ]
  pull_request:
    branches: [ master, cuda ]

@@ -16,6 +18,8 @@ jobs:
    strategy:
      matrix:
        compiler:
          - gcc12
          - gcc11
          - gcc11
          - gcc10
          - gcc9

README.org (107 changed lines)
@@ -26,3 +26,110 @@ before the proper paper is released please contact me.

In the meantime the code has been used in
[[https://aip.scitation.org/doi/10.1063/5.0074936][this publication]] and can therefore be cited.

* Building

Atrip uses autotools as its build system.
Autotools works by first creating a =configure= script from
a =configure.ac= file.

Atrip should be built out of source; this means that
you have to create a build directory other than the root
directory, for instance the =build/tutorial= directory:

#+begin_src sh :exports code
mkdir -p build/tutorial/
cd build/tutorial
#+end_src

First you have to generate the =configure= script by running

#+begin_src sh :dir build/tutorial :exports code :results raw drawer
../../bootstrap.sh
#+end_src

#+RESULTS:
:results:

Creating configure script


Now you can build by doing

mkdir build
cd build
../configure
make extern
make all

:end:

And then you can see the =configure= options:

#+begin_src sh :dir build/tutorial :results raw drawer :eval no
../../configure --help
#+end_src

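As a sketch, a plain CPU configuration could then look like the
following (the flags and variables are taken from the bench
configurations described below; adapt compilers and paths to your
toolchain):

#+begin_src sh :eval no
../../configure \
  --with-ctf=/absolute/path/to/ctf \
  CXX=g++ \
  MPICXX=mpicxx
#+end_src
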
** Benches

The script =tools/configure-benches.sh= can be used to create
a couple of configurations for benches:

#+begin_src sh :exports results :results verbatim org :results verbatim drawer replace output
awk '/begin +doc/,/end +doc/ { print $NL }' tools/configure-benches.sh |
    grep -v -e "begin \+doc" -e "end \+doc" |
    sed "s/^# //; s/^# *$//; /^$/d"
#+end_src

#+RESULTS:
:results:
- default ::
  This configuration uses a CPU code with dgemm
  and without computing slices.
- only-dgemm ::
  This only runs the computation part that involves dgemms.
- cuda-only-dgemm ::
  This is the naive CUDA implementation compiling only the dgemm parts
  of the compute.
- cuda-slices-on-gpu-only-dgemm ::
  This configuration tests that slices reside completely on the gpu
  and it should use a CUDA aware MPI implementation.
  It also only uses the routines that involve dgemm.
:end:

In order to generate the benches, just create a suitable directory for them:

#+begin_src sh :eval no
mkdir -p build/benches
cd build/benches
../../tools/configure-benches.sh CXX=g++ ...
#+end_src

You will get a Makefile together with several project folders.
You can either configure all projects at once with =make all=
or go into each folder and configure it individually, as sketched below.

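A minimal sketch, assuming the generated folder names match the
configuration names listed above (e.g. =only-dgemm=):

#+begin_src sh :eval no
# configure every generated bench project at once
make all
# or configure a single project by hand
cd only-dgemm
./configure
#+end_src
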
Notice that you can provide a CTF path for all of them by doing

#+begin_src sh :eval no
../../tools/configure-benches.sh --with-ctf=/absolute/path/to/ctf
#+end_src

* Running benches

** Main benchmark

The main benchmark gets built in =bench/atrip= and is used to run
Atrip with random tensors.

A common run of this benchmark looks like the following:

#+begin_src sh
bench/atrip \
  --no 100 \
  --nv 1000 \
  --mod 1 \
  --% 0 \
  --dist group \
  --nocheckpoint \
  --max-iterations 1000
#+end_src

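Since the bench initializes MPI, you would normally launch it through
your MPI runner; a minimal sketch (the process count here is only an
example):

#+begin_src sh :eval no
mpirun -np 4 bench/atrip --no 100 --nv 1000 --max-iterations 1000
#+end_src
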
@@ -23,6 +23,18 @@ atrip_SOURCES = main.cxx
atrip_CPPFLAGS = $(AM_CPPFLAGS)
atrip_LDADD = $(BENCHES_LDADD)

atrip: main.cxx
	$(NVCXX) -cuda \
	  -x cu -I../ \
	  $(MPILIBS) \
	  -I$(srcdir)/ \
	  $(AM_CPPFLAGS) \
	  $(DEFS) \
	  $(BENCHES_LDADD) \
	  $(AM_LDFLAGS) \
	  $< -o $@

endif

if !WITH_CUDA
##

@@ -5,18 +5,20 @@
#include <CLI11.hpp>

#define _print_size(what, size) \
  do { \
    if (rank == 0) { \
      std::cout << #what \
                << " => " \
                << (double)size * elem_to_gb \
                << "GB" \
                << std::endl; \
    }
    } \
  } while (0)

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);

  size_t checkpoint_it;
  size_t checkpoint_it, max_iterations;
  int no(10), nv(100), itMod(-1), percentageMod(10);
  float checkpoint_percentage;
  bool

@@ -30,6 +32,9 @@ int main(int argc, char** argv) {
  app.add_option("--no", no, "Occupied orbitals");
  app.add_option("--nv", nv, "Virtual orbitals");
  app.add_option("--mod", itMod, "Iteration modifier");
  app.add_option("--max-iterations",
                 max_iterations,
                 "Maximum number of iterations to run");
  app.add_flag("--keep-vppph", keepVppph, "Do not delete Vppph");
  app.add_flag("--nochrono", nochrono, "Do not print chrono");
  app.add_flag("--rank-round-robin", rankRoundRobin, "Do rank round robin");

@@ -45,6 +50,19 @@ int main(int argc, char** argv) {
                 checkpoint_percentage,
                 "Percentage for checkpoints");

  // Optional tensor files
  std::string
    ei_path, ea_path,
    Tph_path, Tpphh_path,
    Vpphh_path, Vhhhp_path, Vppph_path;
  app.add_option("--ei", ei_path, "Path for ei");
  app.add_option("--ea", ea_path, "Path for ea");
  app.add_option("--Tpphh", Tpphh_path, "Path for Tpphh");
  app.add_option("--Tph", Tph_path, "Path for Tph");
  app.add_option("--Vpphh", Vpphh_path, "Path for Vpphh");
  app.add_option("--Vhhhp", Vhhhp_path, "Path for Vhhhp");
  app.add_option("--Vppph", Vppph_path, "Path for Vppph");

#if defined(HAVE_CUDA)
  size_t ooo_threads = 0, ooo_blocks = 0;
  app.add_option("--ooo-blocks",

@@ -148,37 +166,64 @@ int main(int argc, char** argv) {
  }

  std::vector<int> symmetries(4, NS)
    , vo({nv, no})
    , vvoo({nv, nv, no, no})
    , ooov({no, no, no, nv})
    , vvvo({nv, nv, nv, no})
    ;
  std::vector<int>
    symmetries(4, NS),
    vo({nv, no}),
    vvoo({nv, nv, no, no}),
    ooov({no, no, no, nv}),
    vvvo({nv, nv, nv, no});

  CTF::Tensor<double>
    ei(1, ooov.data(), symmetries.data(), world)
    , ea(1, vo.data(), symmetries.data(), world)
    , Tph(2, vo.data(), symmetries.data(), world)
    , Tpphh(4, vvoo.data(), symmetries.data(), world)
    , Vpphh(4, vvoo.data(), symmetries.data(), world)
    , Vhhhp(4, ooov.data(), symmetries.data(), world)
    ;
    ei(1, ooov.data(), symmetries.data(), world),
    ea(1, vo.data(), symmetries.data(), world),
    Tph(2, vo.data(), symmetries.data(), world),
    Tpphh(4, vvoo.data(), symmetries.data(), world),
    Vpphh(4, vvoo.data(), symmetries.data(), world),
    Vhhhp(4, ooov.data(), symmetries.data(), world);

  // initialize deletable tensors in heap
  auto Vppph
    = new CTF::Tensor<double>(4, vvvo.data(), symmetries.data(), world);

  _print_size(Vabci, no*nv*nv*nv)
  _print_size(Vabij, no*no*nv*nv)
  _print_size(Vijka, no*no*no*nv)
  _print_size(Vabci, no*nv*nv*nv);
  _print_size(Vabij, no*no*nv*nv);
  _print_size(Vijka, no*no*no*nv);

  if (ei_path.size()) {
    ei.read_dense_from_file(ei_path.c_str());
  } else {
    ei.fill_random(-40.0, -2);
  }
  if (ea_path.size()) {
    ea.read_dense_from_file(ea_path.c_str());
  } else {
    ea.fill_random(2, 50);
  }
  if (Tpphh_path.size()) {
    Tpphh.read_dense_from_file(Tpphh_path.c_str());
  } else {
    Tpphh.fill_random(0, 1);
  }
  if (Tph_path.size()) {
    Tph.read_dense_from_file(Tph_path.c_str());
  } else {
    Tph.fill_random(0, 1);
  }
  if (Vpphh_path.size()) {
    Vpphh.read_dense_from_file(Vpphh_path.c_str());
  } else {
    Vpphh.fill_random(0, 1);
  }
  if (Vhhhp_path.size()) {
    Vhhhp.read_dense_from_file(Vhhhp_path.c_str());
  } else {
    Vhhhp.fill_random(0, 1);
  }
  if (Vppph_path.size()) {
    Vppph->read_dense_from_file(Vppph_path.c_str());
  } else {
    Vppph->fill_random(0, 1);
  }

  atrip::Atrip::init(MPI_COMM_WORLD);
  const auto in

@@ -199,6 +244,7 @@ int main(int argc, char** argv) {
    .with_iterationMod(itMod)
    .with_percentageMod(percentageMod)
    .with_tuplesDistribution(tuplesDistribution)
    .with_maxIterations(max_iterations)
    // checkpoint options
    .with_checkpointAtEveryIteration(checkpoint_it)
    .with_checkpointAtPercentage(checkpoint_percentage)

@@ -48,7 +48,8 @@ AM_CONDITIONAL([WITH_CLANG_CHECK], [test x${clang_check} = xYES])
AC_ARG_ENABLE([cuda],
              [AS_HELP_STRING([--enable-cuda],
                              [Build with cuda])],
              [WITH_CUDA=yes],
              [WITH_CUDA=yes
               WITH_OPENACC=yes],
              [WITH_CUDA=no])
AC_ARG_VAR([NVCC], [Path to the nvidia cuda compiler.])
AC_ARG_VAR([CUDA_LDFLAGS], [LDFLAGS to find libraries -lcuda, -lcudart, -lcublas.])

@@ -164,8 +165,7 @@ AC_TYPE_SIZE_T
dnl -----------------------------------------------------------------------
dnl CHECK CTF
if test xYES = x${BUILD_CTF}; then
  AC_MSG_WARN([Sorry, building CTF is not supported yet; provide a build path
               with --with-ctf=path/to/ctf/installation])
  AC_MSG_WARN([You will have to do make ctf before building the project.])
else
  CPPFLAGS="$CPPFLAGS -I${LIBCTF_CPATH}"
  LDFLAGS="$LDFLAGS -L${LIBCTF_LD_LIBRARY_PATH} -lctf"

@@ -183,6 +183,8 @@ if test x${WITH_CUDA} = xyes; then
  -----------------------
  ])
  AC_CHECK_PROGS([NVCC], [nvcc])
  AC_CHECK_PROGS([NVCXX], [nvc++])
  MPILIBS=$($MPICXX -show | awk '!($1="")')
  AC_SUBST([CUDA_LDFLAGS])
  AC_DEFINE([HAVE_CUDA],1,[Whether we are using CUDA])
  # TODO: make sure to find cuda and cudart

@@ -228,6 +230,7 @@ AC_MSG_RESULT([
ATRIP_LDFLAGS = $ATRIP_LDFLAGS
BLAS = ${BLAS_LIBS}
LIBS = ${LIBS}
MPILIBS = $MPILIBS
])

AC_OUTPUT

etc/env/raven/cuda (new file, vendored, 56 lines)
@@ -0,0 +1,56 @@
mods=(
  cuda/11.6
  intel/19.1.2
  mkl/2020.4
  impi/2019.8
  autoconf/2.69
  automake/1.15
  libtool/2.4.6
)

module purge
module load ${mods[@]}

LIB_PATH="${CUDA_HOME}/lib64"
export CUDA_ROOT=${CUDA_HOME}
export CUDA_LDFLAGS="-L${LIB_PATH} -lcuda -L${LIB_PATH} -lcudart -L${LIB_PATH} -lcublas"
export CUDA_CXXFLAGS="-I${CUDA_HOME}/include"

export LD_LIBRARY_PATH="${MKL_HOME}/lib/intel64_lin:${LD_LIBRARY_PATH}"

BLAS_STATIC_PATH="$MKL_HOME/lib/intel64/libmkl_intel_lp64.a"

ls ${LIB_PATH}/libcublas.so
ls ${LIB_PATH}/libcudart.so

cat <<EOF

////////////////////////////////////////////////////////////////////////////////
info
////////////////////////////////////////////////////////////////////////////////

MKL_HOME = $MKL_HOME
BLAS_STATIC_PATH = $BLAS_STATIC_PATH

CUDA_ROOT = ${CUDA_HOME}
CUDA_LDFLAGS = "-L${LIB_PATH} -lcuda -L${LIB_PATH} -lcudart -L${LIB_PATH} -lcublas"
CUDA_CXXFLAGS = "-I${CUDA_HOME}/include"

Consider now running the following

../configure \\
  --enable-cuda \\
  --disable-slice \\
  --with-blas="-L\$MKL_HOME/lib/intel64/ -lmkl_intel_lp64 -mkl" \\
  CXX=mpiicpc \\
  CC=mpiicc \\
  MPICXX=mpiicpc

EOF

return
etc/env/raven/cuda-openacc (new file, vendored, 82 lines)
@@ -0,0 +1,82 @@
#!/usr/bin/env bash
mods=(
  #cuda/11.6
  nvhpcsdk/22 # for openacc
  gcc/12
  openmpi
  mkl/2020.4
  autoconf/2.69
  automake/1.15
  libtool/2.4.6
)

module purge
module load ${mods[@]}

LIB_PATH="${NVHPC_CUDA_HOME}/lib64"
export CUBLAS_LD_PATH="${NVHPC_ROOT}/math_libs/lib64/"
export CUDA_ROOT=${CUDA_HOME}
export CUDA_LDFLAGS="-L${LIB_PATH} -lcuda -L${LIB_PATH} -lcudart -L${CUBLAS_LD_PATH} -lcublas"
export CUDA_CXXFLAGS="-I${CUDA_HOME}/include"

export LD_LIBRARY_PATH="${MKL_HOME}/lib/intel64:${LD_LIBRARY_PATH}"

MPILIBS=$(mpicxx -show | awk '!($1="")')
export MPILIBS
export MPINVCXX="nvc++ ${MPILIBS}"

ls ${CUBLAS_LD_PATH}/libcublas.so
ls ${LIB_PATH}/libcudart.so

#export OMPI_CC="nvc"
#export OMPI_CXX="nvc++"

BLAS_LDFLAGS="-L${PWD}/OpenBLAS-0.3.20/ -lopenblas"
_openblas_make () {
  [[ -d OpenBLAS-0.3.20/ ]] || {
    wget https://github.com/xianyi/OpenBLAS/releases/download/v0.3.20/OpenBLAS-0.3.20.tar.gz
    tar xvzf OpenBLAS-0.3.20.tar.gz
    cd OpenBLAS-0.3.20/
    make FC=gfortran CC=gcc USE_OPENMP=1 NUM_THREADS=72 TARGET=SKYLAKEX
  } && {
    echo "Openblas built"
  }
}
( _openblas_make; )

cat <<EOF

////////////////////////////////////////////////////////////////////////////////
info
////////////////////////////////////////////////////////////////////////////////

MKL_HOME = $MKL_HOME
BLAS_STATIC_PATH = $BLAS_STATIC_PATH

CUDA_ROOT = ${CUDA_HOME}
CUDA_LDFLAGS = "-L${LIB_PATH} -lcuda -L${LIB_PATH} -lcudart -L${LIB_PATH} -lcublas"
CUDA_CXXFLAGS = "-I${CUDA_HOME}/include"

Consider now running the following

../../configure \\
  --enable-cuda \\
  --disable-slice \\
  --with-blas="${BLAS_LDFLAGS}" \\
  CXX="gcc" \\
  NVCC="\$MPINVCXX" \\
  MPICXX="mpicxx"

EOF

return
etc/m4/atrip_openacc.m4 (new file, 70 lines)
@@ -0,0 +1,70 @@
# SYNOPSIS
#
#   ATRIP_OPENACC([ACTION-SUCCESS], [ACTION-FAILURE])
#
# DESCRIPTION
#
#   Check whether the -fopenacc flag works with the current language's
#   compiler or gives an error.
#
#   ACTION-SUCCESS/ACTION-FAILURE are shell commands to execute on
#   success/failure.
#
# LICENSE
#
#   Copyright (c) 2023 Alejandro Gallo <aamsgallo@gmail.com>
#
#   Copying and distribution of this file, with or without modification, are
#   permitted in any medium without royalty provided the copyright notice
#   and this notice are preserved. This file is offered as-is, without any
#   warranty.

AC_DEFUN([ATRIP_OPENACC],
[
AC_MSG_CHECKING([that the compiler works with the -fopenacc flag])
AC_COMPILE_IFELSE([AC_LANG_SOURCE([_ATRIP_OPENACC_SOURCE])],
                  [
                    $1
                    AC_MSG_RESULT([yes])
                  ],
                  [
                    $2
                    AC_MSG_ERROR([no])
                  ])
])dnl DEFUN

m4_define([_ATRIP_OPENACC_SOURCE], [[
#include <stdio.h>
#include <stdlib.h>
#include <openacc.h>

#define SIZE 10

int main(int argc, char **argv) {
  float matrix[SIZE * SIZE];
  float result[SIZE * SIZE];

  // Initialize the matrix with random values
  for (int i = 0; i < SIZE * SIZE; i++) {
    matrix[i] = rand() / (float)RAND_MAX;
  }

#pragma acc data \
  copy(matrix[0:SIZE * SIZE]) \
  copyout(result[0:SIZE * SIZE])
  {
    // Calculate the matrix multiplication
#pragma acc parallel loop collapse(2)
    for (int i = 0; i < SIZE; i++) {
      for (int j = 0; j < SIZE; j++) {
        float sum = 0.0f;
        for (int k = 0; k < SIZE; k++) {
          sum += matrix[i * SIZE + k] * matrix[j * SIZE + k];
        }
        result[i * SIZE + j] = sum;
      }
    }
  }
  return 0;
}
]])

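A hypothetical use of this macro from configure.ac (not part of this
changeset; shown only to illustrate the ACTION-SUCCESS/ACTION-FAILURE
hooks, reusing the WITH_OPENACC variable set in the configure.ac hunk
above):

CXXFLAGS="$CXXFLAGS -fopenacc"
ATRIP_OPENACC([WITH_OPENACC=yes],
              [WITH_OPENACC=no])
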
@@ -43,7 +43,7 @@
# and this notice are preserved. This file is offered as-is, without any
# warranty.

#serial 14
#serial 15

dnl This macro is based on the code from the AX_CXX_COMPILE_STDCXX_11 macro
dnl (serial version number 13).

@@ -189,7 +189,11 @@ m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_11], [[

#error "This is not a C++ compiler"

#elif __cplusplus < 201103L
// MSVC always sets __cplusplus to 199711L in older versions; newer versions
// only set it correctly if /Zc:__cplusplus is specified as well as a
// /std:c++NN switch:
// https://devblogs.microsoft.com/cppblog/msvc-now-correctly-reports-__cplusplus/
#elif __cplusplus < 201103L && !defined _MSC_VER

#error "This is not a C++11 compiler"

@@ -480,7 +484,7 @@ m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_14], [[

#error "This is not a C++ compiler"

#elif __cplusplus < 201402L
#elif __cplusplus < 201402L && !defined _MSC_VER

#error "This is not a C++14 compiler"

@@ -604,7 +608,7 @@ m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_17], [[

#error "This is not a C++ compiler"

#elif __cplusplus < 201703L
#elif __cplusplus < 201703L && !defined _MSC_VER

#error "This is not a C++17 compiler"

@@ -970,7 +974,7 @@ namespace cxx17

} // namespace cxx17

#endif // __cplusplus < 201703L
#endif // __cplusplus < 201703L && !defined _MSC_VER

]])

@@ -983,7 +987,7 @@ m4_define([_AX_CXX_COMPILE_STDCXX_testbody_new_in_20], [[

#error "This is not a C++ compiler"

#elif __cplusplus < 202002L
#elif __cplusplus < 202002L && !defined _MSC_VER

#error "This is not a C++20 compiler"

@@ -1000,6 +1004,6 @@ namespace cxx20

} // namespace cxx20

#endif // __cplusplus < 202002L
#endif // __cplusplus < 202002L && !defined _MSC_VER

]])

@@ -86,7 +86,7 @@ namespace atrip {
  ADD_ATTRIBUTE(bool, rankRoundRobin, false)
  ADD_ATTRIBUTE(bool, chrono, false)
  ADD_ATTRIBUTE(bool, barrier, false)
  ADD_ATTRIBUTE(int, maxIterations, 0)
  ADD_ATTRIBUTE(size_t, maxIterations, 0)
  ADD_ATTRIBUTE(int, iterationMod, -1)
  ADD_ATTRIBUTE(int, percentageMod, -1)
  ADD_ATTRIBUTE(TuplesDistribution, tuplesDistribution, NAIVE)

@@ -7,16 +7,17 @@ AM_CPPFLAGS = $(CTF_CPPFLAGS)
lib_LIBRARIES = libatrip.a

libatrip_a_CPPFLAGS = -I$(top_srcdir)/include/
libatrip_a_SOURCES = ./atrip/Blas.cxx ./atrip/Tuples.cxx ./atrip/DatabaseCommunicator.cxx
libatrip_a_SOURCES =
NVCC_FILES = ./atrip/Equations.cxx ./atrip/Complex.cxx ./atrip/Atrip.cxx
NVCC_FILES += ./atrip/Blas.cxx ./atrip/Tuples.cxx ./atrip/DatabaseCommunicator.cxx
if WITH_CUDA
NVCC_OBJS = $(patsubst %.cxx,%.nvcc.o,$(NVCC_FILES))
libatrip_a_CPPFLAGS += $(CUDA_CXXFLAGS)
libatrip_a_DEPENDENCIES = $(NVCC_OBJS)
libatrip_a_LIBADD = $(NVCC_OBJS)
%.nvcc.o: %.cxx
	$(NVCC) -c -x cu -ccbin="${MPICXX}" -I../ $(CPPFLAGS) $(CTF_CPPFLAGS) $(DEFS) $(libatrip_a_CPPFLAGS) $< -o $@
	##$(NVCC) -c -x cu -ccbin="${MPICXX}" -I../ $(CPPFLAGS) $(CTF_CPPFLAGS) $(DEFS) $(libatrip_a_CPPFLAGS) $< -o $@
	$(NVCXX) -cuda $(MPILIBS) -c -x cu -I../ $(CPPFLAGS) $(CTF_CPPFLAGS) $(DEFS) $(libatrip_a_CPPFLAGS) $< -o $@

#./atrip/Equations.o: ./atrip/Equations.cxx
#	$(NVCC) -c -I../ $(CPPFLAGS) $(libatrip_a_CPPFLAGS) $< -o $@

@@ -694,13 +694,10 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {

        // LOG(0, "AtripCUDA") << "doing energy " << i << "distinct " << distinct << "\n";
        WITH_CHRONO("energy",
          /*
            TODO: think about how to do this on the GPU in the best way possible
          if ( distinct == 0)
            tupleEnergy = getEnergyDistinct<F>(epsabc, No, (F*)epsi, (F*)Tijk, (F*)Zijk);
          else
            tupleEnergy = getEnergySame<F>(epsabc, No, (F*)epsi, (F*)Tijk, (F*)Zijk);
          */
        )

#if defined(HAVE_OCD) || defined(ATRIP_PRINT_TUPLES)

@@ -773,6 +770,8 @@ Atrip::Output Atrip::run(Atrip::Input<F> const& in) {
      Atrip::chrono["iterations"].stop();
      // ITERATION END %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%{{{1

      if (in.maxIterations != 0 && i >= in.maxIterations) break;

    }
    // END OF MAIN LOOP

@@ -182,15 +182,21 @@ namespace cuda {

// [[file:~/cuda/atrip/atrip.org::*Energy][Energy:2]]
template <typename F>
__MAYBE_DEVICE__
double getEnergyDistinct
  ( F const epsabc
  , size_t const No
  , F* const epsi
  , F* const Tijk
  , F* const Zijk
  ) {
  (F const epsabc,
   size_t const No,
   F* const epsi,
   F* const Tijk,
   F* const Zijk) {
  constexpr size_t blockSize=16;
  F energy(0.);
#if defined(HAVE_CUDA)
#pragma acc kernels
  for (size_t k(0); k < No; k++) {
    for (size_t j(k); j < No; j++) {
      for (size_t i(j); i < No; i++) {
#else
  for (size_t kk=0; kk<No; kk+=blockSize){
    const size_t kend( std::min(No, kk+blockSize) );
    for (size_t jj(kk); jj<No; jj+=blockSize){

@@ -198,13 +204,14 @@ double getEnergyDistinct
      for (size_t ii(jj); ii<No; ii+=blockSize){
        const size_t iend( std::min( No, ii+blockSize) );
        for (size_t k(kk); k < kend; k++){
          const F ek(epsi[k]);
          const size_t jstart = jj > k ? jj : k;
          for (size_t j(jstart); j < jend; j++){
            F const ej(epsi[j]);
            F const facjk = j == k ? F(0.5) : F(1.0);
            size_t istart = ii > j ? ii : j;
            for (size_t i(istart); i < iend; i++){
#endif
              const F ek(epsi[k]);
              const F ej(epsi[j]);
              const F facjk = j == k ? F(0.5) : F(1.0);
              const F
                ei(epsi[i])
                , facij = i == j ? F(0.5) : F(1.0)

@@ -239,14 +246,17 @@ double getEnergyDistinct
            } // i
          } // j
        } // k
#if !defined(HAVE_CUDA)
      } // ii
    } // jj
  } // kk
#endif
  return std::real(energy);
}

template <typename F>
__MAYBE_DEVICE__
double getEnergySame
  ( F const epsabc
  , size_t const No

@@ -256,6 +266,12 @@ double getEnergySame
  ) {
  constexpr size_t blockSize = 16;
  F energy = F(0.);
#if defined(HAVE_CUDA)
#pragma acc kernels
  for (size_t k(0); k < No; k++) {
    for (size_t j(k); j < No; j++) {
      for (size_t i(j); i < No; i++) {
#else
  for (size_t kk=0; kk<No; kk+=blockSize){
    const size_t kend( std::min( kk+blockSize, No) );
    for (size_t jj(kk); jj<No; jj+=blockSize){

@@ -263,13 +279,14 @@ double getEnergySame
      for (size_t ii(jj); ii<No; ii+=blockSize){
        const size_t iend( std::min( ii+blockSize, No) );
        for (size_t k(kk); k < kend; k++){
          const F ek(epsi[k]);
          const size_t jstart = jj > k ? jj : k;
          for(size_t j(jstart); j < jend; j++){
            const F facjk( j == k ? F(0.5) : F(1.0));
            const F ej(epsi[j]);
            const size_t istart = ii > j ? ii : j;
            for(size_t i(istart); i < iend; i++){
#endif
              const F facjk( j == k ? F(0.5) : F(1.0));
              const F ek(epsi[k]);
              const F ej(epsi[j]);
              const F
                ei(epsi[i])
                , facij ( i==j ? F(0.5) : F(1.0))

@@ -291,9 +308,11 @@ double getEnergySame
            } // i
          } // j
        } // k
#if !defined(HAVE_CUDA)
      } // ii
    } // jj
  } // kk
#endif
  return std::real(energy);
}
// Energy:2 ends here

@@ -6,6 +6,7 @@ set -eu
flags=("${@}")
PROJECTS=()

############################################################
#
## Check root directory
#

@@ -35,6 +36,7 @@ EOF
  exit 1
}

############################################################
#
## Create configuration function
#

@@ -48,7 +50,8 @@ create_config () {
  echo "> creating: $name"
  cat <<SH > configure
#!/usr/bin/env bash
# created by $0 on $(date)
# creator: $0
# date: $(date)

$root_project/configure $(cat $file | paste -s) \\
$(for word in "${flags[@]}"; do

@@ -62,9 +65,14 @@ SH
  cd - > /dev/null
}

############################################################
# begin doc
#
## default configuration
# - default ::
#   This configuration uses a CPU code with dgemm
#   and without computing slices.
#
# end doc

tmp=`mktemp`
cat <<EOF > $tmp

@@ -74,9 +82,12 @@ EOF
create_config $tmp default
rm $tmp

# begin doc
#
## only-dgemm configuration
# - only-dgemm ::
#   This only runs the computation part that involves dgemms.
#
# end doc

tmp=`mktemp`
cat <<EOF > $tmp

@@ -87,6 +98,46 @@ EOF
create_config $tmp only-dgemm
rm $tmp

# begin doc
#
# - cuda-only-dgemm ::
#   This is the naive CUDA implementation compiling only the dgemm parts
#   of the compute.
#
# end doc

tmp=`mktemp`
cat <<EOF > $tmp
--enable-cuda
--enable-only-dgemm
--disable-slice
EOF

create_config $tmp cuda-only-dgemm
rm $tmp

# begin doc
#
# - cuda-slices-on-gpu-only-dgemm ::
#   This configuration tests that slices reside completely on the gpu
#   and it should use a CUDA aware MPI implementation.
#   It also only uses the routines that involve dgemm.
#
# end doc

tmp=`mktemp`
cat <<EOF > $tmp
--enable-cuda
--enable-sources-in-gpu
--enable-cuda-aware-mpi
--enable-only-dgemm
--disable-slice
EOF

create_config $tmp cuda-slices-on-gpu-only-dgemm
rm $tmp

############################################################
#
## Create makefile
#

@@ -128,5 +179,5 @@ EOF
## Emacs stuff
# Local Variables:
# eval: (outline-minor-mode)
# outline-regexp: "## "
# outline-regexp: "############################################################"
# End: