PROGRAM=dot-product-p
export NX_SMP_WORKERS=1
export NX_SMP_WORKERS=4
./$PROGRAM 8192 128
PROGRAM=matmul-p
export NX_SMP_WORKERS=1
export NX_SMP_WORKERS=4
./$PROGRAM 16
PROGRAM=multisort-p
export NX_SMP_WORKERS=1
export NX_SMP_WORKERS=4
./$PROGRAM 65536 256 512
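NX_SMP_WORKERS controls how many Nanos++ SMP worker threads the runtime spawns for one execution. To compare several worker counts in a single run script instead of editing the export by hand, the value can be swept in a loop; a minimal sketch, reusing the dot-product arguments above (the worker counts are illustrative):

for workers in 1 2 4; do
    echo "== NX_SMP_WORKERS=$workers =="
    NX_SMP_WORKERS=$workers ./dot-product-p 8192 128
done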
PROGRAM=nbody-p
export NX_SMP_WORKERS=1
export NX_GPUS=2 #change this in order to use more GPUs
for gpus in 1 2; do
export NX_GPUS=$gpus
NX_ARGS="--cache-policy writethrough --gpu-max-memory 90" ./$PROGRAM nbody_input-16384.in
done
PROGRAM=saxpy-p
export NX_GPUMAXMEM=90
export NX_GPUS=2 #change this in order to use more GPUs
for gpus in 1 2; do
export NX_GPUS=$gpus
./$PROGRAM
done
PROGRAM=heat-mpi-ompss-p
export IFS=";"
THREADS="01;02;03;04;05;06;07;08;09;10;11;12"
THREADS=(1 2 3 4 5 6)
INPUT=test.dat
for thread in $THREADS; do
NX_SMP_WORKERS=$thread mpirun ./$PROGRAM $INPUT test.ppm
for thread in ${THREADS[@]}; do
NX_SMP_WORKERS=$thread ${MPIRUN_COMMAND} ./$PROGRAM $INPUT test.ppm
done
PROGRAM=heat-mpi-ompss-i
PROGRAM=heat-mpi-ompss-p
# Run with 2 threads per MPI process in the same node
export SMP_NUM_WORKERS=2
# Run with 6 threads per MPI process in the same node
export NX_SMP_WORKERS=6
# Uncomment to instrument
#export INST=./graph.sh
#export INST=./trace.sh
mpirun $INST ./$PROGRAM test.dat test.ppm
${MPIRUN_COMMAND} $INST ./$PROGRAM test.dat test.ppm
# Generate the trace if needed
if [[ "$INST" == *"trace"* ]]; then
mpi2prv -f TRACE.mpits -o myTrace.prv
fi
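The ./trace.sh wrapper referenced above is copied into each example by the common Makefile (see the trace$(TRACE_VERSION).sh rule further down) and its contents are not part of this diff. A minimal sketch of what such a wrapper usually contains, assuming a standard Extrae setup; the library name and config file are assumptions:

#!/bin/bash
# Hypothetical trace wrapper: point Extrae at its config, select the Nanos++
# Extrae instrumentation plugin, preload the combined Nanos++/MPI tracing
# library, then run whatever command line was passed in.
export EXTRAE_CONFIG_FILE=extrae.xml
export NX_INSTRUMENTATION=extrae
export LD_PRELOAD=${EXTRAE_HOME}/lib/libnanosmpitrace.so
$*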
@@ -4,6 +4,7 @@ PREFIX=.
TARGETS=$(PROGRAM)-p $(PROGRAM)-i $(PROGRAM)-d
JOB_SCHED_VERSION=-mpi
TRACE_VERSION=-mpi
BASE_DIR=../..
include $(BASE_DIR)/common-files/Makefile
#!/bin/bash
# Enable the Nanos++ graph instrumentation plugin for the wrapped command
export NX_INSTRUMENTATION=new-graph
$*
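One small note on the wrapper: $* re-splits the forwarded arguments on whitespace, while "$@" (quoted) passes each argument through verbatim. A variant sketch that forwards the arguments exactly and replaces the shell with the wrapped command:

#!/bin/bash
# Same wrapper, argument-safe variant (illustrative).
export NX_INSTRUMENTATION=new-graph
exec "$@"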
PROGRAM=matmul-p
export NX_ARGS="--pes 2 --disable-cuda=yes"
echo "NX_ARGS is $NX_ARGS"
echo "LD_PRELOAD is $LD_PRELOAD"
echo "LD_LIBRARY_PATH is $LD_LIBRARY_PATH"
echo "##################################################"
srun ./$PROGRAM
THREADS=(1 2 3 4 5 6)
for thread in ${THREADS[@]}; do
NX_SMP_WORKERS=$thread ${MPIRUN_COMMAND} ./$PROGRAM
done
PROGRAM=matmul-i
PROGRAM=matmul-p
export NX_ARGS="--threads 2"
# Run with 6 threads per MPI process in the same node
export NX_SMP_WORKERS=6
echo "NX_ARGS is $NX_ARGS"
echo "LD_PRELOAD is $LD_PRELOAD"
echo "LD_LIBRARY_PATH is $LD_LIBRARY_PATH"
echo "##################################################"
# Uncomment to enable tracing
# Uncomment to instrument
#export INST=./graph.sh
#export INST=./trace.sh
mpirun --cpus-per-proc 2 $INST ./$PROGRAM
${MPIRUN_COMMAND} $INST ./$PROGRAM
# Generate the trace if needed
if [[ "$INST" == *"trace"* ]]; then
mpi2prv -f TRACE.mpits -o myTrace.prv
fi
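mpi2prv merges the intermediate Extrae files listed in TRACE.mpits into a single Paraver trace. The resulting myTrace.prv can then be opened with the Paraver GUI pointed to by PARAVER_HOME in the configure scripts below, for example (the bin/ path is the usual install layout, an assumption here):

$PARAVER_HOME/bin/wxparaver myTrace.prv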
@@ -4,6 +4,7 @@ PREFIX=.
TARGETS=$(PROGRAM)-p $(PROGRAM)-i $(PROGRAM)-d
JOB_SCHED_VERSION=-mpi
TRACE_VERSION=-mpi
BASE_DIR=../..
include $(BASE_DIR)/common-files/Makefile
@@ -4,19 +4,23 @@ CONFIG_DIR=.config
GFLAGS=
PARAVER=extrae.xml trace.sh
GRAPH=graph.sh
SCRIPTS=run-once.sh multirun.sh
MPI_CHECK=$(MPI_LIB_DIR)/libmpi.so
MKL_CHECK=$(MKL_LIB_DIR)/libmkl_sequential.so
ATLAS_CHECK=$(ATLAS_LIB_DIR)/libatlas.a
all: $(TARGETS) $(SCRIPTS) $(PARAVER)
all: $(TARGETS) $(SCRIPTS) $(PARAVER) $(GRAPH)
extrae.xml:
cp $(COMMON_DIR)/extrae.xml .
trace.sh:
cp $(COMMON_DIR)/trace.sh .
cp $(COMMON_DIR)/trace$(TRACE_VERSION).sh trace.sh
graph.sh:
cp $(COMMON_DIR)/graph.sh .
run-once.sh: $(COMMON_DIR)/run-once.sh $(CONFIG_DIR)/run-once.sh
cp $(COMMON_DIR)/run-once.sh .
@@ -10,6 +10,7 @@ export MPI_HOME=/usr/lib/openmpi
export MPI_LIB_DIR=$MPI_HOME/lib
export MPI_INC_DIR=$MPI_HOME/include
export MPI_CC=mpicc
export MPIRUN_COMMAND="mpirun"
export MKL_LIB_DIR=/home/user/Builds/mkl/lib/intel64
export MKL_INC_DIR=/home/user/Builds/mkl/include
export ATLAS_LIB_DIR=/usr/lib
@@ -11,6 +11,7 @@ export PARAVER_HOME=
export TEMANEJO_HOME=
export MPI_LIB_DIR=
export MPI_INC_DIR=
export MPIRUN_COMMAND="mpirun"
export MKL_LIB_DIR=
export MKL_INC_DIR=
export ATLAS_LIB_DIR=/home/xteruel/Applications/atlas-3.10.3/lib/
@@ -14,6 +14,7 @@ export MPICC=mpiicc
export MPICXX=mpiicpc
export MPI_NATIVE_CC=I_MPI_CC
export MPI_NATIVE_CXX=I_MPI_CXX
export MPIRUN_COMMAND="mpirun"
export MKL_LIB_DIR=/apps/INTEL/2017.4/mkl/lib/intel64
export MKL_INC_DIR=/apps/INTEL/2017.4/mkl/include
export ATLAS_LIB_DIR=/apps/ATLAS/3.10.3/INTEL_BK/lib
@@ -14,6 +14,7 @@ export MPICC=mpiicc
export MPICXX=mpiicpc
export MPI_NATIVE_CC=I_MPI_CC
export MPI_NATIVE_CXX=I_MPI_CXX
export MPIRUN_COMMAND="mpirun"
# Note (vlopez): wxparaver needs glibcxx from gcc >= 5
module load gcc/5.1.0
@@ -2,18 +2,19 @@
DIRNAME=$(readlink -f $(dirname ${BASH_SOURCE[0]}))
export OMPSS_HOME=/apps/PM/ompss/17.06/
export OMPSS_HOME=/apps/PM/ompss/19.06/
export DLB_HOME=/apps/PM/dlb/latest/bullxmpi
export EXTRAE_HOME=/apps/BSCTOOLS/extrae/latest/bullxmpi_1_2_9_1
export EXTRAE_HOME=/apps/BSCTOOLS/extrae/latest/impi_2017_1_132
export PARAVER_HOME=/apps/BSCTOOLS/wxparaver/latest
export TEMANEJO_HOME=
export MPI_HOME=/opt/mpi/bullxmpi/1.1.11.1
export MPI_LIB_DIR=$MPI_HOME/lib
export MPI_INC_DIR=$MPI_HOME/include
export MPICC=mpicc
export MPICXX=mpicxx
export MPI_NATIVE_CC=OMPI_CC
export MPI_NATIVE_CXX=OMPI_CXX
export MPI_HOME=/apps/INTEL/2017.1-043/impi/2017.1.132
export MPI_LIB_DIR=$MPI_HOME/lib64
export MPI_INC_DIR=$MPI_HOME/include64
export MPICC=mpiicc
export MPICXX=mpiicpc
export MPI_NATIVE_CC=I_MPI_CC
export MPI_NATIVE_CXX=I_MPI_CXX
export MPIRUN_COMMAND="srun --cpu_bind=cores"
export MKL_LIB_DIR=/opt/compilers/intel/2016.3.067/mkl/lib/intel64/
export MKL_INC_DIR=/opt/compilers/intel/2016.3.067/mkl/include
export ATLAS_LIB_DIR=/gpfs/apps/NVIDIA/ATLAS/3.9.51/lib
@@ -23,8 +24,12 @@ ln -sf $DIRNAME/sched-job-smp_nvidia $DIRNAME/sched-job-smp
ln -sf $DIRNAME/sched-job-mpi_nvidia $DIRNAME/sched-job-mpi
module unload gcc
module load gcc/4.9.4
module load gcc/4.6.1
module load GRAPHVIZ
module unload bullxmpi
module load impi/2017.1
module unload cuda
module load cuda/8.0
alias submit=mnsubmit
alias queue=mnq
#!/bin/bash
# Instrumentation to generate a task dependency graph
export NX_INSTRUMENTATION=tdg
$*
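This wrapper selects the tdg instrumentation plugin and then runs the command line given to it, so it is used like the trace wrapper in the run scripts above. The task dependency graph is typically emitted in Graphviz .dot format, which is why GRAPHVIZ is loaded in the MareNostrum configure script; a hypothetical usage sketch (program, arguments and the output file name are assumptions):

./graph.sh ./matmul-i 16
dot -Tpdf graph.dot -o graph.pdf   # render the generated graph with Graphviz; exact file name depends on the runtime version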
@@ -8,4 +8,5 @@
# @ gpus_per_node = 2
# @ cpus_per_task = 6
# @ node_usage = not_shared
# @ features = k80
# @ wall_clock_limit = 00:15:00
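With the aliases defined in the MareNostrum configure script above (submit=mnsubmit, queue=mnq), a job file carrying these directives can be submitted and monitored, for example (the job file name is an assumption):

submit ./sched-job-mpi
queue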