Commit 1addab87 authored by Antonio Ragagnin

jobfile for leonardo booster

parent ca4941ec
#!/bin/bash
#SBATCH --nodes 1
#SBATCH --ntasks-per-node 1
#SBATCH --cpus-per-task 32
#SBATCH --gres=gpu:1
#SBATCH --partition=boost_usr_prod
set -xev # trace commands (-x), echo script lines (-v), exit on error (-e)
export DEBUG=1
PPL=(1 64)                     # vary particles per leaf
THREADS=(1 2 4 8 16 32 64 128) # vary branching (thread) count
MODE=(cpu gpu)                 # vary cpu/gpu execution mode
#N=(1000 10000 100000 1000000 10000000)
N=(50000000)                   # scaling runs use 5e7 particles
#
# build and run the tree build test (test_target.cpp) with GPU offload
#
CC='nvc++' HW_FLAGS='-mp=gpu -gpu=rdc,managed -gpu=cc80 -Minfo=mp,accel -Minline=1000 -O3 -DTARGET=1 -DOPENMP_RECURSION_TRICK -DOPENMP_TARGET_HILBERTCOPY' srun -n 1 python -m hotwheels_core.wrap hotwheels_octree/test_target.cpp
rm -f report.csv
#
# loop over tree build configurations PPL, THREADS, MODE, N
#
for ppl in "${PPL[@]}" ; do
  for n in "${N[@]}" ; do
    for mode in "${MODE[@]}" ; do
      for threads in "${THREADS[@]}" ; do
        if [[ "$mode" == "cpu" ]]; then
          export CC=gcc
          export HW_FLAGS='-O3 -fopenmp'
          export OMP_NUM_THREADS=$threads
        else
          if [[ $threads -lt 16 ]]; then
            continue # skip small thread counts for the GPU runs
          fi
          export CC=nvc++
          export HW_FLAGS='-mp=gpu -gpu=rdc,managed -gpu=cc80 -Minfo=mp,accel -Minline=1000 -O3'
          export OMP_NUM_THREADS=4
        fi
        # run the test
        srun -n 1 python hotwheels_octree/testgpu.py -ppl $ppl -n $n -mode $mode -threads $threads 1>run.out 2>run.err
        # extract the "time:" value from the run output
        t=$(grep time: run.out | sed s/time://)
        echo $ppl,$n,$mode,$threads,$t >> report.csv # append one row: ppl,n,mode,threads,time
      done
    done
  done
done
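#
# Optional post-processing sketch (an assumption, not part of the original jobfile):
# print the best (minimum) time per (ppl, n, mode) combination, assuming each row of
# report.csv holds the five comma-separated fields written above (ppl,n,mode,threads,time)
# and that GNU sort/awk are available on the login node.
#
sort -t, -k1,1n -k2,2n -k3,3 -k5,5g report.csv | \
  awk -F, '!seen[$1","$2","$3]++ { printf "ppl=%s n=%s mode=%s: best time %s s (threads=%s)\n", $1, $2, $3, $5, $4 }'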