#!/bin/bash
# See http://wiki.ex3.simula.no before changing the values below
#SBATCH -p dgx2q # partition (GPU queue)
#SBATCH -N 1 # number of nodes
#SBATCH -n 4 # number of tasks (MPI ranks)
#SBATCH -w g001 # DGX-2 node
#SBATCH --gres=gpu:4 # number of V100 GPUs
#SBATCH -t 0-00:10 # time (D-HH:MM)
#SBATCH -o slurm.%N.%j.out # STDOUT
#SBATCH -e slurm.%N.%j.err # STDERR
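# Optional sanity check (a sketch, not part of the original setup): Slurm
# exports standard environment variables describing the allocation, and
# CUDA_VISIBLE_DEVICES is typically set when GPUs are requested via --gres,
# so logging them confirms the request above was honored.
echo "Allocated node(s): $SLURM_JOB_NODELIST, tasks: $SLURM_NTASKS, GPUs: $CUDA_VISIBLE_DEVICES"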
ulimit -s 10240
module load slurm
module load openmpi/4.0.1
module load cuda10.1/toolkit/10.1.243
# Check how many GPUs your job got
#nvidia-smi
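# A terser alternative sketch, kept commented out like the line above
# (assumes nvidia-smi is on PATH on the compute node; the query flags are
# standard nvidia-smi options):
#nvidia-smi --query-gpu=index,name,memory.total --format=csv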
## Copy input files to the work directory:
rm -rf /work/$USER/ShallowWaterGPU
mkdir -p /work/$USER/ShallowWaterGPU
cp -r . /work/$USER/ShallowWaterGPU
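# Defensive check (a sketch, not part of the original script): abort early
# if the copy to the work directory failed, rather than launching mpirun
# against an incomplete tree. mpiTesting.py is the entry point run below.
if [ ! -f /work/$USER/ShallowWaterGPU/mpiTesting.py ]; then
    echo "Copy to /work/$USER/ShallowWaterGPU failed" >&2
    exit 1
fi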
# Run job
# (Assumes Miniconda is installed in the user's home directory.)
cd /work/$USER/ShallowWaterGPU
mpirun --mca btl_openib_if_include mlx5_0 \
       --mca btl_openib_warn_no_device_params_found 0 \
       $HOME/miniconda3/envs/ShallowWaterGPU_HPC/bin/python3 mpiTesting.py
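# Sketch (an assumption, not in the original script): record the mpirun exit
# status so failures are visible in the Slurm log before files are copied back.
MPIRUN_STATUS=$?
echo "mpirun exited with status $MPIRUN_STATUS"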
cd $HOME/src/ShallowWaterGPU
## Copy output files from the work directory:
# (NOTE: Copying is not performed if the job fails!)
cp /work/$USER/ShallowWaterGPU/*.log .
cp /work/$USER/ShallowWaterGPU/*.nc .