#!/bin/bash
# Job name:
#SBATCH --job-name=ShallowWaterGPUStrongScaling
#
# Project:
#SBATCH --account=nn9882k
#
# Wall clock limit:
#SBATCH --time=10:00:00
#
# Ask for 1 GPU (max is 2)
# Note: The environment variable CUDA_VISIBLE_DEVICES will show which GPU
# device(s) to use. It will have values '0', '1' or '0,1' corresponding to
# /dev/nvidia0, /dev/nvidia1 or both, respectively.
#SBATCH --partition=accel --gres=gpu:1
#
# Max memory usage per task (core) - increasing this will cost more core hours:
#SBATCH --mem-per-cpu=16G
#
# Number of tasks:
#SBATCH --nodes=1 --ntasks-per-node=1

## Set up job environment: (this is done automatically behind the scenes)
## (make sure to comment out with '#' or remove the following 'source ...' line)
# source /cluster/bin/jobsetup

module restore system   # instead of 'module purge', this resets the module environment to the system default
module load CUDA/10.2.89
# It is also recommended to list the loaded modules, for easier debugging:
module list

set -o errexit  # exit on errors
set -o nounset  # treat unset variables as errors (makes it easier to discover issues in your batch script)

## Copy input files to the work directory:
mkdir $SCRATCH/ShallowWaterGPU
cp -r . $SCRATCH/ShallowWaterGPU

## Make sure the results are copied back to the submit directory (see Work Directory below):
# chkfile MyResultFile
# chkfile is replaced by 'savefile' on Saga
savefile "$SCRATCH/ShallowWaterGPU/*.log"
savefile "$SCRATCH/ShallowWaterGPU/*.nc"
savefile "$SCRATCH/ShallowWaterGPU/*.json"

## Do some work:
cd $SCRATCH/ShallowWaterGPU
srun $HOME/.conda/envs/ShallowWaterGPU_HPC/bin/python3 --version
srun $HOME/.conda/envs/ShallowWaterGPU_HPC/bin/python3 mpiTesting.py -nx 1024 -ny 1024 --profile
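
## Usage sketch (assumption: the script is saved as 'saga_strong_scaling.sh' in the
## repository root; the actual filename may differ). Submit it to the Slurm queue
## from the submit directory and monitor it with standard Slurm commands:
##   sbatch saga_strong_scaling.sh
##   squeue -u $USER
## Results listed with 'savefile' above are copied back to the submit directory when the job finishes.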