	Merge branch 'master' of github.com:setmar/ShallowWaterGPU
Commit: 5b6e145d80
Author: Martin Lilleeng Sætra
@@ -21,6 +21,8 @@ Example job script:
 dgx-2-test.job
 
 Submit:
+module use /cm/shared/ex3-modules/latest/modulefiles   # Latest ex3-modules
+module load slurm/20.02.7                              # To load slurm module
 sbatch dgx-2-test.job
 
 ### PPI 4 x P100 (VPN necessary)
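The added lines document how to submit the ex3 job script. A minimal submit-and-monitor sequence could look like the sketch below; squeue and scancel are standard Slurm commands and are not part of this change, and <jobid> is a placeholder for the id that sbatch prints.

    module use /cm/shared/ex3-modules/latest/modulefiles   # make the ex3 module tree visible
    module load slurm/20.02.7                              # puts sbatch/squeue on PATH
    sbatch dgx-2-test.job                                  # prints "Submitted batch job <jobid>"
    squeue -u $USER                                        # watch the job's queue state
    scancel <jobid>                                        # cancel the job if needed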
							
								
								
									
saga-dev.job
@@ -3,22 +3,23 @@
 #SBATCH --job-name=ShallowWaterGPUScalingDev
 #
 # Project:
-#SBATCH --account=nn9550k
+#SBATCH --account=nn9882k
 #
 # Wall clock limit:
-#SBATCH --time=01:00:00
+#SBATCH --time=00:20:00
 #
+# NOTE: See https://documentation.sigma2.no/jobs/projects_accounting.html when adjusting the values below
 #
 # Ask for 1 GPU (max is 2)
 # Note: The environment variable CUDA_VISIBLE_DEVICES will show which GPU
 # device(s) to use. It will have values '0', '1' or '0,1' corresponding to
 # /dev/nvidia0, /dev/nvidia1 or both, respectively.
-#SBATCH --partition=accel --gres=gpu:1
+#SBATCH --partition=accel
 #
 # Max memory usage per task (core) - increasing this will cost more core hours:
-#SBATCH --mem-per-cpu=16G
+#SBATCH --mem-per-cpu=3800M
 #
 # Number of tasks:
-#SBATCH --nodes=1 --ntasks-per-node=1
+#SBATCH --nodes=1 --gpus-per-node=1 --ntasks-per-node=1
 #
 #SBATCH --qos=devel
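In the dev job script the GPU request moves from --gres=gpu:1 to --gpus-per-node=1 and the per-core memory drops from 16G to 3800M. As the script's own comments note, CUDA_VISIBLE_DEVICES shows which device(s) the job received; the lines below are only a sketch of what one might add to the job body to confirm the allocation, assuming nvidia-smi is available on the compute node.

    echo "CUDA_VISIBLE_DEVICES=$CUDA_VISIBLE_DEVICES"             # e.g. '0', '1' or '0,1'
    nvidia-smi --query-gpu=index,name,memory.total --format=csv   # list the GPU(s) visible to the job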
@@ -49,5 +50,5 @@ savefile "$SCRATCH/ShallowWaterGPU/*.json"
 ## Do some work:
 cd $SCRATCH/ShallowWaterGPU
 srun $HOME/.conda/envs/ShallowWaterGPU_HPC/bin/python3 --version
-srun $HOME/.conda/envs/ShallowWaterGPU_HPC/bin/python3 mpiTesting.py -nx 8192 -ny 8192 --profile
+srun $HOME/.conda/envs/ShallowWaterGPU_HPC/bin/python3 mpiTesting.py -nx 1024 -ny 1024 --profile
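The dev run also shrinks from an 8192x8192 to a 1024x1024 domain, matching the shorter 00:20:00 devel wall-clock limit. For a quick check outside the batch system, a comparable interactive launch might look like this sketch; it assumes the ShallowWaterGPU_HPC conda environment is active and uses a single MPI rank, in line with --ntasks-per-node=1.

    mpirun -np 1 python3 mpiTesting.py -nx 1024 -ny 1024 --profile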
@@ -3,22 +3,23 @@
 #SBATCH --job-name=ShallowWaterGPUStrongScaling
 #
 # Project:
-#SBATCH --account=nn9550k
+#SBATCH --account=nn9882k
 #
 # Wall clock limit:
 #SBATCH --time=24:00:00
 #
 # Ask for 1 GPU (max is 2)
+# NOTE: See https://documentation.sigma2.no/jobs/projects_accounting.html when adjusting the values below
 #
 # Note: The environment variable CUDA_VISIBLE_DEVICES will show which GPU
 # device(s) to use. It will have values '0', '1' or '0,1' corresponding to
 # /dev/nvidia0, /dev/nvidia1 or both, respectively.
-#SBATCH --partition=accel --gres=gpu:1
+#SBATCH --partition=accel
 #
 # Max memory usage per task (core) - increasing this will cost more core hours:
-#SBATCH --mem-per-cpu=16G
+#SBATCH --mem-per-cpu=3800M
 #
 # Number of tasks:
-#SBATCH --nodes=1 --ntasks-per-node=1
+#SBATCH --nodes=1 --gpus-per-node=1 --ntasks-per-node=1
 
 ## Set up job environment: (this is done automatically behind the scenes)
 ## (make sure to comment '#' or remove the following line 'source ...')
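The strong-scaling script receives the same account, partition, memory, and GPU-request updates as the dev script. For actual scaling runs the new --gpus-per-node syntax extends naturally to more ranks; the directive below is a hypothetical example of a larger request, not part of this diff.

    #SBATCH --nodes=4 --gpus-per-node=1 --ntasks-per-node=1   # hypothetical 4-node run, one GPU and one MPI rank per node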
@@ -3,7 +3,7 @@
 #SBATCH --job-name=ShallowWaterGPUStrongScaling
 #
 # Project:
-#SBATCH --account=nn9550k
+#SBATCH --account=nn9882k
 #
 # Wall clock limit:
 #SBATCH --time=10:00:00
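Every job script touched by this commit switches the project account from nn9550k to nn9882k. A one-liner sketch that would apply the same substitution in bulk, assuming GNU sed and that all affected scripts use the .job suffix:

    sed -i 's/--account=nn9550k/--account=nn9882k/' *.job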