Compare commits

...

2 Commits

Author SHA1 Message Date
Anthony Berg
aa21733806 fix: floating point number practically causing an infinite loop 2025-03-29 22:16:55 +01:00
Anthony Berg
5a27445de8 fix: deprecated modules on LUMI
(cherry picked from commit 277a6b4a3c3a017e16ba3c6238349491e38b4792)
2025-03-26 14:56:55 +01:00
4 changed files with 35 additions and 36 deletions

View File

@ -157,7 +157,7 @@ class BaseSimulator(object):
self.num_substeps = num_substeps
#Handle autotuning block size
if (self.context.autotuner):
if self.context.autotuner:
peak_configuration = self.context.autotuner.get_peak_performance(self.__class__)
block_width = int(peak_configuration["block_width"])
block_height = int(peak_configuration["block_height"])
@ -202,22 +202,22 @@ class BaseSimulator(object):
t_end = t_start + t
update_dt = True
if (dt is not None):
if dt is not None:
update_dt = False
self.dt = dt
with tqdm(total=t_end) as pbar:
while(self.simTime() < t_end):
for _ in tqdm(range(math.ceil(t_end / self.dt))):
# Update dt every 100 timesteps and cross your fingers it works
# for the next 100
if (update_dt and (self.simSteps() % 100 == 0)):
# TODO this is probably broken now after fixing the "infinite" loop
if update_dt and (self.simSteps() % 100 == 0):
self.dt = self.computeDt()*self.cfl_scale
# Compute timestep for "this" iteration (i.e., shorten last timestep)
current_dt = np.float32(min(self.dt, t_end-self.simTime()))
# Stop if end reached (should not happen)
if (current_dt <= 0.0):
if current_dt <= 0.0:
self.logger.warning("Timestep size {:d} is less than or equal to zero!".format(self.simSteps()))
break
@ -225,7 +225,6 @@ class BaseSimulator(object):
self.step(current_dt)
#Print info
pbar.update(current_dt)
# print_string = printer.getPrintString(self.simTime() - t_start)
# if (print_string):
# self.logger.info("%s: %s", self, print_string)

View File

@ -19,18 +19,18 @@ Mydir=/project/project_4650000xx
Myapplication=${Mydir}/FiniteVolumeGPU_hip/mpiTesting.py
#modules
ml LUMI/23.03 partition/G
ml LUMI/24.03 partition/G
ml lumi-container-wrapper
ml cray-python/3.9.13.1
ml rocm/5.2.3
ml cray-python/3.11.7
ml rocm/6.2.2
ml craype-accel-amd-gfx90a
ml cray-mpich/8.1.27
ml cray-mpich/8.1.29
export PATH="/project/project_4650000xx/FiniteVolumeGPU_hip/MyCondaEnv/bin:$PATH"
#missing library
export LD_LIBRARY_PATH=/opt/cray/pe/mpich/8.1.27/ofi/cray/14.0/lib-abi-mpich:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=/opt/cray/pe/mpich/8.1.29/ofi/cray/17.0/lib-abi-mpich:$LD_LIBRARY_PATH
#Binding mask
bind_mask="0x${fe}000000000000,0x${fe}00000000000000,0x${fe}0000,0x${fe}000000,0x${fe},0x${fe}00,0x${fe}00000000,0x${fe}0000000000"

View File

@ -5,13 +5,13 @@ This is a HIP version of the [FiniteVolume code](https://github.com/babrodtk/Fin
## Setup on LUMI-G
Here is a step-by-step guide on installing packages on LUMI-G
### Step 1: Install rocm-5.2.5 with Easybuild
### Step 1: Install rocm-5.4.6 with EasyBuild
```
export EBU_USER_PREFIX=/project/project_xxxxxx/EasyBuild
ml LUMI/24.03 partition/G
ml EasyBuild-user
export PYTHONIOENCODING=utf-8
eb rocm-5.2.5.eb -r
eb rocm-5.4.6.eb -r
```
### Step 2: Run conda-container

View File

@ -5,7 +5,7 @@ channels:
- conda-forge
dependencies:
- python=3.11.11
- python=3.11.7
- pip
- numpy
- mpi4py