Mirror of https://github.com/smyalygames/FiniteVolumeGPU_HIP.git (synced 2025-12-24 13:29:17 +01:00)

Compare commits: 11 commits on main, up to 6d9f36968d

| SHA1 |
|---|
| 6d9f36968d |
| 5b925cdb42 |
| b054a4dbcd |
| 2e5cf88eef |
| 80afd31286 |
| e2306406a7 |
| aa21733806 |
| 5a27445de8 |
| cd69f69080 |
| 9761ff4924 |
| 5931cee93f |
@@ -35,6 +35,8 @@ import gc
 import netCDF4
 import json

+from tqdm import tqdm
+
 #import pycuda.compiler as cuda_compiler
 #import pycuda.gpuarray
 #import pycuda.driver as cuda
@@ -178,11 +180,11 @@ def runSimulation(simulator, simulator_args, outfile, save_times, save_var_names
     profiling_data_sim_runner["end"]["t_sim_init"] = time.time()

     #Start simulation loop
-    progress_printer = ProgressPrinter(save_times[-1], print_every=10)
-    for k in range(len(save_times)):
+    # progress_printer = ProgressPrinter(save_times[-1], print_every=10)
+    for k, t_step in tqdm(enumerate(t_steps), desc="Simulation Loop"):
         #Get target time and step size there
-        t_step = t_steps[k]
-        t_end = save_times[k]
+        # t_step = t_steps[k]
+        # t_end = save_times[k]

         #Sanity check simulator
         try:
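For context, a minimal standalone sketch of the loop pattern introduced in this hunk. The `save_times`/`t_steps` values and the way `t_steps` is derived are assumptions for illustration, not taken from the repository; passing `total=` is an extra hint so tqdm can draw a bar even though `enumerate` has no length.

```python
# Sketch of the tqdm-driven save loop (hypothetical values, not the repo's setup).
import numpy as np
from tqdm import tqdm

save_times = np.linspace(0.0, 1.0, 11)          # times at which output is written
t_steps = np.empty_like(save_times)             # assumed: step needed to reach each save time
t_steps[0] = save_times[0]
t_steps[1:] = save_times[1:] - save_times[:-1]

for k, t_step in tqdm(enumerate(t_steps), total=len(t_steps), desc="Simulation Loop"):
    t_end = save_times[k]
    # sim.simulate(t_step, dt) would advance the solution here; tqdm replaces ProgressPrinter.
    pass
```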
@@ -194,7 +196,7 @@ def runSimulation(simulator, simulator_args, outfile, save_times, save_var_names
         profiling_data_sim_runner["start"]["t_full_step"] += time.time()

         #Simulate
-        if (t_step > 0.0):
+        if t_step > 0.0:
             sim.simulate(t_step, dt)

         profiling_data_sim_runner["end"]["t_full_step"] += time.time()
@@ -211,11 +213,11 @@ def runSimulation(simulator, simulator_args, outfile, save_times, save_var_names
         profiling_data_sim_runner["end"]["t_nc_write"] += time.time()

         #Write progress to screen
-        print_string = progress_printer.getPrintString(t_end)
-        if (print_string):
-            logger.debug(print_string)
+        # print_string = progress_printer.getPrintString(t_end)
+        # if (print_string):
+        # logger.debug(print_string)

-    logger.debug("Simulated to t={:f} in {:d} timesteps (average dt={:f})".format(t_end, sim.simSteps(), sim.simTime() / sim.simSteps()))
+    logger.debug("Simulated to t={:f} in {:d} timesteps (average dt={:f})".format(save_times[-1], sim.simSteps(), sim.simTime() / sim.simSteps()))

     return outdata.filename, profiling_data_sim_runner, sim.profiling_data_mpi
     #return outdata.filename
@@ -306,7 +308,7 @@ class IPEngine(object):
         import ipyparallel
         self.cluster = ipyparallel.Client()#profile='mpi')
         time.sleep(3)
-        while(len(self.cluster.ids) != n_engines):
+        while len(self.cluster.ids) != n_engines:
             time.sleep(0.5)
             self.logger.info("Waiting for cluster...")
             self.cluster = ipyparallel.Client()#profile='mpi')
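As a hedged sketch, the same engine-waiting pattern can be guarded with a deadline so a missing engine does not spin forever; the 60-second timeout and the engine count of 4 are assumptions for illustration, not part of this changeset.

```python
# Sketch: poll the ipyparallel cluster until the expected engines appear, with a deadline (assumed).
import time
import ipyparallel

n_engines = 4                     # hypothetical
cluster = ipyparallel.Client()    # profile='mpi' could be passed here, as in the code above
deadline = time.time() + 60.0
while len(cluster.ids) != n_engines and time.time() < deadline:
    time.sleep(0.5)
    cluster = ipyparallel.Client()
```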
@@ -433,58 +435,58 @@ class DataDumper(object):



-class ProgressPrinter(object):
-    """
-    Small helper class for
-    """
-    def __init__(self, total_steps, print_every=5):
-        self.logger = logging.getLogger(__name__)
-        self.start = time.time()
-        self.total_steps = total_steps
-        self.print_every = print_every
-        self.next_print_time = self.print_every
-        self.last_step = 0
-        self.secs_per_iter = None
+# class ProgressPrinter(object):
+# """
+# Small helper class for
+# """
+# def __init__(self, total_steps, print_every=5):
+# self.logger = logging.getLogger(__name__)
+# self.start = time.time()
+# self.total_steps = total_steps
+# self.print_every = print_every
+# self.next_print_time = self.print_every
+# self.last_step = 0
+# self.secs_per_iter = None

-    def getPrintString(self, step):
-        elapsed = time.time() - self.start
-        if (elapsed > self.next_print_time):
-            dt = elapsed - (self.next_print_time - self.print_every)
-            dsteps = step - self.last_step
-            steps_remaining = self.total_steps - step
+# def getPrintString(self, step):
+# elapsed = time.time() - self.start
+# if (elapsed > self.next_print_time):
+# dt = elapsed - (self.next_print_time - self.print_every)
+# dsteps = step - self.last_step
+# steps_remaining = self.total_steps - step

-            if (dsteps == 0):
-                return
+# if (dsteps == 0):
+# return

-            self.last_step = step
-            self.next_print_time = elapsed + self.print_every
+# self.last_step = step
+# self.next_print_time = elapsed + self.print_every

-            if not self.secs_per_iter:
-                self.secs_per_iter = dt / dsteps
-            self.secs_per_iter = 0.2*self.secs_per_iter + 0.8*(dt / dsteps)
+# if not self.secs_per_iter:
+# self.secs_per_iter = dt / dsteps
+# self.secs_per_iter = 0.2*self.secs_per_iter + 0.8*(dt / dsteps)

-            remaining_time = steps_remaining * self.secs_per_iter
+# remaining_time = steps_remaining * self.secs_per_iter

-            return "{:s}. Total: {:s}, elapsed: {:s}, remaining: {:s}".format(
-                ProgressPrinter.progressBar(step, self.total_steps),
-                ProgressPrinter.timeString(elapsed + remaining_time),
-                ProgressPrinter.timeString(elapsed),
-                ProgressPrinter.timeString(remaining_time))
+# return "{:s}. Total: {:s}, elapsed: {:s}, remaining: {:s}".format(
+# ProgressPrinter.progressBar(step, self.total_steps),
+# ProgressPrinter.timeString(elapsed + remaining_time),
+# ProgressPrinter.timeString(elapsed),
+# ProgressPrinter.timeString(remaining_time))

-    def timeString(seconds):
-        seconds = int(max(seconds, 1))
-        minutes, seconds = divmod(seconds, 60)
-        hours, minutes = divmod(minutes, 60)
-        periods = [('h', hours), ('m', minutes), ('s', seconds)]
-        time_string = ' '.join('{}{}'.format(value, name)
-                               for name, value in periods
-                               if value)
-        return time_string
+# def timeString(seconds):
+# seconds = int(max(seconds, 1))
+# minutes, seconds = divmod(seconds, 60)
+# hours, minutes = divmod(minutes, 60)
+# periods = [('h', hours), ('m', minutes), ('s', seconds)]
+# time_string = ' '.join('{}{}'.format(value, name)
+# for name, value in periods
+# if value)
+# return time_string

-    def progressBar(step, total_steps, width=30):
-        progress = np.round(width * step / total_steps).astype(np.int32)
-        progressbar = "0% [" + "#"*(progress) + "="*(width-progress) + "] 100%"
-        return progressbar
+# def progressBar(step, total_steps, width=30):
+# progress = np.round(width * step / total_steps).astype(np.int32)
+# progressbar = "0% [" + "#"*(progress) + "="*(width-progress) + "] 100%"
+# return progressbar


 """
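The class commented out above hand-rolled an ETA from a smoothed seconds-per-iteration estimate and formatted its own progress bar; tqdm reports elapsed time, rate, and remaining time out of the box. A minimal sketch, where the step count and the sleep are placeholders for real simulator steps:

```python
# Sketch: tqdm provides the elapsed/remaining/rate display that ProgressPrinter computed manually.
import time
from tqdm import tqdm

total_steps = 200                 # hypothetical
for step in tqdm(range(total_steps), desc="Simulation"):
    time.sleep(0.01)              # stand-in for one simulator step
```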
@@ -25,6 +25,7 @@ import numpy as np
 import math
 import logging
 from enum import IntEnum
+from tqdm import tqdm

 #import pycuda.compiler as cuda_compiler
 #import pycuda.gpuarray
@@ -156,7 +157,7 @@ class BaseSimulator(object):
         self.num_substeps = num_substeps

         #Handle autotuning block size
-        if (self.context.autotuner):
+        if self.context.autotuner:
             peak_configuration = self.context.autotuner.get_peak_performance(self.__class__)
             block_width = int(peak_configuration["block_width"])
             block_height = int(peak_configuration["block_height"])
@@ -195,42 +196,45 @@ class BaseSimulator(object):
         Requires that the step() function is implemented in the subclasses
         """

-        printer = Common.ProgressPrinter(t)
+        # printer = Common.ProgressPrinter(t)

         t_start = self.simTime()
         t_end = t_start + t

         update_dt = True
-        if (dt is not None):
+        if dt is not None:
             update_dt = False
             self.dt = dt

-        while(self.simTime() < t_end):
+        for _ in tqdm(range(math.ceil((t_end - t_start) / self.dt)), desc="Simulation"):
             # Update dt every 100 timesteps and cross your fingers it works
             # for the next 100
-            if (update_dt and (self.simSteps() % 100 == 0)):
+            # TODO this is probably broken now after fixing the "infinite" loop
+            if update_dt and (self.simSteps() % 100 == 0):
                 self.dt = self.computeDt()*self.cfl_scale


             # Compute timestep for "this" iteration (i.e., shorten last timestep)
             current_dt = np.float32(min(self.dt, t_end-self.simTime()))

             # Stop if end reached (should not happen)
-            if (current_dt <= 0.0):
+            if current_dt <= 0.0:
                 self.logger.warning("Timestep size {:d} is less than or equal to zero!".format(self.simSteps()))
                 break


             # Step forward in time
             self.step(current_dt)

             #Print info
-            print_string = printer.getPrintString(self.simTime() - t_start)
-            if (print_string):
-                self.logger.info("%s: %s", self, print_string)
-                try:
-                    self.check()
-                except AssertionError as e:
-                    e.args += ("Step={:d}, time={:f}".format(self.simSteps(), self.simTime()),)
-                    raise
+            # print_string = printer.getPrintString(self.simTime() - t_start)
+            # if (print_string):
+            # self.logger.info("%s: %s", self, print_string)
+            # try:
+            # self.check()
+            # except AssertionError as e:
+            # e.args += ("Step={:d}, time={:f}".format(self.simSteps(), self.simTime()),)
+            # raise

+        print("Done")


     def step(self, dt):
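A sketch of the bounded loop introduced above: the iteration count is fixed up front from the initial `dt`, and the final step is shortened so the run lands exactly on `t_end`. As the TODO in the diff notes, if `dt` is later recomputed inside the loop the precomputed count no longer matches; the values below are hypothetical.

```python
# Sketch of the fixed-count time loop with a shortened final step (hypothetical values).
import math
import numpy as np
from tqdm import tqdm

t_start, t_end, dt = 0.0, 1.0, 0.03
t = t_start
for _ in tqdm(range(math.ceil((t_end - t_start) / dt)), desc="Simulation"):
    current_dt = np.float32(min(dt, t_end - t))   # shorten the last timestep
    if current_dt <= 0.0:
        break
    t += float(current_dt)                        # self.step(current_dt) would go here
```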
Binary file not shown.
@@ -19,18 +19,18 @@ Mydir=/project/project_4650000xx
 Myapplication=${Mydir}/FiniteVolumeGPU_hip/mpiTesting.py

 #modules
-ml LUMI/23.03 partition/G
+ml LUMI/24.03 partition/G
 ml lumi-container-wrapper
-ml cray-python/3.9.13.1
-ml rocm/5.2.3
+ml cray-python/3.11.7
+ml rocm/6.2.2

 ml craype-accel-amd-gfx90a
-ml cray-mpich/8.1.27
+ml cray-mpich/8.1.29

 export PATH="/project/project_4650000xx/FiniteVolumeGPU_hip/MyCondaEnv/bin:$PATH"

 #missing library
-export LD_LIBRARY_PATH=/opt/cray/pe/mpich/8.1.27/ofi/cray/14.0/lib-abi-mpich:$LD_LIBRARY_PATH
+export LD_LIBRARY_PATH=/opt/cray/pe/mpich/8.1.29/ofi/cray/17.0/lib-abi-mpich:$LD_LIBRARY_PATH

 #Binding mask
 bind_mask="0x${fe}000000000000,0x${fe}00000000000000,0x${fe}0000,0x${fe}000000,0x${fe},0x${fe}00,0x${fe}00000000,0x${fe}0000000000"
@@ -5,13 +5,13 @@ This is a HIP version of the [FiniteVolume code](https://github.com/babrodtk/Fin
 ## Setup on LUMI-G
 Here is a step-by-step guide on installing packages on LUMI-G

-### Step 1: Install rocm-5.2.5 with Easybuild
+### Step 1: Install rocm-5.4.6 with Easybuild
 ```
 export EBU_USER_PREFIX=/project/project_xxxxxx/EasyBuild
 ml LUMI/24.03 partition/G
 ml EasyBuild-user
 export PYTHONIOENCODING=utf-8
-eb rocm-5.2.5.eb -r
+eb rocm-5.4.6.eb -r
 ```

 ### Step 2: run conda-container
@@ -5,15 +5,17 @@ channels:
   - conda-forge

 dependencies:
-  - python=3.9.13
+  - python=3.11.7
   - pip
   - numpy
   - mpi4py
   - six
   - pytools
   - netcdf4
   - scipy
+  - tqdm
   - pip:
-    - hip-python==5.4.3.470.16
+    - hip-python==6.2.0.499.16
     - -i https://test.pypi.org/simple/
@@ -70,7 +70,7 @@ def hip_check(call_result):
 args = parser.parse_args()

-if(args.profile):
+if args.profile:
     profiling_data = {}
     # profiling: total run time
     t_total_start = time.time()
@@ -79,6 +79,8 @@ if(args.profile):
 # Get MPI COMM to use
 comm = MPI.COMM_WORLD
+size = comm.Get_size()
+rank = comm.Get_rank()


 ####
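A sketch of the pattern these two additions enable: query the communicator once and reuse `rank`/`size` for per-rank file names and the rank-0 checks that appear later in the diff. The `print` line is illustrative only.

```python
# Sketch: cache rank/size from the communicator and reuse them (mpi4py calls as in the diff).
from mpi4py import MPI

comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()

log_filename = 'mpi_' + str(rank) + '.log'       # one log file per rank
outfile = "mpi_out_" + str(rank) + ".nc"         # one NetCDF output file per rank
if rank == 0:
    print("Running with " + str(size) + " MPI processes")
```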
@@ -86,7 +88,7 @@ comm = MPI.COMM_WORLD
 ####
 log_level_console = 20
 log_level_file = 10
-log_filename = 'mpi_' + str(comm.rank) + '.log'
+log_filename = 'mpi_' + str(rank) + '.log'
 logger = logging.getLogger('GPUSimulators')
 logger.setLevel(min(log_level_console, log_level_file))
@@ -110,7 +112,7 @@ logger.info("File logger using level %s to %s",
 # Initialize MPI grid etc
 ####
 logger.info("Creating MPI grid")
-grid = MPISimulator.MPIGrid(MPI.COMM_WORLD)
+grid = MPISimulator.MPIGrid(comm)

 """
 job_id = int(os.environ["SLURM_JOB_ID"])
@@ -152,7 +154,7 @@ gamma = 1.4
 #save_times = np.linspace(0, 0.000099, 11)
 #save_times = np.linspace(0, 0.000099, 2)
 save_times = np.linspace(0, 0.0000999, 2)
-outfile = "mpi_out_" + str(MPI.COMM_WORLD.rank) + ".nc"
+outfile = "mpi_out_" + str(rank) + ".nc"
 save_var_names = ['rho', 'rho_u', 'rho_v', 'E']

 arguments = IC.genKelvinHelmholtz(nx, ny, gamma, grid=grid)
@@ -160,7 +162,7 @@ arguments['context'] = cuda_context
 arguments['theta'] = 1.2
 arguments['grid'] = grid

-if(args.profile):
+if args.profile:
     t_init_end = time.time()
     t_init = t_init_end - t_init_start
     profiling_data["t_init"] = t_init
@@ -178,17 +180,17 @@ def genSim(grid, **kwargs):
     return sim


-outfile, sim_runner_profiling_data, sim_profiling_data = Common.runSimulation(
+(outfile, sim_runner_profiling_data, sim_profiling_data) = Common.runSimulation(
     genSim, arguments, outfile, save_times, save_var_names, dt)

-if(args.profile):
+if args.profile:
     t_total_end = time.time()
     t_total = t_total_end - t_total_start
     profiling_data["t_total"] = t_total
-    print("Total run time on rank " + str(MPI.COMM_WORLD.rank) + " is " + str(t_total) + " s")
+    print("Total run time on rank " + str(rank) + " is " + str(t_total) + " s")

 # write profiling to json file
-if(args.profile and MPI.COMM_WORLD.rank == 0):
+if args.profile and rank == 0:
     job_id = ""
     if "SLURM_JOB_ID" in os.environ:
         job_id = int(os.environ["SLURM_JOB_ID"])
@@ -199,7 +201,7 @@ if(args.profile and MPI.COMM_WORLD.rank == 0):
         str(job_id) + "_" + str(allocated_nodes) + "_nodes_and_" + str(allocated_gpus) + "_GPUs_profiling.json"
         profiling_data["outfile"] = outfile
     else:
-        profiling_file = "MPI_" + str(MPI.COMM_WORLD.size) + "_procs_and_" + str(num_cuda_devices) + "_GPUs_profiling.json"
+        profiling_file = "MPI_" + str(size) + "_procs_and_" + str(num_cuda_devices) + "_GPUs_profiling.json"

     for stage in sim_runner_profiling_data["start"].keys():
         profiling_data[stage] = sim_runner_profiling_data["end"][stage] - sim_runner_profiling_data["start"][stage]
@@ -214,7 +216,7 @@ if(args.profile and MPI.COMM_WORLD.rank == 0):
     profiling_data["slurm_job_id"] = job_id
     profiling_data["n_cuda_devices"] = str(num_cuda_devices)
-    profiling_data["n_processes"] = str(MPI.COMM_WORLD.size)
+    profiling_data["n_processes"] = str(size)
     profiling_data["git_hash"] = Common.getGitHash()
     profiling_data["git_status"] = Common.getGitStatus()
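The diff stops before the actual JSON write, so as a hedged sketch, rank 0 might persist the assembled dictionary roughly like this; the helper name and the `indent` argument are assumptions, not part of the changeset.

```python
# Sketch: persist the assembled profiling dictionary on rank 0 (write call not shown in the diff).
import json

def write_profiling(profiling_data, profiling_file, rank):
    """Hypothetical helper: dump the profiling dict to the JSON file named above."""
    if rank == 0:
        with open(profiling_file, "w") as f:
            json.dump(profiling_data, f, indent=4)
```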