Merge branch 'master' of github.com:setmar/ShallowWaterGPU

Martin Lilleeng Sætra 2022-04-07 13:52:28 +02:00
commit f8baa365d8
4 changed files with 72 additions and 18 deletions


@@ -95,6 +95,16 @@ def runSimulation(simulator, simulator_args, outfile, save_times, save_var_names
save_times, and saves all of the variables in list save_var_names. Elements in
save_var_names can be set to None if you do not want to save them
"""
profiling_data_sim_runner = { 'start': {}, 'end': {} }
profiling_data_sim_runner["start"]["t_sim_init"] = 0
profiling_data_sim_runner["end"]["t_sim_init"] = 0
profiling_data_sim_runner["start"]["t_nc_write"] = 0
profiling_data_sim_runner["end"]["t_nc_write"] = 0
profiling_data_sim_runner["start"]["t_step"] = 0
profiling_data_sim_runner["end"]["t_step"] = 0
profiling_data_sim_runner["start"]["t_sim_init"] = time.time()
logger = logging.getLogger(__name__)
assert len(save_times) > 0, "Need to specify which times to save"
@@ -146,6 +156,8 @@ def runSimulation(simulator, simulator_args, outfile, save_times, save_var_names
t_steps[0] = save_times[0]
t_steps[1:] = save_times[1:] - save_times[0:-1]
profiling_data_sim_runner["end"]["t_sim_init"] = time.time()
#Start simulation loop
progress_printer = ProgressPrinter(save_times[-1], print_every=10)
for k in range(len(save_times)):
@@ -160,10 +172,16 @@ def runSimulation(simulator, simulator_args, outfile, save_times, save_var_names
logger.error("Error after {:d} steps (t={:f}: {:s}".format(sim.simSteps(), sim.simTime(), str(e)))
return outdata.filename
profiling_data_sim_runner["start"]["t_step"] += time.time()
#Simulate
if (t_step > 0.0):
sim.simulate(t_step)
profiling_data_sim_runner["end"]["t_step"] += time.time()
profiling_data_sim_runner["start"]["t_nc_write"] += time.time()
#Download
save_vars = sim.download(download_vars)
@@ -171,6 +189,8 @@ def runSimulation(simulator, simulator_args, outfile, save_times, save_var_names
for i, var_name in enumerate(save_var_names):
ncvars[var_name][k, :] = save_vars[i]
profiling_data_sim_runner["end"]["t_nc_write"] += time.time()
#Write progress to screen
print_string = progress_printer.getPrintString(t_end)
if (print_string):
@@ -178,7 +198,7 @@ def runSimulation(simulator, simulator_args, outfile, save_times, save_var_names
logger.debug("Simulated to t={:f} in {:d} timesteps (average dt={:f})".format(t_end, sim.simSteps(), sim.simTime() / sim.simSteps()))
return outdata.filename
return outdata.filename, profiling_data_sim_runner, sim.profiling_data_mpi
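
For reference, and not part of this commit: the profiling pattern above sums time.time() into matching "start" and "end" buckets on every pass through the simulation loop, so the total time spent in a stage is simply end minus start once the loop is done. A minimal standalone sketch of that pattern, with time.sleep standing in for the real simulation step:

import time

profiling = {'start': {'t_step': 0.0}, 'end': {'t_step': 0.0}}

for _ in range(3):
    profiling["start"]["t_step"] += time.time()
    time.sleep(0.01)  # stand-in for sim.simulate(t_step)
    profiling["end"]["t_step"] += time.time()

# The sum of per-iteration durations (end_i - start_i) equals sum(end) - sum(start)
t_step_total = profiling["end"]["t_step"] - profiling["start"]["t_step"]
print("Accumulated time in t_step: {:f} s".format(t_step_total))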


@@ -24,6 +24,7 @@ import logging
from GPUSimulators import Simulator
import numpy as np
from mpi4py import MPI
import time
@@ -200,6 +201,13 @@ class MPISimulator(Simulator.BaseSimulator):
Class which handles communication between simulators on different MPI nodes
"""
def __init__(self, sim, grid):
self.profiling_data_mpi = { 'start': {}, 'end': {} }
self.profiling_data_mpi["start"]["t_step_mpi_halo_exchange"] = 0
self.profiling_data_mpi["end"]["t_step_mpi_halo_exchange"] = 0
self.profiling_data_mpi["start"]["t_step_mpi"] = 0
self.profiling_data_mpi["end"]["t_step_mpi"] = 0
self.profiling_data_mpi["n_time_steps"] = 0
self.profiling_data_mpi["start"]["t_sim_mpi_init"] = time.time()
self.logger = logging.getLogger(__name__)
autotuner = sim.context.autotuner
@@ -284,12 +292,26 @@ class MPISimulator(Simulator.BaseSimulator):
self.out_s = np.empty_like(self.in_s)
self.logger.debug("Simlator rank {:d} initialized on {:s}".format(self.grid.comm.rank, MPI.Get_processor_name()))
self.profiling_data_mpi["end"]["t_sim_mpi_init"] = time.time()
def substep(self, dt, step_number):
if self.profiling_data_mpi["n_time_steps"] > 0:
self.profiling_data_mpi["start"]["t_step_mpi_halo_exchange"] += time.time()
self.exchange()
self.sim.stream.synchronize() # only necessary for profiling!
if self.profiling_data_mpi["n_time_steps"] > 0:
self.profiling_data_mpi["end"]["t_step_mpi_halo_exchange"] += time.time()
self.profiling_data_mpi["start"]["t_step_mpi"] += time.time()
self.sim.substep(dt, step_number)
self.sim.stream.synchronize() # only necessary for profiling!
if self.profiling_data_mpi["n_time_steps"] > 0:
self.profiling_data_mpi["end"]["t_step_mpi"] += time.time()
self.profiling_data_mpi["n_time_steps"] += 1
def getOutput(self):
return self.sim.getOutput()
@@ -409,5 +431,3 @@ class MPISimulator(Simulator.BaseSimulator):
for comm in comm_send:
comm.wait()
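
Not part of this commit, but for clarity: the timing added to MPISimulator.substep synchronizes the CUDA stream before reading the clock (kernel launches are asynchronous, so without a synchronize the timestamp would only cover the launch, not the work) and skips the first time step so one-off warm-up costs do not distort the accumulated numbers. A minimal sketch of that pattern, where stream and launch_kernel are placeholders for the real simulator objects:

import time

def timed_substep(profiling, stream, launch_kernel, dt, step_number):
    # Skip the first step: warm-up effects (e.g. lazy initialization) would
    # otherwise be counted into the per-step timings.
    if profiling["n_time_steps"] > 0:
        profiling["start"]["t_step_mpi"] += time.time()

    launch_kernel(dt, step_number)  # asynchronous GPU work
    stream.synchronize()            # only needed so the timestamp is meaningful

    if profiling["n_time_steps"] > 0:
        profiling["end"]["t_step_mpi"] += time.time()
    profiling["n_time_steps"] += 1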


@@ -8,8 +8,7 @@ Connect:
ssh -AX ip-from-webpage
For Jupyter Notebook:
ssh -L 8888:localhost:80 ip-from-webpage
(access localhost:8888 in browser and open terminal)
Access https://seymour.cs.oslomet.no in browser and open terminal
(one-time operation) conda env create -f conda_environment.yml
conda activate ShallowWaterGPU / choose the "conda:ShallowWaterGPU" kernel in the notebook


@@ -49,8 +49,10 @@ parser.add_argument('--profile', action='store_true') # default: False
args = parser.parse_args()
if(args.profile):
profiling_data = {}
# profiling: total run time
t_total_start = time.time()
t_init_start = time.time()
# Get MPI COMM to use
@@ -109,7 +111,7 @@ nx = args.nx
ny = args.ny
gamma = 1.4
save_times = np.linspace(0, 0.02, 2)
save_times = np.linspace(0, 0.5, 2)
outfile = "mpi_out_" + str(MPI.COMM_WORLD.rank) + ".nc"
save_var_names = ['rho', 'rho_u', 'rho_v', 'E']
@@ -118,6 +120,10 @@ arguments['context'] = cuda_context
arguments['theta'] = 1.2
arguments['grid'] = grid
if(args.profile):
t_init_end = time.time()
t_init = t_init_end - t_init_start
profiling_data["t_init"] = t_init
####
# Run simulation
@@ -132,12 +138,13 @@ def genSim(grid, **kwargs):
return sim
outfile = Common.runSimulation(
outfile, sim_runner_profiling_data, sim_profiling_data = Common.runSimulation(
genSim, arguments, outfile, save_times, save_var_names)
if(args.profile):
t_total_end = time.time()
t_total = t_total_end - t_total_start
profiling_data["t_total"] = t_total
print("Total run time on rank " + str(MPI.COMM_WORLD.rank) + " is " + str(t_total) + " s")
# write profiling to json file
@@ -148,14 +155,22 @@ if(args.profile and MPI.COMM_WORLD.rank == 0):
allocated_gpus = int(os.environ["CUDA_VISIBLE_DEVICES"].count(",") + 1)
profiling_file = "MPI_jobid_" + \
str(job_id) + "_" + str(allocated_nodes) + "_nodes_and_" + str(allocated_gpus) + "_GPUs_profiling.json"
profiling_data["outfile"] = outfile
else:
profiling_file = "MPI_test_profiling.json"
profiling_file = "MPI_" + str(MPI.COMM_WORLD.size) + "_procs_and_" + str(num_cuda_devices) + "_GPUs_profiling.json"
write_profiling_data = {}
write_profiling_data["total"] = t_total
for stage in sim_runner_profiling_data["start"].keys():
profiling_data[stage] = sim_runner_profiling_data["end"][stage] - sim_runner_profiling_data["start"][stage]
for stage in sim_profiling_data["start"].keys():
profiling_data[stage] = sim_profiling_data["end"][stage] - sim_profiling_data["start"][stage]
profiling_data["nx"] = nx
profiling_data["ny"] = ny
profiling_data["n_time_steps"] = sim_profiling_data["n_time_steps"]
with open(profiling_file, "w") as write_file:
json.dump(write_profiling_data, write_file)
json.dump(profiling_data, write_file)
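
As a usage note, not part of this commit: each stage key written to the JSON file is the difference between the accumulated "end" and "start" timestamps, flattened together with t_init, t_total, nx, ny and n_time_steps. A minimal sketch of reading such a file back; the filename is hypothetical and only follows the non-SLURM naming pattern above:

import json

with open("MPI_4_procs_and_4_GPUs_profiling.json") as f:  # hypothetical filename
    data = json.load(f)

for key in ("t_total", "t_init", "t_sim_init", "t_step", "t_nc_write",
            "t_step_mpi", "t_step_mpi_halo_exchange", "t_sim_mpi_init"):
    if key in data:
        print("{:28s} {:10.3f} s".format(key, data[key]))
print("Grid: {} x {}, {} time steps".format(data["nx"], data["ny"], data["n_time_steps"]))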
####
# Clean shutdown