diff --git a/GPUSimulators/Autotuner.py b/GPUSimulators/Autotuner.py
new file mode 100644
index 0000000..84aedc2
--- /dev/null
+++ b/GPUSimulators/Autotuner.py
@@ -0,0 +1,304 @@
+# -*- coding: utf-8 -*-
+
+"""
+This python module implements autotuning of the simulators'
+kernel launch configurations (block sizes)
+
+Copyright (C) 2018 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import os
+import gc
+import numpy as np
+import logging
+from socket import gethostname
+
+#import pycuda.driver as cuda
+from hip import hip,hiprtc
+
+from GPUSimulators import Common, Simulator, CudaContext
+
+def hip_check(call_result):
+    err = call_result[0]
+    result = call_result[1:]
+    if len(result) == 1:
+        result = result[0]
+    if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
+        raise RuntimeError(str(err))
+    elif (
+        isinstance(err, hiprtc.hiprtcResult)
+        and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
+    ):
+        raise RuntimeError(str(err))
+    return result
+
+class Autotuner:
+
+ def __init__(self,
+ nx=2048, ny=2048,
+ block_widths=range(8, 32, 1),
+ block_heights=range(8, 32, 1)):
+ logger = logging.getLogger(__name__)
+ self.filename = "autotuning_data_" + gethostname() + ".npz"
+ self.nx = nx
+ self.ny = ny
+ self.block_widths = block_widths
+ self.block_heights = block_heights
+ self.performance = {}
+
+
+ def benchmark(self, simulator, force=False):
+ logger = logging.getLogger(__name__)
+
+ #Run through simulators and benchmark
+ key = str(simulator.__name__)
+ logger.info("Benchmarking %s to %s", key, self.filename)
+
+ #If this simulator has been benchmarked already, skip it
+ if (force==False and os.path.isfile(self.filename)):
+ with np.load(self.filename) as data:
+ if key in data["simulators"]:
+ logger.info("%s already benchmarked - skipping", key)
+ return
+
+ # Set arguments to send to the simulators during construction
+ context = CudaContext.CudaContext(autotuning=False)
+ g = 9.81
+ h0, hu0, hv0, dx, dy, dt = Autotuner.gen_test_data(nx=self.nx, ny=self.ny, g=g)
+ arguments = {
+ 'context': context,
+ 'h0': h0, 'hu0': hu0, 'hv0': hv0,
+ 'nx': self.nx, 'ny': self.ny,
+ 'dx': dx, 'dy': dy, 'dt': 0.9*dt,
+ 'g': g
+ }
+
+ # Load existing data into memory
+ benchmark_data = {
+ "simulators": [],
+ }
+ if (os.path.isfile(self.filename)):
+ with np.load(self.filename) as data:
+ for k, v in data.items():
+ benchmark_data[k] = v
+
+ # Run benchmark
+ benchmark_data[key + "_megacells"] = Autotuner.benchmark_single_simulator(simulator, arguments, self.block_widths, self.block_heights)
+ benchmark_data[key + "_block_widths"] = self.block_widths
+ benchmark_data[key + "_block_heights"] = self.block_heights
+ benchmark_data[key + "_arguments"] = str(arguments)
+
+ existing_sims = benchmark_data["simulators"]
+ if (isinstance(existing_sims, np.ndarray)):
+ existing_sims = existing_sims.tolist()
+ if (key not in existing_sims):
+ benchmark_data["simulators"] = existing_sims + [key]
+
+ # Save to file
+ np.savez_compressed(self.filename, **benchmark_data)
+
+
+
+ """
+ Function which reads a numpy file with autotuning data
+ and reports the maximum performance and block size
+ """
+ def get_peak_performance(self, simulator):
+ logger = logging.getLogger(__name__)
+
+ assert issubclass(simulator, Simulator.BaseSimulator)
+ key = simulator.__name__
+
+ if (key in self.performance):
+ return self.performance[key]
+ else:
+ #Run simulation if required
+ if (not os.path.isfile(self.filename)):
+ logger.debug("Could not get autotuned peak performance for %s: benchmarking", key)
+ self.benchmark(simulator)
+
+ with np.load(self.filename) as data:
+ if key not in data['simulators']:
+ logger.debug("Could not get autotuned peak performance for %s: benchmarking", key)
+ data.close()
+ self.benchmark(simulator)
+ data = np.load(self.filename)
+
+ def find_max_index(megacells):
+ max_index = np.nanargmax(megacells)
+ return np.unravel_index(max_index, megacells.shape)
+
+ megacells = data[key + '_megacells']
+ block_widths = data[key + '_block_widths']
+ block_heights = data[key + '_block_heights']
+ j, i = find_max_index(megacells)
+
+ self.performance[key] = { "block_width": block_widths[i],
+ "block_height": block_heights[j],
+ "megacells": megacells[j, i] }
+ logger.debug("Returning %s as peak performance parameters", self.performance[key])
+ return self.performance[key]
+
+        #This should never happen
+        raise RuntimeError("Something wrong: Could not get autotuning data!")
+
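+    # Example usage (a sketch; the simulator class is illustrative):
+    #   autotuner = Autotuner()
+    #   peak = autotuner.get_peak_performance(LxF.LxF)
+    #   print(peak["block_width"], peak["block_height"], peak["megacells"])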
+
+
+ """
+ Runs a set of benchmarks for a single simulator
+ """
+ def benchmark_single_simulator(simulator, arguments, block_widths, block_heights):
+ logger = logging.getLogger(__name__)
+
+ megacells = np.empty((len(block_heights), len(block_widths)))
+ megacells.fill(np.nan)
+
+ logger.debug("Running %d benchmarks with %s", len(block_heights)*len(block_widths), simulator.__name__)
+
+ sim_arguments = arguments.copy()
+
+ with Common.Timer(simulator.__name__) as t:
+ for j, block_height in enumerate(block_heights):
+ sim_arguments.update({'block_height': block_height})
+ for i, block_width in enumerate(block_widths):
+ sim_arguments.update({'block_width': block_width})
+ megacells[j, i] = Autotuner.run_benchmark(simulator, sim_arguments)
+
+
+ logger.debug("Completed %s in %f seconds", simulator.__name__, t.secs)
+
+ return megacells
+
+
+ """
+ Runs a benchmark, and returns the number of megacells achieved
+ """
+ def run_benchmark(simulator, arguments, timesteps=10, warmup_timesteps=2):
+ logger = logging.getLogger(__name__)
+
+ #Initialize simulator
+ try:
+ sim = simulator(**arguments)
+        except Exception:
+ #An exception raised - not possible to continue
+ logger.debug("Failed creating %s with arguments %s", simulator.__name__, str(arguments))
+ return np.nan
+
+ #Create timer events
+ #start = cuda.Event()
+ #end = cuda.Event()
+ stream = hip_check(hip.hipStreamCreate())
+
+ start = hip_check(hip.hipEventCreate())
+ end = hip_check(hip.hipEventCreate())
+
+ #Warmup
+ for i in range(warmup_timesteps):
+ sim.stepEuler(sim.dt)
+
+        #Run simulation with timer
+        #start.record(sim.stream)
+        #start recording (note that the events are recorded on the locally
+        #created stream, while the simulator may use its own stream for the work)
+        hip_check(hip.hipEventRecord(start, stream))
+ for i in range(timesteps):
+ sim.stepEuler(sim.dt)
+ #end.record(sim.stream)
+ #stop recording and synchronize
+ hip_check(hip.hipEventRecord(end, stream))
+
+ #Synchronize end event
+ #end.synchronize()
+ hip_check(hip.hipEventSynchronize(end))
+
+        #Compute megacells
+        #gpu_elapsed = end.time_since(start)*1.0e-3
+        #hipEventElapsedTime returns milliseconds; convert to seconds
+        gpu_elapsed = hip_check(hip.hipEventElapsedTime(start, end))*1.0e-3
+
+        megacells = (sim.nx*sim.ny*timesteps / (1000*1000)) / gpu_elapsed
+
+ #Sanity check solution
+ h, hu, hv = sim.download()
+ sane = True
+ sane = sane and Autotuner.sanity_check(h, 0.3, 0.7)
+ sane = sane and Autotuner.sanity_check(hu, -0.2, 0.2)
+ sane = sane and Autotuner.sanity_check(hv, -0.2, 0.2)
+
+ if (sane):
+ logger.debug("%s [%d x %d] succeeded: %f megacells, gpu elapsed %f", simulator.__name__, arguments["block_width"], arguments["block_height"], megacells, gpu_elapsed)
+ return megacells
+ else:
+ logger.debug("%s [%d x %d] failed: gpu elapsed %f", simulator.__name__, arguments["block_width"], arguments["block_height"], gpu_elapsed)
+ return np.nan
+
+
+
+ """
+ Generates test dataset
+ """
+ def gen_test_data(nx, ny, g):
+ width = 100.0
+ height = 100.0
+ dx = width / float(nx)
+ dy = height / float(ny)
+
+ x_center = dx*nx/2.0
+ y_center = dy*ny/2.0
+
+ #Create a gaussian "dam break" that will not form shocks
+ size = width / 5.0
+ dt = 10**10
+
+        h = np.zeros((ny, nx), dtype=np.float32)
+        hu = np.zeros((ny, nx), dtype=np.float32)
+        hv = np.zeros((ny, nx), dtype=np.float32)
+
+ extent = 1.0/np.sqrt(2.0)
+ x = (dx*(np.arange(0, nx, dtype=np.float32)+0.5) - x_center) / size
+ y = (dy*(np.arange(0, ny, dtype=np.float32)+0.5) - y_center) / size
+ xv, yv = np.meshgrid(x, y, sparse=False, indexing='xy')
+ r = np.minimum(1.0, np.sqrt(xv**2 + yv**2))
+ xv = None
+ yv = None
+ gc.collect()
+
+ #Generate highres
+ cos = np.cos(np.pi*r)
+ h = 0.5 + 0.1*0.5*(1.0 + cos)
+ hu = 0.1*0.5*(1.0 + cos)
+ hv = hu.copy()
+
+ scale = 0.7
+ max_h_estimate = 0.6
+ max_u_estimate = 0.1*np.sqrt(2.0)
+ dx = width/nx
+ dy = height/ny
+ dt = scale * min(dx, dy) / (max_u_estimate + np.sqrt(g*max_h_estimate))
+
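+        # The timestep above is the shallow-water CFL condition
+        #   dt <= C * min(dx, dy) / (|u|_max + sqrt(g * h_max))
+        # with C = scale = 0.7 and conservative estimates of h_max and |u|_max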
+ return h, hu, hv, dx, dy, dt
+
+ """
+ Checks that a variable is "sane"
+ """
+ def sanity_check(variable, bound_min, bound_max):
+ maxval = np.amax(variable)
+ minval = np.amin(variable)
+ if (np.isnan(maxval)
+ or np.isnan(minval)
+ or maxval > bound_max
+ or minval < bound_min):
+ return False
+ else:
+ return True
diff --git a/GPUSimulators/Common.py b/GPUSimulators/Common.py
new file mode 100644
index 0000000..6681450
--- /dev/null
+++ b/GPUSimulators/Common.py
@@ -0,0 +1,879 @@
+# -*- coding: utf-8 -*-
+
+"""
+This python module implements the different helper functions and
+classes
+
+Copyright (C) 2018 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
+"""
+
+import os
+
+import numpy as np
+import time
+import signal
+import subprocess
+import tempfile
+import re
+import io
+import hashlib
+import logging
+import gc
+import netCDF4
+import json
+
+#import pycuda.compiler as cuda_compiler
+#import pycuda.gpuarray
+#import pycuda.driver as cuda
+#from pycuda.tools import PageLockedMemoryPool
+
+from hip import hip, hiprtc
+from hip import hipblas
+
+def hip_check(call_result):
+ err = call_result[0]
+ result = call_result[1:]
+ if len(result) == 1:
+ result = result[0]
+ if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
+ raise RuntimeError(str(err))
+ elif (
+ isinstance(err, hiprtc.hiprtcResult)
+ and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
+ ):
+ raise RuntimeError(str(err))
+ return result
+
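+# Example of the hip-python calling convention (a sketch): each call returns an
+# (err, value, ...) tuple that hip_check unwraps, raising RuntimeError on error:
+#   stream = hip_check(hip.hipStreamCreate())   # unwraps to the stream handle
+#   hip_check(hip.hipStreamDestroy(stream))     # no value to unwrap
+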
+def safeCall(cmd):
+ logger = logging.getLogger(__name__)
+ try:
+ #git rev-parse HEAD
+ current_dir = os.path.dirname(os.path.realpath(__file__))
+ params = dict()
+ params['stderr'] = subprocess.STDOUT
+ params['cwd'] = current_dir
+ params['universal_newlines'] = True #text=True in more recent python
+ params['shell'] = False
+ if os.name == 'nt':
+ params['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
+ stdout = subprocess.check_output(cmd, **params)
+ except subprocess.CalledProcessError as e:
+ output = e.output
+ logger.error("Git failed, \nReturn code: " + str(e.returncode) + "\nOutput: " + output)
+ raise e
+
+ return stdout
+
+def getGitHash():
+ return safeCall(["git", "rev-parse", "HEAD"])
+
+def getGitStatus():
+ return safeCall(["git", "status", "--porcelain", "-uno"])
+
+def toJson(in_dict, compressed=True):
+ """
+ Creates JSON string from a dictionary
+ """
+ logger = logging.getLogger(__name__)
+ out_dict = in_dict.copy()
+ for key in out_dict:
+ if isinstance(out_dict[key], np.ndarray):
+ out_dict[key] = out_dict[key].tolist()
+ else:
+ try:
+ json.dumps(out_dict[key])
+            except Exception:
+ value = str(out_dict[key])
+ logger.warning("JSON: Converting {:s} to string ({:s})".format(key, value))
+ out_dict[key] = value
+ return json.dumps(out_dict)
+
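+# Example (a sketch): numpy arrays become lists, and anything json cannot
+# encode is stringified with a warning:
+#   toJson({'nx': 64, 'h0': np.zeros(2)})   # -> '{"nx": 64, "h0": [0.0, 0.0]}'
+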
+def runSimulation(simulator, simulator_args, outfile, save_times, save_var_names=[], dt=None):
+ """
+ Runs a simulation, and stores output in netcdf file. Stores the times given in
+ save_times, and saves all of the variables in list save_var_names. Elements in
+ save_var_names can be set to None if you do not want to save them
+ """
+ profiling_data_sim_runner = { 'start': {}, 'end': {} }
+ profiling_data_sim_runner["start"]["t_sim_init"] = 0
+ profiling_data_sim_runner["end"]["t_sim_init"] = 0
+ profiling_data_sim_runner["start"]["t_nc_write"] = 0
+ profiling_data_sim_runner["end"]["t_nc_write"] = 0
+ profiling_data_sim_runner["start"]["t_full_step"] = 0
+ profiling_data_sim_runner["end"]["t_full_step"] = 0
+
+ profiling_data_sim_runner["start"]["t_sim_init"] = time.time()
+
+ logger = logging.getLogger(__name__)
+
+ assert len(save_times) > 0, "Need to specify which times to save"
+
+ with Timer("construct") as t:
+ sim = simulator(**simulator_args)
+ logger.info("Constructed in " + str(t.secs) + " seconds")
+
+ #Create netcdf file and simulate
+ with DataDumper(outfile, mode='w', clobber=False) as outdata:
+
+ #Create attributes (metadata)
+ outdata.ncfile.created = time.ctime(time.time())
+ outdata.ncfile.git_hash = getGitHash()
+ outdata.ncfile.git_status = getGitStatus()
+ outdata.ncfile.simulator = str(simulator)
+
+        # do not write fields to attributes (they are too large)
+ simulator_args_for_ncfile = simulator_args.copy()
+ del simulator_args_for_ncfile["rho"]
+ del simulator_args_for_ncfile["rho_u"]
+ del simulator_args_for_ncfile["rho_v"]
+ del simulator_args_for_ncfile["E"]
+ outdata.ncfile.sim_args = toJson(simulator_args_for_ncfile)
+
+ #Create dimensions
+ outdata.ncfile.createDimension('time', len(save_times))
+ outdata.ncfile.createDimension('x', simulator_args['nx'])
+ outdata.ncfile.createDimension('y', simulator_args['ny'])
+
+ #Create variables for dimensions
+ ncvars = {}
+ ncvars['time'] = outdata.ncfile.createVariable('time', np.dtype('float32').char, 'time')
+ ncvars['x'] = outdata.ncfile.createVariable( 'x', np.dtype('float32').char, 'x')
+ ncvars['y'] = outdata.ncfile.createVariable( 'y', np.dtype('float32').char, 'y')
+
+ #Fill variables with proper values
+ ncvars['time'][:] = save_times
+ extent = sim.getExtent()
+ ncvars['x'][:] = np.linspace(extent[0], extent[1], simulator_args['nx'])
+ ncvars['y'][:] = np.linspace(extent[2], extent[3], simulator_args['ny'])
+
+ #Choose which variables to download (prune None from list, but keep the index)
+ download_vars = []
+ for i, var_name in enumerate(save_var_names):
+ if var_name is not None:
+ download_vars += [i]
+ save_var_names = list(save_var_names[i] for i in download_vars)
+
+ #Create variables
+ for var_name in save_var_names:
+ ncvars[var_name] = outdata.ncfile.createVariable(var_name, np.dtype('float32').char, ('time', 'y', 'x'), zlib=True, least_significant_digit=3)
+
+ #Create step sizes between each save
+ t_steps = np.empty_like(save_times)
+ t_steps[0] = save_times[0]
+ t_steps[1:] = save_times[1:] - save_times[0:-1]
+
+ profiling_data_sim_runner["end"]["t_sim_init"] = time.time()
+
+ #Start simulation loop
+ progress_printer = ProgressPrinter(save_times[-1], print_every=10)
+ for k in range(len(save_times)):
+ #Get target time and step size there
+ t_step = t_steps[k]
+ t_end = save_times[k]
+
+ #Sanity check simulator
+ try:
+ sim.check()
+ except AssertionError as e:
+ logger.error("Error after {:d} steps (t={:f}: {:s}".format(sim.simSteps(), sim.simTime(), str(e)))
+ return outdata.filename
+
+ profiling_data_sim_runner["start"]["t_full_step"] += time.time()
+
+ #Simulate
+ if (t_step > 0.0):
+ sim.simulate(t_step, dt)
+
+ profiling_data_sim_runner["end"]["t_full_step"] += time.time()
+
+ profiling_data_sim_runner["start"]["t_nc_write"] += time.time()
+
+ #Download
+ save_vars = sim.download(download_vars)
+
+ #Save to file
+ for i, var_name in enumerate(save_var_names):
+ ncvars[var_name][k, :] = save_vars[i]
+
+ profiling_data_sim_runner["end"]["t_nc_write"] += time.time()
+
+ #Write progress to screen
+ print_string = progress_printer.getPrintString(t_end)
+ if (print_string):
+ logger.debug(print_string)
+
+ logger.debug("Simulated to t={:f} in {:d} timesteps (average dt={:f})".format(t_end, sim.simSteps(), sim.simTime() / sim.simSteps()))
+
+ return outdata.filename, profiling_data_sim_runner, sim.profiling_data_mpi
+
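+# Example call (a sketch; the simulator class and argument dict are illustrative):
+#   outfile, prof_run, prof_mpi = runSimulation(
+#       EE2D_KP07_dimsplit.EE2D_KP07_dimsplit, sim_args, "out.nc",
+#       save_times=np.linspace(0, 10, 11),
+#       save_var_names=["rho", "rho_u", "rho_v", "E"])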
+
+
+
+
+
+class Timer(object):
+ """
+ Class which keeps track of time spent for a section of code
+ """
+ def __init__(self, tag, log_level=logging.DEBUG):
+ self.tag = tag
+ self.log_level = log_level
+ self.logger = logging.getLogger(__name__)
+
+ def __enter__(self):
+ self.start = time.time()
+ return self
+
+ def __exit__(self, *args):
+ self.end = time.time()
+ self.secs = self.end - self.start
+ self.msecs = self.secs * 1000 # millisecs
+ self.logger.log(self.log_level, "%s: %f ms", self.tag, self.msecs)
+
+ def elapsed(self):
+ return time.time() - self.start
+
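+# Usage (work() is a placeholder):
+#   with Timer("section") as t:
+#       work()
+#   print(t.secs)   # elapsed wall-clock time in seconds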
+
+
+
+
+class PopenFileBuffer(object):
+ """
+ Simple class for holding a set of tempfiles
+ for communicating with a subprocess
+ """
+ def __init__(self):
+ self.stdout = tempfile.TemporaryFile(mode='w+t')
+ self.stderr = tempfile.TemporaryFile(mode='w+t')
+
+ def __del__(self):
+ self.stdout.close()
+ self.stderr.close()
+
+ def read(self):
+ self.stdout.seek(0)
+ cout = self.stdout.read()
+ self.stdout.seek(0, 2)
+
+ self.stderr.seek(0)
+ cerr = self.stderr.read()
+ self.stderr.seek(0, 2)
+
+ return cout, cerr
+
+class IPEngine(object):
+ """
+ Class for starting IPEngines for MPI processing in IPython
+ """
+ def __init__(self, n_engines):
+ self.logger = logging.getLogger(__name__)
+
+ #Start ipcontroller
+ self.logger.info("Starting IPController")
+ self.c_buff = PopenFileBuffer()
+ c_cmd = ["ipcontroller", "--ip='*'"]
+ c_params = dict()
+ c_params['stderr'] = self.c_buff.stderr
+ c_params['stdout'] = self.c_buff.stdout
+ c_params['shell'] = False
+ if os.name == 'nt':
+ c_params['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
+ self.c = subprocess.Popen(c_cmd, **c_params)
+
+ #Wait until controller is running
+ time.sleep(3)
+
+ #Start engines
+ self.logger.info("Starting IPEngines")
+ self.e_buff = PopenFileBuffer()
+ e_cmd = ["mpiexec", "-n", str(n_engines), "ipengine", "--mpi"]
+ e_params = dict()
+ e_params['stderr'] = self.e_buff.stderr
+ e_params['stdout'] = self.e_buff.stdout
+ e_params['shell'] = False
+ if os.name == 'nt':
+ e_params['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
+ self.e = subprocess.Popen(e_cmd, **e_params)
+
+ # attach to a running cluster
+ import ipyparallel
+ self.cluster = ipyparallel.Client()#profile='mpi')
+ time.sleep(3)
+ while(len(self.cluster.ids) != n_engines):
+ time.sleep(0.5)
+ self.logger.info("Waiting for cluster...")
+ self.cluster = ipyparallel.Client()#profile='mpi')
+
+ self.logger.info("Done")
+
+ def __del__(self):
+ self.shutdown()
+
+ def shutdown(self):
+ if (self.e is not None):
+ if (os.name == 'nt'):
+ self.logger.warn("Sending CTRL+C to IPEngine")
+ self.e.send_signal(signal.CTRL_C_EVENT)
+
+ try:
+ self.e.communicate(timeout=3)
+ self.e.kill()
+ except subprocess.TimeoutExpired:
+ self.logger.warn("Killing IPEngine")
+ self.e.kill()
+ self.e.communicate()
+ self.e = None
+
+ cout, cerr = self.e_buff.read()
+ self.logger.info("IPEngine cout: {:s}".format(cout))
+ self.logger.info("IPEngine cerr: {:s}".format(cerr))
+ self.e_buff = None
+
+ gc.collect()
+
+ if (self.c is not None):
+ if (os.name == 'nt'):
+ self.logger.warn("Sending CTRL+C to IPController")
+ self.c.send_signal(signal.CTRL_C_EVENT)
+
+ try:
+ self.c.communicate(timeout=3)
+ self.c.kill()
+ except subprocess.TimeoutExpired:
+ self.logger.warn("Killing IPController")
+ self.c.kill()
+ self.c.communicate()
+ self.c = None
+
+ cout, cerr = self.c_buff.read()
+ self.logger.info("IPController cout: {:s}".format(cout))
+ self.logger.info("IPController cerr: {:s}".format(cerr))
+ self.c_buff = None
+
+ gc.collect()
+
+
+
+
+
+
+class DataDumper(object):
+ """
+ Simple class for holding a netCDF4 object
+ (handles opening and closing in a nice way)
+ Use as
+ with DataDumper("filename") as data:
+ ...
+ """
+ def __init__(self, filename, *args, **kwargs):
+ self.logger = logging.getLogger(__name__)
+
+ #Create directory if needed
+ filename = os.path.abspath(filename)
+ dirname = os.path.dirname(filename)
+ if dirname and not os.path.isdir(dirname):
+ self.logger.info("Creating directory " + dirname)
+ os.makedirs(dirname)
+
+ #Get mode of file if we have that
+ mode = None
+ if (args):
+ mode = args[0]
+ elif (kwargs and 'mode' in kwargs.keys()):
+ mode = kwargs['mode']
+
+ #Create new unique file if writing
+ if (mode):
+ if (("w" in mode) or ("+" in mode) or ("a" in mode)):
+ i = 0
+ stem, ext = os.path.splitext(filename)
+ while (os.path.isfile(filename)):
+ filename = "{:s}_{:04d}{:s}".format(stem, i, ext)
+ i = i+1
+ self.filename = os.path.abspath(filename)
+
+ #Save arguments
+ self.args = args
+ self.kwargs = kwargs
+
+ #Log output
+ self.logger.info("Initialized " + self.filename)
+
+
+ def __enter__(self):
+ self.logger.info("Opening " + self.filename)
+ if (self.args):
+ self.logger.info("Arguments: " + str(self.args))
+ if (self.kwargs):
+ self.logger.info("Keyword arguments: " + str(self.kwargs))
+ self.ncfile = netCDF4.Dataset(self.filename, *self.args, **self.kwargs)
+ return self
+
+ def __exit__(self, *args):
+ self.logger.info("Closing " + self.filename)
+ self.ncfile.close()
+
+
+ def toJson(in_dict):
+ out_dict = in_dict.copy()
+
+ for key in out_dict:
+ if isinstance(out_dict[key], np.ndarray):
+ out_dict[key] = out_dict[key].tolist()
+ else:
+ try:
+ json.dumps(out_dict[key])
+                except Exception:
+ out_dict[key] = str(out_dict[key])
+
+ return json.dumps(out_dict)
+
+
+
+
+
+class ProgressPrinter(object):
+ """
+    Small helper class for printing the simulation progress to screen
+ """
+ def __init__(self, total_steps, print_every=5):
+ self.logger = logging.getLogger(__name__)
+ self.start = time.time()
+ self.total_steps = total_steps
+ self.print_every = print_every
+ self.next_print_time = self.print_every
+ self.last_step = 0
+ self.secs_per_iter = None
+
+ def getPrintString(self, step):
+ elapsed = time.time() - self.start
+ if (elapsed > self.next_print_time):
+ dt = elapsed - (self.next_print_time - self.print_every)
+ dsteps = step - self.last_step
+ steps_remaining = self.total_steps - step
+
+ if (dsteps == 0):
+ return
+
+ self.last_step = step
+ self.next_print_time = elapsed + self.print_every
+
+ if not self.secs_per_iter:
+ self.secs_per_iter = dt / dsteps
+ self.secs_per_iter = 0.2*self.secs_per_iter + 0.8*(dt / dsteps)
+
+ remaining_time = steps_remaining * self.secs_per_iter
+
+ return "{:s}. Total: {:s}, elapsed: {:s}, remaining: {:s}".format(
+ ProgressPrinter.progressBar(step, self.total_steps),
+ ProgressPrinter.timeString(elapsed + remaining_time),
+ ProgressPrinter.timeString(elapsed),
+ ProgressPrinter.timeString(remaining_time))
+
+ def timeString(seconds):
+ seconds = int(max(seconds, 1))
+ minutes, seconds = divmod(seconds, 60)
+ hours, minutes = divmod(minutes, 60)
+ periods = [('h', hours), ('m', minutes), ('s', seconds)]
+ time_string = ' '.join('{}{}'.format(value, name)
+ for name, value in periods
+ if value)
+ return time_string
+
+ def progressBar(step, total_steps, width=30):
+ progress = np.round(width * step / total_steps).astype(np.int32)
+ progressbar = "0% [" + "#"*(progress) + "="*(width-progress) + "] 100%"
+ return progressbar
+
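+# Example (a sketch): the total is the simulation end time, and a status line
+# is produced at most every print_every seconds of wall time:
+#   printer = ProgressPrinter(10.0, print_every=5)
+#   line = printer.getPrintString(t)   # None until enough wall time has passed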
+
+
+
+
+
+
+"""
+Class that holds 2D data
+"""
+class CudaArray2D:
+ """
+ Uploads initial data to the CUDA device
+ """
+ def __init__(self, stream, nx, ny, x_halo, y_halo, cpu_data=None, dtype=np.float32):
+ self.logger = logging.getLogger(__name__)
+ self.nx = nx
+ self.ny = ny
+ self.x_halo = x_halo
+ self.y_halo = y_halo
+
+ nx_halo = nx + 2*x_halo
+ ny_halo = ny + 2*y_halo
+
+ #self.logger.debug("Allocating [%dx%d] buffer", self.nx, self.ny)
+ #Should perhaps use pycuda.driver.mem_alloc_data.pitch() here
+ #Initialize an array on GPU with zeros
+ #self.data = pycuda.gpuarray.zeros((ny_halo, nx_halo), dtype)
+ self.data_h = np.zeros((ny_halo, nx_halo), dtype="float32")
+ num_bytes = self.data_h.size * self.data_h.itemsize
+
+ # init device array and upload host data
+ self.data = hip_check(hip.hipMalloc(num_bytes)).configure(
+ typestr="float32",shape=(ny_halo, nx_halo))
+
+ # copy data from host to device
+ hip_check(hip.hipMemcpy(self.data,self.data_h,num_bytes,hip.hipMemcpyKind.hipMemcpyHostToDevice))
+
+ #For returning to download (No counterpart in hip-python)
+ #self.memorypool = PageLockedMemoryPool()
+
+ #If we don't have any data, just allocate and return
+ if cpu_data is None:
+ return
+
+ #Make sure data is in proper format
+ assert cpu_data.shape == (ny_halo, nx_halo) or cpu_data.shape == (self.ny, self.nx), "Wrong shape of data %s vs %s / %s" % (str(cpu_data.shape), str((self.ny, self.nx)), str((ny_halo, nx_halo)))
+ assert cpu_data.itemsize == 4, "Wrong size of data type"
+ assert not np.isfortran(cpu_data), "Wrong datatype (Fortran, expected C)"
+
+ #Create copy object from host to device
+ x = (nx_halo - cpu_data.shape[1]) // 2
+ y = (ny_halo - cpu_data.shape[0]) // 2
+ self.upload(stream, cpu_data, extent=[x, y, cpu_data.shape[1], cpu_data.shape[0]])
+ #self.logger.debug("Buffer <%s> [%dx%d]: Allocated ", int(self.data.gpudata), self.nx, self.ny)
+
+
+    def __del__(self, *args):
+        #self.logger.debug("Buffer <%s> [%dx%d]: Releasing ", int(self.data.gpudata), self.nx, self.ny)
+        #the hip-python allocation has no .gpudata attribute; free it with hipFree
+        hip_check(hip.hipFree(self.data))
+        self.data = None
+
+ """
+ Enables downloading data from GPU to Python
+ """
+ def download(self, stream, cpu_data=None, asynch=False, extent=None):
+ if (extent is None):
+ x = self.x_halo
+ y = self.y_halo
+ nx = self.nx
+ ny = self.ny
+ else:
+ x, y, nx, ny = extent
+
+        if (cpu_data is None):
+            #self.logger.debug("Downloading [%dx%d] buffer", self.nx, self.ny)
+            #Allocate host memory
+            #The following fails, don't know why (crashes python)
+            #cpu_data = cuda.pagelocked_empty((int(ny), int(nx)), dtype=np.float32, mem_flags=cuda.host_alloc_flags.PORTABLE)
+            #see here for the types of memory: https://rocm.docs.amd.com/projects/hip-python/en/latest/python_api/hip.html#hip.hip.hipMemoryType
+            #A pinned alternative is hipHostMalloc, which allocates page-locked host
+            #memory that is mapped into the address space of all GPUs in the system:
+            # hipHostMallocDefault: memory is mapped and portable (default allocation)
+            # hipHostMallocPortable: memory is explicitly portable across different devices
+            # cpu_data = hip_check(hip.hipHostMalloc(num_bytes, hip.hipHostMallocPortable))
+            #Note that hipHostMalloc returns a raw allocation, not a numpy array,
+            #so a plain (non-pagelocked) numpy buffer is used here instead
+            cpu_data = np.empty((ny, nx), dtype=np.float32)
+            #cpu_data = self.memorypool.allocate((ny, nx), dtype=np.float32)
+
+ assert nx == cpu_data.shape[1]
+ assert ny == cpu_data.shape[0]
+ assert x+nx <= self.nx + 2*self.x_halo
+ assert y+ny <= self.ny + 2*self.y_halo
+
+ #Create copy object from device to host
+ #copy = cuda.Memcpy2D()
+ #copy.set_src_device(self.data.gpudata)
+ #copy.set_dst_host(cpu_data)
+
+ #Set offsets and pitch of source
+ #copy.src_x_in_bytes = int(x)*self.data.strides[1]
+ #copy.src_y = int(y)
+ #copy.src_pitch = self.data.strides[0]
+
+ #Set width in bytes to copy for each row and
+ #number of rows to copy
+ #copy.width_in_bytes = int(nx)*cpu_data.itemsize
+ #copy.height = int(ny)
+
+ #The equivalent of cuda.Memcpy2D in hip-python would be: but it fails with an error pointing to cpu_data
+ #and a message: "RuntimeError: hipError_t.hipErrorInvalidValue"
+ #shape = (nx,ny)
+ #num_bytes = cpu_data.size * cpu_data.itemsize
+ #dst_pitch_bytes = cpu_data.strides[0]
+ #src_pitch_bytes = num_bytes // shape[0]
+ #src_pitch_bytes = data.strides[0]
+ #width_bytes = int(nx)*cpu_data.itemsize
+ #height_Nrows = int(ny)
+ #hipMemcpy2D(dst, unsigned long dpitch, src, unsigned long spitch, unsigned long width, unsigned long height, kind)
+ #copy = hip_check(hip.hipMemcpy2D(cpu_data, #pointer to destination
+ # dst_pitch_bytes, #pitch of destination array
+ # data, #pointer to source
+ # src_pitch_bytes, #pitch of source array
+ # width_bytes, #number of bytes in each row
+ # height_Nrows, #number of rows to copy
+ # hip.hipMemcpyKind.hipMemcpyDeviceToHost)) #kind
+
+        #this is an alternative:
+        #copy from device to host with a plain hipMemcpy (hipMemcpy is blocking,
+        #so the asynch flag has no effect here)
+        #note: this copies the leading ny*nx elements of the (halo'd) buffer and
+        #does not honor the x/y offsets of the extent, unlike the Memcpy2D above
+        num_bytes = cpu_data.size * cpu_data.itemsize
+        #hip.hipMemcpy(dst, src, unsigned long sizeBytes, kind)
+        hip_check(hip.hipMemcpy(cpu_data, self.data, num_bytes, hip.hipMemcpyKind.hipMemcpyDeviceToHost))
+
+        return cpu_data
+
+
+ def upload(self, stream, cpu_data, extent=None):
+ if (extent is None):
+ x = self.x_halo
+ y = self.y_halo
+ nx = self.nx
+ ny = self.ny
+ else:
+ x, y, nx, ny = extent
+
+ assert(nx == cpu_data.shape[1])
+ assert(ny == cpu_data.shape[0])
+ assert(x+nx <= self.nx + 2*self.x_halo)
+ assert(y+ny <= self.ny + 2*self.y_halo)
+
+ #Create copy object from device to host
+ #Well this copy from src:host to dst:device AND NOT from device to host
+ #copy = cuda.Memcpy2D()
+ #copy.set_dst_device(self.data.gpudata)
+ #copy.set_src_host(cpu_data)
+
+ #Set offsets and pitch of source
+ #copy.dst_x_in_bytes = int(x)*self.data.strides[1]
+ #copy.dst_y = int(y)
+ #copy.dst_pitch = self.data.strides[0]
+
+ #Set width in bytes to copy for each row and
+ #number of rows to copy
+ #copy.width_in_bytes = int(nx)*cpu_data.itemsize
+ #copy.height = int(ny)
+
+        #copy from host to device
+        #free the old device buffer before re-allocating, to avoid leaking device memory
+        hip_check(hip.hipFree(self.data))
+        num_bytes = cpu_data.size * cpu_data.itemsize
+        self.data = hip_check(hip.hipMalloc(num_bytes)).configure(
+            typestr="float32", shape=cpu_data.shape)
+        #hip.hipMemcpy(dst, src, unsigned long sizeBytes, kind)
+        hip_check(hip.hipMemcpy(self.data, cpu_data, num_bytes, hip.hipMemcpyKind.hipMemcpyHostToDevice))
+
+
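+# Example use of CudaArray2D above (a sketch; h0 is a placeholder numpy array):
+#   stream = hip_check(hip.hipStreamCreate())
+#   arr = CudaArray2D(stream, nx=64, ny=64, x_halo=2, y_halo=2, cpu_data=h0)
+#   h = arr.download(stream)   # copies the device buffer back to a numpy array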
+
+
+"""
+Class that holds 3D data
+"""
+class CudaArray3D:
+ """
+    Uploads initial data to the GPU device
+ """
+ def __init__(self, stream, nx, ny, nz, x_halo, y_halo, z_halo, cpu_data=None, dtype=np.float32):
+ self.logger = logging.getLogger(__name__)
+ self.nx = nx
+ self.ny = ny
+ self.nz = nz
+ self.x_halo = x_halo
+ self.y_halo = y_halo
+ self.z_halo = z_halo
+
+ nx_halo = nx + 2*x_halo
+ ny_halo = ny + 2*y_halo
+ nz_halo = nz + 2*z_halo
+
+ #self.logger.debug("Allocating [%dx%dx%d] buffer", self.nx, self.ny, self.nz)
+ #Should perhaps use pycuda.driver.mem_alloc_data.pitch() here
+ #self.data = pycuda.gpuarray.zeros((nz_halo, ny_halo, nx_halo), dtype)
+
+ self.data_h = np.zeros((nz_halo, ny_halo, nx_halo), dtype="float32")
+ num_bytes = self.data_h.size * self.data_h.itemsize
+
+ # init device array and upload host data
+ self.data = hip_check(hip.hipMalloc(num_bytes)).configure(
+ typestr="float32",shape=(nz_halo, ny_halo, nx_halo))
+
+ # copy data from host to device
+ hip_check(hip.hipMemcpy(self.data,self.data_h,num_bytes,hip.hipMemcpyKind.hipMemcpyHostToDevice))
+
+ #For returning to download
+ #self.memorypool = PageLockedMemoryPool()
+
+ #If we don't have any data, just allocate and return
+ if cpu_data is None:
+ return
+
+ #Make sure data is in proper format
+ assert cpu_data.shape == (nz_halo, ny_halo, nx_halo) or cpu_data.shape == (self.nz, self.ny, self.nx), "Wrong shape of data %s vs %s / %s" % (str(cpu_data.shape), str((self.nz, self.ny, self.nx)), str((nz_halo, ny_halo, nx_halo)))
+ assert cpu_data.itemsize == 4, "Wrong size of data type"
+ assert not np.isfortran(cpu_data), "Wrong datatype (Fortran, expected C)"
+
+ #Create copy object from host to device
+ #copy = cuda.Memcpy3D()
+ #copy.set_src_host(cpu_data)
+ #copy.set_dst_device(self.data.gpudata)
+
+ #Set offsets of destination
+ #x_offset = (nx_halo - cpu_data.shape[2]) // 2
+ #y_offset = (ny_halo - cpu_data.shape[1]) // 2
+ #z_offset = (nz_halo - cpu_data.shape[0]) // 2
+ #copy.dst_x_in_bytes = x_offset*self.data.strides[1]
+ #copy.dst_y = y_offset
+ #copy.dst_z = z_offset
+
+ #Set pitch of destination
+ #copy.dst_pitch = self.data.strides[0]
+
+ #Set width in bytes to copy for each row and
+ #number of rows to copy
+ #width = max(self.nx, cpu_data.shape[2])
+ #height = max(self.ny, cpu_data.shape[1])
+ #depth = max(self.nz, cpu-data.shape[0])
+ #copy.width_in_bytes = width*cpu_data.itemsize
+ #copy.height = height
+ #copy.depth = depth
+
+        #copy from host to device
+        #free the zero-initialized buffer before re-allocating, to avoid leaking device memory
+        hip_check(hip.hipFree(self.data))
+        num_bytes = cpu_data.size * cpu_data.itemsize
+        self.data = hip_check(hip.hipMalloc(num_bytes)).configure(
+            typestr="float32", shape=cpu_data.shape)
+        #hip.hipMemcpy(dst, src, unsigned long sizeBytes, kind)
+        hip_check(hip.hipMemcpy(self.data, cpu_data, num_bytes, hip.hipMemcpyKind.hipMemcpyHostToDevice))
+
+ #self.logger.debug("Buffer <%s> [%dx%d]: Allocated ", int(self.data.gpudata), self.nx, self.ny)
+
+
+    def __del__(self, *args):
+        #self.logger.debug("Buffer <%s> [%dx%d]: Releasing ", int(self.data.gpudata), self.nx, self.ny)
+        #the hip-python allocation has no .gpudata attribute; free it with hipFree
+        hip_check(hip.hipFree(self.data))
+        self.data = None
+
+ """
+ Enables downloading data from GPU to Python
+ """
+ def download(self, stream, asynch=False):
+ #self.logger.debug("Downloading [%dx%d] buffer", self.nx, self.ny)
+ #Allocate host memory
+ #cpu_data = cuda.pagelocked_empty((self.ny, self.nx), np.float32)
+ cpu_data = np.empty((self.nz, self.ny, self.nx), dtype=np.float32)
+ #cpu_data = self.memorypool.allocate((self.nz, self.ny, self.nx), dtype=np.float32)
+
+ #Create copy object from device to host
+ #copy = cuda.Memcpy2D()
+ #copy.set_src_device(self.data.gpudata)
+ #copy.set_dst_host(cpu_data)
+
+ #Set offsets and pitch of source
+ #copy.src_x_in_bytes = self.x_halo*self.data.strides[1]
+ #copy.src_y = self.y_halo
+ #copy.src_z = self.z_halo
+ #copy.src_pitch = self.data.strides[0]
+
+ #Set width in bytes to copy for each row and
+ #number of rows to copy
+ #copy.width_in_bytes = self.nx*cpu_data.itemsize
+ #copy.height = self.ny
+ #copy.depth = self.nz
+
+        #copy from device to host (hipMemcpy is blocking, so the asynch flag
+        #has no effect here)
+        num_bytes = cpu_data.size * cpu_data.itemsize
+        #hip.hipMemcpy(dst, src, unsigned long sizeBytes, kind)
+        hip_check(hip.hipMemcpy(cpu_data, self.data, num_bytes, hip.hipMemcpyKind.hipMemcpyDeviceToHost))
+
+        return cpu_data
+
+
+"""
+A class representing an Arakawa A type (unstaggered, logically Cartesian) grid
+"""
+class ArakawaA2D:
+ def __init__(self, stream, nx, ny, halo_x, halo_y, cpu_variables):
+ """
+ Uploads initial data to the GPU device
+ """
+ self.logger = logging.getLogger(__name__)
+ self.gpu_variables = []
+ for cpu_variable in cpu_variables:
+ self.gpu_variables += [CudaArray2D(stream, nx, ny, halo_x, halo_y, cpu_variable)]
+
+ def __getitem__(self, key):
+ assert type(key) == int, "Indexing is int based"
+        if (key >= len(self.gpu_variables) or key < 0):
+ raise IndexError("Out of bounds")
+ return self.gpu_variables[key]
+
+ def download(self, stream, variables=None):
+ """
+ Enables downloading data from the GPU device to Python
+ """
+ if variables is None:
+ variables=range(len(self.gpu_variables))
+
+ cpu_variables = []
+ for i in variables:
+ assert i < len(self.gpu_variables), "Variable {:d} is out of range".format(i)
+ cpu_variables += [self.gpu_variables[i].download(stream, asynch=True)]
+
+ #stream.synchronize()
+ return cpu_variables
+
+    #hipblas
+    def sum_hipblas(self, num_elements, data):
+        num_bytes_r = np.dtype(np.float32).itemsize
+        result_d = hip_check(hip.hipMalloc(num_bytes_r))
+        result_h = np.zeros(1, dtype=np.float32)
+
+        # create a hipblas handle
+        handle = hip_check(hipblas.hipblasCreate())
+        #note: hipblasSasum computes the sum of absolute values (the 1-norm),
+        #not the plain sum computed by pycuda.gpuarray.sum
+        #"incx" [int] specifies the increment for the elements of x. incx must be > 0.
+        hip_check(hipblas.hipblasSasum(handle, num_elements, data, 1, result_d))
+
+        # destruction of handle
+        hip_check(hipblas.hipblasDestroy(handle))
+
+        # copy result (stored in result_d) back to host (store in result_h)
+        hip_check(hip.hipMemcpy(result_h, result_d, num_bytes_r, hip.hipMemcpyKind.hipMemcpyDeviceToHost))
+
+        # clean up the device-side result (freeing the input data here would
+        # destroy the variable being checked)
+        hip_check(hip.hipFree(result_d))
+        return result_h
+
+ def check(self):
+ """
+ Checks that data is still sane
+ """
+        for i, gpu_variable in enumerate(self.gpu_variables):
+            #compute sum with hipblas over the full (halo'd) buffer
+            #var_sum = pycuda.gpuarray.sum(gpu_variable.data).get()
+            num_elements = (gpu_variable.nx + 2*gpu_variable.x_halo) * (gpu_variable.ny + 2*gpu_variable.y_halo)
+            var_sum = self.sum_hipblas(num_elements, gpu_variable.data)
+
+            self.logger.debug("Data %d with size [%d x %d] has average %f", i, gpu_variable.nx, gpu_variable.ny, var_sum / (gpu_variable.nx * gpu_variable.ny))
+            assert not np.isnan(var_sum), "Data contains NaN values!"
+
diff --git a/GPUSimulators/CudaContext.py b/GPUSimulators/CudaContext.py
new file mode 100644
index 0000000..e77ef06
--- /dev/null
+++ b/GPUSimulators/CudaContext.py
@@ -0,0 +1,328 @@
+# -*- coding: utf-8 -*-
+
+"""
+This python module implements Cuda context handling
+
+Copyright (C) 2018 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
+"""
+
+
+
+import os
+
+import numpy as np
+import time
+import re
+import io
+import hashlib
+import logging
+import gc
+
+#import pycuda.compiler as cuda_compiler
+#import pycuda.gpuarray
+#import pycuda.driver as cuda
+
+from hip import hip,hiprtc
+from hip import rccl
+
+from GPUSimulators import Autotuner, Common
+
+
+def hip_check(call_result):
+ err = call_result[0]
+ result = call_result[1:]
+ if len(result) == 1:
+ result = result[0]
+ if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
+ raise RuntimeError(str(err))
+ elif (
+ isinstance(err, hiprtc.hiprtcResult)
+ and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
+ ):
+ raise RuntimeError(str(err))
+ return result
+
+"""
+Class which keeps track of the CUDA context and some helper functions
+"""
+class CudaContext(object):
+
+ def __init__(self, device=None, context_flags=None, use_cache=True, autotuning=True):
+ """
+ Create a new CUDA context
+ Set device to an id or pci_bus_id to select a specific GPU
+ Set context_flags to cuda.ctx_flags.SCHED_BLOCKING_SYNC for a blocking context
+ """
+ self.use_cache = use_cache
+ self.logger = logging.getLogger(__name__)
+ self.modules = {}
+
+ self.module_path = os.path.dirname(os.path.realpath(__file__))
+
+ #Initialize cuda (must be first call to PyCUDA)
+ ##cuda.init(flags=0)
+
+ ##self.logger.info("PyCUDA version %s", str(pycuda.VERSION_TEXT))
+
+ #Print some info about CUDA
+ ##self.logger.info("CUDA version %s", str(cuda.get_version()))
+ #self.logger.info("Driver version %s", str(cuda.get_driver_version()))
+ self.logger.info("Driver version %s", str(hip_check(hip.hipDriverGetVersion())))
+
+ if device is None:
+ device = 0
+
+        self.cuda_device = device
+        hip_check(hip.hipSetDevice(device))
+        #self.logger.info("Using device %d/%d '%s' (%s) GPU", device, cuda.Device.count(), self.cuda_device.name(), self.cuda_device.pci_bus_id())
+        self.logger.info("Using device %d/%d GPU", device, hip_check(hip.hipGetDeviceCount()))
+        #self.logger.debug(" => compute capability: %s", str(self.cuda_device.compute_capability()))
+        major, minor = hip_check(hip.hipDeviceComputeCapability(device))
+        self.logger.debug(" => compute capability: %s", str((major, minor)))
+
+ # Create the CUDA context
+        #In HIP there is no need to specify a scheduling policy (it is abstracted). Here the HIP runtime system manages the workload to fit a specific target architecture
+ #if context_flags is None:
+ # context_flags=cuda.ctx_flags.SCHED_AUTO
+
+ #self.cuda_context = self.cuda_device.make_context(flags=context_flags)
+
+        #free, total = cuda.mem_get_info()
+        total = hip_check(hip.hipDeviceTotalMem(device))
+        #self.logger.debug(" => memory: %d / %d MB available", int(free/(1024*1024)), int(total/(1024*1024)))
+        self.logger.debug(" => memory: %d MB total", int(total/(1024*1024)))
+
+ #self.logger.info("Created context handle <%s>", str(self.cuda_context.handle))
+
+ #Create cache dir for cubin files
+ self.cache_path = os.path.join(self.module_path, "cuda_cache")
+ if (self.use_cache):
+ if not os.path.isdir(self.cache_path):
+ os.mkdir(self.cache_path)
+ self.logger.info("Using CUDA cache dir %s", self.cache_path)
+
+ self.autotuner = None
+ if (autotuning):
+ self.logger.info("Autotuning enabled. It may take several minutes to run the code the first time: have patience")
+ self.autotuner = Autotuner.Autotuner()
+
+
+# def __del__(self, *args):
+# self.logger.info("Cleaning up CUDA context handle <%s>", str(self.cuda_context.handle))
+
+ # Loop over all contexts in stack, and remove "this"
+# other_contexts = []
+# while (cuda.Context.get_current() != None):
+# context = cuda.Context.get_current()
+# if (context.handle != self.cuda_context.handle):
+# self.logger.debug("<%s> Popping <%s> (*not* ours)", str(self.cuda_context.handle), str(context.handle))
+# other_contexts = [context] + other_contexts
+# cuda.Context.pop()
+# else:
+# self.logger.debug("<%s> Popping <%s> (ours)", str(self.cuda_context.handle), str(context.handle))
+# cuda.Context.pop()
+
+ # Add all the contexts we popped that were not our own
+# for context in other_contexts:
+# self.logger.debug("<%s> Pushing <%s>", str(self.cuda_context.handle), str(context.handle))
+# cuda.Context.push(context)
+
+# self.logger.debug("<%s> Detaching", str(self.cuda_context.handle))
+# self.cuda_context.detach()
+
+
+# def __str__(self):
+# return "CudaContext id " + str(self.cuda_context.handle)
+
+
+ def hash_kernel(kernel_filename, include_dirs):
+ # Generate a kernel ID for our caches
+ num_includes = 0
+ max_includes = 100
+ kernel_hasher = hashlib.md5()
+ logger = logging.getLogger(__name__)
+
+ # Loop over file and includes, and check if something has changed
+ files = [kernel_filename]
+ while len(files):
+
+            if (num_includes > max_includes):
+                raise RuntimeError("Maximum number of includes reached - circular include in {:}?".format(kernel_filename))
+
+ filename = files.pop()
+
+ #logger.debug("Hashing %s", filename)
+
+ modified = os.path.getmtime(filename)
+
+ # Open the file
+ with io.open(filename, "r") as file:
+
+                # Search for #include and also hash the file
+ file_str = file.read()
+ kernel_hasher.update(file_str.encode('utf-8'))
+ kernel_hasher.update(str(modified).encode('utf-8'))
+
+ #Find all includes
+                includes = re.findall(r'^\W*#include\W+(.+?)\W*$', file_str, re.M)
+
+ # Loop over everything that looks like an include
+ for include_file in includes:
+
+ #Search through include directories for the file
+ file_path = os.path.dirname(filename)
+ for include_path in [file_path] + include_dirs:
+
+ # If we find it, add it to list of files to check
+ temp_path = os.path.join(include_path, include_file)
+ if (os.path.isfile(temp_path)):
+ files = files + [temp_path]
+ num_includes = num_includes + 1 #For circular includes...
+ break
+
+ return kernel_hasher.hexdigest()
+
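+    # Example (a sketch; the kernel path is illustrative):
+    #   kernel_id = CudaContext.hash_kernel("cuda/common.h", include_dirs=["cuda"])
+    #   # -> md5 hex digest over the source, its includes, and their modification times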
+
+ """
+    Reads a text file and creates a GPU kernel module from that
+ """
+ def get_module(self, kernel_filename,
+ include_dirs=[], \
+ defines={}, \
+                    compile_args={'no_extern_c': True}, jit_compile_args={}):
+ """
+ Helper function to print compilation output
+ """
+ def cuda_compile_message_handler(compile_success_bool, info_str, error_str):
+ self.logger.debug("Compilation returned %s", str(compile_success_bool))
+ if info_str:
+ self.logger.debug("Info: %s", info_str)
+ if error_str:
+ self.logger.debug("Error: %s", error_str)
+
+ kernel_filename = os.path.normpath(kernel_filename)
+ kernel_path = os.path.abspath(os.path.join(self.module_path, kernel_filename))
+ #self.logger.debug("Getting %s", kernel_filename)
+
+ # Create a hash of the kernel options
+ options_hasher = hashlib.md5()
+        options_hasher.update(str(defines).encode('utf-8') + str(compile_args).encode('utf-8'))
+ options_hash = options_hasher.hexdigest()
+
+        # Create hash of kernel source
+ source_hash = CudaContext.hash_kernel( \
+ kernel_path, \
+ include_dirs=[self.module_path] + include_dirs)
+
+ # Create final hash
+ root, ext = os.path.splitext(kernel_filename)
+ kernel_hash = root \
+ + "_" + source_hash \
+ + "_" + options_hash \
+ + ext
+ cached_kernel_filename = os.path.join(self.cache_path, kernel_hash)
+
+ # If we have the kernel in our hashmap, return it
+ if (kernel_hash in self.modules.keys()):
+ self.logger.debug("Found kernel %s cached in hashmap (%s)", kernel_filename, kernel_hash)
+ return self.modules[kernel_hash]
+
+ # If we have it on disk, return it
+ elif (self.use_cache and os.path.isfile(cached_kernel_filename)):
+ self.logger.debug("Found kernel %s cached on disk (%s)", kernel_filename, kernel_hash)
+
+ with io.open(cached_kernel_filename, "rb") as file:
+ file_str = file.read()
+ #No hip counterpart of module_from_buffer
+ module = cuda.module_from_buffer(file_str, message_handler=cuda_compile_message_handler, **jit_compile_args)
+
+ self.modules[kernel_hash] = module
+ return module
+
+ # Otherwise, compile it from source
+ else:
+ self.logger.debug("Compiling %s (%s)", kernel_filename, kernel_hash)
+
+ #Create kernel string
+ kernel_string = ""
+ for key, value in defines.items():
+ kernel_string += "#define {:s} {:s}\n".format(str(key), str(value))
+ kernel_string += '#include "{:s}"'.format(os.path.join(self.module_path, kernel_filename))
+ if (self.use_cache):
+ cached_kernel_dir = os.path.dirname(cached_kernel_filename)
+ if not os.path.isdir(cached_kernel_dir):
+ os.mkdir(cached_kernel_dir)
+ with io.open(cached_kernel_filename + ".txt", "w") as file:
+ file.write(kernel_string)
+
+
+ with Common.Timer("compiler") as timer:
+ import warnings
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", message="The CUDA compiler succeeded, but said the following:\nkernel.cu", category=UserWarning)
+
+ #cubin = cuda_compiler.compile(kernel_string, include_dirs=include_dirs, cache_dir=False, **compile_args)
+ #module = cuda.module_from_buffer(cubin, message_handler=cuda_compile_message_handler, **jit_compile_args)
+
+ #HIP version of compilation: but "name_of_fct" needs to be defined. e.g.
+ #source = b"""\
+ #extern "C" __global__ void name_of_fct(float factor, int n, short unused1, int unused2, float unused3, float *x) {
+ #int tid = threadIdx.x + blockIdx.x * blockDim.x;
+ #if (tid < n) {
+ #x[tid] *= factor;
+ # }
+ #}
+ #"""
+
+                    prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_string.encode(), b"name_of_fct", 0, [], []))
+
+ props = hip.hipDeviceProp_t()
+ hip_check(hip.hipGetDeviceProperties(props,0))
+ arch = props.gcnArchName
+
+ print(f"Compiling kernel for {arch}")
+
+ cflags = [b"--offload-arch="+arch]
+ err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
+ if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
+ log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
+ log = bytearray(log_size)
+ hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
+ raise RuntimeError(log.decode())
+ code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
+ code = bytearray(code_size)
+ hip_check(hiprtc.hiprtcGetCode(prog, code))
+ module = hip_check(hip.hipModuleLoadData(code))
+ #kernel = hip_check(hip.hipModuleGetFunction(module, b"name_of_fct"))
+
+                    if (self.use_cache):
+                        with io.open(cached_kernel_filename, "wb") as file:
+                            file.write(code)
+
+ self.modules[kernel_hash] = module
+ return module
+
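+    # Example (a sketch; the kernel file and entry point are illustrative):
+    #   module = context.get_module("cuda/EE2D_KP07_dimsplit.cu.hip",
+    #                               defines={'BLOCK_WIDTH': 16, 'BLOCK_HEIGHT': 8})
+    #   kernel = hip_check(hip.hipModuleGetFunction(module, b"KP07DimsplitKernel"))
+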
+ """
+ Clears the kernel cache (useful for debugging & development)
+ """
+ def clear_kernel_cache(self):
+ self.logger.debug("Clearing cache")
+ self.modules = {}
+ gc.collect()
+
+ """
+ Synchronizes all streams etc
+ """
+    def synchronize(self):
+        #there is no context object in the HIP port; synchronize the whole device
+        hip_check(hip.hipDeviceSynchronize())
diff --git a/GPUSimulators/CudaContext_cu.py b/GPUSimulators/CudaContext_cu.py
new file mode 100644
index 0000000..6c90636
--- /dev/null
+++ b/GPUSimulators/CudaContext_cu.py
@@ -0,0 +1,272 @@
+# -*- coding: utf-8 -*-
+
+"""
+This python module implements Cuda context handling
+
+Copyright (C) 2018 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
+"""
+
+
+
+import os
+
+import numpy as np
+import time
+import re
+import io
+import hashlib
+import logging
+import gc
+
+import pycuda.compiler as cuda_compiler
+import pycuda.gpuarray
+import pycuda.driver as cuda
+
+from GPUSimulators import Autotuner, Common
+
+
+
+"""
+Class which keeps track of the CUDA context and some helper functions
+"""
+class CudaContext(object):
+
+ def __init__(self, device=None, context_flags=None, use_cache=True, autotuning=True):
+ """
+ Create a new CUDA context
+ Set device to an id or pci_bus_id to select a specific GPU
+ Set context_flags to cuda.ctx_flags.SCHED_BLOCKING_SYNC for a blocking context
+ """
+ self.use_cache = use_cache
+ self.logger = logging.getLogger(__name__)
+ self.modules = {}
+
+ self.module_path = os.path.dirname(os.path.realpath(__file__))
+
+ #Initialize cuda (must be first call to PyCUDA)
+ cuda.init(flags=0)
+
+ self.logger.info("PyCUDA version %s", str(pycuda.VERSION_TEXT))
+
+ #Print some info about CUDA
+ self.logger.info("CUDA version %s", str(cuda.get_version()))
+ self.logger.info("Driver version %s", str(cuda.get_driver_version()))
+
+ if device is None:
+ device = 0
+
+ self.cuda_device = cuda.Device(device)
+ self.logger.info("Using device %d/%d '%s' (%s) GPU", device, cuda.Device.count(), self.cuda_device.name(), self.cuda_device.pci_bus_id())
+ self.logger.debug(" => compute capability: %s", str(self.cuda_device.compute_capability()))
+
+ # Create the CUDA context
+ if context_flags is None:
+ context_flags=cuda.ctx_flags.SCHED_AUTO
+
+ self.cuda_context = self.cuda_device.make_context(flags=context_flags)
+
+ free, total = cuda.mem_get_info()
+ self.logger.debug(" => memory: %d / %d MB available", int(free/(1024*1024)), int(total/(1024*1024)))
+
+ self.logger.info("Created context handle <%s>", str(self.cuda_context.handle))
+
+ #Create cache dir for cubin files
+ self.cache_path = os.path.join(self.module_path, "cuda_cache")
+ if (self.use_cache):
+ if not os.path.isdir(self.cache_path):
+ os.mkdir(self.cache_path)
+ self.logger.info("Using CUDA cache dir %s", self.cache_path)
+
+ self.autotuner = None
+ if (autotuning):
+ self.logger.info("Autotuning enabled. It may take several minutes to run the code the first time: have patience")
+ self.autotuner = Autotuner.Autotuner()
+
+
+ def __del__(self, *args):
+ self.logger.info("Cleaning up CUDA context handle <%s>", str(self.cuda_context.handle))
+
+ # Loop over all contexts in stack, and remove "this"
+ other_contexts = []
+ while (cuda.Context.get_current() != None):
+ context = cuda.Context.get_current()
+ if (context.handle != self.cuda_context.handle):
+ self.logger.debug("<%s> Popping <%s> (*not* ours)", str(self.cuda_context.handle), str(context.handle))
+ other_contexts = [context] + other_contexts
+ cuda.Context.pop()
+ else:
+ self.logger.debug("<%s> Popping <%s> (ours)", str(self.cuda_context.handle), str(context.handle))
+ cuda.Context.pop()
+
+ # Add all the contexts we popped that were not our own
+ for context in other_contexts:
+ self.logger.debug("<%s> Pushing <%s>", str(self.cuda_context.handle), str(context.handle))
+ cuda.Context.push(context)
+
+ self.logger.debug("<%s> Detaching", str(self.cuda_context.handle))
+ self.cuda_context.detach()
+
+
+ def __str__(self):
+ return "CudaContext id " + str(self.cuda_context.handle)
+
+
+ def hash_kernel(kernel_filename, include_dirs):
+ # Generate a kernel ID for our caches
+ num_includes = 0
+ max_includes = 100
+ kernel_hasher = hashlib.md5()
+ logger = logging.getLogger(__name__)
+
+ # Loop over file and includes, and check if something has changed
+ files = [kernel_filename]
+ while len(files):
+
+            if (num_includes > max_includes):
+                raise RuntimeError("Maximum number of includes reached - circular include in {:}?".format(kernel_filename))
+
+ filename = files.pop()
+
+ #logger.debug("Hashing %s", filename)
+
+ modified = os.path.getmtime(filename)
+
+ # Open the file
+ with io.open(filename, "r") as file:
+
+            # Search for #include and also hash the file
+ file_str = file.read()
+ kernel_hasher.update(file_str.encode('utf-8'))
+ kernel_hasher.update(str(modified).encode('utf-8'))
+
+ #Find all includes
+            includes = re.findall(r'^\W*#include\W+(.+?)\W*$', file_str, re.M)
+
+ # Loop over everything that looks like an include
+ for include_file in includes:
+
+ #Search through include directories for the file
+ file_path = os.path.dirname(filename)
+ for include_path in [file_path] + include_dirs:
+
+ # If we find it, add it to list of files to check
+ temp_path = os.path.join(include_path, include_file)
+ if (os.path.isfile(temp_path)):
+ files = files + [temp_path]
+ num_includes = num_includes + 1 #For circular includes...
+ break
+
+ return kernel_hasher.hexdigest()
+
+
+ """
+    Reads a text file and creates a CUDA kernel module from that
+ """
+ def get_module(self, kernel_filename,
+ include_dirs=[], \
+ defines={}, \
+                    compile_args={'no_extern_c': True}, jit_compile_args={}):
+ """
+ Helper function to print compilation output
+ """
+ def cuda_compile_message_handler(compile_success_bool, info_str, error_str):
+ self.logger.debug("Compilation returned %s", str(compile_success_bool))
+ if info_str:
+ self.logger.debug("Info: %s", info_str)
+ if error_str:
+ self.logger.debug("Error: %s", error_str)
+
+ kernel_filename = os.path.normpath(kernel_filename)
+ kernel_path = os.path.abspath(os.path.join(self.module_path, kernel_filename))
+ #self.logger.debug("Getting %s", kernel_filename)
+
+ # Create a hash of the kernel options
+ options_hasher = hashlib.md5()
+        options_hasher.update(str(defines).encode('utf-8') + str(compile_args).encode('utf-8'))
+ options_hash = options_hasher.hexdigest()
+
+        # Create hash of kernel source
+ source_hash = CudaContext.hash_kernel( \
+ kernel_path, \
+ include_dirs=[self.module_path] + include_dirs)
+
+ # Create final hash
+ root, ext = os.path.splitext(kernel_filename)
+ kernel_hash = root \
+ + "_" + source_hash \
+ + "_" + options_hash \
+ + ext
+ cached_kernel_filename = os.path.join(self.cache_path, kernel_hash)
+
+ # If we have the kernel in our hashmap, return it
+ if (kernel_hash in self.modules.keys()):
+ self.logger.debug("Found kernel %s cached in hashmap (%s)", kernel_filename, kernel_hash)
+ return self.modules[kernel_hash]
+
+ # If we have it on disk, return it
+ elif (self.use_cache and os.path.isfile(cached_kernel_filename)):
+ self.logger.debug("Found kernel %s cached on disk (%s)", kernel_filename, kernel_hash)
+
+ with io.open(cached_kernel_filename, "rb") as file:
+ file_str = file.read()
+ module = cuda.module_from_buffer(file_str, message_handler=cuda_compile_message_handler, **jit_compile_args)
+
+ self.modules[kernel_hash] = module
+ return module
+
+ # Otherwise, compile it from source
+ else:
+ self.logger.debug("Compiling %s (%s)", kernel_filename, kernel_hash)
+
+ #Create kernel string
+ kernel_string = ""
+ for key, value in defines.items():
+ kernel_string += "#define {:s} {:s}\n".format(str(key), str(value))
+ kernel_string += '#include "{:s}"'.format(os.path.join(self.module_path, kernel_filename))
+ if (self.use_cache):
+ cached_kernel_dir = os.path.dirname(cached_kernel_filename)
+ if not os.path.isdir(cached_kernel_dir):
+ os.mkdir(cached_kernel_dir)
+ with io.open(cached_kernel_filename + ".txt", "w") as file:
+ file.write(kernel_string)
+
+
+ with Common.Timer("compiler") as timer:
+ import warnings
+ with warnings.catch_warnings():
+ warnings.filterwarnings("ignore", message="The CUDA compiler succeeded, but said the following:\nkernel.cu", category=UserWarning)
+ cubin = cuda_compiler.compile(kernel_string, include_dirs=include_dirs, cache_dir=False, **compile_args)
+ module = cuda.module_from_buffer(cubin, message_handler=cuda_compile_message_handler, **jit_compile_args)
+ if (self.use_cache):
+ with io.open(cached_kernel_filename, "wb") as file:
+ file.write(cubin)
+
+ self.modules[kernel_hash] = module
+ return module
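+
+ # A usage sketch (hypothetical file and kernel names; assumes a CudaContext
+ # instance `ctx` whose module_path contains cuda/MyKernel.cu):
+ #
+ # module = ctx.get_module("cuda/MyKernel.cu",
+ # defines={'BLOCK_WIDTH': 16, 'BLOCK_HEIGHT': 16},
+ # compile_args={'no_extern_c': True})
+ # kernel = module.get_function("MyKernel")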
+
+ """
+ Clears the kernel cache (useful for debugging & development)
+ """
+ def clear_kernel_cache(self):
+ self.logger.debug("Clearing cache")
+ self.modules = {}
+ gc.collect()
+
+ """
+ Synchronizes all streams etc
+ """
+ def synchronize(self):
+ self.cuda_context.synchronize()
\ No newline at end of file
diff --git a/GPUSimulators/EE2D_KP07_dimsplit.py b/GPUSimulators/EE2D_KP07_dimsplit.py
new file mode 100644
index 0000000..935eb90
--- /dev/null
+++ b/GPUSimulators/EE2D_KP07_dimsplit.py
@@ -0,0 +1,575 @@
+# -*- coding: utf-8 -*-
+
+"""
+This python module implements the dimensionally split 2nd order
+Kurganov-Petrova scheme for the 2D Euler equations
+
+Copyright (C) 2016 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see .
+"""
+
+#Import packages we need
+from GPUSimulators import Simulator, Common
+from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
+import numpy as np
+import ctypes
+import os
+
+#from pycuda import gpuarray
+from hip import hip, hiprtc, hipblas
+
+
+
+
+
+
+
+
+"""
+Class that solves the SW equations using the Forward-Backward linear scheme
+"""
+class EE2D_KP07_dimsplit (BaseSimulator):
+
+ """
+ Initialization routine
+ rho: Density
+ rho_u: Momentum along x-axis
+ rho_v: Momentum along y-axis
+ E: energy
+ nx: Number of cells along x-axis
+ ny: Number of cells along y-axis
+ dx: Grid cell spacing along x-axis
+ dy: Grid cell spacing along y-axis
+ dt: Size of each timestep
+ g: Gravitational constant
+ gamma: Gas constant
+ p: pressure
+ """
+
+ def hip_check(call_result):
+ err = call_result[0]
+ result = call_result[1:]
+ if len(result) == 1:
+ result = result[0]
+ if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
+ raise RuntimeError(str(err))
+ elif (
+ isinstance(err, hiprtc.hiprtcResult)
+ and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
+ ):
+ raise RuntimeError(str(err))
+ return result
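+
+ # e.g. ptr = hip_check(hip.hipMalloc(nbytes)): unpacks the (err, value)
+ # tuples returned by the hip/hiprtc bindings and raises RuntimeError on failure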
+
+ def __init__(self,
+ context,
+ rho, rho_u, rho_v, E,
+ nx, ny,
+ dx, dy,
+ g,
+ gamma,
+ theta=1.3,
+ cfl_scale=0.9,
+ boundary_conditions=BoundaryCondition(),
+ block_width=16, block_height=8):
+
+ # Call super constructor
+ super().__init__(context,
+ nx, ny,
+ dx, dy,
+ boundary_conditions,
+ cfl_scale,
+ 2,
+ block_width, block_height)
+ self.g = np.float32(g)
+ self.gamma = np.float32(gamma)
+ self.theta = np.float32(theta)
+
+ #Get kernels
+ #module = context.get_module("cuda/EE2D_KP07_dimsplit.cu",
+ # defines={
+ # 'BLOCK_WIDTH': self.block_size[0],
+ # 'BLOCK_HEIGHT': self.block_size[1]
+ # },
+ # compile_args={
+ # 'no_extern_c': True,
+ # 'options': ["--use_fast_math"],
+ # },
+ # jit_compile_args={})
+ #self.kernel = module.get_function("KP07DimsplitKernel")
+ #self.kernel.prepare("iiffffffiiPiPiPiPiPiPiPiPiPiiii")
+ #
+ kernel_file_path = os.path.abspath(os.path.join('cuda', 'EE2D_KP07_dimsplit.cu.hip'))
+ with open(kernel_file_path, 'r') as file:
+ kernel_source = file.read()
+
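+ # HIP run-time compilation: build an RTC program from the source string,
+ # compile it for this device's architecture, load the resulting code
+ # object, and look up the kernel symbol by name.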
+ prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"KP07DimsplitKernel", 0, [], []))
+
+ props = hip.hipDeviceProp_t()
+ hip_check(hip.hipGetDeviceProperties(props,0))
+ arch = props.gcnArchName
+
+ print(f"Compiling kernel for {arch}")
+
+ cflags = [b"--offload-arch="+arch]
+ err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
+ if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
+ log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
+ log = bytearray(log_size)
+ hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
+ raise RuntimeError(log.decode())
+ code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
+ code = bytearray(code_size)
+ hip_check(hiprtc.hiprtcGetCode(prog, code))
+ self.module = hip_check(hip.hipModuleLoadData(code))
+
+ self.kernel = hip_check(hip.hipModuleGetFunction(self.module, b"KP07DimsplitKernel"))
+
+ #Create data by uploading to device
+ self.u0 = Common.ArakawaA2D(self.stream,
+ nx, ny,
+ 2, 2,
+ [rho, rho_u, rho_v, E])
+ self.u1 = Common.ArakawaA2D(self.stream,
+ nx, ny,
+ 2, 2,
+ [None, None, None, None])
+ #self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
+ # init device array cfl_data
+ data_h = np.empty(self.grid_size, dtype=np.float32)
+ num_bytes = data_h.size * data_h.itemsize
+ self.cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
+ typestr="float32",shape=self.grid_size)
+
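+ # Initial timestep estimate from the characteristic wave speeds; cfl_data
+ # holds per-cell dt values that computeDt() later reduces to a minimum.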
+ dt_x = np.min(self.dx / (np.abs(rho_u/rho) + np.sqrt(gamma*rho)))
+ dt_y = np.min(self.dy / (np.abs(rho_v/rho) + np.sqrt(gamma*rho)))
+ self.dt = min(dt_x, dt_y)
+ self.cfl_data.fill(self.dt, stream=self.stream)
+
+
+ def substep(self, dt, step_number, external=True, internal=True):
+ self.substepDimsplit(0.5*dt, step_number, external, internal)
+
+ def substepDimsplit(self, dt, substep, external, internal):
+ if external and internal:
+ #print("COMPLETE DOMAIN (dt=" + str(dt) + ")")
+
+# self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
+# self.nx, self.ny,
+# self.dx, self.dy, dt,
+# self.g,
+# self.gamma,
+# self.theta,
+# substep,
+# self.boundary_conditions,
+# self.u0[0].data.gpudata, self.u0[0].data.strides[0],
+# self.u0[1].data.gpudata, self.u0[1].data.strides[0],
+# self.u0[2].data.gpudata, self.u0[2].data.strides[0],
+# self.u0[3].data.gpudata, self.u0[3].data.strides[0],
+# self.u1[0].data.gpudata, self.u1[0].data.strides[0],
+# self.u1[1].data.gpudata, self.u1[1].data.strides[0],
+# self.u1[2].data.gpudata, self.u1[2].data.strides[0],
+# self.u1[3].data.gpudata, self.u1[3].data.strides[0],
+# self.cfl_data.gpudata,
+# 0, 0,
+# self.nx, self.ny)
+
+ #launch kernel (scalars wrapped as ctypes values; device pointers passed as-is)
+ hip_check(
+ hip.hipModuleLaunchKernel(
+ self.kernel,
+ *self.grid_size,
+ *self.block_size,
+ sharedMemBytes=0,
+ stream=self.stream,
+ kernelParams=None,
+ extra=( # pass kernel's arguments
+ ctypes.c_int(self.nx), ctypes.c_int(self.ny),
+ ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
+ ctypes.c_float(self.g),
+ ctypes.c_float(self.gamma),
+ ctypes.c_float(self.theta),
+ ctypes.c_int(substep),
+ ctypes.c_int(self.boundary_conditions),
+ self.u0[0].data, ctypes.c_int(self.u0[0].data.strides[0]),
+ self.u0[1].data, ctypes.c_int(self.u0[1].data.strides[0]),
+ self.u0[2].data, ctypes.c_int(self.u0[2].data.strides[0]),
+ self.u0[3].data, ctypes.c_int(self.u0[3].data.strides[0]),
+ self.u1[0].data, ctypes.c_int(self.u1[0].data.strides[0]),
+ self.u1[1].data, ctypes.c_int(self.u1[1].data.strides[0]),
+ self.u1[2].data, ctypes.c_int(self.u1[2].data.strides[0]),
+ self.u1[3].data, ctypes.c_int(self.u1[3].data.strides[0]),
+ self.cfl_data,
+ ctypes.c_int(0), ctypes.c_int(0),
+ ctypes.c_int(self.nx), ctypes.c_int(self.ny)
+ )
+ )
+ )
+
+ hip_check(hip.hipDeviceSynchronize())
+
+ print("--External & Internal: Launching Kernel is ok")
+
+ return
+
+ if external and not internal:
+ ###################################
+ # XXX: Corners are treated twice! #
+ ###################################
+
+ ns_grid_size = (self.grid_size[0], 1)
+
+ # NORTH
+ # (x0, y0) x (x1, y1)
+ # (0, ny-y_halo) x (nx, ny)
+# self.kernel.prepared_async_call(ns_grid_size, self.block_size, self.stream,
+# self.nx, self.ny,
+# self.dx, self.dy, dt,
+# self.g,
+# self.gamma,
+# self.theta,
+# substep,
+# self.boundary_conditions,
+# self.u0[0].data.gpudata, self.u0[0].data.strides[0],
+# self.u0[1].data.gpudata, self.u0[1].data.strides[0],
+# self.u0[2].data.gpudata, self.u0[2].data.strides[0],
+# self.u0[3].data.gpudata, self.u0[3].data.strides[0],
+# self.u1[0].data.gpudata, self.u1[0].data.strides[0],
+# self.u1[1].data.gpudata, self.u1[1].data.strides[0],
+# self.u1[2].data.gpudata, self.u1[2].data.strides[0],
+# self.u1[3].data.gpudata, self.u1[3].data.strides[0],
+# self.cfl_data.gpudata,
+# 0, self.ny - int(self.u0[0].y_halo),
+# self.nx, self.ny)
+
+ hip_check(
+ hip.hipModuleLaunchKernel(
+ self.kernel,
+ *ns_grid_size,
+ *self.block_size,
+ sharedMemBytes=0,
+ stream=self.stream,
+ kernelParams=None,
+ extra=( # pass kernel's arguments
+ ctypes.c_int(self.nx), ctypes.c_int(self.ny),
+ ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
+ ctypes.c_float(self.g),
+ ctypes.c_float(self.gamma),
+ ctypes.c_float(self.theta),
+ ctypes.c_int(substep),
+ ctypes.c_int(self.boundary_conditions),
+ self.u0[0].data, ctypes.c_int(self.u0[0].data.strides[0]),
+ self.u0[1].data, ctypes.c_int(self.u0[1].data.strides[0]),
+ self.u0[2].data, ctypes.c_int(self.u0[2].data.strides[0]),
+ self.u0[3].data, ctypes.c_int(self.u0[3].data.strides[0]),
+ self.u1[0].data, ctypes.c_int(self.u1[0].data.strides[0]),
+ self.u1[1].data, ctypes.c_int(self.u1[1].data.strides[0]),
+ self.u1[2].data, ctypes.c_int(self.u1[2].data.strides[0]),
+ self.u1[3].data, ctypes.c_int(self.u1[3].data.strides[0]),
+ self.cfl_data,
+ ctypes.c_int(0), ctypes.c_int(self.ny - int(self.u0[0].y_halo)),
+ ctypes.c_int(self.nx), ctypes.c_int(self.ny)
+ )
+ )
+ )
+
+
+ # SOUTH
+ # (x0, y0) x (x1, y1)
+ # (0, 0) x (nx, y_halo)
+# self.kernel.prepared_async_call(ns_grid_size, self.block_size, self.stream,
+# self.nx, self.ny,
+# self.dx, self.dy, dt,
+# self.g,
+# self.gamma,
+# self.theta,
+# substep,
+# self.boundary_conditions,
+# self.u0[0].data.gpudata, self.u0[0].data.strides[0],
+# self.u0[1].data.gpudata, self.u0[1].data.strides[0],
+# self.u0[2].data.gpudata, self.u0[2].data.strides[0],
+# self.u0[3].data.gpudata, self.u0[3].data.strides[0],
+# self.u1[0].data.gpudata, self.u1[0].data.strides[0],
+# self.u1[1].data.gpudata, self.u1[1].data.strides[0],
+# self.u1[2].data.gpudata, self.u1[2].data.strides[0],
+# self.u1[3].data.gpudata, self.u1[3].data.strides[0],
+# self.cfl_data.gpudata,
+# 0, 0,
+# self.nx, int(self.u0[0].y_halo))
+
+ hip_check(
+ hip.hipModuleLaunchKernel(
+ self.kernel,
+ *ns_grid_size,
+ *self.block_size,
+ sharedMemBytes=0,
+ stream=self.stream,
+ kernelParams=None,
+ extra=( # pass kernel's arguments
+ ctypes.c_int(self.nx), ctypes.c_int(self.ny),
+ ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
+ ctypes.c_float(self.g),
+ ctypes.c_float(self.gamma),
+ ctypes.c_float(self.theta),
+ ctypes.c_int(substep),
+ ctypes.c_int(self.boundary_conditions),
+ self.u0[0].data, ctypes.c_int(self.u0[0].data.strides[0]),
+ self.u0[1].data, ctypes.c_int(self.u0[1].data.strides[0]),
+ self.u0[2].data, ctypes.c_int(self.u0[2].data.strides[0]),
+ self.u0[3].data, ctypes.c_int(self.u0[3].data.strides[0]),
+ self.u1[0].data, ctypes.c_int(self.u1[0].data.strides[0]),
+ self.u1[1].data, ctypes.c_int(self.u1[1].data.strides[0]),
+ self.u1[2].data, ctypes.c_int(self.u1[2].data.strides[0]),
+ self.u1[3].data, ctypes.c_int(self.u1[3].data.strides[0]),
+ self.cfl_data,
+ ctypes.c_int(0), ctypes.c_int(0),
+ ctypes.c_int(self.nx), ctypes.c_int(int(self.u0[0].y_halo))
+ )
+ )
+ )
+
+
+ we_grid_size = (1, self.grid_size[1])
+
+ # WEST
+ # (x0, y0) x (x1, y1)
+ # (0, 0) x (x_halo, ny)
+# self.kernel.prepared_async_call(we_grid_size, self.block_size, self.stream,
+# self.nx, self.ny,
+# self.dx, self.dy, dt,
+# self.g,
+# self.gamma,
+# self.theta,
+# substep,
+# self.boundary_conditions,
+# self.u0[0].data.gpudata, self.u0[0].data.strides[0],
+# self.u0[1].data.gpudata, self.u0[1].data.strides[0],
+# self.u0[2].data.gpudata, self.u0[2].data.strides[0],
+# self.u0[3].data.gpudata, self.u0[3].data.strides[0],
+# self.u1[0].data.gpudata, self.u1[0].data.strides[0],
+# self.u1[1].data.gpudata, self.u1[1].data.strides[0],
+# self.u1[2].data.gpudata, self.u1[2].data.strides[0],
+# self.u1[3].data.gpudata, self.u1[3].data.strides[0],
+# self.cfl_data.gpudata,
+# 0, 0,
+# int(self.u0[0].x_halo), self.ny)
+
+ hip_check(
+ hip.hipModuleLaunchKernel(
+ self.kernel,
+ *we_grid_size,
+ *self.block_size,
+ sharedMemBytes=0,
+ stream=self.stream,
+ kernelParams=None,
+ extra=( # pass kernel's arguments
+ ctypes.c_int(self.nx), ctypes.c_int(self.ny),
+ ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
+ ctypes.c_float(self.g),
+ ctypes.c_float(self.gamma),
+ ctypes.c_float(self.theta),
+ ctypes.c_int(substep),
+ ctypes.c_int(self.boundary_conditions),
+ self.u0[0].data, ctypes.c_int(self.u0[0].data.strides[0]),
+ self.u0[1].data, ctypes.c_int(self.u0[1].data.strides[0]),
+ self.u0[2].data, ctypes.c_int(self.u0[2].data.strides[0]),
+ self.u0[3].data, ctypes.c_int(self.u0[3].data.strides[0]),
+ self.u1[0].data, ctypes.c_int(self.u1[0].data.strides[0]),
+ self.u1[1].data, ctypes.c_int(self.u1[1].data.strides[0]),
+ self.u1[2].data, ctypes.c_int(self.u1[2].data.strides[0]),
+ self.u1[3].data, ctypes.c_int(self.u1[3].data.strides[0]),
+ self.cfl_data,
+ ctypes.c_int(0), ctypes.c_int(0),
+ ctypes.c_int(int(self.u0[0].x_halo)), ctypes.c_int(self.ny)
+ )
+ )
+ )
+
+
+ # EAST
+ # (x0, y0) x (x1, y1)
+ # (nx-x_halo, 0) x (nx, ny)
+# self.kernel.prepared_async_call(we_grid_size, self.block_size, self.stream,
+# self.nx, self.ny,
+# self.dx, self.dy, dt,
+# self.g,
+# self.gamma,
+# self.theta,
+# substep,
+# self.boundary_conditions,
+# self.u0[0].data.gpudata, self.u0[0].data.strides[0],
+# self.u0[1].data.gpudata, self.u0[1].data.strides[0],
+# self.u0[2].data.gpudata, self.u0[2].data.strides[0],
+# self.u0[3].data.gpudata, self.u0[3].data.strides[0],
+# self.u1[0].data.gpudata, self.u1[0].data.strides[0],
+# self.u1[1].data.gpudata, self.u1[1].data.strides[0],
+# self.u1[2].data.gpudata, self.u1[2].data.strides[0],
+# self.u1[3].data.gpudata, self.u1[3].data.strides[0],
+# self.cfl_data.gpudata,
+# self.nx - int(self.u0[0].x_halo), 0,
+# self.nx, self.ny)
+
+ hip_check(
+ hip.hipModuleLaunchKernel(
+ self.kernel,
+ *we_grid_size,
+ *self.block_size,
+ sharedMemBytes=0,
+ stream=self.stream,
+ kernelParams=None,
+ extra=( # pass kernel's arguments
+ ctypes.c_int(self.nx), ctypes.c_int(self.ny),
+ ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
+ ctypes.c_float(self.g),
+ ctypes.c_float(self.gamma),
+ ctypes.c_float(self.theta),
+ ctypes.c_int(substep),
+ ctypes.c_int(self.boundary_conditions),
+ self.u0[0].data, ctypes.c_int(self.u0[0].data.strides[0]),
+ self.u0[1].data, ctypes.c_int(self.u0[1].data.strides[0]),
+ self.u0[2].data, ctypes.c_int(self.u0[2].data.strides[0]),
+ self.u0[3].data, ctypes.c_int(self.u0[3].data.strides[0]),
+ self.u1[0].data, ctypes.c_int(self.u1[0].data.strides[0]),
+ self.u1[1].data, ctypes.c_int(self.u1[1].data.strides[0]),
+ self.u1[2].data, ctypes.c_int(self.u1[2].data.strides[0]),
+ self.u1[3].data, ctypes.c_int(self.u1[3].data.strides[0]),
+ self.cfl_data,
+ ctypes.c_int(self.nx - int(self.u0[0].x_halo)), ctypes.c_int(0),
+ ctypes.c_int(self.nx), ctypes.c_int(self.ny)
+ )
+ )
+ )
+
+ hip_check(hip.hipDeviceSynchronize())
+
+ print("--External and not Internal: Launching Kernel is ok")
+
+ return
+
+ if internal and not external:
+
+ # INTERNAL DOMAIN
+ # (x0, y0) x (x1, y1)
+ # (x_halo, y_halo) x (nx - x_halo, ny - y_halo)
+# self.kernel.prepared_async_call(self.grid_size, self.block_size, self.internal_stream,
+# self.nx, self.ny,
+# self.dx, self.dy, dt,
+# self.g,
+# self.gamma,
+# self.theta,
+# substep,
+# self.boundary_conditions,
+# self.u0[0].data.gpudata, self.u0[0].data.strides[0],
+# self.u0[1].data.gpudata, self.u0[1].data.strides[0],
+# self.u0[2].data.gpudata, self.u0[2].data.strides[0],
+# self.u0[3].data.gpudata, self.u0[3].data.strides[0],
+# self.u1[0].data.gpudata, self.u1[0].data.strides[0],
+# self.u1[1].data.gpudata, self.u1[1].data.strides[0],
+# self.u1[2].data.gpudata, self.u1[2].data.strides[0],
+# self.u1[3].data.gpudata, self.u1[3].data.strides[0],
+# self.cfl_data.gpudata,
+# int(self.u0[0].x_halo), int(self.u0[0].y_halo),
+# self.nx - int(self.u0[0].x_halo), self.ny - int(self.u0[0].y_halo))
+
+
+ hip_check(
+ hip.hipModuleLaunchKernel(
+ self.kernel,
+ *self.grid_size,
+ *self.block_size,
+ sharedMemBytes=0,
+ stream=self.internal_stream,
+ kernelParams=None,
+ extra=( # pass kernel's arguments
+ ctypes.c_int(self.nx), ctypes.c_int(self.ny),
+ ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
+ ctypes.c_float(self.g),
+ ctypes.c_float(self.gamma),
+ ctypes.c_float(self.theta),
+ ctypes.c_int(substep),
+ ctypes.c_int(self.boundary_conditions),
+ self.u0[0].data, ctypes.c_int(self.u0[0].data.strides[0]),
+ self.u0[1].data, ctypes.c_int(self.u0[1].data.strides[0]),
+ self.u0[2].data, ctypes.c_int(self.u0[2].data.strides[0]),
+ self.u0[3].data, ctypes.c_int(self.u0[3].data.strides[0]),
+ self.u1[0].data, ctypes.c_int(self.u1[0].data.strides[0]),
+ self.u1[1].data, ctypes.c_int(self.u1[1].data.strides[0]),
+ self.u1[2].data, ctypes.c_int(self.u1[2].data.strides[0]),
+ self.u1[3].data, ctypes.c_int(self.u1[3].data.strides[0]),
+ self.cfl_data,
+ ctypes.c_int(int(self.u0[0].x_halo)), ctypes.c_int(int(self.u0[0].y_halo)),
+ ctypes.c_int(self.nx - int(self.u0[0].x_halo)), ctypes.c_int(self.ny - int(self.u0[0].y_halo))
+ )
+ )
+ )
+
+ hip_check(hip.hipDeviceSynchronize())
+
+ print("--Internal and not External: Launching Kernel is ok")
+ return
+
+ def swapBuffers(self):
+ self.u0, self.u1 = self.u1, self.u0
+ return
+
+ def getOutput(self):
+ return self.u0
+
+ def check(self):
+ self.u0.check()
+ self.u1.check()
+ return
+
+ # computing min with hipblas: the output is an index
+ def min_hipblas(self, num_elements, cfl_data, stream):
+ num_bytes = num_elements * np.dtype(np.float32).itemsize
+ num_bytes_i = np.dtype(np.int32).itemsize
+ indx_d = hip_check(hip.hipMalloc(num_bytes_i))
+ indx_h = np.zeros(1, dtype=np.int32)
+ x_temp = np.zeros(num_elements, dtype=np.float32)
+
+ #print("--size.data:", cfl_data.size)
+ handle = hip_check(hipblas.hipblasCreate())
+
+ #hip_check(hipblas.hipblasGetStream(handle, stream))
+ #"incx" [int] specifies the increment for the elements of x. incx must be > 0.
+ hip_check(hipblas.hipblasIsamin(handle, num_elements, cfl_data, 1, indx_d))
+
+ # destruction of handle
+ hip_check(hipblas.hipblasDestroy(handle))
+
+ # copy result (stored in indx_d) back to the host (store in indx_h)
+ hip_check(hip.hipMemcpyAsync(indx_h,indx_d,num_bytes_i,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
+ hip_check(hip.hipMemcpyAsync(x_temp,cfl_data,num_bytes,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
+ #hip_check(hip.hipMemsetAsync(cfl_data,0,num_bytes,self.stream))
+ hip_check(hip.hipStreamSynchronize(stream))
+
+ min_value = x_temp.flatten()[indx_h[0]-1] # hipblasIsamin returns a 1-based index
+
+ # clean up: free only the temporary index buffer; the stream and
+ # cfl_data are owned by the simulator and reused across calls
+ hip_check(hip.hipFree(indx_d))
+ return min_value
+
+ def computeDt(self):
+ #max_dt = gpuarray.min(self.cfl_data, stream=self.stream).get();
+ max_dt = self.min_hipblas(self.cfl_data.size, self.cfl_data, self.stream)
+ return max_dt*0.5
diff --git a/GPUSimulators/FORCE.py b/GPUSimulators/FORCE.py
new file mode 100644
index 0000000..092711a
--- /dev/null
+++ b/GPUSimulators/FORCE.py
@@ -0,0 +1,242 @@
+# -*- coding: utf-8 -*-
+
+"""
+This python module implements the FORCE flux
+for the shallow water equations
+
+Copyright (C) 2016 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see .
+"""
+
+#Import packages we need
+from GPUSimulators import Simulator, Common
+from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
+import numpy as np
+import ctypes
+import os
+#from pycuda import gpuarray
+from hip import hip, hiprtc, hipblas
+
+
+
+
+
+
+
+
+
+"""
+Class that solves the SW equations
+"""
+class FORCE (Simulator.BaseSimulator):
+
+ """
+ Initialization routine
+ h0: Water depth incl ghost cells, (nx+1)*(ny+1) cells
+ hu0: Initial momentum along x-axis incl ghost cells, (nx+1)*(ny+1) cells
+ hv0: Initial momentum along y-axis incl ghost cells, (nx+1)*(ny+1) cells
+ nx: Number of cells along x-axis
+ ny: Number of cells along y-axis
+ dx: Grid cell spacing along x-axis (20 000 m)
+ dy: Grid cell spacing along y-axis (20 000 m)
+ dt: Size of each timestep (90 s)
+ g: Gravitational acceleration (9.81 m/s^2)
+ """
+ def hip_check(call_result):
+ err = call_result[0]
+ result = call_result[1:]
+ if len(result) == 1:
+ result = result[0]
+ if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
+ raise RuntimeError(str(err))
+ elif (
+ isinstance(err, hiprtc.hiprtcResult)
+ and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
+ ):
+ raise RuntimeError(str(err))
+ return result
+
+ def __init__(self,
+ context,
+ h0, hu0, hv0,
+ nx, ny,
+ dx, dy,
+ g,
+ cfl_scale=0.9,
+ boundary_conditions=BoundaryCondition(),
+ block_width=16, block_height=16):
+
+ # Call super constructor
+ super().__init__(context,
+ nx, ny,
+ dx, dy,
+ boundary_conditions,
+ cfl_scale,
+ 1,
+ block_width, block_height)
+ self.g = np.float32(g)
+
+ #Get kernels
+# module = context.get_module("cuda/SWE2D_FORCE.cu.hip",
+# defines={
+# 'BLOCK_WIDTH': self.block_size[0],
+# 'BLOCK_HEIGHT': self.block_size[1]
+# },
+# compile_args={
+# 'no_extern_c': True,
+# 'options': ["--use_fast_math"],
+# },
+# jit_compile_args={})
+# self.kernel = module.get_function("FORCEKernel")
+# self.kernel.prepare("iiffffiPiPiPiPiPiPiP")
+
+ kernel_file_path = os.path.abspath(os.path.join('cuda', 'SWE2D_FORCE.cu.hip'))
+ with open(kernel_file_path, 'r') as file:
+ kernel_source = file.read()
+
+ prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"FORCEKernel", 0, [], []))
+
+ props = hip.hipDeviceProp_t()
+ hip_check(hip.hipGetDeviceProperties(props,0))
+ arch = props.gcnArchName
+
+ print(f"Compiling kernel .FORCEKernel. for {arch}")
+
+ cflags = [b"--offload-arch="+arch]
+ err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
+ if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
+ log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
+ log = bytearray(log_size)
+ hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
+ raise RuntimeError(log.decode())
+ code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
+ code = bytearray(code_size)
+ hip_check(hiprtc.hiprtcGetCode(prog, code))
+ self.module = hip_check(hip.hipModuleLoadData(code))
+
+ self.kernel = hip_check(hip.hipModuleGetFunction(self.module, b"FORCEKernel"))
+
+
+ #Create data by uploading to device
+ self.u0 = Common.ArakawaA2D(self.stream,
+ nx, ny,
+ 1, 1,
+ [h0, hu0, hv0])
+ self.u1 = Common.ArakawaA2D(self.stream,
+ nx, ny,
+ 1, 1,
+ [None, None, None])
+ #self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
+ data_h = np.empty(self.grid_size, dtype=np.float32)
+ num_bytes = data_h.size * data_h.itemsize
+ self.cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
+ typestr="float32",shape=self.grid_size)
+
+ dt_x = np.min(self.dx / (np.abs(hu0/h0) + np.sqrt(g*h0)))
+ dt_y = np.min(self.dy / (np.abs(hv0/h0) + np.sqrt(g*h0)))
+ dt = min(dt_x, dt_y)
+ self.cfl_data.fill(dt, stream=self.stream)
+
+ def substep(self, dt, step_number):
+# self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
+# self.nx, self.ny,
+# self.dx, self.dy, dt,
+# self.g,
+# self.boundary_conditions,
+# self.u0[0].data.gpudata, self.u0[0].data.strides[0],
+# self.u0[1].data.gpudata, self.u0[1].data.strides[0],
+# self.u0[2].data.gpudata, self.u0[2].data.strides[0],
+# self.u1[0].data.gpudata, self.u1[0].data.strides[0],
+# self.u1[1].data.gpudata, self.u1[1].data.strides[0],
+# self.u1[2].data.gpudata, self.u1[2].data.strides[0],
+# self.cfl_data.gpudata)
+# self.u0, self.u1 = self.u1, self.u0
+
+ #launch kernel (scalars wrapped as ctypes values; device pointers passed as-is)
+ hip_check(
+ hip.hipModuleLaunchKernel(
+ self.kernel,
+ *self.grid_size,
+ *self.block_size,
+ sharedMemBytes=0,
+ stream=self.stream,
+ kernelParams=None,
+ extra=( # pass kernel's arguments
+ ctypes.c_int(self.nx), ctypes.c_int(self.ny),
+ ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
+ ctypes.c_float(self.g),
+ ctypes.c_int(self.boundary_conditions),
+ self.u0[0].data, ctypes.c_int(self.u0[0].data.strides[0]),
+ self.u0[1].data, ctypes.c_int(self.u0[1].data.strides[0]),
+ self.u0[2].data, ctypes.c_int(self.u0[2].data.strides[0]),
+ self.u1[0].data, ctypes.c_int(self.u1[0].data.strides[0]),
+ self.u1[1].data, ctypes.c_int(self.u1[1].data.strides[0]),
+ self.u1[2].data, ctypes.c_int(self.u1[2].data.strides[0]),
+ self.cfl_data
+ )
+ )
+ )
+
+ hip_check(hip.hipDeviceSynchronize())
+ self.u0, self.u1 = self.u1, self.u0
+
+ print("--Launching Kernel .FORCEKernel. is ok")
+
+ def getOutput(self):
+ return self.u0
+
+ def check(self):
+ self.u0.check()
+ self.u1.check()
+
+ # computing min with hipblas: the output is an index
+ def min_hipblas(self, num_elements, cfl_data, stream):
+ num_bytes = num_elements * np.dtype(np.float32).itemsize
+ num_bytes_i = np.dtype(np.int32).itemsize
+ indx_d = hip_check(hip.hipMalloc(num_bytes_i))
+ indx_h = np.zeros(1, dtype=np.int32)
+ x_temp = np.zeros(num_elements, dtype=np.float32)
+
+ #print("--size.data:", cfl_data.size)
+ handle = hip_check(hipblas.hipblasCreate())
+
+ #hip_check(hipblas.hipblasGetStream(handle, stream))
+ #"incx" [int] specifies the increment for the elements of x. incx must be > 0.
+ hip_check(hipblas.hipblasIsamin(handle, num_elements, cfl_data, 1, indx_d))
+
+ # destruction of handle
+ hip_check(hipblas.hipblasDestroy(handle))
+
+ # copy result (stored in indx_d) back to the host (store in indx_h)
+ hip_check(hip.hipMemcpyAsync(indx_h,indx_d,num_bytes_i,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
+ hip_check(hip.hipMemcpyAsync(x_temp,cfl_data,num_bytes,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
+ #hip_check(hip.hipMemsetAsync(cfl_data,0,num_bytes,self.stream))
+ hip_check(hip.hipStreamSynchronize(stream))
+
+ min_value = x_temp.flatten()[indx_h[0]-1] # hipblasIsamin returns a 1-based index
+
+ # clean up: free only the temporary index buffer; the stream and
+ # cfl_data are owned by the simulator and reused across calls
+ hip_check(hip.hipFree(indx_d))
+ return min_value
+
+ def computeDt(self):
+ #max_dt = gpuarray.min(self.cfl_data, stream=self.stream).get();
+ max_dt = self.min_hipblas(self.cfl_data.size, self.cfl_data, self.stream)
+ return max_dt
diff --git a/GPUSimulators/HLL.py b/GPUSimulators/HLL.py
new file mode 100644
index 0000000..792d3c6
--- /dev/null
+++ b/GPUSimulators/HLL.py
@@ -0,0 +1,235 @@
+# -*- coding: utf-8 -*-
+
+"""
+This python module implements the HLL flux
+
+Copyright (C) 2016 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see .
+"""
+
+#Import packages we need
+from GPUSimulators import Simulator, Common
+from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
+import numpy as np
+import ctypes
+import os
+
+#from pycuda import gpuarray
+from hip import hip, hiprtc, hipblas
+
+
+
+
+
+"""
+Class that solves the SW equations using the Harten-Lax-van Leer approximate Riemann solver
+"""
+class HLL (Simulator.BaseSimulator):
+
+ """
+ Initialization routine
+ h0: Water depth incl ghost cells, (nx+1)*(ny+1) cells
+ hu0: Initial momentum along x-axis incl ghost cells, (nx+1)*(ny+1) cells
+ hv0: Initial momentum along y-axis incl ghost cells, (nx+1)*(ny+1) cells
+ nx: Number of cells along x-axis
+ ny: Number of cells along y-axis
+ dx: Grid cell spacing along x-axis (20 000 m)
+ dy: Grid cell spacing along y-axis (20 000 m)
+ dt: Size of each timestep (90 s)
+ g: Gravitational acceleration (9.81 m/s^2)
+ """
+ def hip_check(call_result):
+ err = call_result[0]
+ result = call_result[1:]
+ if len(result) == 1:
+ result = result[0]
+ if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
+ raise RuntimeError(str(err))
+ elif (
+ isinstance(err, hiprtc.hiprtcResult)
+ and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
+ ):
+ raise RuntimeError(str(err))
+ return result
+
+ def __init__(self,
+ context,
+ h0, hu0, hv0,
+ nx, ny,
+ dx, dy,
+ g,
+ cfl_scale=0.9,
+ boundary_conditions=BoundaryCondition(),
+ block_width=16, block_height=16):
+
+ # Call super constructor
+ super().__init__(context,
+ nx, ny,
+ dx, dy,
+ boundary_conditions,
+ cfl_scale,
+ 1,
+ block_width, block_height)
+ self.g = np.float32(g)
+
+ #Get kernels
+# module = context.get_module("cuda/SWE2D_HLL.cu",
+# defines={
+# 'BLOCK_WIDTH': self.block_size[0],
+# 'BLOCK_HEIGHT': self.block_size[1]
+# },
+# compile_args={
+# 'no_extern_c': True,
+# 'options': ["--use_fast_math"],
+# },
+# jit_compile_args={})
+# self.kernel = module.get_function("HLLKernel")
+# self.kernel.prepare("iiffffiPiPiPiPiPiPiP")
+
+ kernel_file_path = os.path.abspath(os.path.join('cuda', 'SWE2D_HLL.cu.hip'))
+ with open(kernel_file_path, 'r') as file:
+ kernel_source = file.read()
+
+ prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"HLLKernel", 0, [], []))
+
+ props = hip.hipDeviceProp_t()
+ hip_check(hip.hipGetDeviceProperties(props,0))
+ arch = props.gcnArchName
+
+ print(f"Compiling kernel .HLLKernel. for {arch}")
+
+ cflags = [b"--offload-arch="+arch]
+ err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
+ if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
+ log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
+ log = bytearray(log_size)
+ hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
+ raise RuntimeError(log.decode())
+ code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
+ code = bytearray(code_size)
+ hip_check(hiprtc.hiprtcGetCode(prog, code))
+ self.module = hip_check(hip.hipModuleLoadData(code))
+
+ self.kernel = hip_check(hip.hipModuleGetFunction(self.module, b"HLLKernel"))
+
+ #Create data by uploading to device
+ self.u0 = Common.ArakawaA2D(self.stream,
+ nx, ny,
+ 1, 1,
+ [h0, hu0, hv0])
+ self.u1 = Common.ArakawaA2D(self.stream,
+ nx, ny,
+ 1, 1,
+ [None, None, None])
+ #self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
+ data_h = np.empty(self.grid_size, dtype=np.float32)
+ num_bytes = data_h.size * data_h.itemsize
+ self.cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
+ typestr="float32",shape=self.grid_size)
+
+ dt_x = np.min(self.dx / (np.abs(hu0/h0) + np.sqrt(g*h0)))
+ dt_y = np.min(self.dy / (np.abs(hv0/h0) + np.sqrt(g*h0)))
+ dt = min(dt_x, dt_y)
+ self.cfl_data.fill(dt, stream=self.stream)
+
+ def substep(self, dt, step_number):
+# self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
+# self.nx, self.ny,
+# self.dx, self.dy, dt,
+# self.g,
+# self.boundary_conditions,
+# self.u0[0].data.gpudata, self.u0[0].data.strides[0],
+# self.u0[1].data.gpudata, self.u0[1].data.strides[0],
+# self.u0[2].data.gpudata, self.u0[2].data.strides[0],
+# self.u1[0].data.gpudata, self.u1[0].data.strides[0],
+# self.u1[1].data.gpudata, self.u1[1].data.strides[0],
+# self.u1[2].data.gpudata, self.u1[2].data.strides[0],
+# self.cfl_data.gpudata)
+ #launch kernel (scalars wrapped as ctypes values; device pointers passed as-is)
+ hip_check(
+ hip.hipModuleLaunchKernel(
+ self.kernel,
+ *self.grid_size,
+ *self.block_size,
+ sharedMemBytes=0,
+ stream=self.stream,
+ kernelParams=None,
+ extra=( # pass kernel's arguments
+ ctypes.c_int(self.nx), ctypes.c_int(self.ny),
+ ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
+ ctypes.c_float(self.g),
+ ctypes.c_int(self.boundary_conditions),
+ self.u0[0].data, ctypes.c_int(self.u0[0].data.strides[0]),
+ self.u0[1].data, ctypes.c_int(self.u0[1].data.strides[0]),
+ self.u0[2].data, ctypes.c_int(self.u0[2].data.strides[0]),
+ self.u1[0].data, ctypes.c_int(self.u1[0].data.strides[0]),
+ self.u1[1].data, ctypes.c_int(self.u1[1].data.strides[0]),
+ self.u1[2].data, ctypes.c_int(self.u1[2].data.strides[0]),
+ self.cfl_data
+ )
+ )
+ )
+
+ hip_check(hip.hipDeviceSynchronize())
+
+ self.u0, self.u1 = self.u1, self.u0
+
+ print("--Launching Kernel .HLLKernel. is ok")
+
+ def getOutput(self):
+ return self.u0
+
+ def check(self):
+ self.u0.check()
+ self.u1.check()
+
+ # computing min with hipblas: the output is an index
+ def min_hipblas(self, num_elements, cfl_data, stream):
+ num_bytes = num_elements * np.dtype(np.float32).itemsize
+ num_bytes_i = np.dtype(np.int32).itemsize
+ indx_d = hip_check(hip.hipMalloc(num_bytes_i))
+ indx_h = np.zeros(1, dtype=np.int32)
+ x_temp = np.zeros(num_elements, dtype=np.float32)
+
+ #print("--size.data:", cfl_data.size)
+ handle = hip_check(hipblas.hipblasCreate())
+
+ #hip_check(hipblas.hipblasGetStream(handle, stream))
+ #"incx" [int] specifies the increment for the elements of x. incx must be > 0.
+ hip_check(hipblas.hipblasIsamin(handle, num_elements, cfl_data, 1, indx_d))
+
+ # destruction of handle
+ hip_check(hipblas.hipblasDestroy(handle))
+
+ # copy result (stored in indx_d) back to the host (store in indx_h)
+ hip_check(hip.hipMemcpyAsync(indx_h,indx_d,num_bytes_i,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
+ hip_check(hip.hipMemcpyAsync(x_temp,cfl_data,num_bytes,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
+ #hip_check(hip.hipMemsetAsync(cfl_data,0,num_bytes,self.stream))
+ hip_check(hip.hipStreamSynchronize(stream))
+
+ min_value = x_temp.flatten()[indx_h[0]-1] # hipblasIsamin returns a 1-based index
+
+ # clean up: free only the temporary index buffer; the stream and
+ # cfl_data are owned by the simulator and reused across calls
+ hip_check(hip.hipFree(indx_d))
+ return min_value
+
+ def computeDt(self):
+ #max_dt = gpuarray.min(self.cfl_data, stream=self.stream).get();
+ max_dt = self.min_hipblas(self.cfl_data.size, self.cfl_data, self.stream)
+ return max_dt*0.5
diff --git a/GPUSimulators/HLL2.py b/GPUSimulators/HLL2.py
new file mode 100644
index 0000000..b5c0dc0
--- /dev/null
+++ b/GPUSimulators/HLL2.py
@@ -0,0 +1,247 @@
+# -*- coding: utf-8 -*-
+
+"""
+This python module implements the 2nd order HLL flux
+
+Copyright (C) 2016 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see .
+"""
+
+#Import packages we need
+from GPUSimulators import Simulator, Common
+from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
+import numpy as np
+import ctypes
+import os
+
+#from pycuda import gpuarray
+from hip import hip, hiprtc, hipblas
+
+
+
+
+
+
+
+"""
+Class that solves the SW equations using the dimensionally split 2nd order HLL scheme
+"""
+class HLL2 (Simulator.BaseSimulator):
+
+ """
+ Initialization routine
+ h0: Water depth incl ghost cells, (nx+1)*(ny+1) cells
+ hu0: Initial momentum along x-axis incl ghost cells, (nx+1)*(ny+1) cells
+ hv0: Initial momentum along y-axis incl ghost cells, (nx+1)*(ny+1) cells
+ nx: Number of cells along x-axis
+ ny: Number of cells along y-axis
+ dx: Grid cell spacing along x-axis (20 000 m)
+ dy: Grid cell spacing along y-axis (20 000 m)
+ dt: Size of each timestep (90 s)
+ g: Gravitational acceleration (9.81 m/s^2)
+ """
+ def hip_check(call_result):
+ err = call_result[0]
+ result = call_result[1:]
+ if len(result) == 1:
+ result = result[0]
+ if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
+ raise RuntimeError(str(err))
+ elif (
+ isinstance(err, hiprtc.hiprtcResult)
+ and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
+ ):
+ raise RuntimeError(str(err))
+ return result
+
+ def __init__(self,
+ context,
+ h0, hu0, hv0,
+ nx, ny,
+ dx, dy,
+ g,
+ theta=1.8,
+ cfl_scale=0.9,
+ boundary_conditions=BoundaryCondition(),
+ block_width=16, block_height=16):
+
+ # Call super constructor
+ super().__init__(context,
+ nx, ny,
+ dx, dy,
+ boundary_conditions,
+ cfl_scale,
+ 2,
+ block_width, block_height)
+ self.g = np.float32(g)
+ self.theta = np.float32(theta)
+
+ #Get kernels
+# module = context.get_module("cuda/SWE2D_HLL2.cu",
+# defines={
+# 'BLOCK_WIDTH': self.block_size[0],
+# 'BLOCK_HEIGHT': self.block_size[1]
+# },
+# compile_args={
+# 'no_extern_c': True,
+# 'options': ["--use_fast_math"],
+# },
+# jit_compile_args={})
+# self.kernel = module.get_function("HLL2Kernel")
+# self.kernel.prepare("iifffffiiPiPiPiPiPiPiP")
+
+ kernel_file_path = os.path.abspath(os.path.join('cuda', 'SWE2D_HLL2.cu.hip'))
+ with open(kernel_file_path, 'r') as file:
+ kernel_source = file.read()
+
+ prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"HLL2Kernel", 0, [], []))
+
+ props = hip.hipDeviceProp_t()
+ hip_check(hip.hipGetDeviceProperties(props,0))
+ arch = props.gcnArchName
+
+ print(f"Compiling kernel .HLL2Kernel. for {arch}")
+
+ cflags = [b"--offload-arch="+arch]
+ err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
+ if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
+ log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
+ log = bytearray(log_size)
+ hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
+ raise RuntimeError(log.decode())
+ code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
+ code = bytearray(code_size)
+ hip_check(hiprtc.hiprtcGetCode(prog, code))
+ self.module = hip_check(hip.hipModuleLoadData(code))
+
+ self.kernel = hip_check(hip.hipModuleGetFunction(self.module, b"HLL2Kernel"))
+
+ #Create data by uploading to device
+ self.u0 = Common.ArakawaA2D(self.stream,
+ nx, ny,
+ 2, 2,
+ [h0, hu0, hv0])
+ self.u1 = Common.ArakawaA2D(self.stream,
+ nx, ny,
+ 2, 2,
+ [None, None, None])
+ #self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
+ data_h = np.empty(self.grid_size, dtype=np.float32)
+ num_bytes = data_h.size * data_h.itemsize
+ self.cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
+ typestr="float32",shape=self.grid_size)
+
+ dt_x = np.min(self.dx / (np.abs(hu0/h0) + np.sqrt(g*h0)))
+ dt_y = np.min(self.dy / (np.abs(hv0/h0) + np.sqrt(g*h0)))
+ dt = min(dt_x, dt_y)
+ self.cfl_data.fill(dt, stream=self.stream)
+
+ def substep(self, dt, step_number):
+ self.substepDimsplit(dt*0.5, step_number)
+
+ def substepDimsplit(self, dt, substep):
+# self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
+# self.nx, self.ny,
+# self.dx, self.dy, dt,
+# self.g,
+# self.theta,
+# substep,
+# self.boundary_conditions,
+# self.u0[0].data.gpudata, self.u0[0].data.strides[0],
+# self.u0[1].data.gpudata, self.u0[1].data.strides[0],
+# self.u0[2].data.gpudata, self.u0[2].data.strides[0],
+# self.u1[0].data.gpudata, self.u1[0].data.strides[0],
+# self.u1[1].data.gpudata, self.u1[1].data.strides[0],
+# self.u1[2].data.gpudata, self.u1[2].data.strides[0],
+# self.cfl_data.gpudata)
+
+ #launch kernel (scalars wrapped as ctypes values; device pointers passed as-is)
+ hip_check(
+ hip.hipModuleLaunchKernel(
+ self.kernel,
+ *self.grid_size,
+ *self.block_size,
+ sharedMemBytes=0,
+ stream=self.stream,
+ kernelParams=None,
+ extra=( # pass kernel's arguments
+ ctypes.c_int(self.nx), ctypes.c_int(self.ny),
+ ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
+ ctypes.c_float(self.g),
+ ctypes.c_float(self.theta),
+ ctypes.c_int(substep),
+ ctypes.c_int(self.boundary_conditions),
+ self.u0[0].data, ctypes.c_int(self.u0[0].data.strides[0]),
+ self.u0[1].data, ctypes.c_int(self.u0[1].data.strides[0]),
+ self.u0[2].data, ctypes.c_int(self.u0[2].data.strides[0]),
+ self.u1[0].data, ctypes.c_int(self.u1[0].data.strides[0]),
+ self.u1[1].data, ctypes.c_int(self.u1[1].data.strides[0]),
+ self.u1[2].data, ctypes.c_int(self.u1[2].data.strides[0]),
+ self.cfl_data
+ )
+ )
+ )
+
+ hip_check(hip.hipDeviceSynchronize())
+ self.u0, self.u1 = self.u1, self.u0
+
+ print("--Launching Kernel .HLL2Kernel. is ok")
+
+ def getOutput(self):
+ return self.u0
+
+ def check(self):
+ self.u0.check()
+ self.u1.check()
+
+ # computing min with hipblas: the output is an index
+ def min_hipblas(self, num_elements, cfl_data, stream):
+ num_bytes = num_elements * np.dtype(np.float32).itemsize
+ num_bytes_i = np.dtype(np.int32).itemsize
+ indx_d = hip_check(hip.hipMalloc(num_bytes_i))
+ indx_h = np.zeros(1, dtype=np.int32)
+ x_temp = np.zeros(num_elements, dtype=np.float32)
+
+ #print("--size.data:", cfl_data.size)
+ handle = hip_check(hipblas.hipblasCreate())
+
+ #hip_check(hipblas.hipblasGetStream(handle, stream))
+ #"incx" [int] specifies the increment for the elements of x. incx must be > 0.
+ hip_check(hipblas.hipblasIsamin(handle, num_elements, cfl_data, 1, indx_d))
+
+ # destruction of handle
+ hip_check(hipblas.hipblasDestroy(handle))
+
+ # copy result (stored in indx_d) back to the host (store in indx_h)
+ hip_check(hip.hipMemcpyAsync(indx_h,indx_d,num_bytes_i,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
+ hip_check(hip.hipMemcpyAsync(x_temp,cfl_data,num_bytes,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
+ #hip_check(hip.hipMemsetAsync(cfl_data,0,num_bytes,self.stream))
+ hip_check(hip.hipStreamSynchronize(stream))
+
+ min_value = x_temp.flatten()[indx_h[0]-1] # hipblasIsamin returns a 1-based index
+
+ # clean up: free only the temporary index buffer; the stream and
+ # cfl_data are owned by the simulator and reused across calls
+ hip_check(hip.hipFree(indx_d))
+ return min_value
+
+ def computeDt(self):
+ #max_dt = gpuarray.min(self.cfl_data, stream=self.stream).get();
+ max_dt = self.min_hipblas(self.cfl_data.size, self.cfl_data, self.stream)
+ return max_dt*0.5
+
diff --git a/GPUSimulators/IPythonMagic.py b/GPUSimulators/IPythonMagic.py
new file mode 100644
index 0000000..fa452df
--- /dev/null
+++ b/GPUSimulators/IPythonMagic.py
@@ -0,0 +1,193 @@
+# -*- coding: utf-8 -*-
+
+"""
+This python module implements helpers for IPython / Jupyter and CUDA
+
+Copyright (C) 2018 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see .
+"""
+
+import logging
+import gc
+
+from IPython.core import magic_arguments
+from IPython.core.magic import line_magic, Magics, magics_class
+import pycuda.driver as cuda
+
+from GPUSimulators import Common, CudaContext
+
+
+@magics_class
+class MagicCudaContext(Magics):
+ @line_magic
+ @magic_arguments.magic_arguments()
+ @magic_arguments.argument(
+ 'name', type=str, help='Name of context to create')
+ @magic_arguments.argument(
+ '--blocking', '-b', action="store_true", help='Enable blocking context')
+ @magic_arguments.argument(
+ '--no_cache', '-nc', action="store_true", help='Disable caching of kernels')
+ @magic_arguments.argument(
+ '--no_autotuning', '-na', action="store_true", help='Disable autotuning of kernels')
+ def cuda_context_handler(self, line):
+ args = magic_arguments.parse_argstring(self.cuda_context_handler, line)
+ self.logger = logging.getLogger(__name__)
+
+ self.logger.info("Registering %s in user workspace", args.name)
+
+ context_flags = None
+ if (args.blocking):
+ context_flags = cuda.ctx_flags.SCHED_BLOCKING_SYNC
+
+ if args.name in self.shell.user_ns.keys():
+ self.logger.debug("Context already registered! Ignoring")
+ return
+ else:
+ self.logger.debug("Creating context")
+ use_cache = not args.no_cache
+ use_autotuning = not args.no_autotuning
+ self.shell.user_ns[args.name] = CudaContext.CudaContext(context_flags=context_flags, use_cache=use_cache, autotuning=use_autotuning)
+
+ # this function will be called on exceptions in any cell
+ def custom_exc(shell, etype, evalue, tb, tb_offset=None):
+ self.logger.exception("Exception caught: Resetting to CUDA context %s", args.name)
+ while (cuda.Context.get_current() is not None):
+ context = cuda.Context.get_current()
+ self.logger.info("Popping <%s>", str(context.handle))
+ cuda.Context.pop()
+
+ if args.name in self.shell.user_ns.keys():
+ self.logger.info("Pushing <%s>", str(self.shell.user_ns[args.name].cuda_context.handle))
+ self.shell.user_ns[args.name].cuda_context.push()
+ else:
+ self.logger.error("No CUDA context called %s found (something is wrong)", args.name)
+ self.logger.error("CUDA will not work now")
+
+ self.logger.debug("==================================================================")
+
+ # still show the error within the notebook, don't just swallow it
+ shell.showtraceback((etype, evalue, tb), tb_offset=tb_offset)
+
+ # this registers a custom exception handler for the whole current notebook
+ get_ipython().set_custom_exc((Exception,), custom_exc)
+
+
+ # Handle CUDA context when exiting python
+ import atexit
+ def exitfunc():
+ self.logger.info("Exitfunc: Resetting CUDA context stack")
+ while (cuda.Context.get_current() is not None):
+ context = cuda.Context.get_current()
+ self.logger.info("`-> Popping <%s>", str(context.handle))
+ cuda.Context.pop()
+ self.logger.debug("==================================================================")
+ atexit.register(exitfunc)
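+
+ # Typical notebook usage (the line magic is named after the method):
+ # %cuda_context_handler my_context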
+
+
+
+
+
+
+
+
+@magics_class
+class MagicLogger(Magics):
+ logger_initialized = False
+
+ @line_magic
+ @magic_arguments.magic_arguments()
+ @magic_arguments.argument(
+ 'name', type=str, help='Name of context to create')
+ @magic_arguments.argument(
+ '--out', '-o', type=str, default='output.log', help='The filename to store the log to')
+ @magic_arguments.argument(
+ '--level', '-l', type=int, default=20, help='The level of logging to screen [0, 50]')
+ @magic_arguments.argument(
+ '--file_level', '-f', type=int, default=10, help='The level of logging to file [0, 50]')
+ def setup_logging(self, line):
+ if (self.logger_initialized):
+ logging.getLogger('GPUSimulators').info("Global logger already initialized!")
+ return
+ else:
+ self.logger_initialized = True
+
+ args = magic_arguments.parse_argstring(self.setup_logging, line)
+ import sys
+
+ #Get root logger
+ logger = logging.getLogger('GPUSimulators')
+ logger.setLevel(min(args.level, args.file_level))
+
+ #Add log to screen
+ ch = logging.StreamHandler()
+ ch.setLevel(args.level)
+ logger.addHandler(ch)
+ logger.log(args.level, "Console logger using level %s", logging.getLevelName(args.level))
+
+ #Get the outfilename (try to evaluate if Python expression...)
+ try:
+ outfile = eval(args.out, self.shell.user_global_ns, self.shell.user_ns)
+ except Exception:
+ outfile = args.out
+
+ #Add log to file
+ logger.log(args.level, "File logger using level %s to %s", logging.getLevelName(args.file_level), outfile)
+
+ fh = logging.FileHandler(outfile)
+ formatter = logging.Formatter('%(asctime)s:%(name)s:%(levelname)s: %(message)s')
+ fh.setFormatter(formatter)
+ fh.setLevel(args.file_level)
+ logger.addHandler(fh)
+
+ logger.info("Python version %s", sys.version)
+ self.shell.user_ns[args.name] = logger
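+
+ # Typical notebook usage:
+ # %setup_logging --out sim.log --level 20 my_logger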
+
+
+
+
+
+
+@magics_class
+class MagicMPI(Magics):
+
+ @line_magic
+ @magic_arguments.magic_arguments()
+ @magic_arguments.argument(
+ 'name', type=str, help='Name of context to create')
+ @magic_arguments.argument(
+ '--num_engines', '-n', type=int, default=4, help='Number of engines to start')
+ def setup_mpi(self, line):
+ args = magic_arguments.parse_argstring(self.setup_mpi, line)
+ logger = logging.getLogger('GPUSimulators')
+ if args.name in self.shell.user_ns.keys():
+ logger.warning("MPI alreay set up, resetting")
+ self.shell.user_ns[args.name].shutdown()
+ self.shell.user_ns[args.name] = None
+ gc.collect()
+ self.shell.user_ns[args.name] = Common.IPEngine(args.num_engines)
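+
+ # Typical notebook usage:
+ # %setup_mpi --num_engines 4 cluster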
+
+
+
+
+
+
+
+
+# Register
+ip = get_ipython()
+ip.register_magics(MagicCudaContext)
+ip.register_magics(MagicLogger)
+ip.register_magics(MagicMPI)
+
diff --git a/GPUSimulators/KP07.py b/GPUSimulators/KP07.py
new file mode 100644
index 0000000..93ce5e9
--- /dev/null
+++ b/GPUSimulators/KP07.py
@@ -0,0 +1,252 @@
+# -*- coding: utf-8 -*-
+
+"""
+This python module implements the Kurganov-Petrova numerical scheme
+for the shallow water equations, described in
+A. Kurganov & Guergana Petrova
+A Second-Order Well-Balanced Positivity Preserving Central-Upwind
+Scheme for the Saint-Venant System Communications in Mathematical
+Sciences, 5 (2007), 133-160.
+
+Copyright (C) 2016 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see .
+"""
+
+#Import packages we need
+from GPUSimulators import Simulator, Common
+from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
+import numpy as np
+import ctypes
+import os
+
+#from pycuda import gpuarray
+from hip import hip, hiprtc, hipblas
+
+
+
+"""
+Class that solves the SW equations using the Kurganov-Petrova (KP07) scheme
+"""
+class KP07 (Simulator.BaseSimulator):
+
+ """
+ Initialization routine
+ h0: Water depth incl ghost cells, (nx+1)*(ny+1) cells
+ hu0: Initial momentum along x-axis incl ghost cells, (nx+1)*(ny+1) cells
+ hv0: Initial momentum along y-axis incl ghost cells, (nx+1)*(ny+1) cells
+ nx: Number of cells along x-axis
+ ny: Number of cells along y-axis
+ dx: Grid cell spacing along x-axis (20 000 m)
+ dy: Grid cell spacing along y-axis (20 000 m)
+ dt: Size of each timestep (90 s)
+ g: Gravitational acceleration (9.81 m/s^2)
+ """
+ def hip_check(call_result):
+ err = call_result[0]
+ result = call_result[1:]
+ if len(result) == 1:
+ result = result[0]
+ if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
+ raise RuntimeError(str(err))
+ elif (
+ isinstance(err, hiprtc.hiprtcResult)
+ and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
+ ):
+ raise RuntimeError(str(err))
+ return result
+
+ def __init__(self,
+ context,
+ h0, hu0, hv0,
+ nx, ny,
+ dx, dy,
+ g,
+ theta=1.3,
+ cfl_scale=0.9,
+ order=2,
+ boundary_conditions=BoundaryCondition(),
+ block_width=16, block_height=16):
+
+ # Call super constructor
+ super().__init__(context,
+ nx, ny,
+ dx, dy,
+ boundary_conditions,
+ cfl_scale,
+ order,
+ block_width, block_height)
+ self.g = np.float32(g)
+ self.theta = np.float32(theta)
+ self.order = np.int32(order)
+
+ #Get kernels
+# module = context.get_module("cuda/SWE2D_KP07.cu",
+# defines={
+# 'BLOCK_WIDTH': self.block_size[0],
+# 'BLOCK_HEIGHT': self.block_size[1]
+# },
+# compile_args={
+# 'no_extern_c': True,
+# 'options': ["--use_fast_math"],
+# },
+# jit_compile_args={})
+# self.kernel = module.get_function("KP07Kernel")
+# self.kernel.prepare("iifffffiiPiPiPiPiPiPiP")
+
+ kernel_file_path = os.path.abspath(os.path.join('cuda', 'SWE2D_KP07.cu.hip'))
+ with open(kernel_file_path, 'r') as file:
+ kernel_source = file.read()
+
+ prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"KP07Kernel", 0, [], []))
+
+ props = hip.hipDeviceProp_t()
+ hip_check(hip.hipGetDeviceProperties(props,0))
+ arch = props.gcnArchName
+
+ print(f"Compiling kernel .KP07Kernel. for {arch}")
+
+ cflags = [b"--offload-arch="+arch]
+ err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
+ if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
+ log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
+ log = bytearray(log_size)
+ hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
+ raise RuntimeError(log.decode())
+ code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
+ code = bytearray(code_size)
+ hip_check(hiprtc.hiprtcGetCode(prog, code))
+ self.module = hip_check(hip.hipModuleLoadData(code))
+
+ self.kernel = hip_check(hip.hipModuleGetFunction(self.module, b"KP07Kernel"))
+
+ #Create data by uploading to device
+ self.u0 = Common.ArakawaA2D(self.stream,
+ nx, ny,
+ 2, 2,
+ [h0, hu0, hv0])
+ self.u1 = Common.ArakawaA2D(self.stream,
+ nx, ny,
+ 2, 2,
+ [None, None, None])
+ #self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
+ data_h = np.empty(self.grid_size, dtype=np.float32)
+ num_bytes = data_h.size * data_h.itemsize
+ self.cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
+ typestr="float32",shape=self.grid_size)
+
+ dt_x = np.min(self.dx / (np.abs(hu0/h0) + np.sqrt(g*h0)))
+ dt_y = np.min(self.dy / (np.abs(hv0/h0) + np.sqrt(g*h0)))
+ dt = min(dt_x, dt_y)
+ self.cfl_data.fill(dt, stream=self.stream)
+
+
+ def substep(self, dt, step_number):
+ self.substepRK(dt, step_number)
+
+
+ def substepRK(self, dt, substep):
+# self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
+# self.nx, self.ny,
+# self.dx, self.dy, dt,
+# self.g,
+# self.theta,
+# Simulator.stepOrderToCodedInt(step=substep, order=self.order),
+# self.boundary_conditions,
+# self.u0[0].data.gpudata, self.u0[0].data.strides[0],
+# self.u0[1].data.gpudata, self.u0[1].data.strides[0],
+# self.u0[2].data.gpudata, self.u0[2].data.strides[0],
+# self.u1[0].data.gpudata, self.u1[0].data.strides[0],
+# self.u1[1].data.gpudata, self.u1[1].data.strides[0],
+# self.u1[2].data.gpudata, self.u1[2].data.strides[0],
+# self.cfl_data.gpudata)
+
+ #launch kernel (scalars wrapped as ctypes values; device pointers passed as-is)
+ hip_check(
+ hip.hipModuleLaunchKernel(
+ self.kernel,
+ *self.grid_size,
+ *self.block_size,
+ sharedMemBytes=0,
+ stream=self.stream,
+ kernelParams=None,
+ extra=( # pass kernel's arguments
+ ctypes.c_int(self.nx), ctypes.c_int(self.ny),
+ ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
+ ctypes.c_float(self.g),
+ ctypes.c_float(self.theta),
+ ctypes.c_int(Simulator.stepOrderToCodedInt(step=substep, order=self.order)),
+ ctypes.c_int(self.boundary_conditions),
+ self.u0[0].data, ctypes.c_int(self.u0[0].data.strides[0]),
+ self.u0[1].data, ctypes.c_int(self.u0[1].data.strides[0]),
+ self.u0[2].data, ctypes.c_int(self.u0[2].data.strides[0]),
+ self.u1[0].data, ctypes.c_int(self.u1[0].data.strides[0]),
+ self.u1[1].data, ctypes.c_int(self.u1[1].data.strides[0]),
+ self.u1[2].data, ctypes.c_int(self.u1[2].data.strides[0]),
+ self.cfl_data
+ )
+ )
+ )
+
+ hip_check(hip.hipDeviceSynchronize())
+
+ self.u0, self.u1 = self.u1, self.u0
+
+ print("--Launching Kernel .KP07Kernel. is ok")
+
+ def getOutput(self):
+ return self.u0
+
+ def check(self):
+ self.u0.check()
+ self.u1.check()
+
+ # computing min with hipblas: the output is an index
+ def min_hipblas(self, num_elements, cfl_data, stream):
+ num_bytes = num_elements * np.dtype(np.float32).itemsize
+ num_bytes_i = np.dtype(np.int32).itemsize
+ indx_d = hip_check(hip.hipMalloc(num_bytes_i))
+ indx_h = np.zeros(1, dtype=np.int32)
+ x_temp = np.zeros(num_elements, dtype=np.float32)
+
+ #print("--size.data:", cfl_data.size)
+ handle = hip_check(hipblas.hipblasCreate())
+
+ #hip_check(hipblas.hipblasGetStream(handle, stream))
+ #"incx" [int] specifies the increment for the elements of x. incx must be > 0.
+ hip_check(hipblas.hipblasIsamin(handle, num_elements, cfl_data, 1, indx_d))
+
+ # destruction of handle
+ hip_check(hipblas.hipblasDestroy(handle))
+
+ # copy result (stored in indx_d) back to the host (store in indx_h)
+ hip_check(hip.hipMemcpyAsync(indx_h,indx_d,num_bytes_i,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
+ hip_check(hip.hipMemcpyAsync(x_temp,cfl_data,num_bytes,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
+ #hip_check(hip.hipMemsetAsync(cfl_data,0,num_bytes,self.stream))
+ hip_check(hip.hipStreamSynchronize(stream))
+
+ min_value = x_temp.flatten()[indx_h[0]-1]
+
+ # clean up the temporary index buffer; the stream and cfl_data are
+ # owned by the caller and remain in use
+ hip_check(hip.hipFree(indx_d))
+ return min_value
+
+ def computeDt(self):
+ max_dt = self.min_hipblas(self.cfl_data.size, self.cfl_data, self.stream)
+ #max_dt = gpuarray.min(self.cfl_data, stream=self.stream).get();
+ return max_dt*0.5**(self.order-1)
diff --git a/GPUSimulators/KP07_dimsplit.py b/GPUSimulators/KP07_dimsplit.py
new file mode 100644
index 0000000..0a5cfc7
--- /dev/null
+++ b/GPUSimulators/KP07_dimsplit.py
@@ -0,0 +1,251 @@
+# -*- coding: utf-8 -*-
+
+"""
+This python module implements the Kurganov-Petrova numerical scheme
+for the shallow water equations, described in
+A. Kurganov & Guergana Petrova
+A Second-Order Well-Balanced Positivity Preserving Central-Upwind
+Scheme for the Saint-Venant System, Communications in Mathematical
+Sciences, 5 (2007), 133-160.
+
+Copyright (C) 2016 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
+"""
+
+#Import packages we need
+from GPUSimulators import Simulator, Common
+from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
+import numpy as np
+import ctypes
+import os
+
+#from pycuda import gpuarray
+from hip import hip, hiprtc, hipblas
+
+
+#hip_check is defined at module level so that it is visible inside the class methods below
+def hip_check(call_result):
+ err = call_result[0]
+ result = call_result[1:]
+ if len(result) == 1:
+ result = result[0]
+ if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
+ raise RuntimeError(str(err))
+ elif (
+ isinstance(err, hiprtc.hiprtcResult)
+ and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
+ ):
+ raise RuntimeError(str(err))
+ return result
+
+
+
+
+"""
+Class that solves the SW equations using the dimensionally split KP07 scheme
+"""
+class KP07_dimsplit(Simulator.BaseSimulator):
+
+ """
+ Initialization routine
+ h0: Water depth incl ghost cells, (nx+1)*(ny+1) cells
+ hu0: Initial momentum along x-axis incl ghost cells, (nx+1)*(ny+1) cells
+ hv0: Initial momentum along y-axis incl ghost cells, (nx+1)*(ny+1) cells
+ nx: Number of cells along x-axis
+ ny: Number of cells along y-axis
+ dx: Grid cell spacing along x-axis (20 000 m)
+ dy: Grid cell spacing along y-axis (20 000 m)
+ dt: Size of each timestep (90 s)
+ g: Gravitational acceleration (9.81 m/s^2)
+ """
+
+
+ def __init__(self,
+ context,
+ h0, hu0, hv0,
+ nx, ny,
+ dx, dy,
+ g,
+ theta=1.3,
+ cfl_scale=0.9,
+ boundary_conditions=BoundaryCondition(),
+ block_width=16, block_height=16):
+
+ # Call super constructor
+ super().__init__(context,
+ nx, ny,
+ dx, dy,
+ boundary_conditions,
+ cfl_scale,
+ 2,
+ block_width, block_height)
+ self.gc_x = 2
+ self.gc_y = 2
+ self.g = np.float32(g)
+ self.theta = np.float32(theta)
+
+ #Get kernels
+# module = context.get_module("cuda/SWE2D_KP07_dimsplit.cu",
+# defines={
+# 'BLOCK_WIDTH': self.block_size[0],
+# 'BLOCK_HEIGHT': self.block_size[1]
+# },
+# compile_args={
+# 'no_extern_c': True,
+# 'options': ["--use_fast_math"],
+# },
+# jit_compile_args={})
+# self.kernel = module.get_function("KP07DimsplitKernel")
+# self.kernel.prepare("iifffffiiPiPiPiPiPiPiP")
+
+ kernel_file_path = os.path.abspath(os.path.join('cuda', 'SWE2D_KP07_dimsplit.cu.hip'))
+ with open(kernel_file_path, 'r') as file:
+ kernel_source = file.read()
+
+ prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"KP07DimsplitKernel", 0, [], []))
+
+ props = hip.hipDeviceProp_t()
+ hip_check(hip.hipGetDeviceProperties(props,0))
+ arch = props.gcnArchName
+
+ print(f"Compiling kernel .KP07DimsplitKernel. for {arch}")
+
+ cflags = [b"--offload-arch="+arch]
+ err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
+ if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
+ log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
+ log = bytearray(log_size)
+ hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
+ raise RuntimeError(log.decode())
+ code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
+ code = bytearray(code_size)
+ hip_check(hiprtc.hiprtcGetCode(prog, code))
+ self.module = hip_check(hip.hipModuleLoadData(code))
+
+ self.kernel = hip_check(hip.hipModuleGetFunction(self.module, b"KP07DimsplitKernel"))
+
+ #Create data by uploading to device
+ self.u0 = Common.ArakawaA2D(self.stream,
+ nx, ny,
+ self.gc_x, self.gc_y,
+ [h0, hu0, hv0])
+ self.u1 = Common.ArakawaA2D(self.stream,
+ nx, ny,
+ self.gc_x, self.gc_y,
+ [None, None, None])
+ #self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
+ data_h = np.empty(self.grid_size, dtype=np.float32)
+ num_bytes = data_h.size * data_h.itemsize
+ self.cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
+ typestr="float32",shape=self.grid_size)
+
+ dt_x = np.min(self.dx / (np.abs(hu0/h0) + np.sqrt(g*h0)))
+ dt_y = np.min(self.dy / (np.abs(hv0/h0) + np.sqrt(g*h0)))
+ dt = min(dt_x, dt_y)
+ self.cfl_data.fill(dt, stream=self.stream)
+
+ def substep(self, dt, step_number):
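+ #Each substep advances half a timestep; the scheme's two substeps
+ #together complete one full dt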
+ self.substepDimsplit(dt*0.5, step_number)
+
+ def substepDimsplit(self, dt, substep):
+# self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
+# self.nx, self.ny,
+# self.dx, self.dy, dt,
+# self.g,
+# self.theta,
+# substep,
+# self.boundary_conditions,
+# self.u0[0].data.gpudata, self.u0[0].data.strides[0],
+# self.u0[1].data.gpudata, self.u0[1].data.strides[0],
+# self.u0[2].data.gpudata, self.u0[2].data.strides[0],
+# self.u1[0].data.gpudata, self.u1[0].data.strides[0],
+# self.u1[1].data.gpudata, self.u1[1].data.strides[0],
+# self.u1[2].data.gpudata, self.u1[2].data.strides[0],
+# self.cfl_data.gpudata)
+
+ #launch kernel
+ hip_check(
+ hip.hipModuleLaunchKernel(
+ self.kernel,
+ *self.grid_size,
+ *self.block_size,
+ sharedMemBytes=0,
+ stream=self.stream,
+ kernelParams=None,
+ extra=( # pass kernel's arguments
+ ctypes.c_int(self.nx), ctypes.c_int(self.ny),
+ ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
+ ctypes.c_float(self.g),
+ ctypes.c_float(self.theta),
+ ctypes.c_int(substep),
+ ctypes.c_int(self.boundary_conditions),
+ self.u0[0].data, ctypes.c_int(self.u0[0].data.strides[0]),
+ self.u0[1].data, ctypes.c_int(self.u0[1].data.strides[0]),
+ self.u0[2].data, ctypes.c_int(self.u0[2].data.strides[0]),
+ self.u1[0].data, ctypes.c_int(self.u1[0].data.strides[0]),
+ self.u1[1].data, ctypes.c_int(self.u1[1].data.strides[0]),
+ self.u1[2].data, ctypes.c_int(self.u1[2].data.strides[0]),
+ self.cfl_data
+ )
+ )
+ )
+
+ hip_check(hip.hipDeviceSynchronize())
+
+ self.u0, self.u1 = self.u1, self.u0
+
+ print("--Launching kernel KP07DimsplitKernel is ok")
+
+ def getOutput(self):
+ return self.u0
+
+ def check(self):
+ self.u0.check()
+ self.u1.check()
+
+ # computing min with hipblas: the output is an index
+ def min_hipblas(self, num_elements, cfl_data, stream):
+ num_bytes = num_elements * np.dtype(np.float32).itemsize
+ num_bytes_i = np.dtype(np.int32).itemsize
+ indx_d = hip_check(hip.hipMalloc(num_bytes_i))
+ indx_h = np.zeros(1, dtype=np.int32)
+ x_temp = np.zeros(num_elements, dtype=np.float32)
+
+ #print("--size.data:", cfl_data.size)
+ handle = hip_check(hipblas.hipblasCreate())
+
+ #hip_check(hipblas.hipblasGetStream(handle, stream))
+ #"incx" [int] specifies the increment for the elements of x. incx must be > 0.
+ hip_check(hipblas.hipblasIsamin(handle, num_elements, cfl_data, 1, indx_d))
+
+ # destruction of handle
+ hip_check(hipblas.hipblasDestroy(handle))
+
+ # copy result (stored in indx_d) back to the host (store in indx_h)
+ hip_check(hip.hipMemcpyAsync(indx_h,indx_d,num_bytes_i,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
+ hip_check(hip.hipMemcpyAsync(x_temp,cfl_data,num_bytes,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
+ #hip_check(hip.hipMemsetAsync(cfl_data,0,num_bytes,self.stream))
+ hip_check(hip.hipStreamSynchronize(stream))
+
+ min_value = x_temp.flatten()[indx_h[0]-1]
+
+ # clean up the temporary index buffer; the stream and cfl_data are
+ # owned by the caller and remain in use
+ hip_check(hip.hipFree(indx_d))
+ return min_value
+
+ def computeDt(self):
+ #max_dt = gpuarray.min(self.cfl_data, stream=self.stream).get();
+ max_dt = self.min_hipblas(self.cfl_data.size, self.cfl_data, self.stream)
+ return max_dt*0.5
diff --git a/GPUSimulators/LxF.py b/GPUSimulators/LxF.py
new file mode 100644
index 0000000..98e54c6
--- /dev/null
+++ b/GPUSimulators/LxF.py
@@ -0,0 +1,238 @@
+# -*- coding: utf-8 -*-
+
+"""
+This python module implements the classical Lax-Friedrichs numerical
+scheme for the shallow water equations
+
+Copyright (C) 2016 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
+"""
+
+#Import packages we need
+from GPUSimulators import Simulator, Common
+from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
+import numpy as np
+import ctypes
+import os
+
+#from pycuda import gpuarray
+from hip import hip, hiprtc, hipblas
+
+
+#hip_check is defined at module level so that it is visible inside the class methods below
+def hip_check(call_result):
+ err = call_result[0]
+ result = call_result[1:]
+ if len(result) == 1:
+ result = result[0]
+ if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
+ raise RuntimeError(str(err))
+ elif (
+ isinstance(err, hiprtc.hiprtcResult)
+ and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
+ ):
+ raise RuntimeError(str(err))
+ return result
+
+
+
+
+
+"""
+Class that solves the SW equations using the Lax-Friedrichs scheme
+"""
+class LxF(Simulator.BaseSimulator):
+
+ """
+ Initialization routine
+ h0: Water depth incl ghost cells, (nx+1)*(ny+1) cells
+ hu0: Initial momentum along x-axis incl ghost cells, (nx+1)*(ny+1) cells
+ hv0: Initial momentum along y-axis incl ghost cells, (nx+1)*(ny+1) cells
+ nx: Number of cells along x-axis
+ ny: Number of cells along y-axis
+ dx: Grid cell spacing along x-axis (20 000 m)
+ dy: Grid cell spacing along y-axis (20 000 m)
+ dt: Size of each timestep (90 s)
+ g: Gravitational acceleration (9.81 m/s^2)
+ """
+
+
+ def __init__(self,
+ context,
+ h0, hu0, hv0,
+ nx, ny,
+ dx, dy,
+ g,
+ cfl_scale=0.9,
+ boundary_conditions=BoundaryCondition(),
+ block_width=16, block_height=16):
+
+ # Call super constructor
+ super().__init__(context,
+ nx, ny,
+ dx, dy,
+ boundary_conditions,
+ cfl_scale,
+ 1,
+ block_width, block_height);
+ self.g = np.float32(g)
+
+ # Get kernels
+# module = context.get_module("cuda/SWE2D_LxF.cu",
+# defines={
+# 'BLOCK_WIDTH': self.block_size[0],
+# 'BLOCK_HEIGHT': self.block_size[1]
+# },
+# compile_args={
+# 'no_extern_c': True,
+# 'options': ["--use_fast_math"],
+# },
+# jit_compile_args={})
+# self.kernel = module.get_function("LxFKernel")
+# self.kernel.prepare("iiffffiPiPiPiPiPiPiP")
+
+ kernel_file_path = os.path.abspath(os.path.join('cuda', 'SWE2D_LxF.cu.hip'))
+ with open(kernel_file_path, 'r') as file:
+ kernel_source = file.read()
+
+ prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"LxFKernel", 0, [], []))
+
+ props = hip.hipDeviceProp_t()
+ hip_check(hip.hipGetDeviceProperties(props,0))
+ arch = props.gcnArchName
+
+ print(f"Compiling kernel .LxFKernel. for {arch}")
+
+ cflags = [b"--offload-arch="+arch]
+ err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
+ if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
+ log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
+ log = bytearray(log_size)
+ hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
+ raise RuntimeError(log.decode())
+ code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
+ code = bytearray(code_size)
+ hip_check(hiprtc.hiprtcGetCode(prog, code))
+ self.module = hip_check(hip.hipModuleLoadData(code))
+
+ self.kernel = hip_check(hip.hipModuleGetFunction(self.module, b"LxFKernel"))
+
+ #Create data by uploading to device
+ self.u0 = Common.ArakawaA2D(self.stream,
+ nx, ny,
+ 1, 1,
+ [h0, hu0, hv0])
+ self.u1 = Common.ArakawaA2D(self.stream,
+ nx, ny,
+ 1, 1,
+ [None, None, None])
+ #self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
+ data_h = np.empty(self.grid_size, dtype=np.float32)
+ num_bytes = data_h.size * data_h.itemsize
+ self.cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
+ typestr="float32",shape=self.grid_size)
+
+ dt_x = np.min(self.dx / (np.abs(hu0/h0) + np.sqrt(g*h0)))
+ dt_y = np.min(self.dy / (np.abs(hv0/h0) + np.sqrt(g*h0)))
+ dt = min(dt_x, dt_y)
+ self.cfl_data.fill(dt, stream=self.stream)
+
+ def substep(self, dt, step_number):
+# self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
+# self.nx, self.ny,
+# self.dx, self.dy, dt,
+# self.g,
+# self.boundary_conditions,
+# self.u0[0].data.gpudata, self.u0[0].data.strides[0],
+# self.u0[1].data.gpudata, self.u0[1].data.strides[0],
+# self.u0[2].data.gpudata, self.u0[2].data.strides[0],
+# self.u1[0].data.gpudata, self.u1[0].data.strides[0],
+# self.u1[1].data.gpudata, self.u1[1].data.strides[0],
+# self.u1[2].data.gpudata, self.u1[2].data.strides[0],
+# self.cfl_data.gpudata)
+
+ #launch kernel
+ hip_check(
+ hip.hipModuleLaunchKernel(
+ self.kernel,
+ *self.grid_size,
+ *self.block_size,
+ sharedMemBytes=0,
+ stream=self.stream,
+ kernelParams=None,
+ extra=( # pass kernel's arguments
+ ctypes.c_int(self.nx), ctypes.c_int(self.ny),
+ ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
+ ctypes.c_float(self.g),
+ ctypes.c_int(self.boundary_conditions),
+ self.u0[0].data, ctypes.c_int(self.u0[0].data.strides[0]),
+ self.u0[1].data, ctypes.c_int(self.u0[1].data.strides[0]),
+ self.u0[2].data, ctypes.c_int(self.u0[2].data.strides[0]),
+ self.u1[0].data, ctypes.c_int(self.u1[0].data.strides[0]),
+ self.u1[1].data, ctypes.c_int(self.u1[1].data.strides[0]),
+ self.u1[2].data, ctypes.c_int(self.u1[2].data.strides[0]),
+ self.cfl_data
+ )
+ )
+ )
+
+ hip_check(hip.hipDeviceSynchronize())
+
+ self.u0, self.u1 = self.u1, self.u0
+
+ print("--Launching kernel LxFKernel is ok")
+
+ def getOutput(self):
+ return self.u0
+
+ def check(self):
+ self.u0.check()
+ self.u1.check()
+
+ # computing min with hipblas: the output is an index
+ def min_hipblas(self, num_elements, cfl_data, stream):
+ num_bytes = num_elements * np.dtype(np.float32).itemsize
+ num_bytes_i = np.dtype(np.int32).itemsize
+ indx_d = hip_check(hip.hipMalloc(num_bytes_i))
+ indx_h = np.zeros(1, dtype=np.int32)
+ x_temp = np.zeros(num_elements, dtype=np.float32)
+
+ #print("--size.data:", cfl_data.size)
+ handle = hip_check(hipblas.hipblasCreate())
+
+ #hip_check(hipblas.hipblasGetStream(handle, stream))
+ #"incx" [int] specifies the increment for the elements of x. incx must be > 0.
+ hip_check(hipblas.hipblasIsamin(handle, num_elements, cfl_data, 1, indx_d))
+
+ # destruction of handle
+ hip_check(hipblas.hipblasDestroy(handle))
+
+ # copy result (stored in indx_d) back to the host (store in indx_h)
+ hip_check(hip.hipMemcpyAsync(indx_h,indx_d,num_bytes_i,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
+ hip_check(hip.hipMemcpyAsync(x_temp,cfl_data,num_bytes,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
+ #hip_check(hip.hipMemsetAsync(cfl_data,0,num_bytes,self.stream))
+ hip_check(hip.hipStreamSynchronize(stream))
+
+ min_value = x_temp.flatten()[indx_h[0]-1]
+
+ # clean up the temporary index buffer; the stream and cfl_data are
+ # owned by the caller and remain in use
+ hip_check(hip.hipFree(indx_d))
+ return min_value
+
+ def computeDt(self):
+ #max_dt = gpuarray.min(self.cfl_data, stream=self.stream).get();
+ max_dt = self.min_hipblas(self.cfl_data.size, self.cfl_data, self.stream)
+ return max_dt*0.5
diff --git a/GPUSimulators/MPISimulator.py b/GPUSimulators/MPISimulator.py
new file mode 100644
index 0000000..f13de52
--- /dev/null
+++ b/GPUSimulators/MPISimulator.py
@@ -0,0 +1,535 @@
+# -*- coding: utf-8 -*-
+
+"""
+This python module implements the MPI simulator class
+
+Copyright (C) 2018 SINTEF Digital
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
+"""
+
+
+import logging
+from GPUSimulators import Simulator
+import numpy as np
+from mpi4py import MPI
+import time
+
+#import pycuda.driver as cuda
+#import nvtx
+from hip import hip, hiprtc
+
+
+#hip_check is defined at module level so that it is visible inside the class methods below
+def hip_check(call_result):
+ err = call_result[0]
+ result = call_result[1:]
+ if len(result) == 1:
+ result = result[0]
+ if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
+ raise RuntimeError(str(err))
+ elif (
+ isinstance(err, hiprtc.hiprtcResult)
+ and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
+ ):
+ raise RuntimeError(str(err))
+ return result
+
+class MPIGrid(object):
+ """
+ Class which represents an MPI grid of nodes. Facilitates easy communication between
+ neighboring nodes
+ """
+ def __init__(self, comm, ndims=2):
+ self.logger = logging.getLogger(__name__)
+
+ assert ndims == 2, "Unsupported number of dimensions. Must be two at the moment"
+ assert comm.size >= 1, "Must have at least one node"
+
+ self.grid = MPIGrid.getGrid(comm.size, ndims)
+ self.comm = comm
+
+ self.logger.debug("Created MPI grid: {:}. Rank {:d} has coordinate {:}".format(
+ self.grid, self.comm.rank, self.getCoordinate()))
+
+ def getCoordinate(self, rank=None):
+ if (rank is None):
+ rank = self.comm.rank
+ i = (rank % self.grid[0])
+ j = (rank // self.grid[0])
+ return i, j
+
+ def getRank(self, i, j):
+ return j*self.grid[0] + i
+
+ def getEast(self):
+ i, j = self.getCoordinate(self.comm.rank)
+ i = (i+1) % self.grid[0]
+ return self.getRank(i, j)
+
+ def getWest(self):
+ i, j = self.getCoordinate(self.comm.rank)
+ i = (i+self.grid[0]-1) % self.grid[0]
+ return self.getRank(i, j)
+
+ def getNorth(self):
+ i, j = self.getCoordinate(self.comm.rank)
+ j = (j+1) % self.grid[1]
+ return self.getRank(i, j)
+
+ def getSouth(self):
+ i, j = self.getCoordinate(self.comm.rank)
+ j = (j+self.grid[1]-1) % self.grid[1]
+ return self.getRank(i, j)
+
+ def getGrid(num_nodes, num_dims):
+ assert(isinstance(num_nodes, int))
+ assert(isinstance(num_dims, int))
+
+ # Adapted from https://stackoverflow.com/questions/28057307/factoring-a-number-into-roughly-equal-factors
+ # Original code by https://stackoverflow.com/users/3928385/ishamael
+ # Factorizes a number into n roughly equal factors
+
+ #Dictionary to remember already computed permutations
+ memo = {}
+ def dp(n, left): # returns tuple (cost, [factors])
+ """
+ Recursively searches through all factorizations
+ """
+
+ #Already tried: return existing result
+ if (n, left) in memo:
+ return memo[(n, left)]
+
+ #Spent all factors: return number itself
+ if left == 1:
+ return (n, [n])
+
+ #Find new factor
+ i = 2
+ best = n
+ bestTuple = [n]
+ while i * i < n:
+ #If factor found
+ if n % i == 0:
+ #Factorize remainder
+ rem = dp(n // i, left - 1)
+
+ #If new permutation better, save it
+ if rem[0] + i < best:
+ best = rem[0] + i
+ bestTuple = [i] + rem[1]
+ i += 1
+
+ #Store calculation
+ memo[(n, left)] = (best, bestTuple)
+ return memo[(n, left)]
+
+
+ grid = dp(num_nodes, num_dims)[1]
+
+ if (len(grid) < num_dims):
+ #Split problematic 4
+ if (4 in grid):
+ grid.remove(4)
+ grid.append(2)
+ grid.append(2)
+
+ #Pad with ones to guarantee num_dims
+ grid = grid + [1]*(num_dims - len(grid))
+
+ #Sort in descending order
+ grid = np.sort(grid)
+ grid = grid[::-1]
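+ #(e.g. num_nodes=6 factorizes to [3, 2] at this point)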
+
+ # XXX: We only use vertical (north-south) partitioning for now
+ grid[0] = 1
+ grid[1] = num_nodes
+
+ return grid
+
+
+ def gather(self, data, root=0):
+ out_data = None
+ if (self.comm.rank == root):
+ out_data = np.empty([self.comm.size] + list(data.shape), dtype=data.dtype)
+ self.comm.Gather(data, out_data, root)
+ return out_data
+
+ def getLocalRank(self):
+ """
+ Returns the local rank on this node for this MPI process
+ """
+
+ # This function has been adapted from
+ # https://github.com/SheffieldML/PyDeepGP/blob/master/deepgp/util/parallel.py
+ # by Zhenwen Dai released under BSD 3-Clause "New" or "Revised" License:
+ #
+ # Copyright (c) 2016, Zhenwen Dai
+ # All rights reserved.
+ #
+ # Redistribution and use in source and binary forms, with or without
+ # modification, are permitted provided that the following conditions are met:
+ #
+ # * Redistributions of source code must retain the above copyright notice, this
+ # list of conditions and the following disclaimer.
+ #
+ # * Redistributions in binary form must reproduce the above copyright notice,
+ # this list of conditions and the following disclaimer in the documentation
+ # and/or other materials provided with the distribution.
+ #
+ # * Neither the name of DGP nor the names of its
+ # contributors may be used to endorse or promote products derived from
+ # this software without specific prior written permission.
+ #
+ # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
+ # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ #Get this ranks unique (physical) node name
+ node_name = MPI.Get_processor_name()
+
+ #Gather the list of all node names on all nodes
+ node_names = self.comm.allgather(node_name)
+
+ #Loop over all node names up until our rank
+ #and count how many duplicates of our nodename we find
+ local_rank = len([0 for name in node_names[:self.comm.rank] if name==node_name])
+
+ return local_rank
+
+
+class MPISimulator(Simulator.BaseSimulator):
+ """
+ Class which handles communication between simulators on different MPI nodes
+ """
+
+ def __init__(self, sim, grid):
+ self.profiling_data_mpi = { 'start': {}, 'end': {} }
+ self.profiling_data_mpi["start"]["t_mpi_halo_exchange"] = 0
+ self.profiling_data_mpi["end"]["t_mpi_halo_exchange"] = 0
+ self.profiling_data_mpi["start"]["t_mpi_halo_exchange_download"] = 0
+ self.profiling_data_mpi["end"]["t_mpi_halo_exchange_download"] = 0
+ self.profiling_data_mpi["start"]["t_mpi_halo_exchange_upload"] = 0
+ self.profiling_data_mpi["end"]["t_mpi_halo_exchange_upload"] = 0
+ self.profiling_data_mpi["start"]["t_mpi_halo_exchange_sendreceive"] = 0
+ self.profiling_data_mpi["end"]["t_mpi_halo_exchange_sendreceive"] = 0
+ self.profiling_data_mpi["start"]["t_mpi_step"] = 0
+ self.profiling_data_mpi["end"]["t_mpi_step"] = 0
+ self.profiling_data_mpi["n_time_steps"] = 0
+ self.logger = logging.getLogger(__name__)
+
+ autotuner = sim.context.autotuner
+ sim.context.autotuner = None;
+ boundary_conditions = sim.getBoundaryConditions()
+ super().__init__(sim.context,
+ sim.nx, sim.ny,
+ sim.dx, sim.dy,
+ boundary_conditions,
+ sim.cfl_scale,
+ sim.num_substeps,
+ sim.block_size[0], sim.block_size[1])
+ sim.context.autotuner = autotuner
+
+ self.sim = sim
+ self.grid = grid
+
+ #Get neighbor node ids
+ self.east = grid.getEast()
+ self.west = grid.getWest()
+ self.north = grid.getNorth()
+ self.south = grid.getSouth()
+
+ #Get coordinate of this node
+ #and handle global boundary conditions
+ new_boundary_conditions = Simulator.BoundaryCondition({
+ 'north': Simulator.BoundaryCondition.Type.Dirichlet,
+ 'south': Simulator.BoundaryCondition.Type.Dirichlet,
+ 'east': Simulator.BoundaryCondition.Type.Dirichlet,
+ 'west': Simulator.BoundaryCondition.Type.Dirichlet
+ })
+ gi, gj = grid.getCoordinate()
+ #print("gi: " + str(gi) + ", gj: " + str(gj))
+ if (gi == 0 and boundary_conditions.west != Simulator.BoundaryCondition.Type.Periodic):
+ self.west = None
+ new_boundary_conditions.west = boundary_conditions.west;
+ if (gj == 0 and boundary_conditions.south != Simulator.BoundaryCondition.Type.Periodic):
+ self.south = None
+ new_boundary_conditions.south = boundary_conditions.south;
+ if (gi == grid.grid[0]-1 and boundary_conditions.east != Simulator.BoundaryCondition.Type.Periodic):
+ self.east = None
+ new_boundary_conditions.east = boundary_conditions.east;
+ if (gj == grid.grid[1]-1 and boundary_conditions.north != Simulator.BoundaryCondition.Type.Periodic):
+ self.north = None
+ new_boundary_conditions.north = boundary_conditions.north;
+ sim.setBoundaryConditions(new_boundary_conditions)
+
+ #Get number of variables
+ self.nvars = len(self.getOutput().gpu_variables)
+
+ #Shorthands for computing extents and sizes
+ gc_x = int(self.sim.getOutput()[0].x_halo)
+ gc_y = int(self.sim.getOutput()[0].y_halo)
+ nx = int(self.sim.nx)
+ ny = int(self.sim.ny)
+
+ #Set regions for ghost cells to read from
+ #These have the format [x0, y0, width, height]
+ self.read_e = np.array([ nx, 0, gc_x, ny + 2*gc_y])
+ self.read_w = np.array([gc_x, 0, gc_x, ny + 2*gc_y])
+ self.read_n = np.array([gc_x, ny, nx, gc_y])
+ self.read_s = np.array([gc_x, gc_y, nx, gc_y])
+
+ #Set regions for ghost cells to write to
+ self.write_e = self.read_e + np.array([gc_x, 0, 0, 0])
+ self.write_w = self.read_w - np.array([gc_x, 0, 0, 0])
+ self.write_n = self.read_n + np.array([0, gc_y, 0, 0])
+ self.write_s = self.read_s - np.array([0, gc_y, 0, 0])
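+ #Example (hypothetical sizes): with nx=ny=16 and gc_x=gc_y=2, read_e is
+ #[16, 0, 2, 20] (a 2-cell wide, full-height column) and write_e is
+ #[18, 0, 2, 20]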
+
+ #Allocate data for receiving
+ #Note that east and west also transfer ghost cells
+ #whilst north/south only transfer internal cells
+ #Reuses the width/height defined in the read-extents above
+ ##self.in_e = cuda.pagelocked_empty((int(self.nvars), int(self.read_e[3]), int(self.read_e[2])), dtype=np.float32) #np.empty((self.nvars, self.read_e[3], self.read_e[2]), dtype=np.float32)
+
+ ##self.in_w = cuda.pagelocked_empty((int(self.nvars), int(self.read_w[3]), int(self.read_w[2])), dtype=np.float32) #np.empty((self.nvars, self.read_w[3], self.read_w[2]), dtype=np.float32)
+ ##self.in_n = cuda.pagelocked_empty((int(self.nvars), int(self.read_n[3]), int(self.read_n[2])), dtype=np.float32) #np.empty((self.nvars, self.read_n[3], self.read_n[2]), dtype=np.float32)
+ ##self.in_s = cuda.pagelocked_empty((int(self.nvars), int(self.read_s[3]), int(self.read_s[2])), dtype=np.float32) #np.empty((self.nvars, self.read_s[3], self.read_s[2]), dtype=np.float32)
+
+ self.in_e = np.empty((int(self.nvars), int(self.read_e[3]), int(self.read_e[2])), dtype=np.float32)
+ num_bytes_e = self.in_e.size * self.in_e.itemsize
+ #hipHostMalloc allocates pinned host memory which is mapped into the address space of all GPUs in the system, so the memory can be accessed directly by the GPU device
+ #hipHostMallocDefault: memory is mapped and portable (default allocation)
+ #hipHostMallocPortable: memory is explicitly portable across different devices
+ self.in_e = hip_check(hip.hipHostMalloc(num_bytes_e,hip.hipHostMallocPortable))
+
+ self.in_w = np.empty((int(self.nvars), int(self.read_w[3]), int(self.read_w[2])), dtype=np.float32)
+ num_bytes_w = self.in_w.size * self.in_w.itemsize
+ self.in_w = hip_check(hip.hipHostMalloc(num_bytes_w,hip.hipHostMallocPortable))
+
+ self.in_n = np.empty((int(self.nvars), int(self.read_n[3]), int(self.read_n[2])), dtype=np.float32)
+ num_bytes_n = self.in_n.size * self.in_n.itemsize
+ self.in_n = hip_check(hip.hipHostMalloc(num_bytes_n,hip.hipHostMallocPortable))
+
+ self.in_s = np.empty((int(self.nvars), int(self.read_s[3]), int(self.read_s[2])), dtype=np.float32)
+ num_bytes_s = self.in_s.size * self.in_s.itemsize
+ self.in_s = hip_check(hip.hipHostMalloc(num_bytes_s,hip.hipHostMallocPortable))
+
+ #Allocate data for sending
+ #self.out_e = cuda.pagelocked_empty((int(self.nvars), int(self.read_e[3]), int(self.read_e[2])), dtype=np.float32) #np.empty_like(self.in_e)
+ #self.out_w = cuda.pagelocked_empty((int(self.nvars), int(self.read_w[3]), int(self.read_w[2])), dtype=np.float32) #np.empty_like(self.in_w)
+ #self.out_n = cuda.pagelocked_empty((int(self.nvars), int(self.read_n[3]), int(self.read_n[2])), dtype=np.float32) #np.empty_like(self.in_n)
+ #self.out_s = cuda.pagelocked_empty((int(self.nvars), int(self.read_s[3]), int(self.read_s[2])), dtype=np.float32) #np.empty_like(self.in_s)
+
+ self.out_e = np.empty((int(self.nvars), int(self.read_e[3]), int(self.read_e[2])), dtype=np.float32)
+ num_bytes_e = self.out_e.size * self.out_e.itemsize
+ self.out_e = hip_check(hip.hipHostMalloc(num_bytes_e,hip.hipHostMallocPortable))
+
+ self.out_w = np.empty((int(self.nvars), int(self.read_w[3]), int(self.read_w[2])), dtype=np.float32)
+ num_bytes_w = self.out_w.size * self.out_w.itemsize
+ self.out_w = hip_check(hip.hipHostMalloc(num_bytes_w,hip.hipHostMallocPortable))
+
+ self.out_n = np.empty((int(self.nvars), int(self.read_n[3]), int(self.read_n[2])), dtype=np.float32)
+ num_bytes_n = self.out_n.size * self.out_n.itemsize
+ self.out_n = hip_check(hip.hipHostMalloc(num_bytes_n,hip.hipHostMallocPortable))
+
+ self.out_s = np.empty((int(self.nvars), int(self.read_s[3]), int(self.read_s[2])), dtype=np.float32)
+ num_bytes_s = self.out_s.size * self.out_s.itemsize
+ self.out_s = hip_check(hip.hipHostMalloc(num_bytes_s,hip.hipHostMallocPortable))
+
+
+ self.logger.debug("Simlator rank {:d} initialized on {:s}".format(self.grid.comm.rank, MPI.Get_processor_name()))
+
+ self.full_exchange()
+ sim.context.synchronize()
+
+ def substep(self, dt, step_number):
+
+ #nvtx.mark("substep start", color="yellow")
+
+ self.profiling_data_mpi["start"]["t_mpi_step"] += time.time()
+
+ #nvtx.mark("substep external", color="blue")
+ self.sim.substep(dt, step_number, external=True, internal=False) # only "internal ghost cells"
+
+ #nvtx.mark("substep internal", color="red")
+ self.sim.substep(dt, step_number, internal=True, external=False) # "internal ghost cells" excluded
+
+ #nvtx.mark("substep full", color="blue")
+ #self.sim.substep(dt, step_number, external=True, internal=True)
+
+ self.sim.swapBuffers()
+
+ self.profiling_data_mpi["end"]["t_mpi_step"] += time.time()
+
+ #nvtx.mark("exchange", color="blue")
+ self.full_exchange()
+
+ #nvtx.mark("sync start", color="blue")
+ #self.sim.stream.synchronize()
+ #self.sim.internal_stream.synchronize()
+ hip_check(hip.hipStreamSynchronize(self.sim.stream))
+ hip_check(hip.hipStreamSynchronize(self.sim.internal_stream))
+ #nvtx.mark("sync end", color="blue")
+
+ self.profiling_data_mpi["n_time_steps"] += 1
+
+ def getOutput(self):
+ return self.sim.getOutput()
+
+ def synchronize(self):
+ self.sim.synchronize()
+
+ def check(self):
+ return self.sim.check()
+
+ def computeDt(self):
+ local_dt = np.array([np.float32(self.sim.computeDt())]);
+ global_dt = np.empty(1, dtype=np.float32)
+ self.grid.comm.Allreduce(local_dt, global_dt, op=MPI.MIN)
+ self.logger.debug("Local dt: {:f}, global dt: {:f}".format(local_dt[0], global_dt[0]))
+ return global_dt[0]
+
+
+ def getExtent(self):
+ """
+ Function which returns the extent of node with rank
+ rank in the grid
+ """
+ width = self.sim.nx*self.sim.dx
+ height = self.sim.ny*self.sim.dy
+ i, j = self.grid.getCoordinate()
+ x0 = i * width
+ y0 = j * height
+ x1 = x0 + width
+ y1 = y0 + height
+ return [x0, x1, y0, y1]
+
+ def full_exchange(self):
+ ####
+ # First transfer internal cells north-south
+ ####
+
+ #Download from the GPU
+ self.profiling_data_mpi["start"]["t_mpi_halo_exchange_download"] += time.time()
+
+ if self.north is not None:
+ for k in range(self.nvars):
+ self.sim.u0[k].download(self.sim.stream, cpu_data=self.out_n[k,:,:], asynch=True, extent=self.read_n)
+ if self.south is not None:
+ for k in range(self.nvars):
+ self.sim.u0[k].download(self.sim.stream, cpu_data=self.out_s[k,:,:], asynch=True, extent=self.read_s)
+ #self.sim.stream.synchronize()
+ hip_check(hip.hipStreamSynchronize(self.sim.stream))
+
+ self.profiling_data_mpi["end"]["t_mpi_halo_exchange_download"] += time.time()
+
+ #Send/receive to north/south neighbours
+ self.profiling_data_mpi["start"]["t_mpi_halo_exchange_sendreceive"] += time.time()
+
+ comm_send = []
+ comm_recv = []
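+ #Tags pair each Isend with the matching Irecv on the neighbour rank:
+ #within timestep nt, tag 4*nt+0 carries data northwards and 4*nt+1
+ #southwards; the east-west exchange below uses 4*nt+2 and 4*nt+3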
+ if self.north is not None:
+ comm_send += [self.grid.comm.Isend(self.out_n, dest=self.north, tag=4*self.nt + 0)]
+ comm_recv += [self.grid.comm.Irecv(self.in_n, source=self.north, tag=4*self.nt + 1)]
+ if self.south is not None:
+ comm_send += [self.grid.comm.Isend(self.out_s, dest=self.south, tag=4*self.nt + 1)]
+ comm_recv += [self.grid.comm.Irecv(self.in_s, source=self.south, tag=4*self.nt + 0)]
+
+ #Wait for incoming transfers to complete
+ for comm in comm_recv:
+ comm.wait()
+
+ self.profiling_data_mpi["end"]["t_mpi_halo_exchange_sendreceive"] += time.time()
+
+ #Upload to the GPU
+ self.profiling_data_mpi["start"]["t_mpi_halo_exchange_upload"] += time.time()
+
+ if self.north is not None:
+ for k in range(self.nvars):
+ self.sim.u0[k].upload(self.sim.stream, self.in_n[k,:,:], extent=self.write_n)
+ if self.south is not None:
+ for k in range(self.nvars):
+ self.sim.u0[k].upload(self.sim.stream, self.in_s[k,:,:], extent=self.write_s)
+
+ self.profiling_data_mpi["end"]["t_mpi_halo_exchange_upload"] += time.time()
+
+ #Wait for sending to complete
+ self.profiling_data_mpi["start"]["t_mpi_halo_exchange_sendreceive"] += time.time()
+
+ for comm in comm_send:
+ comm.wait()
+
+ self.profiling_data_mpi["end"]["t_mpi_halo_exchange_sendreceive"] += time.time()
+
+ ####
+ # Then transfer east-west including ghost cells that have been filled in by north-south transfer above
+ ####
+
+ #Download from the GPU
+ self.profiling_data_mpi["start"]["t_mpi_halo_exchange_download"] += time.time()
+
+ if self.east is not None:
+ for k in range(self.nvars):
+ self.sim.u0[k].download(self.sim.stream, cpu_data=self.out_e[k,:,:], asynch=True, extent=self.read_e)
+ if self.west is not None:
+ for k in range(self.nvars):
+ self.sim.u0[k].download(self.sim.stream, cpu_data=self.out_w[k,:,:], asynch=True, extent=self.read_w)
+ #self.sim.stream.synchronize()
+ hip_check(hip.hipStreamSynchronize(self.sim.stream))
+
+ self.profiling_data_mpi["end"]["t_mpi_halo_exchange_download"] += time.time()
+
+ #Send/receive to east/west neighbours
+ self.profiling_data_mpi["start"]["t_mpi_halo_exchange_sendreceive"] += time.time()
+
+ comm_send = []
+ comm_recv = []
+ if self.east is not None:
+ comm_send += [self.grid.comm.Isend(self.out_e, dest=self.east, tag=4*self.nt + 2)]
+ comm_recv += [self.grid.comm.Irecv(self.in_e, source=self.east, tag=4*self.nt + 3)]
+ if self.west is not None:
+ comm_send += [self.grid.comm.Isend(self.out_w, dest=self.west, tag=4*self.nt + 3)]
+ comm_recv += [self.grid.comm.Irecv(self.in_w, source=self.west, tag=4*self.nt + 2)]
+
+ #Wait for incoming transfers to complete
+ for comm in comm_recv:
+ comm.wait()
+
+ self.profiling_data_mpi["end"]["t_mpi_halo_exchange_sendreceive"] += time.time()
+
+ #Upload to the GPU
+ self.profiling_data_mpi["start"]["t_mpi_halo_exchange_upload"] += time.time()
+
+ if self.east is not None:
+ for k in range(self.nvars):
+ self.sim.u0[k].upload(self.sim.stream, self.in_e[k,:,:], extent=self.write_e)
+ if self.west is not None:
+ for k in range(self.nvars):
+ self.sim.u0[k].upload(self.sim.stream, self.in_w[k,:,:], extent=self.write_w)
+
+ self.profiling_data_mpi["end"]["t_mpi_halo_exchange_upload"] += time.time()
+
+ #Wait for sending to complete
+ self.profiling_data_mpi["start"]["t_mpi_halo_exchange_sendreceive"] += time.time()
+
+ for comm in comm_send:
+ comm.wait()
+
+ self.profiling_data_mpi["end"]["t_mpi_halo_exchange_sendreceive"] += time.time()
diff --git a/GPUSimulators/SHMEMSimulator.py b/GPUSimulators/SHMEMSimulator.py
new file mode 100644
index 0000000..cfee8f3
--- /dev/null
+++ b/GPUSimulators/SHMEMSimulator.py
@@ -0,0 +1,266 @@
+# -*- coding: utf-8 -*-
+
+"""
+This python module implements the SHMEM simulator class
+
+Copyright (C) 2020 Norwegian Meteorological Institute
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
+"""
+
+
+import logging
+from GPUSimulators import Simulator, CudaContext
+import numpy as np
+
+#import pycuda.driver as cuda
+from hip import hip, hiprtc
+
+import time
+
+#Helper to check HIP API call results; this module calls hip_check but did not
+#define it, so the definition is mirrored from the other GPUSimulators modules
+def hip_check(call_result):
+ err = call_result[0]
+ result = call_result[1:]
+ if len(result) == 1:
+ result = result[0]
+ if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
+ raise RuntimeError(str(err))
+ elif (
+ isinstance(err, hiprtc.hiprtcResult)
+ and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
+ ):
+ raise RuntimeError(str(err))
+ return result
+
+class SHMEMSimulator(Simulator.BaseSimulator):
+ """
+ Class which handles communication and synchronization between simulators in different
+ contexts (presumably on different GPUs)
+ """
+ def __init__(self, sims, grid):
+ self.logger = logging.getLogger(__name__)
+
+ assert(len(sims) > 1)
+
+ self.sims = sims
+
+ # XXX: This is not what was intended. Do we need extra wrapper class SHMEMSimulator?
+ # See also getOutput() and check().
+ #
+ # SHMEMSimulatorGroup would then not have any superclass, but manage a collection of
+ # SHMEMSimulators that have BaseSimulator as a superclass.
+ #
+ # This would also eliminate the need for all the array bookkeeping in this class.
+ autotuner = sims[0].context.autotuner
+ sims[0].context.autotuner = None
+ boundary_conditions = sims[0].getBoundaryConditions()
+ super().__init__(sims[0].context,
+ sims[0].nx, sims[0].ny,
+ sims[0].dx, sims[0].dy,
+ boundary_conditions,
+ sims[0].cfl_scale,
+ sims[0].num_substeps,
+ sims[0].block_size[0], sims[0].block_size[1])
+ sims[0].context.autotuner = autotuner
+
+ self.sims = sims
+ self.grid = grid
+
+ self.east = [None] * len(self.sims)
+ self.west = [None] * len(self.sims)
+ self.north = [None] * len(self.sims)
+ self.south = [None] * len(self.sims)
+
+ self.nvars = [None] * len(self.sims)
+
+ self.read_e = [None] * len(self.sims)
+ self.read_w = [None] * len(self.sims)
+ self.read_n = [None] * len(self.sims)
+ self.read_s = [None] * len(self.sims)
+
+ self.write_e = [None] * len(self.sims)
+ self.write_w = [None] * len(self.sims)
+ self.write_n = [None] * len(self.sims)
+ self.write_s = [None] * len(self.sims)
+
+ self.e = [None] * len(self.sims)
+ self.w = [None] * len(self.sims)
+ self.n = [None] * len(self.sims)
+ self.s = [None] * len(self.sims)
+
+ for i, sim in enumerate(self.sims):
+ #Get neighbor subdomain ids
+ self.east[i] = grid.getEast(i)
+ self.west[i] = grid.getWest(i)
+ self.north[i] = grid.getNorth(i)
+ self.south[i] = grid.getSouth(i)
+
+ #Get coordinate of this subdomain
+ #and handle global boundary conditions
+ new_boundary_conditions = Simulator.BoundaryCondition({
+ 'north': Simulator.BoundaryCondition.Type.Dirichlet,
+ 'south': Simulator.BoundaryCondition.Type.Dirichlet,
+ 'east': Simulator.BoundaryCondition.Type.Dirichlet,
+ 'west': Simulator.BoundaryCondition.Type.Dirichlet
+ })
+ gi, gj = grid.getCoordinate(i)
+ if (gi == 0 and boundary_conditions.west != Simulator.BoundaryCondition.Type.Periodic):
+ self.west[i] = None
+ new_boundary_conditions.west = boundary_conditions.west
+ if (gj == 0 and boundary_conditions.south != Simulator.BoundaryCondition.Type.Periodic):
+ self.south[i] = None
+ new_boundary_conditions.south = boundary_conditions.south
+ if (gi == grid.grid[0]-1 and boundary_conditions.east != Simulator.BoundaryCondition.Type.Periodic):
+ self.east[i] = None
+ new_boundary_conditions.east = boundary_conditions.east
+ if (gj == grid.grid[1]-1 and boundary_conditions.north != Simulator.BoundaryCondition.Type.Periodic):
+ self.north[i] = None
+ new_boundary_conditions.north = boundary_conditions.north
+ sim.setBoundaryConditions(new_boundary_conditions)
+
+ #Get number of variables
+ self.nvars[i] = len(sim.getOutput().gpu_variables)
+
+ #Shorthands for computing extents and sizes
+ gc_x = int(sim.getOutput()[0].x_halo)
+ gc_y = int(sim.getOutput()[0].y_halo)
+ nx = int(sim.nx)
+ ny = int(sim.ny)
+
+ #Set regions for ghost cells to read from
+ #These have the format [x0, y0, width, height]
+ self.read_e[i] = np.array([ nx, 0, gc_x, ny + 2*gc_y])
+ self.read_w[i] = np.array([gc_x, 0, gc_x, ny + 2*gc_y])
+ self.read_n[i] = np.array([gc_x, ny, nx, gc_y])
+ self.read_s[i] = np.array([gc_x, gc_y, nx, gc_y])
+
+ #Set regions for ghost cells to write to
+ self.write_e[i] = self.read_e[i] + np.array([gc_x, 0, 0, 0])
+ self.write_w[i] = self.read_w[i] - np.array([gc_x, 0, 0, 0])
+ self.write_n[i] = self.read_n[i] + np.array([0, gc_y, 0, 0])
+ self.write_s[i] = self.read_s[i] - np.array([0, gc_y, 0, 0])
+
+ #Allocate host data
+ #Note that east and west also transfer ghost cells
+ #whilst north/south only transfer internal cells
+ #Reuses the width/height defined in the read-extents above
+ self.e[i] = np.empty((self.nvars[i], self.read_e[i][3], self.read_e[i][2]), dtype=np.float32)
+ self.w[i] = np.empty((self.nvars[i], self.read_w[i][3], self.read_w[i][2]), dtype=np.float32)
+ self.n[i] = np.empty((self.nvars[i], self.read_n[i][3], self.read_n[i][2]), dtype=np.float32)
+ self.s[i] = np.empty((self.nvars[i], self.read_s[i][3], self.read_s[i][2]), dtype=np.float32)
+
+ self.logger.debug("Initialized {:d} subdomains".format(len(self.sims)))
+
+
+ def substep(self, dt, step_number):
+ self.exchange()
+
+ for i, sim in enumerate(self.sims):
+ sim.substep(dt, step_number)
+
+ def getOutput(self):
+ # XXX: Does not return what we would expect.
+ # Returns first subdomain, but we want the whole domain.
+ return self.sims[0].getOutput()
+
+ def synchronize(self):
+ for sim in self.sims:
+ sim.synchronize()
+
+ def check(self):
+ # XXX: Does not return what we would expect.
+ # Checks only first subdomain, but we want to check the whole domain.
+ return self.sims[0].check()
+
+ def computeDt(self):
+ global_dt = float("inf")
+
+ for sim in self.sims:
+ sim.context.synchronize()
+
+ for sim in self.sims:
+ local_dt = sim.computeDt()
+ if local_dt < global_dt:
+ global_dt = local_dt
+ self.logger.debug("Local dt: {:f}".format(local_dt))
+
+ self.logger.debug("Global dt: {:f}".format(global_dt))
+ return global_dt
+
+ def getExtent(self, index=0):
+ """
+ Function which returns the extent of the subdomain with index
+ index in the grid
+ """
+ width = self.sims[index].nx*self.sims[index].dx
+ height = self.sims[index].ny*self.sims[index].dy
+ i, j = self.grid.getCoordinate(index)
+ x0 = i * width
+ y0 = j * height
+ x1 = x0 + width
+ y1 = y0 + height
+ return [x0, x1, y0, y1]
+
+ def exchange(self):
+ ####
+ # First transfer internal cells north-south
+ ####
+ for i in range(len(self.sims)):
+ self.ns_download(i)
+
+ for i in range(len(self.sims)):
+ self.ns_upload(i)
+
+ ####
+ # Then transfer east-west including ghost cells that have been filled in by north-south transfer above
+ ####
+ for i in range(len(self.sims)):
+ self.ew_download(i)
+
+ for i in range(len(self.sims)):
+ self.ew_upload(i)
+
+ def ns_download(self, i):
+ #Download from the GPU
+ if self.north[i] is not None:
+ for k in range(self.nvars[i]):
+ # XXX: Unnecessary global sync (only need to sync with neighboring subdomain to the north)
+ self.sims[i].u0[k].download(self.sims[i].stream, cpu_data=self.n[i][k,:,:], extent=self.read_n[i])
+ if self.south[i] is not None:
+ for k in range(self.nvars[i]):
+ # XXX: Unnecessary global sync (only need to sync with neighboring subdomain to the south)
+ self.sims[i].u0[k].download(self.sims[i].stream, cpu_data=self.s[i][k,:,:], extent=self.read_s[i])
+ #self.sims[i].stream.synchronize()
+ hip_check(hip.hipStreamSynchronize(self.sims[i].stream))
+
+ def ns_upload(self, i):
+ #Upload to the GPU
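+ #Subdomain i's north ghost cells are filled from the south read buffer of
+ #its north neighbour; the south ghost cells from the north buffer of the
+ #south neighbour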
+ if self.north[i] is not None:
+ for k in range(self.nvars[i]):
+ self.sims[i].u0[k].upload(self.sims[i].stream, self.s[self.north[i]][k,:,:], extent=self.write_n[i])
+ if self.south[i] is not None:
+ for k in range(self.nvars[i]):
+ self.sims[i].u0[k].upload(self.sims[i].stream, self.n[self.south[i]][k,:,:], extent=self.write_s[i])
+
+ def ew_download(self, i):
+ #Download from the GPU
+ if self.east[i] is not None:
+ for k in range(self.nvars[i]):
+ # XXX: Unnecessary global sync (only need to sync with neighboring subdomain to the east)
+ self.sims[i].u0[k].download(self.sims[i].stream, cpu_data=self.e[i][k,:,:], extent=self.read_e[i])
+ if self.west[i] is not None:
+ for k in range(self.nvars[i]):
+ # XXX: Unnecessary global sync (only need to sync with neighboring subdomain to the west)
+ self.sims[i].u0[k].download(self.sims[i].stream, cpu_data=self.w[i][k,:,:], extent=self.read_w[i])
+ #self.sims[i].stream.synchronize()
+ hip_check(hip.hipStreamSynchronize(self.sims[i].stream))
+
+ def ew_upload(self, i):
+ #Upload to the GPU
+ if self.east[i] is not None:
+ for k in range(self.nvars[i]):
+ self.sims[i].u0[k].upload(self.sims[i].stream, self.w[self.east[i]][k,:,:], extent=self.write_e[i])
+ #test_east = np.ones_like(self.e[self.east[i]][k,:,:])
+ #self.sims[i].u0[k].upload(self.sims[i].stream, test_east, extent=self.write_e[i])
+ if self.west[i] is not None:
+ for k in range(self.nvars[i]):
+ self.sims[i].u0[k].upload(self.sims[i].stream, self.e[self.west[i]][k,:,:], extent=self.write_w[i])
+ #test_west = np.ones_like(self.e[self.west[i]][k,:,:])
+ #self.sims[i].u0[k].upload(self.sims[i].stream, test_west, extent=self.write_w[i])
diff --git a/GPUSimulators/SHMEMSimulatorGroup.py b/GPUSimulators/SHMEMSimulatorGroup.py
new file mode 100644
index 0000000..c9dc30f
--- /dev/null
+++ b/GPUSimulators/SHMEMSimulatorGroup.py
@@ -0,0 +1,413 @@
+# -*- coding: utf-8 -*-
+
+"""
+This python module implements the SHMEM simulator group class
+
+Copyright (C) 2020 Norwegian Meteorological Institute
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <https://www.gnu.org/licenses/>.
+"""
+
+
+import logging
+from GPUSimulators import Simulator, CudaContext
+import numpy as np
+
+#import pycuda.driver as cuda
+from hip import hip, hiprtc
+
+import time
+
+#hip_check is defined at module level so that it is visible inside the class methods below
+def hip_check(call_result):
+ err = call_result[0]
+ result = call_result[1:]
+ if len(result) == 1:
+ result = result[0]
+ if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
+ raise RuntimeError(str(err))
+ elif (
+ isinstance(err, hiprtc.hiprtcResult)
+ and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
+ ):
+ raise RuntimeError(str(err))
+ return result
+
+class SHMEMGrid(object):
+ """
+ Class which represents an SHMEM grid of GPUs. Facilitates easy communication between
+ neighboring subdomains in the grid. Contains one CUDA context per subdomain.
+ """
+
+ def __init__(self, ngpus=None, ndims=2):
+ self.logger = logging.getLogger(__name__)
+
+ #cuda.init(flags=0)
+ self.logger.info("Initializing HIP")
+ #num_cuda_devices = cuda.Device.count()
+ num_cuda_devices = hip_check(hip.hipGetDeviceCount())
+
+ if ngpus is None:
+ ngpus = num_cuda_devices
+
+ # XXX: disabled for testing on single-GPU system
+ #assert ngpus <= num_cuda_devices, "Trying to allocate more GPUs than are available in the system."
+ #assert ngpus >= 2, "Must have at least two GPUs available to run multi-GPU simulations."
+
+ assert ndims == 2, "Unsupported number of dimensions. Must be two at the moment"
+
+ self.ngpus = ngpus
+ self.ndims = ndims
+
+ self.grid = SHMEMGrid.getGrid(self.ngpus, self.ndims)
+
+ self.logger.debug("Created {:}-dimensional SHMEM grid, using {:} GPUs".format(
+ self.ndims, self.ngpus))
+
+ # XXX: Is this a natural place to store the contexts? Consider moving contexts out of this
+ # class, into notebook / calling script (shmemTesting.py)
+ self.cuda_contexts = []
+
+ for i in range(self.ngpus):
+ # XXX: disabled for testing on single-GPU system
+ #self.cuda_contexts.append(CudaContext.CudaContext(device=i, autotuning=False))
+ self.cuda_contexts.append(CudaContext.CudaContext(device=0, autotuning=False))
+
+ def getCoordinate(self, index):
+ i = (index % self.grid[0])
+ j = (index // self.grid[0])
+ return i, j
+
+ def getIndex(self, i, j):
+ return j*self.grid[0] + i
+
+ def getEast(self, index):
+ i, j = self.getCoordinate(index)
+ i = (i+1) % self.grid[0]
+ return self.getIndex(i, j)
+
+ def getWest(self, index):
+ i, j = self.getCoordinate(index)
+ i = (i+self.grid[0]-1) % self.grid[0]
+ return self.getIndex(i, j)
+
+ def getNorth(self, index):
+ i, j = self.getCoordinate(index)
+ j = (j+1) % self.grid[1]
+ return self.getIndex(i, j)
+
+ def getSouth(self, index):
+ i, j = self.getCoordinate(index)
+ j = (j+self.grid[1]-1) % self.grid[1]
+ return self.getIndex(i, j)
+
+ def getGrid(num_gpus, num_dims):
+ assert(isinstance(num_gpus, int))
+ assert(isinstance(num_dims, int))
+
+ # Adapted from https://stackoverflow.com/questions/28057307/factoring-a-number-into-roughly-equal-factors
+ # Original code by https://stackoverflow.com/users/3928385/ishamael
+ # Factorizes a number into n roughly equal factors
+
+ #Dictionary to remember already computed permutations
+ memo = {}
+ def dp(n, left): # returns tuple (cost, [factors])
+ """
+ Recursively searches through all factorizations
+ """
+
+ #Already tried: return existing result
+ if (n, left) in memo:
+ return memo[(n, left)]
+
+ #Spent all factors: return number itself
+ if left == 1:
+ return (n, [n])
+
+ #Find new factor
+ i = 2
+ best = n
+ bestTuple = [n]
+ while i * i < n:
+ #If factor found
+ if n % i == 0:
+ #Factorize remainder
+ rem = dp(n // i, left - 1)
+
+ #If new permutation better, save it
+ if rem[0] + i < best:
+ best = rem[0] + i
+ bestTuple = [i] + rem[1]
+ i += 1
+
+ #Store calculation
+ memo[(n, left)] = (best, bestTuple)
+ return memo[(n, left)]
+
+
+ grid = dp(num_gpus, num_dims)[1]
+
+ if (len(grid) < num_dims):
+ #Split problematic 4
+ if (4 in grid):
+ grid.remove(4)
+ grid.append(2)
+ grid.append(2)
+
+ #Pad with ones to guarantee num_dims
+ grid = grid + [1]*(num_dims - len(grid))
+
+ #Sort in descending order
+ grid = np.sort(grid)
+ grid = grid[::-1]
+
+ return grid
+
+class SHMEMSimulatorGroup(object):
+ """
+ Class which handles communication and synchronization between simulators in different
+ contexts (typically on different GPUs)
+ """
+ def __init__(self, sims, grid):
+ self.logger = logging.getLogger(__name__)
+
+ assert(len(sims) > 1)
+
+ self.sims = sims
+
+ # XXX: This is not what was intended. Do we need extra wrapper class SHMEMSimulator?
+ # See also getOutput() and check().
+ #
+ # SHMEMSimulatorGroup would then not have any superclass, but manage a collection of
+ # SHMEMSimulators that have BaseSimulator as a superclass.
+ #
+ # This would also eliminate the need for all the array bookkeeping in this class.
+ #
+ # CONT HERE! Model shmemTesting after mpiTesting and divide existing
+ # functionality between SHMEMSimulatorGroup and SHMEMSimulator
+
+ autotuner = sims[0].context.autotuner
+ sims[0].context.autotuner = None
+ boundary_conditions = sims[0].getBoundaryConditions()
+ super().__init__(sims[0].context,
+ sims[0].nx, sims[0].ny,
+ sims[0].dx, sims[0].dy,
+ boundary_conditions,
+ sims[0].cfl_scale,
+ sims[0].num_substeps,
+ sims[0].block_size[0], sims[0].block_size[1])
+ sims[0].context.autotuner = autotuner
+
+ self.sims = sims
+ self.grid = grid
+
+ self.east = [None] * len(self.sims)
+ self.west = [None] * len(self.sims)
+ self.north = [None] * len(self.sims)
+ self.south = [None] * len(self.sims)
+
+ self.nvars = [None] * len(self.sims)
+
+ self.read_e = [None] * len(self.sims)
+ self.read_w = [None] * len(self.sims)
+ self.read_n = [None] * len(self.sims)
+ self.read_s = [None] * len(self.sims)
+
+ self.write_e = [None] * len(self.sims)
+ self.write_w = [None] * len(self.sims)
+ self.write_n = [None] * len(self.sims)
+ self.write_s = [None] * len(self.sims)
+
+ self.e = [None] * len(self.sims)
+ self.w = [None] * len(self.sims)
+ self.n = [None] * len(self.sims)
+ self.s = [None] * len(self.sims)
+
+ for i, sim in enumerate(self.sims):
+ #Get neighbor subdomain ids
+ self.east[i] = grid.getEast(i)
+ self.west[i] = grid.getWest(i)
+ self.north[i] = grid.getNorth(i)
+ self.south[i] = grid.getSouth(i)
+
+ #Get coordinate of this subdomain
+ #and handle global boundary conditions
+ new_boundary_conditions = Simulator.BoundaryCondition({
+ 'north': Simulator.BoundaryCondition.Type.Dirichlet,
+ 'south': Simulator.BoundaryCondition.Type.Dirichlet,
+ 'east': Simulator.BoundaryCondition.Type.Dirichlet,
+ 'west': Simulator.BoundaryCondition.Type.Dirichlet
+ })
+ gi, gj = grid.getCoordinate(i)
+ if (gi == 0 and boundary_conditions.west != Simulator.BoundaryCondition.Type.Periodic):
+ self.west[i] = None
+ new_boundary_conditions.west = boundary_conditions.west
+ if (gj == 0 and boundary_conditions.south != Simulator.BoundaryCondition.Type.Periodic):
+ self.south[i] = None
+ new_boundary_conditions.south = boundary_conditions.south
+ if (gi == grid.grid[0]-1 and boundary_conditions.east != Simulator.BoundaryCondition.Type.Periodic):
+ self.east[i] = None
+ new_boundary_conditions.east = boundary_conditions.east
+ if (gj == grid.grid[1]-1 and boundary_conditions.north != Simulator.BoundaryCondition.Type.Periodic):
+ self.north[i] = None
+ new_boundary_conditions.north = boundary_conditions.north
+ sim.setBoundaryConditions(new_boundary_conditions)
+
+ #Get number of variables
+ self.nvars[i] = len(sim.getOutput().gpu_variables)
+
+ #Shorthands for computing extents and sizes
+ gc_x = int(sim.getOutput()[0].x_halo)
+ gc_y = int(sim.getOutput()[0].y_halo)
+ nx = int(sim.nx)
+ ny = int(sim.ny)
+
+ #Set regions for ghost cells to read from
+ #These have the format [x0, y0, width, height]
+ self.read_e[i] = np.array([ nx, 0, gc_x, ny + 2*gc_y])
+ self.read_w[i] = np.array([gc_x, 0, gc_x, ny + 2*gc_y])
+ self.read_n[i] = np.array([gc_x, ny, nx, gc_y])
+ self.read_s[i] = np.array([gc_x, gc_y, nx, gc_y])
+
+ #Set regions for ghost cells to write to
+ self.write_e[i] = self.read_e[i] + np.array([gc_x, 0, 0, 0])
+ self.write_w[i] = self.read_w[i] - np.array([gc_x, 0, 0, 0])
+ self.write_n[i] = self.read_n[i] + np.array([0, gc_y, 0, 0])
+ self.write_s[i] = self.read_s[i] - np.array([0, gc_y, 0, 0])
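+
+ # Illustrative example (hypothetical sizes): with nx = ny = 16 and
+ # gc_x = gc_y = 2, read_e becomes [16, 0, 2, 20] (the two easternmost
+ # internal columns, including the y-ghost rows) and write_e becomes
+ # [18, 0, 2, 20] (the east ghost columns proper). A neighbor's read
+ # region is what gets copied into this subdomain's write region.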
+
+ #Allocate host data
+ #Note that east and west also transfer ghost cells
+ #whilst north/south only transfer internal cells
+ #Reuses the width/height defined in the read-extents above
+ self.e[i] = np.empty((self.nvars[i], self.read_e[i][3], self.read_e[i][2]), dtype=np.float32)
+ self.w[i] = np.empty((self.nvars[i], self.read_w[i][3], self.read_w[i][2]), dtype=np.float32)
+ self.n[i] = np.empty((self.nvars[i], self.read_n[i][3], self.read_n[i][2]), dtype=np.float32)
+ self.s[i] = np.empty((self.nvars[i], self.read_s[i][3], self.read_s[i][2]), dtype=np.float32)
+
+ self.logger.debug("Initialized {:d} subdomains".format(len(self.sims)))
+
+
+ def substep(self, dt, step_number):
+ self.exchange()
+
+ for i, sim in enumerate(self.sims):
+ sim.substep(dt, step_number)
+
+ def getOutput(self):
+ # XXX: Does not return what we would expect.
+ # Returns first subdomain, but we want the whole domain.
+ return self.sims[0].getOutput()
+
+ def synchronize(self):
+ for sim in self.sims:
+ sim.synchronize()
+
+ def check(self):
+ # XXX: Does not return what we would expect.
+ # Checks only first subdomain, but we want to check the whole domain.
+ return self.sims[0].check()
+
+ def computeDt(self):
+ global_dt = float("inf")
+
+ for sim in self.sims:
+ sim.context.synchronize()
+
+ for sim in self.sims:
+ local_dt = sim.computeDt()
+ if local_dt < global_dt:
+ global_dt = local_dt
+ self.logger.debug("Local dt: {:f}".format(local_dt))
+
+ self.logger.debug("Global dt: {:f}".format(global_dt))
+ return global_dt
+
+ def getExtent(self, index=0):
+ """
+ Function which returns the extent of the subdomain with index
+ index in the grid
+ """
+ width = self.sims[index].nx*self.sims[index].dx
+ height = self.sims[index].ny*self.sims[index].dy
+ i, j = self.grid.getCoordinate(index)
+ x0 = i * width
+ y0 = j * height
+ x1 = x0 + width
+ y1 = y0 + height
+ return [x0, x1, y0, y1]
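+
+ # Example (illustrative): on a 2x2 grid of 16x16 subdomains with
+ # dx = dy = 1.0, the subdomain at grid coordinate (1, 1) has the extent
+ # [16.0, 32.0, 16.0, 32.0].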
+
+ def exchange(self):
+ ####
+ # First transfer internal cells north-south
+ ####
+ for i in range(len(self.sims)):
+ self.ns_download(i)
+
+ for i in range(len(self.sims)):
+ self.ns_upload(i)
+
+ ####
+ # Then transfer east-west including ghost cells that have been filled in by north-south transfer above
+ ####
+ for i in range(len(self.sims)):
+ self.ew_download(i)
+
+ for i in range(len(self.sims)):
+ self.ew_upload(i)
+
+ def ns_download(self, i):
+ #Download from the GPU
+ if self.north[i] is not None:
+ for k in range(self.nvars[i]):
+ # XXX: Unnecessary global sync (only need to sync with neighboring subdomain to the north)
+ self.sims[i].u0[k].download(self.sims[i].stream, cpu_data=self.n[i][k,:,:], extent=self.read_n[i])
+ if self.south[i] is not None:
+ for k in range(self.nvars[i]):
+ # XXX: Unnecessary global sync (only need to sync with neighboring subdomain to the south)
+ self.sims[i].u0[k].download(self.sims[i].stream, cpu_data=self.s[i][k,:,:], extent=self.read_s[i])
+ #self.sims[i].stream.synchronize()
+ self.hip_check(hip.hipStreamSynchronize(self.sims[i].stream))
+
+
+ def ns_upload(self, i):
+ #Upload to the GPU
+ if self.north[i] is not None:
+ for k in range(self.nvars[i]):
+ self.sims[i].u0[k].upload(self.sims[i].stream, self.s[self.north[i]][k,:,:], extent=self.write_n[i])
+ if self.south[i] is not None:
+ for k in range(self.nvars[i]):
+ self.sims[i].u0[k].upload(self.sims[i].stream, self.n[self.south[i]][k,:,:], extent=self.write_s[i])
+
+ def ew_download(self, i):
+ #Download from the GPU
+ if self.east[i] is not None:
+ for k in range(self.nvars[i]):
+ # XXX: Unnecessary global sync (only need to sync with neighboring subdomain to the east)
+ self.sims[i].u0[k].download(self.sims[i].stream, cpu_data=self.e[i][k,:,:], extent=self.read_e[i])
+ if self.west[i] is not None:
+ for k in range(self.nvars[i]):
+ # XXX: Unnecessary global sync (only need to sync with neighboring subdomain to the west)
+ self.sims[i].u0[k].download(self.sims[i].stream, cpu_data=self.w[i][k,:,:], extent=self.read_w[i])
+ #self.sims[i].stream.synchronize()
+ self.hip_check(hip.hipStreamSynchronize(self.sims[i].stream))
+
+ def ew_upload(self, i):
+ #Upload to the GPU
+ if self.east[i] is not None:
+ for k in range(self.nvars[i]):
+ self.sims[i].u0[k].upload(self.sims[i].stream, self.w[self.east[i]][k,:,:], extent=self.write_e[i])
+ #test_east = np.ones_like(self.e[self.east[i]][k,:,:])
+ #self.sims[i].u0[k].upload(self.sims[i].stream, test_east, extent=self.write_e[i])
+ if self.west[i] is not None:
+ for k in range(self.nvars[i]):
+ self.sims[i].u0[k].upload(self.sims[i].stream, self.e[self.west[i]][k,:,:], extent=self.write_w[i])
+ #test_west = np.ones_like(self.e[self.west[i]][k,:,:])
+ #self.sims[i].u0[k].upload(self.sims[i].stream, test_west, extent=self.write_w[i])
diff --git a/GPUSimulators/Simulator.py b/GPUSimulators/Simulator.py
new file mode 100644
index 0000000..b804d79
--- /dev/null
+++ b/GPUSimulators/Simulator.py
@@ -0,0 +1,286 @@
+# -*- coding: utf-8 -*-
+
+"""
+This python module implements the classical Lax-Friedrichs numerical
+scheme for the shallow water equations
+
+Copyright (C) 2016 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see .
+"""
+
+#Import packages we need
+import numpy as np
+import logging
+from enum import IntEnum
+
+#import pycuda.compiler as cuda_compiler
+#import pycuda.gpuarray
+#import pycuda.driver as cuda
+
+from hip import hip, hiprtc
+
+from GPUSimulators import Common
+
+
+class BoundaryCondition(object):
+ """
+ Class for holding boundary conditions for global boundaries
+ """
+
+
+ class Type(IntEnum):
+ """
+ Enum that describes the different types of boundary conditions
+ WARNING: MUST MATCH THAT OF common.h IN CUDA
+ """
+ Dirichlet = 0
+ Neumann = 1
+ Periodic = 2
+ Reflective = 3
+
+ def __init__(self, types={
+ 'north': Type.Reflective,
+ 'south': Type.Reflective,
+ 'east': Type.Reflective,
+ 'west': Type.Reflective
+ }):
+ """
+ Constructor
+ """
+ self.north = types['north']
+ self.south = types['south']
+ self.east = types['east']
+ self.west = types['west']
+
+ if (self.north == BoundaryCondition.Type.Neumann \
+ or self.south == BoundaryCondition.Type.Neumann \
+ or self.east == BoundaryCondition.Type.Neumann \
+ or self.west == BoundaryCondition.Type.Neumann):
+ raise(NotImplementedError("Neumann boundary condition not supported"))
+
+ def __str__(self):
+ return '[north={:s}, south={:s}, east={:s}, west={:s}]'.format(str(self.north), str(self.south), str(self.east), str(self.west))
+
+
+ def asCodedInt(self):
+ """
+ Helper function which packs four boundary conditions into one integer
+ """
+ bc = 0
+ bc = bc | (self.north & 0x0000000F) << 24
+ bc = bc | (self.south & 0x0000000F) << 16
+ bc = bc | (self.east & 0x0000000F) << 8
+ bc = bc | (self.west & 0x0000000F) << 0
+
+ #for t in types:
+ # print("{0:s}, {1:d}, {1:032b}, {1:08b}".format(t, types[t]))
+ #print("bc: {0:032b}".format(bc))
+
+ return np.int32(bc)
+
+ @staticmethod
+ def getTypes(bc):
+ types = {}
+ types['north'] = BoundaryCondition.Type((bc >> 24) & 0x0000000F)
+ types['south'] = BoundaryCondition.Type((bc >> 16) & 0x0000000F)
+ types['east'] = BoundaryCondition.Type((bc >> 8) & 0x0000000F)
+ types['west'] = BoundaryCondition.Type((bc >> 0) & 0x0000000F)
+ return types
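+
+ # Round-trip sketch (illustrative): with the default all-Reflective
+ # conditions (Reflective == 3), asCodedInt() packs 0x03030303, and
+ # getTypes() recovers the original four types:
+ # bc_int = BoundaryCondition().asCodedInt() # -> 0x03030303
+ # types = BoundaryCondition.getTypes(bc_int) # all four -> Reflective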
+
+
+
+class BaseSimulator(object):
+
+ @staticmethod
+ def hip_check(call_result):
+ err = call_result[0]
+ result = call_result[1:]
+ if len(result) == 1:
+ result = result[0]
+ if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
+ raise RuntimeError(str(err))
+ return result
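+
+ # hip-python calls return a tuple whose first element is an error code;
+ # hip_check() strips and checks it before handing back the payload, e.g.
+ # (illustrative):
+ # stream = self.hip_check(hip.hipStreamCreate())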
+
+ def __init__(self,
+ context,
+ nx, ny,
+ dx, dy,
+ boundary_conditions,
+ cfl_scale,
+ num_substeps,
+ block_width, block_height):
+ """
+ Initialization routine
+ context: GPU context to use
+ kernel_wrapper: wrapper function of GPU kernel
+ h0: Water depth incl ghost cells, (nx+1)*(ny+1) cells
+ hu0: Initial momentum along x-axis incl ghost cells, (nx+1)*(ny+1) cells
+ hv0: Initial momentum along y-axis incl ghost cells, (nx+1)*(ny+1) cells
+ nx: Number of cells along x-axis
+ ny: Number of cells along y-axis
+ dx: Grid cell spacing along x-axis (20 000 m)
+ dy: Grid cell spacing along y-axis (20 000 m)
+ dt: Size of each timestep (90 s)
+ cfl_scale: Courant number
+ num_substeps: Number of substeps to perform for a full step
+ """
+ #Get logger
+ self.logger = logging.getLogger(__name__ + "." + self.__class__.__name__)
+
+ #Save input parameters
+ #Notice that we need to specify them in the correct dataformat for the
+ #GPU kernel
+ self.context = context
+ self.nx = np.int32(nx)
+ self.ny = np.int32(ny)
+ self.dx = np.float32(dx)
+ self.dy = np.float32(dy)
+ self.setBoundaryConditions(boundary_conditions)
+ self.cfl_scale = cfl_scale
+ self.num_substeps = num_substeps
+
+ #Handle autotuning block size
+ if (self.context.autotuner):
+ peak_configuration = self.context.autotuner.get_peak_performance(self.__class__)
+ block_width = int(peak_configuration["block_width"])
+ block_height = int(peak_configuration["block_height"])
+ self.logger.debug("Used autotuning to get block size [%d x %d]", block_width, block_height)
+
+ #Compute kernel launch parameters
+ self.block_size = (block_width, block_height, 1)
+ self.grid_size = (
+ int(np.ceil(self.nx / float(self.block_size[0]))),
+ int(np.ceil(self.ny / float(self.block_size[1])))
+ )
+
+ #Create the main and internal HIP streams
+ #self.stream = cuda.Stream()
+ #self.internal_stream = cuda.Stream()
+ self.stream = self.hip_check(hip.hipStreamCreate())
+ self.internal_stream = self.hip_check(hip.hipStreamCreate())
+
+ #Keep track of simulation time and number of timesteps
+ self.t = 0.0
+ self.nt = 0
+
+
+ def __str__(self):
+ return "{:s} [{:d}x{:d}]".format(self.__class__.__name__, self.nx, self.ny)
+
+
+ def simulate(self, t, dt=None):
+ """
+ Function which simulates t_end seconds using the step function
+ Requires that the step() function is implemented in the subclasses
+ """
+
+ printer = Common.ProgressPrinter(t)
+
+ t_start = self.simTime()
+ t_end = t_start + t
+
+ update_dt = True
+ if (dt is not None):
+ update_dt = False
+ self.dt = dt
+
+ while(self.simTime() < t_end):
+ # Update dt every 100 timesteps and cross your fingers it works
+ # for the next 100
+ if (update_dt and (self.simSteps() % 100 == 0)):
+ self.dt = self.computeDt()*self.cfl_scale
+
+ # Compute timestep for "this" iteration (i.e., shorten last timestep)
+ current_dt = np.float32(min(self.dt, t_end-self.simTime()))
+
+ # Stop if end reached (should not happen)
+ if (current_dt <= 0.0):
+ self.logger.warning("Timestep size {:d} is less than or equal to zero!".format(self.simSteps()))
+ break
+
+ # Step forward in time
+ self.step(current_dt)
+
+ #Print info
+ print_string = printer.getPrintString(self.simTime() - t_start)
+ if (print_string):
+ self.logger.info("%s: %s", self, print_string)
+ try:
+ self.check()
+ except AssertionError as e:
+ e.args += ("Step={:d}, time={:f}".format(self.simSteps(), self.simTime()),)
+ raise
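+
+ # Usage sketch (illustrative, assuming a concrete subclass such as WAF
+ # that implements substep(), getOutput() and computeDt()):
+ # sim = WAF(context, h0, hu0, hv0, nx, ny, dx, dy, g)
+ # sim.simulate(10.0) # advance ten simulated seconds
+ # results = sim.download() # copy the conserved variables to the host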
+
+
+ def step(self, dt):
+ """
+ Function which performs one single timestep of size dt
+ """
+ for i in range(self.num_substeps):
+ self.substep(dt, i)
+
+ self.t += dt
+ self.nt += 1
+
+ def download(self, variables=None):
+ return self.getOutput().download(self.stream, variables)
+
+ def synchronize(self):
+ #self.stream.synchronize()
+ #Synchronize the stream to ensure all operations in it have completed
+ self.hip_check(hip.hipStreamSynchronize(self.stream))
+
+ def simTime(self):
+ return self.t
+
+ def simSteps(self):
+ return self.nt
+
+ def getExtent(self):
+ return [0, 0, self.nx*self.dx, self.ny*self.dy]
+
+ def setBoundaryConditions(self, boundary_conditions):
+ self.logger.debug("Boundary conditions set to {:s}".format(str(boundary_conditions)))
+ self.boundary_conditions = boundary_conditions.asCodedInt()
+
+ def getBoundaryConditions(self):
+ return BoundaryCondition(BoundaryCondition.getTypes(self.boundary_conditions))
+
+ def substep(self, dt, step_number):
+ """
+ Function which performs one single substep with stepsize dt
+ """
+ raise(NotImplementedError("Needs to be implemented in subclass"))
+
+ def getOutput(self):
+ raise(NotImplementedError("Needs to be implemented in subclass"))
+
+ def check(self):
+ self.logger.warning("check() is not implemented - please implement")
+ #raise(NotImplementedError("Needs to be implemented in subclass"))
+
+ def computeDt(self):
+ raise(NotImplementedError("Needs to be implemented in subclass"))
+
+
+
+
+def stepOrderToCodedInt(step, order):
+ """
+ Helper function which packs the step and order into a single integer
+ """
+ step_order = (step << 16) | (order & 0x0000ffff)
+ #print("Step: {0:032b}".format(step))
+ #print("Order: {0:032b}".format(order))
+ #print("Mix: {0:032b}".format(step_order))
+ return np.int32(step_order)
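+
+# Example (illustrative): stepOrderToCodedInt(step=1, order=2) gives
+# (1 << 16) | 2 = 0x00010002, i.e. the step in the upper and the order in
+# the lower 16 bits of the resulting int32.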
diff --git a/GPUSimulators/WAF.py b/GPUSimulators/WAF.py
new file mode 100644
index 0000000..7e2763c
--- /dev/null
+++ b/GPUSimulators/WAF.py
@@ -0,0 +1,241 @@
+# -*- coding: utf-8 -*-
+
+"""
+This python module implements the Weighted average flux (WAF) described in
+E. Toro, Shock-Capturing methods for free-surface shallow flows, 2001
+
+Copyright (C) 2016 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see .
+"""
+
+#Import packages we need
+from GPUSimulators import Simulator, Common
+from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
+import numpy as np
+import ctypes
+import os
+
+#from pycuda import gpuarray
+from hip import hip, hiprtc, hipblas
+
+
+
+"""
+Class that solves the SW equations using the weighted average flux (WAF) scheme
+"""
+class WAF (Simulator.BaseSimulator):
+
+ """
+ Initialization routine
+ h0: Water depth incl ghost cells, (nx+1)*(ny+1) cells
+ hu0: Initial momentum along x-axis incl ghost cells, (nx+1)*(ny+1) cells
+ hv0: Initial momentum along y-axis incl ghost cells, (nx+1)*(ny+1) cells
+ nx: Number of cells along x-axis
+ ny: Number of cells along y-axis
+ dx: Grid cell spacing along x-axis (20 000 m)
+ dy: Grid cell spacing along y-axis (20 000 m)
+ dt: Size of each timestep (90 s)
+ g: Gravitational accelleration (9.81 m/s^2)
+ """
+
+ @staticmethod
+ def hip_check(call_result):
+ err = call_result[0]
+ result = call_result[1:]
+ if len(result) == 1:
+ result = result[0]
+ if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
+ raise RuntimeError(str(err))
+ elif (
+ isinstance(err, hiprtc.hiprtcResult)
+ and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
+ ):
+ raise RuntimeError(str(err))
+ return result
+
+ def __init__(self,
+ context,
+ h0, hu0, hv0,
+ nx, ny,
+ dx, dy,
+ g,
+ cfl_scale=0.9,
+ boundary_conditions=BoundaryCondition(),
+ block_width=16, block_height=16):
+
+ # Call super constructor
+ super().__init__(context,
+ nx, ny,
+ dx, dy,
+ boundary_conditions,
+ cfl_scale,
+ 2,
+ block_width, block_height)
+ self.g = np.float32(g)
+
+ #Get kernels
+# module = context.get_module("cuda/SWE2D_WAF.cu",
+# defines={
+# 'BLOCK_WIDTH': self.block_size[0],
+# 'BLOCK_HEIGHT': self.block_size[1]
+# },
+# compile_args={
+# 'no_extern_c': True,
+# 'options': ["--use_fast_math"],
+# },
+# jit_compile_args={})
+# self.kernel = module.get_function("WAFKernel")
+# self.kernel.prepare("iiffffiiPiPiPiPiPiPiP")
+
+ kernel_file_path = os.path.abspath(os.path.join('cuda', 'SWE2D_WAF.cu.hip'))
+ with open(kernel_file_path, 'r') as file:
+ kernel_source = file.read()
+
+ #Compile the kernel at runtime with hiprtc for this device's architecture
+ prog = self.hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"WAFKernel", 0, [], []))
+
+ props = hip.hipDeviceProp_t()
+ self.hip_check(hip.hipGetDeviceProperties(props, 0))
+ arch = props.gcnArchName
+
+ print(f"Compiling kernel WAFKernel for {arch}")
+
+ cflags = [b"--offload-arch="+arch]
+ err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
+ if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
+ log_size = self.hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
+ log = bytearray(log_size)
+ self.hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
+ raise RuntimeError(log.decode())
+ code_size = self.hip_check(hiprtc.hiprtcGetCodeSize(prog))
+ code = bytearray(code_size)
+ self.hip_check(hiprtc.hiprtcGetCode(prog, code))
+
+ #Keep the module and kernel handles on self so substepDimsplit() can
+ #launch the compiled kernel on every substep
+ self.module = self.hip_check(hip.hipModuleLoadData(code))
+ self.kernel = self.hip_check(hip.hipModuleGetFunction(self.module, b"WAFKernel"))
+
+ #Create data by uploading to device
+ self.u0 = Common.ArakawaA2D(self.stream,
+ nx, ny,
+ 2, 2,
+ [h0, hu0, hv0])
+ self.u1 = Common.ArakawaA2D(self.stream,
+ nx, ny,
+ 2, 2,
+ [None, None, None])
+ #self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
+ data_h = np.empty(self.grid_size, dtype=np.float32)
+ num_bytes = data_h.size * data_h.itemsize
+ self.cfl_data = self.hip_check(hip.hipMalloc(num_bytes)).configure(
+ typestr="float32",shape=self.grid_size)
+
+ dt_x = np.min(self.dx / (np.abs(hu0/h0) + np.sqrt(g*h0)))
+ dt_y = np.min(self.dy / (np.abs(hv0/h0) + np.sqrt(g*h0)))
+ dt = min(dt_x, dt_y)
+ self.cfl_data.fill(dt, stream=self.stream)
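+
+ # The estimate above is the CFL limit of the shallow water system: per
+ # axis, dt <= dx / max(|u| + sqrt(g*h)), where sqrt(g*h) is the gravity
+ # wave speed. The minimum over the grid and both axes gives a stable
+ # initial timestep; afterwards the kernel maintains cfl_data itself.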
+
+ def substep(self, dt, step_number):
+ self.substepDimsplit(dt*0.5, step_number)
+
+ def substepDimsplit(self, dt, substep):
+# self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
+# self.nx, self.ny,
+# self.dx, self.dy, dt,
+# self.g,
+# substep,
+# self.boundary_conditions,
+# self.u0[0].data.gpudata, self.u0[0].data.strides[0],
+# self.u0[1].data.gpudata, self.u0[1].data.strides[0],
+# self.u0[2].data.gpudata, self.u0[2].data.strides[0],
+# self.u1[0].data.gpudata, self.u1[0].data.strides[0],
+# self.u1[1].data.gpudata, self.u1[1].data.strides[0],
+# self.u1[2].data.gpudata, self.u1[2].data.strides[0],
+# self.cfl_data.gpudata)
+
+ #Launch the kernel. The "extra" tuple packs the arguments in the same
+ #order as the old prepared signature "iiffffiiPiPiPiPiPiPiP": ints and
+ #floats first, then pointer/stride pairs. Note that dt is the substep
+ #size passed in, the strides are ints, and the data fields are device
+ #pointers (not floats).
+ self.hip_check(
+ hip.hipModuleLaunchKernel(
+ self.kernel,
+ *self.grid_size,
+ *self.block_size,
+ sharedMemBytes=0,
+ stream=self.stream,
+ kernelParams=None,
+ extra=( # pass kernel's arguments
+ ctypes.c_int(self.nx), ctypes.c_int(self.ny),
+ ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
+ ctypes.c_float(self.g),
+ ctypes.c_int(substep),
+ ctypes.c_int(self.boundary_conditions),
+ self.u0[0].data, ctypes.c_int(self.u0[0].data.strides[0]),
+ self.u0[1].data, ctypes.c_int(self.u0[1].data.strides[0]),
+ self.u0[2].data, ctypes.c_int(self.u0[2].data.strides[0]),
+ self.u1[0].data, ctypes.c_int(self.u1[0].data.strides[0]),
+ self.u1[1].data, ctypes.c_int(self.u1[1].data.strides[0]),
+ self.u1[2].data, ctypes.c_int(self.u1[2].data.strides[0]),
+ self.cfl_data
+ )
+ )
+ )
+
+ self.u0, self.u1 = self.u1, self.u0
+
+ #Do not unload the module or free cfl_data here: the next substep
+ #reuses the compiled kernel, and computeDt() reads cfl_data. The
+ #stream is synchronized explicitly via synchronize() when needed.
+
+ def getOutput(self):
+ return self.u0
+
+ def check(self):
+ self.u0.check()
+ self.u1.check()
+
+ # computing min with hipblas: the output is an index
+ def min_hipblas(self, num_elements, cfl_data, stream):
+ num_bytes = num_elements * np.dtype(np.float32).itemsize
+ num_bytes_i = np.dtype(np.int32).itemsize
+ indx_d = hip_check(hip.hipMalloc(num_bytes_i))
+ indx_h = np.zeros(1, dtype=np.int32)
+ x_temp = np.zeros(num_elements, dtype=np.float32)
+
+ #print("--size.data:", cfl_data.size)
+ handle = hip_check(hipblas.hipblasCreate())
+
+ #hip_check(hipblas.hipblasGetStream(handle, stream))
+ #"incx" [int] specifies the increment for the elements of x. incx must be > 0.
+ hip_check(hipblas.hipblasIsamin(handle, num_elements, cfl_data, 1, indx_d))
+
+ # destruction of handle
+ hip_check(hipblas.hipblasDestroy(handle))
+
+ # copy result (stored in indx_d) back to the host (store in indx_h)
+ hip_check(hip.hipMemcpyAsync(indx_h,indx_d,num_bytes_i,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
+ hip_check(hip.hipMemcpyAsync(x_temp,cfl_data,num_bytes,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
+ #hip_check(hip.hipMemsetAsync(cfl_data,0,num_bytes,self.stream))
+ hip_check(hip.hipStreamSynchronize(stream))
+
+ min_value = x_temp.flatten()[indx_h[0]-1]
+
+ # clean up
+ hip_check(hip.hipStreamDestroy(stream))
+ hip_check(hip.hipFree(cfl_data))
+ return min_value
+
+ def computeDt(self):
+ #max_dt = gpuarray.min(self.cfl_data, stream=self.stream).get();
+ max_dt = self.min_hipblas(self.cfl_data.size, self.cfl_data, self.stream)
+ return max_dt*0.5
diff --git a/GPUSimulators/__init__.py b/GPUSimulators/__init__.py
new file mode 100644
index 0000000..4e04e36
--- /dev/null
+++ b/GPUSimulators/__init__.py
@@ -0,0 +1,5 @@
+#!/bin/env python
+# -*- coding: utf-8 -*-
+
+
+# Nothing general to do
diff --git a/GPUSimulators/__pycache__/MPISimulator.cpython-39.pyc b/GPUSimulators/__pycache__/MPISimulator.cpython-39.pyc
new file mode 100644
index 0000000..da4daac
Binary files /dev/null and b/GPUSimulators/__pycache__/MPISimulator.cpython-39.pyc differ
diff --git a/GPUSimulators/__pycache__/Simulator.cpython-39.pyc b/GPUSimulators/__pycache__/Simulator.cpython-39.pyc
new file mode 100644
index 0000000..dc16706
Binary files /dev/null and b/GPUSimulators/__pycache__/Simulator.cpython-39.pyc differ
diff --git a/GPUSimulators/__pycache__/__init__.cpython-39.pyc b/GPUSimulators/__pycache__/__init__.cpython-39.pyc
new file mode 100644
index 0000000..a966589
Binary files /dev/null and b/GPUSimulators/__pycache__/__init__.cpython-39.pyc differ
diff --git a/GPUSimulators/cuda/EE2D_KP07_dimsplit.cu b/GPUSimulators/cuda/EE2D_KP07_dimsplit.cu
new file mode 100644
index 0000000..238718c
--- /dev/null
+++ b/GPUSimulators/cuda/EE2D_KP07_dimsplit.cu
@@ -0,0 +1,250 @@
+ /*
+This kernel implements the Central Upwind flux function to
+solve the Euler equations
+
+Copyright (C) 2018 SINTEF Digital
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see .
+*/
+
+
+
+#include "common.h"
+#include "EulerCommon.h"
+#include "limiters.h"
+
+
+__device__
+void computeFluxF(float Q[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
+ float Qx[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
+ float F[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
+ const float gamma_, const float dx_, const float dt_) {
+ for (int j=threadIdx.y; j( rho0_ptr_, rho0_pitch_, Q[0], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
+ readBlock<w, h, gc_x, gc_y, -1, 1>(rho_u0_ptr_, rho_u0_pitch_, Q[1], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
+ readBlock<w, h, gc_x, gc_y, 1, -1>(rho_v0_ptr_, rho_v0_pitch_, Q[2], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
+ readBlock<w, h, gc_x, gc_y, 1, 1>( E0_ptr_, E0_pitch_, Q[3], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
+
+ //Step 0 => evolve x first, then y
+ if (step_ == 0) {
+ //Compute fluxes along the x axis and evolve
+ minmodSlopeX(Q, Qx, theta_);
+ __syncthreads();
+ computeFluxF(Q, Qx, F, gamma_, dx_, dt_);
+ __syncthreads();
+ evolveF(Q, F, dx_, dt_);
+ __syncthreads();
+
+ //Compute fluxes along the y axis and evolve
+ minmodSlopeY(Q, Qx, theta_);
+ __syncthreads();
+ computeFluxG(Q, Qx, F, gamma_, dy_, dt_);
+ __syncthreads();
+ evolveG(Q, F, dy_, dt_);
+ __syncthreads();
+
+ //Gravity source term
+ if (g_ > 0.0f) {
+ const int i = threadIdx.x + gc_x;
+ const int j = threadIdx.y + gc_y;
+ const float rho_v = Q[2][j][i];
+ Q[2][j][i] -= g_*Q[0][j][i]*dt_;
+ Q[3][j][i] -= g_*rho_v*dt_;
+ __syncthreads();
+ }
+ }
+ //Step 1 => evolve y first, then x
+ else {
+ //Compute fluxes along the y axis and evolve
+ minmodSlopeY(Q, Qx, theta_);
+ __syncthreads();
+ computeFluxG(Q, Qx, F, gamma_, dy_, dt_);
+ __syncthreads();
+ evolveG(Q, F, dy_, dt_);
+ __syncthreads();
+
+ //Compute fluxes along the x axis and evolve
+ minmodSlopeX(Q, Qx, theta_);
+ __syncthreads();
+ computeFluxF(Q, Qx, F, gamma_, dx_, dt_);
+ __syncthreads();
+ evolveF(Q, F, dx_, dt_);
+ __syncthreads();
+
+ //Gravity source term
+ if (g_ > 0.0f) {
+ const int i = threadIdx.x + gc_x;
+ const int j = threadIdx.y + gc_y;
+ const float rho_v = Q[2][j][i];
+ Q[2][j][i] -= g_*Q[0][j][i]*dt_;
+ Q[3][j][i] -= g_*rho_v*dt_;
+ __syncthreads();
+ }
+ }
+
+
+ // Write to main memory for all internal cells
+ writeBlock( rho1_ptr_, rho1_pitch_, Q[0], nx_, ny_, 0, 1, x0, y0, x1, y1);
+ writeBlock(rho_u1_ptr_, rho_u1_pitch_, Q[1], nx_, ny_, 0, 1, x0, y0, x1, y1);
+ writeBlock(rho_v1_ptr_, rho_v1_pitch_, Q[2], nx_, ny_, 0, 1, x0, y0, x1, y1);
+ writeBlock( E1_ptr_, E1_pitch_, Q[3], nx_, ny_, 0, 1, x0, y0, x1, y1);
+
+ //Compute the CFL for this block
+ if (cfl_ != NULL) {
+ writeCfl(Q, F[0], nx_, ny_, dx_, dy_, gamma_, cfl_);
+ }
+}
+
+
+} // extern "C"
\ No newline at end of file
diff --git a/GPUSimulators/cuda/EE2D_KP07_dimsplit.cu.hip b/GPUSimulators/cuda/EE2D_KP07_dimsplit.cu.hip
new file mode 100644
index 0000000..67b701b
--- /dev/null
+++ b/GPUSimulators/cuda/EE2D_KP07_dimsplit.cu.hip
@@ -0,0 +1,251 @@
+#include "hip/hip_runtime.h"
+ /*
+This kernel implements the Central Upwind flux function to
+solve the Euler equations
+
+Copyright (C) 2018 SINTEF Digital
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see .
+*/
+
+
+
+#include "common.h"
+#include "EulerCommon.h"
+#include "limiters.h"
+
+
+__device__
+void computeFluxF(float Q[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
+ float Qx[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
+ float F[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
+ const float gamma_, const float dx_, const float dt_) {
+ for (int j=threadIdx.y; j( rho0_ptr_, rho0_pitch_, Q[0], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
+ readBlock<w, h, gc_x, gc_y, -1, 1>(rho_u0_ptr_, rho_u0_pitch_, Q[1], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
+ readBlock<w, h, gc_x, gc_y, 1, -1>(rho_v0_ptr_, rho_v0_pitch_, Q[2], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
+ readBlock<w, h, gc_x, gc_y, 1, 1>( E0_ptr_, E0_pitch_, Q[3], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
+
+ //Step 0 => evolve x first, then y
+ if (step_ == 0) {
+ //Compute fluxes along the x axis and evolve
+ minmodSlopeX(Q, Qx, theta_);
+ __syncthreads();
+ computeFluxF(Q, Qx, F, gamma_, dx_, dt_);
+ __syncthreads();
+ evolveF(Q, F, dx_, dt_);
+ __syncthreads();
+
+ //Compute fluxes along the y axis and evolve
+ minmodSlopeY(Q, Qx, theta_);
+ __syncthreads();
+ computeFluxG(Q, Qx, F, gamma_, dy_, dt_);
+ __syncthreads();
+ evolveG(Q, F, dy_, dt_);
+ __syncthreads();
+
+ //Gravity source term
+ if (g_ > 0.0f) {
+ const int i = threadIdx.x + gc_x;
+ const int j = threadIdx.y + gc_y;
+ const float rho_v = Q[2][j][i];
+ Q[2][j][i] -= g_*Q[0][j][i]*dt_;
+ Q[3][j][i] -= g_*rho_v*dt_;
+ __syncthreads();
+ }
+ }
+ //Step 1 => evolve y first, then x
+ else {
+ //Compute fluxes along the y axis and evolve
+ minmodSlopeY(Q, Qx, theta_);
+ __syncthreads();
+ computeFluxG(Q, Qx, F, gamma_, dy_, dt_);
+ __syncthreads();
+ evolveG(Q, F, dy_, dt_);
+ __syncthreads();
+
+ //Compute fluxes along the x axis and evolve
+ minmodSlopeX(Q, Qx, theta_);
+ __syncthreads();
+ computeFluxF(Q, Qx, F, gamma_, dx_, dt_);
+ __syncthreads();
+ evolveF(Q, F, dx_, dt_);
+ __syncthreads();
+
+ //Gravity source term
+ if (g_ > 0.0f) {
+ const int i = threadIdx.x + gc_x;
+ const int j = threadIdx.y + gc_y;
+ const float rho_v = Q[2][j][i];
+ Q[2][j][i] -= g_*Q[0][j][i]*dt_;
+ Q[3][j][i] -= g_*rho_v*dt_;
+ __syncthreads();
+ }
+ }
+
+
+ // Write to main memory for all internal cells
+ writeBlock( rho1_ptr_, rho1_pitch_, Q[0], nx_, ny_, 0, 1, x0, y0, x1, y1);
+ writeBlock(rho_u1_ptr_, rho_u1_pitch_, Q[1], nx_, ny_, 0, 1, x0, y0, x1, y1);
+ writeBlock(rho_v1_ptr_, rho_v1_pitch_, Q[2], nx_, ny_, 0, 1, x0, y0, x1, y1);
+ writeBlock( E1_ptr_, E1_pitch_, Q[3], nx_, ny_, 0, 1, x0, y0, x1, y1);
+
+ //Compute the CFL for this block
+ if (cfl_ != NULL) {
+ writeCfl(Q, F[0], nx_, ny_, dx_, dy_, gamma_, cfl_);
+ }
+}
+
+
+} // extern "C"
\ No newline at end of file
diff --git a/GPUSimulators/cuda/EulerCommon.h b/GPUSimulators/cuda/EulerCommon.h
new file mode 100644
index 0000000..cb22a53
--- /dev/null
+++ b/GPUSimulators/cuda/EulerCommon.h
@@ -0,0 +1,187 @@
+/*
+These CUDA functions implement different types of numerical flux
+functions for the shallow water equations
+
+Copyright (C) 2016, 2017, 2018 SINTEF Digital
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see .
+*/
+
+#pragma once
+#include "limiters.h"
+
+
+
+template<int w, int h, int gc_x, int gc_y, int vars>
+__device__ void writeCfl(float Q[vars][h+2*gc_y][w+2*gc_x],
+ float shmem[h+2*gc_y][w+2*gc_x],
+ const int nx_, const int ny_,
+ const float dx_, const float dy_, const float gamma_,
+ float* output_) {
+ //Index of thread within block
+ const int tx = threadIdx.x + gc_x;
+ const int ty = threadIdx.y + gc_y;
+
+ //Index of cell within domain
+ const int ti = blockDim.x*blockIdx.x + tx;
+ const int tj = blockDim.y*blockIdx.y + ty;
+
+ //Only internal cells
+ if (ti < nx_+gc_x && tj < ny_+gc_y) {
+ const float rho = Q[0][ty][tx];
+ const float u = Q[1][ty][tx] / rho;
+ const float v = Q[2][ty][tx] / rho;
+
+ const float max_u = dx_ / (fabsf(u) + sqrtf(gamma_*rho));
+ const float max_v = dy_ / (fabsf(v) + sqrtf(gamma_*rho));
+
+ shmem[ty][tx] = fminf(max_u, max_v);
+ }
+ __syncthreads();
+
+ //One row of threads loop over all rows
+ if (ti < nx_+gc_x && tj < ny_+gc_y) {
+ if (ty == gc_y) {
+ float min_val = shmem[ty][tx];
+ const int max_y = min(h, ny_+gc_y - tj);
+ for (int j=gc_y; j h_l) ? q_l_tmp : 1.0f;
+ const float q_r = (h_dag > h_r) ? q_r_tmp : 1.0f;
+
+ // Compute wave speed estimates
+ const float S_l = u_l - c_l*q_l;
+ const float S_r = u_r + c_r*q_r;
+
+ //Upwind selection
+ if (S_l >= 0.0f) {
+ return F_func(Q_l, P_l);
+ }
+ else if (S_r <= 0.0f) {
+ return F_func(Q_r, P_r);
+ }
+ //Or estimate flux in the star region
+ else {
+ const float4 F_l = F_func(Q_l, P_l);
+ const float4 F_r = F_func(Q_r, P_r);
+ const float4 flux = (S_r*F_l - S_l*F_r + S_r*S_l*(Q_r - Q_l)) / (S_r-S_l);
+ return flux;
+ }
+}
+
+
+
+
+
+
+
+/**
+ * Central upwind flux function
+ */
+__device__ float4 CentralUpwindFlux(const float4 Qm, const float4 Qp, const float gamma) {
+
+ const float Pp = pressure(Qp, gamma);
+ const float4 Fp = F_func(Qp, Pp);
+ const float up = Qp.y / Qp.x; // rho*u / rho
+ const float cp = sqrt(gamma*Pp/Qp.x); // sqrt(gamma*P/rho)
+
+ const float Pm = pressure(Qm, gamma);
+ const float4 Fm = F_func(Qm, Pm);
+ const float um = Qm.y / Qm.x; // rho*u / rho
+ const float cm = sqrt(gamma*Pm/Qm.x); // sqrt(gamma*P/rho)
+
+ const float am = min(min(um-cm, up-cp), 0.0f); // largest negative wave speed
+ const float ap = max(max(um+cm, up+cp), 0.0f); // largest positive wave speed
+
+ return ((ap*Fm - am*Fp) + ap*am*(Qp-Qm))/(ap-am);
+}
\ No newline at end of file
diff --git a/GPUSimulators/cuda/SWE2D_FORCE.cu b/GPUSimulators/cuda/SWE2D_FORCE.cu
new file mode 100644
index 0000000..dac46be
--- /dev/null
+++ b/GPUSimulators/cuda/SWE2D_FORCE.cu
@@ -0,0 +1,143 @@
+/*
+This OpenCL kernel implements the classical Lax-Friedrichs scheme
+for the shallow water equations, with edge fluxes.
+
+Copyright (C) 2016 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see .
+*/
+
+
+#include "common.h"
+#include "SWECommon.h"
+
+
+/**
+ * Computes the flux along the x axis for all faces
+ */
+__device__
+void computeFluxF(float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
+ float F[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
+ const float g_, const float dx_, const float dt_) {
+ //Compute fluxes along the x axis
+ for (int j=threadIdx.y; j( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
+ readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
+ readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
+ __syncthreads();
+
+ //Compute flux along x, and evolve
+ computeFluxF(Q, F, g_, dx_, dt_);
+ __syncthreads();
+ evolveF(Q, F, dx_, dt_);
+ __syncthreads();
+
+ //Compute flux along y, and evolve
+ computeFluxG(Q, F, g_, dy_, dt_);
+ __syncthreads();
+ evolveG(Q, F, dy_, dt_);
+ __syncthreads();
+
+ //Write to main memory
+ writeBlock( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1);
+ writeBlock(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
+ writeBlock(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
+
+ //Compute the CFL for this block
+ if (cfl_ != NULL) {
+ writeCfl(Q, F[0], nx_, ny_, dx_, dy_, g_, cfl_);
+ }
+}
+
+} // extern "C"
\ No newline at end of file
diff --git a/GPUSimulators/cuda/SWE2D_FORCE.cu.hip b/GPUSimulators/cuda/SWE2D_FORCE.cu.hip
new file mode 100644
index 0000000..aa4e968
--- /dev/null
+++ b/GPUSimulators/cuda/SWE2D_FORCE.cu.hip
@@ -0,0 +1,144 @@
+#include "hip/hip_runtime.h"
+/*
+This OpenCL kernel implements the classical Lax-Friedrichs scheme
+for the shallow water equations, with edge fluxes.
+
+Copyright (C) 2016 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see .
+*/
+
+
+#include "common.h"
+#include "SWECommon.h"
+
+
+/**
+ * Computes the flux along the x axis for all faces
+ */
+__device__
+void computeFluxF(float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
+ float F[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
+ const float g_, const float dx_, const float dt_) {
+ //Compute fluxes along the x axis
+ for (int j=threadIdx.y; j( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
+ readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
+ readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
+ __syncthreads();
+
+ //Compute flux along x, and evolve
+ computeFluxF(Q, F, g_, dx_, dt_);
+ __syncthreads();
+ evolveF(Q, F, dx_, dt_);
+ __syncthreads();
+
+ //Compute flux along y, and evolve
+ computeFluxG(Q, F, g_, dy_, dt_);
+ __syncthreads();
+ evolveG(Q, F, dy_, dt_);
+ __syncthreads();
+
+ //Write to main memory
+ writeBlock( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1);
+ writeBlock(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
+ writeBlock(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
+
+ //Compute the CFL for this block
+ if (cfl_ != NULL) {
+ writeCfl(Q, F[0], nx_, ny_, dx_, dy_, g_, cfl_);
+ }
+}
+
+} // extern "C"
\ No newline at end of file
diff --git a/GPUSimulators/cuda/SWE2D_HLL.cu b/GPUSimulators/cuda/SWE2D_HLL.cu
new file mode 100644
index 0000000..3ed6b35
--- /dev/null
+++ b/GPUSimulators/cuda/SWE2D_HLL.cu
@@ -0,0 +1,161 @@
+/*
+This GPU kernel implements the HLL flux
+
+Copyright (C) 2016 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see .
+*/
+
+
+
+#include "common.h"
+#include "SWECommon.h"
+
+
+
+
+
+/**
+ * Computes the flux along the x axis for all faces
+ */
+__device__
+void computeFluxF(float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
+ float F[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
+ const float g_) {
+ for (int j=threadIdx.y; j( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
+ readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
+ readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
+
+ //Compute F flux
+ computeFluxF(Q, F, g_);
+ __syncthreads();
+
+ evolveF(Q, F, dx_, dt_);
+ __syncthreads();
+
+ //Compute G flux
+ computeFluxG(Q, F, g_);
+ __syncthreads();
+
+ evolveG(Q, F, dy_, dt_);
+ __syncthreads();
+
+ // Write to main memory for all internal cells
+ writeBlock( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1);
+ writeBlock(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
+ writeBlock(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
+
+ //Compute the CFL for this block
+ if (cfl_ != NULL) {
+ writeCfl(Q, F[0], nx_, ny_, dx_, dy_, g_, cfl_);
+ }
+}
+
+} // extern "C"
\ No newline at end of file
diff --git a/GPUSimulators/cuda/SWE2D_HLL.cu.hip b/GPUSimulators/cuda/SWE2D_HLL.cu.hip
new file mode 100644
index 0000000..c2f449d
--- /dev/null
+++ b/GPUSimulators/cuda/SWE2D_HLL.cu.hip
@@ -0,0 +1,162 @@
+#include "hip/hip_runtime.h"
+/*
+This GPU kernel implements the HLL flux
+
+Copyright (C) 2016 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see .
+*/
+
+
+
+#include "common.h"
+#include "SWECommon.h"
+
+
+
+
+
+/**
+ * Computes the flux along the x axis for all faces
+ */
+__device__
+void computeFluxF(float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
+ float F[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
+ const float g_) {
+ for (int j=threadIdx.y; j( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
+ readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
+ readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
+
+ //Compute F flux
+ computeFluxF(Q, F, g_);
+ __syncthreads();
+
+ evolveF(Q, F, dx_, dt_);
+ __syncthreads();
+
+ //Compute G flux
+ computeFluxG(Q, F, g_);
+ __syncthreads();
+
+ evolveG(Q, F, dy_, dt_);
+ __syncthreads();
+
+ // Write to main memory for all internal cells
+ writeBlock( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1);
+ writeBlock(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
+ writeBlock(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
+
+ //Compute the CFL for this block
+ if (cfl_ != NULL) {
+ writeCfl(Q, F[0], nx_, ny_, dx_, dy_, g_, cfl_);
+ }
+}
+
+} // extern "C"
\ No newline at end of file
diff --git a/GPUSimulators/cuda/SWE2D_HLL2.cu b/GPUSimulators/cuda/SWE2D_HLL2.cu
new file mode 100644
index 0000000..94f92b5
--- /dev/null
+++ b/GPUSimulators/cuda/SWE2D_HLL2.cu
@@ -0,0 +1,216 @@
+/*
+This OpenCL kernel implements the second order HLL flux
+
+Copyright (C) 2016 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see .
+*/
+
+
+#include "common.h"
+#include "SWECommon.h"
+#include "limiters.h"
+
+
+
+
+
+
+
+/**
+ * Computes the flux along the x axis for all faces
+ */
+__device__
+void computeFluxF(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
+ float Qx[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
+ float F[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
+ const float g_, const float dx_, const float dt_) {
+ for (int j=threadIdx.y; j( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
+ readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
+ readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
+
+ //Step 0 => evolve x first, then y
+ if (step_ == 0) {
+ //Compute fluxes along the x axis and evolve
+ minmodSlopeX(Q, Qx, theta_);
+ __syncthreads();
+ computeFluxF(Q, Qx, F, g_, dx_, dt_);
+ __syncthreads();
+ evolveF(Q, F, dx_, dt_);
+ __syncthreads();
+
+ //Compute fluxes along the y axis and evolve
+ minmodSlopeY(Q, Qx, theta_);
+ __syncthreads();
+ computeFluxG(Q, Qx, F, g_, dy_, dt_);
+ __syncthreads();
+ evolveG(Q, F, dy_, dt_);
+ __syncthreads();
+ }
+ //Step 1 => evolve y first, then x
+ else {
+ //Compute fluxes along the y axis and evolve
+ minmodSlopeY(Q, Qx, theta_);
+ __syncthreads();
+ computeFluxG(Q, Qx, F, g_, dy_, dt_);
+ __syncthreads();
+ evolveG(Q, F, dy_, dt_);
+ __syncthreads();
+
+ //Compute fluxes along the x axis and evolve
+ minmodSlopeX(Q, Qx, theta_);
+ __syncthreads();
+ computeFluxF(Q, Qx, F, g_, dx_, dt_);
+ __syncthreads();
+ evolveF(Q, F, dx_, dt_);
+ __syncthreads();
+ }
+
+
+
+
+ // Write to main memory for all internal cells
+ writeBlock( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1);
+ writeBlock(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
+ writeBlock(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
+
+ //Compute the CFL for this block
+ if (cfl_ != NULL) {
+ writeCfl(Q, F[0], nx_, ny_, dx_, dy_, g_, cfl_);
+ }
+}
+
+} // extern "C"
\ No newline at end of file
diff --git a/GPUSimulators/cuda/SWE2D_HLL2.cu.hip b/GPUSimulators/cuda/SWE2D_HLL2.cu.hip
new file mode 100644
index 0000000..c0bc9d1
--- /dev/null
+++ b/GPUSimulators/cuda/SWE2D_HLL2.cu.hip
@@ -0,0 +1,217 @@
+#include "hip/hip_runtime.h"
+/*
+This OpenCL kernel implements the second order HLL flux
+
+Copyright (C) 2016 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see .
+*/
+
+
+#include "common.h"
+#include "SWECommon.h"
+#include "limiters.h"
+
+
+
+
+
+
+
+/**
+ * Computes the flux along the x axis for all faces
+ */
+__device__
+void computeFluxF(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
+ float Qx[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
+ float F[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
+ const float g_, const float dx_, const float dt_) {
+ for (int j=threadIdx.y; j( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
+ readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
+ readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
+
+ //Step 0 => evolve x first, then y
+ if (step_ == 0) {
+ //Compute fluxes along the x axis and evolve
+ minmodSlopeX(Q, Qx, theta_);
+ __syncthreads();
+ computeFluxF(Q, Qx, F, g_, dx_, dt_);
+ __syncthreads();
+ evolveF(Q, F, dx_, dt_);
+ __syncthreads();
+
+ //Compute fluxes along the y axis and evolve
+ minmodSlopeY(Q, Qx, theta_);
+ __syncthreads();
+ computeFluxG(Q, Qx, F, g_, dy_, dt_);
+ __syncthreads();
+ evolveG(Q, F, dy_, dt_);
+ __syncthreads();
+ }
+ //Step 1 => evolve y first, then x
+ else {
+ //Compute fluxes along the y axis and evolve
+ minmodSlopeY(Q, Qx, theta_);
+ __syncthreads();
+ computeFluxG(Q, Qx, F, g_, dy_, dt_);
+ __syncthreads();
+ evolveG(Q, F, dy_, dt_);
+ __syncthreads();
+
+ //Compute fluxes along the x axis and evolve
+ minmodSlopeX(Q, Qx, theta_);
+ __syncthreads();
+ computeFluxF(Q, Qx, F, g_, dx_, dt_);
+ __syncthreads();
+ evolveF(Q, F, dx_, dt_);
+ __syncthreads();
+ }
+
+
+
+
+ // Write to main memory for all internal cells
+ writeBlock( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1);
+ writeBlock(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
+ writeBlock(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
+
+ //Compute the CFL for this block
+ if (cfl_ != NULL) {
+ writeCfl(Q, F[0], nx_, ny_, dx_, dy_, g_, cfl_);
+ }
+}
+
+} // extern "C"
\ No newline at end of file
diff --git a/GPUSimulators/cuda/SWE2D_KP07.cu b/GPUSimulators/cuda/SWE2D_KP07.cu
new file mode 100644
index 0000000..6fa6154
--- /dev/null
+++ b/GPUSimulators/cuda/SWE2D_KP07.cu
@@ -0,0 +1,233 @@
+/*
+This OpenCL kernel implements the Kurganov-Petrova numerical scheme
+for the shallow water equations, described in
+A. Kurganov & Guergana Petrova
+A Second-Order Well-Balanced Positivity Preserving Central-Upwind
+Scheme for the Saint-Venant System Communications in Mathematical
+Sciences, 5 (2007), 133-160.
+
+Copyright (C) 2016 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see .
+*/
+
+
+
+#include "common.h"
+#include "SWECommon.h"
+#include "limiters.h"
+
+
+__device__
+void computeFluxF(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
+ float Qx[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
+ float F[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1],
+ const float g_) {
+ //Index of thread within block
+ const int tx = threadIdx.x;
+ const int ty = threadIdx.y;
+
+ {
+ int j=ty;
+ const int l = j + 2; //Skip ghost cells
+ for (int i=tx; i( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
+ readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
+ readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
+
+
+ //Reconstruct slopes along x and axis
+ minmodSlopeX(Q, Qx, theta_);
+ minmodSlopeY(Q, Qy, theta_);
+ __syncthreads();
+
+
+ //Compute fluxes along the x and y axis
+ computeFluxF(Q, Qx, F, g_);
+ computeFluxG(Q, Qy, G, g_);
+ __syncthreads();
+
+
+ //Sum fluxes and advance in time for all internal cells
+ if (ti > 1 && ti < nx_+2 && tj > 1 && tj < ny_+2) {
+ const int i = tx + 2; //Skip local ghost cells, i.e., +2
+ const int j = ty + 2;
+
+ Q[0][j][i] += (F[0][ty][tx] - F[0][ty ][tx+1]) * dt_ / dx_
+ + (G[0][ty][tx] - G[0][ty+1][tx ]) * dt_ / dy_;
+ Q[1][j][i] += (F[1][ty][tx] - F[1][ty ][tx+1]) * dt_ / dx_
+ + (G[1][ty][tx] - G[1][ty+1][tx ]) * dt_ / dy_;
+ Q[2][j][i] += (F[2][ty][tx] - F[2][ty ][tx+1]) * dt_ / dx_
+ + (G[2][ty][tx] - G[2][ty+1][tx ]) * dt_ / dy_;
+
+ float* const h_row = (float*) ((char*) h1_ptr_ + h1_pitch_*tj);
+ float* const hu_row = (float*) ((char*) hu1_ptr_ + hu1_pitch_*tj);
+ float* const hv_row = (float*) ((char*) hv1_ptr_ + hv1_pitch_*tj);
+
+ if (getOrder(step_order_) == 2 && getStep(step_order_) == 1) {
+ //Write to main memory
+ h_row[ti] = 0.5f*(h_row[ti] + Q[0][j][i]);
+ hu_row[ti] = 0.5f*(hu_row[ti] + Q[1][j][i]);
+ hv_row[ti] = 0.5f*(hv_row[ti] + Q[2][j][i]);
+ }
+ else {
+ h_row[ti] = Q[0][j][i];
+ hu_row[ti] = Q[1][j][i];
+ hv_row[ti] = Q[2][j][i];
+ }
+ }
+
+ //Compute the CFL for this block
+ if (cfl_ != NULL) {
+ writeCfl(Q, Q[0], nx_, ny_, dx_, dy_, g_, cfl_);
+ }
+}
+} //extern "C"
\ No newline at end of file
diff --git a/GPUSimulators/cuda/SWE2D_KP07.cu.hip b/GPUSimulators/cuda/SWE2D_KP07.cu.hip
new file mode 100644
index 0000000..fd9ef0d
--- /dev/null
+++ b/GPUSimulators/cuda/SWE2D_KP07.cu.hip
@@ -0,0 +1,234 @@
+#include "hip/hip_runtime.h"
+/*
+This OpenCL kernel implements the Kurganov-Petrova numerical scheme
+for the shallow water equations, described in
+A. Kurganov & Guergana Petrova
+A Second-Order Well-Balanced Positivity Preserving Central-Upwind
+Scheme for the Saint-Venant System Communications in Mathematical
+Sciences, 5 (2007), 133-160.
+
+Copyright (C) 2016 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see .
+*/
+
+
+
+#include "common.h"
+#include "SWECommon.h"
+#include "limiters.h"
+
+
+__device__
+void computeFluxF(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
+ float Qx[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
+ float F[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1],
+ const float g_) {
+ //Index of thread within block
+ const int tx = threadIdx.x;
+ const int ty = threadIdx.y;
+
+ {
+ int j=ty;
+ const int l = j + 2; //Skip ghost cells
+ for (int i=tx; i( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
+ readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
+ readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
+
+
+ //Reconstruct slopes along x and axis
+ minmodSlopeX(Q, Qx, theta_);
+ minmodSlopeY(Q, Qy, theta_);
+ __syncthreads();
+
+
+ //Compute fluxes along the x and y axis
+ computeFluxF(Q, Qx, F, g_);
+ computeFluxG(Q, Qy, G, g_);
+ __syncthreads();
+
+
+ //Sum fluxes and advance in time for all internal cells
+ if (ti > 1 && ti < nx_+2 && tj > 1 && tj < ny_+2) {
+ const int i = tx + 2; //Skip local ghost cells, i.e., +2
+ const int j = ty + 2;
+
+ Q[0][j][i] += (F[0][ty][tx] - F[0][ty ][tx+1]) * dt_ / dx_
+ + (G[0][ty][tx] - G[0][ty+1][tx ]) * dt_ / dy_;
+ Q[1][j][i] += (F[1][ty][tx] - F[1][ty ][tx+1]) * dt_ / dx_
+ + (G[1][ty][tx] - G[1][ty+1][tx ]) * dt_ / dy_;
+ Q[2][j][i] += (F[2][ty][tx] - F[2][ty ][tx+1]) * dt_ / dx_
+ + (G[2][ty][tx] - G[2][ty+1][tx ]) * dt_ / dy_;
+
+ float* const h_row = (float*) ((char*) h1_ptr_ + h1_pitch_*tj);
+ float* const hu_row = (float*) ((char*) hu1_ptr_ + hu1_pitch_*tj);
+ float* const hv_row = (float*) ((char*) hv1_ptr_ + hv1_pitch_*tj);
+
+ if (getOrder(step_order_) == 2 && getStep(step_order_) == 1) {
+ //Write to main memory
+ h_row[ti] = 0.5f*(h_row[ti] + Q[0][j][i]);
+ hu_row[ti] = 0.5f*(hu_row[ti] + Q[1][j][i]);
+ hv_row[ti] = 0.5f*(hv_row[ti] + Q[2][j][i]);
+ }
+ else {
+ h_row[ti] = Q[0][j][i];
+ hu_row[ti] = Q[1][j][i];
+ hv_row[ti] = Q[2][j][i];
+ }
+ }
+
+ //Compute the CFL for this block
+ if (cfl_ != NULL) {
+ writeCfl(Q, Q[0], nx_, ny_, dx_, dy_, g_, cfl_);
+ }
+}
+} //extern "C"
\ No newline at end of file
diff --git a/GPUSimulators/cuda/SWE2D_KP07_dimsplit.cu b/GPUSimulators/cuda/SWE2D_KP07_dimsplit.cu
new file mode 100644
index 0000000..ac256e3
--- /dev/null
+++ b/GPUSimulators/cuda/SWE2D_KP07_dimsplit.cu
@@ -0,0 +1,216 @@
+/*
+This OpenCL kernel implements the Kurganov-Petrova numerical scheme
+for the shallow water equations, described in
+A. Kurganov & Guergana Petrova
+A Second-Order Well-Balanced Positivity Preserving Central-Upwind
+Scheme for the Saint-Venant System Communications in Mathematical
+Sciences, 5 (2007), 133-160.
+
+Copyright (C) 2016 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see .
+*/
+
+
+
+#include "common.h"
+#include "SWECommon.h"
+#include "limiters.h"
+
+
+template<int w, int h, int gc_x, int gc_y>
+__device__
+void computeFluxF(float Q[3][h+2*gc_y][w+2*gc_x],
+ float Qx[3][h+2*gc_y][w+2*gc_x],
+ float F[3][h+2*gc_y][w+2*gc_x],
+ const float g_, const float dx_, const float dt_) {
+ for (int j=threadIdx.y; j
+
+template<int w, int h, int gc_x, int gc_y>
+__device__
+void computeFluxG(float Q[3][h+2*gc_y][w+2*gc_x],
+ float Qy[3][h+2*gc_y][w+2*gc_x],
+ float G[3][h+2*gc_y][w+2*gc_x],
+ const float g_, const float dy_, const float dt_) {
+ for (int j=threadIdx.y+1; j( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
+ readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
+ readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
+
+ if (step_ == 0) {
+ //Along X
+ minmodSlopeX(Q, Qx, theta_);
+ __syncthreads();
+ computeFluxF(Q, Qx, F, g_, dx_, dt_);
+ __syncthreads();
+ evolveF(Q, F, dx_, dt_);
+ __syncthreads();
+
+ //Along Y
+ minmodSlopeY(Q, Qx, theta_);
+ __syncthreads();
+ computeFluxG(Q, Qx, F, g_, dy_, dt_);
+ __syncthreads();
+ evolveG(Q, F, dy_, dt_);
+ __syncthreads();
+ }
+ else {
+ //Along Y
+ minmodSlopeY(Q, Qx, theta_);
+ __syncthreads();
+ computeFluxG(Q, Qx, F, g_, dy_, dt_);
+ __syncthreads();
+ evolveG(Q, F, dy_, dt_);
+ __syncthreads();
+
+ //Along X
+ minmodSlopeX(Q, Qx, theta_);
+ __syncthreads();
+ computeFluxF(Q, Qx, F, g_, dx_, dt_);
+ __syncthreads();
+ evolveF(Q, F, dx_, dt_);
+ __syncthreads();
+ }
+
+ // Write to main memory for all internal cells
+ writeBlock( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1);
+ writeBlock(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
+ writeBlock(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
+
+ //Compute the CFL for this block
+ if (cfl_ != NULL) {
+ writeCfl(Q, F[0], nx_, ny_, dx_, dy_, g_, cfl_);
+ }
+}
+
+
+
+
+
+
+
+
+
+
+} // extern "C"
\ No newline at end of file
diff --git a/GPUSimulators/cuda/SWE2D_KP07_dimsplit.cu.hip b/GPUSimulators/cuda/SWE2D_KP07_dimsplit.cu.hip
new file mode 100644
index 0000000..f366b0a
--- /dev/null
+++ b/GPUSimulators/cuda/SWE2D_KP07_dimsplit.cu.hip
@@ -0,0 +1,217 @@
+#include "hip/hip_runtime.h"
+/*
+This HIP kernel implements the Kurganov-Petrova numerical scheme
+for the shallow water equations, described in
+A. Kurganov & Guergana Petrova
+A Second-Order Well-Balanced Positivity Preserving Central-Upwind
+Scheme for the Saint-Venant System, Communications in Mathematical
+Sciences, 5 (2007), 133-160.
+
+Copyright (C) 2016 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+
+
+#include "common.h"
+#include "SWECommon.h"
+#include "limiters.h"
+
+
+template<int w, int h, int gc_x, int gc_y>
+__device__
+void computeFluxF(float Q[3][h+2*gc_y][w+2*gc_x],
+ float Qx[3][h+2*gc_y][w+2*gc_x],
+ float F[3][h+2*gc_y][w+2*gc_x],
+ const float g_, const float dx_, const float dt_) {
+    for (int j=threadIdx.y; j<h+2*gc_y; j+=h) {
+        for (int i=threadIdx.x+1; i<w+2*gc_x-2; i+=w) {
+            // Reconstruct point values of Q at the interface between
+            // cell i and cell i+1
+            const float3 Qp = make_float3(Q[0][j][i+1] - 0.5f*Qx[0][j][i+1],
+                                          Q[1][j][i+1] - 0.5f*Qx[1][j][i+1],
+                                          Q[2][j][i+1] - 0.5f*Qx[2][j][i+1]);
+            const float3 Qm = make_float3(Q[0][j][i  ] + 0.5f*Qx[0][j][i  ],
+                                          Q[1][j][i  ] + 0.5f*Qx[1][j][i  ],
+                                          Q[2][j][i  ] + 0.5f*Qx[2][j][i  ]);
+
+            // Computed flux at interface i+1/2, stored at F[i+1]
+            const float3 flux = CentralUpwindFlux(Qm, Qp, g_);
+            F[0][j][i+1] = flux.x;
+            F[1][j][i+1] = flux.y;
+            F[2][j][i+1] = flux.z;
+        }
+    }
+}
+
+template<int w, int h, int gc_x, int gc_y>
+__device__
+void computeFluxG(float Q[3][h+2*gc_y][w+2*gc_x],
+ float Qy[3][h+2*gc_y][w+2*gc_x],
+ float G[3][h+2*gc_y][w+2*gc_x],
+ const float g_, const float dy_, const float dt_) {
+    for (int j=threadIdx.y+1; j<h+2*gc_y-2; j+=h) {
+        for (int i=threadIdx.x; i<w+2*gc_x; i+=w) {
+            // Reconstruct point values of Q at the interface between rows
+            // j and j+1; note that hu and hv are swapped ("transposing" the domain)
+            const float3 Qp = make_float3(Q[0][j+1][i] - 0.5f*Qy[0][j+1][i],
+                                          Q[2][j+1][i] - 0.5f*Qy[2][j+1][i],
+                                          Q[1][j+1][i] - 0.5f*Qy[1][j+1][i]);
+            const float3 Qm = make_float3(Q[0][j  ][i] + 0.5f*Qy[0][j  ][i],
+                                          Q[2][j  ][i] + 0.5f*Qy[2][j  ][i],
+                                          Q[1][j  ][i] + 0.5f*Qy[1][j  ][i]);
+
+            // Computed flux at interface j+1/2, with hu and hv swapped back
+            const float3 flux = CentralUpwindFlux(Qm, Qp, g_);
+            G[0][j+1][i] = flux.x;
+            G[1][j+1][i] = flux.z;
+            G[2][j+1][i] = flux.y;
+        }
+    }
+}
+
+
+
+
+
+extern "C" {
+__global__ void KP07DimsplitKernel(
+        int nx_, int ny_,
+        float dx_, float dy_, float dt_,
+        float g_,
+        float theta_,
+        int step_,
+        int boundary_conditions_,
+        //Input h^n
+        float* h0_ptr_, int h0_pitch_,
+        float* hu0_ptr_, int hu0_pitch_,
+        float* hv0_ptr_, int hv0_pitch_,
+        //Output h^{n+1}
+        float* h1_ptr_, int h1_pitch_,
+        float* hu1_ptr_, int hu1_pitch_,
+        float* hv1_ptr_, int hv1_pitch_,
+        //Output CFL
+        float* cfl_) {
+    const unsigned int w = BLOCK_WIDTH;
+    const unsigned int h = BLOCK_HEIGHT;
+    const unsigned int gc_x = 2;
+    const unsigned int gc_y = 2;
+
+    //Shared memory variables
+    __shared__ float Q[3][h+2*gc_y][w+2*gc_x];
+    __shared__ float Qx[3][h+2*gc_y][w+2*gc_x];
+    __shared__ float F[3][h+2*gc_y][w+2*gc_x];
+
+    //Read into shared memory
+    readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
+    readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
+    readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
+
+ if (step_ == 0) {
+ //Along X
+ minmodSlopeX(Q, Qx, theta_);
+ __syncthreads();
+ computeFluxF(Q, Qx, F, g_, dx_, dt_);
+ __syncthreads();
+ evolveF(Q, F, dx_, dt_);
+ __syncthreads();
+
+ //Along Y
+ minmodSlopeY(Q, Qx, theta_);
+ __syncthreads();
+ computeFluxG(Q, Qx, F, g_, dy_, dt_);
+ __syncthreads();
+ evolveG(Q, F, dy_, dt_);
+ __syncthreads();
+ }
+ else {
+ //Along Y
+ minmodSlopeY(Q, Qx, theta_);
+ __syncthreads();
+ computeFluxG(Q, Qx, F, g_, dy_, dt_);
+ __syncthreads();
+ evolveG(Q, F, dy_, dt_);
+ __syncthreads();
+
+ //Along X
+ minmodSlopeX(Q, Qx, theta_);
+ __syncthreads();
+ computeFluxF(Q, Qx, F, g_, dx_, dt_);
+ __syncthreads();
+ evolveF(Q, F, dx_, dt_);
+ __syncthreads();
+ }
+
+ // Write to main memory for all internal cells
+    writeBlock<w, h, gc_x, gc_y>( h1_ptr_,  h1_pitch_, Q[0], nx_, ny_, 0, 1);
+    writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
+    writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
+
+ //Compute the CFL for this block
+ if (cfl_ != NULL) {
+ writeCfl(Q, F[0], nx_, ny_, dx_, dy_, g_, cfl_);
+ }
+}
+
+
+
+
+
+
+
+
+
+
+} // extern "C"
\ No newline at end of file
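The .cu.hip variants in this diff are identical to their .cu sources apart from the prepended HIP runtime include; a minimal sketch of that conversion (hypothetical script, not part of this diff):

    def cu_to_hip(cu_path, hip_path):
        #Prepend the HIP runtime include to a CUDA kernel source
        with open(cu_path) as f:
            source = f.read()
        with open(hip_path, 'w') as f:
            f.write('#include "hip/hip_runtime.h"\n' + source)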
diff --git a/GPUSimulators/cuda/SWE2D_LxF.cu b/GPUSimulators/cuda/SWE2D_LxF.cu
new file mode 100644
index 0000000..1f197fd
--- /dev/null
+++ b/GPUSimulators/cuda/SWE2D_LxF.cu
@@ -0,0 +1,168 @@
+/*
+This CUDA kernel implements the classical Lax-Friedrichs scheme
+for the shallow water equations, with edge fluxes.
+
+Copyright (C) 2016 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+
+#include "common.h"
+#include "SWECommon.h"
+
+
+/**
+ * Computes the flux along the x axis for all faces
+ */
+template<int block_width, int block_height>
+__device__
+void computeFluxF(float Q[3][block_height+2][block_width+2],
+ float F[3][block_height][block_width+1],
+ const float g_, const float dx_, const float dt_) {
+ //Index of thread within block
+ const int tx = threadIdx.x;
+ const int ty = threadIdx.y;
+
+ {
+ const int j=ty;
+ const int l = j + 1; //Skip ghost cells
+        for (int i=tx; i<block_width+1; i+=block_width) {
+            const int k = i;
+
+            // Q at interface from the right and left
+            const float3 Qp = make_float3(Q[0][l][k+1],
+                                          Q[1][l][k+1],
+                                          Q[2][l][k+1]);
+            const float3 Qm = make_float3(Q[0][l][k],
+                                          Q[1][l][k],
+                                          Q[2][l][k]);
+
+            // Computed flux
+            const float3 flux = LxF_2D_flux(Qm, Qp, g_, dx_, dt_);
+            F[0][j][i] = flux.x;
+            F[1][j][i] = flux.y;
+            F[2][j][i] = flux.z;
+        }
+    }
+}
+
+/**
+  * Computes the flux along the y axis for all faces
+  */
+template<int block_width, int block_height>
+__device__
+void computeFluxG(float Q[3][block_height+2][block_width+2],
+ float G[3][block_height+1][block_width],
+ const float g_, const float dy_, const float dt_) {
+ //Index of thread within block
+ const int tx = threadIdx.x;
+ const int ty = threadIdx.y;
+
+    for (int j=ty; j<block_height+1; j+=block_height) {
+        const int l = j;
+        for (int i=tx; i<block_width; i+=block_width) {
+            const int k = i + 1; //Skip ghost cells
+
+            // Q at interface from the north and south
+            // Note that hu and hv are swapped ("transposing" the domain)
+            const float3 Qp = make_float3(Q[0][l+1][k],
+                                          Q[2][l+1][k],
+                                          Q[1][l+1][k]);
+            const float3 Qm = make_float3(Q[0][l][k],
+                                          Q[2][l][k],
+                                          Q[1][l][k]);
+
+            // Computed flux, with hu and hv swapped back
+            const float3 flux = LxF_2D_flux(Qm, Qp, g_, dy_, dt_);
+            G[0][j][i] = flux.x;
+            G[1][j][i] = flux.z;
+            G[2][j][i] = flux.y;
+        }
+    }
+}
+
+extern "C" {
+__global__ void LxFKernel(
+        int nx_, int ny_,
+        float dx_, float dy_, float dt_,
+        float g_,
+        int boundary_conditions_,
+        //Input h^n
+        float* h0_ptr_, int h0_pitch_,
+        float* hu0_ptr_, int hu0_pitch_,
+        float* hv0_ptr_, int hv0_pitch_,
+        //Output h^{n+1}
+        float* h1_ptr_, int h1_pitch_,
+        float* hu1_ptr_, int hu1_pitch_,
+        float* hv1_ptr_, int hv1_pitch_,
+        //Output CFL
+        float* cfl_) {
+    const unsigned int w = BLOCK_WIDTH;
+    const unsigned int h = BLOCK_HEIGHT;
+    const unsigned int gc_x = 1;
+    const unsigned int gc_y = 1;
+
+    //Shared memory variables
+    __shared__ float Q[3][h+2][w+2];
+    __shared__ float F[3][h  ][w+1];
+    __shared__ float G[3][h+1][w  ];
+
+    //Read into shared memory
+    readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
+    readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
+    readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
+
+ //Compute fluxes along the x and y axis
+ computeFluxF(Q, F, g_, dx_, dt_);
+ computeFluxG(Q, G, g_, dy_, dt_);
+ __syncthreads();
+
+ //Evolve for all cells
+ const int tx = threadIdx.x;
+ const int ty = threadIdx.y;
+ const int i = tx + 1; //Skip local ghost cells, i.e., +1
+ const int j = ty + 1;
+
+ Q[0][j][i] += (F[0][ty][tx] - F[0][ty ][tx+1]) * dt_ / dx_
+ + (G[0][ty][tx] - G[0][ty+1][tx ]) * dt_ / dy_;
+ Q[1][j][i] += (F[1][ty][tx] - F[1][ty ][tx+1]) * dt_ / dx_
+ + (G[1][ty][tx] - G[1][ty+1][tx ]) * dt_ / dy_;
+ Q[2][j][i] += (F[2][ty][tx] - F[2][ty ][tx+1]) * dt_ / dx_
+ + (G[2][ty][tx] - G[2][ty+1][tx ]) * dt_ / dy_;
+ __syncthreads();
+
+ //Write to main memory
+    writeBlock<w, h, gc_x, gc_y>( h1_ptr_,  h1_pitch_, Q[0], nx_, ny_, 0, 1);
+    writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
+    writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
+
+ //Compute the CFL for this block
+ if (cfl_ != NULL) {
+ writeCfl(Q, Q[0], nx_, ny_, dx_, dy_, g_, cfl_);
+ }
+}
+
+} // extern "C"
+
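Written out, the update performed by the Lax-Friedrichs kernel above is

$$Q_{i,j}^{n+1} = Q_{i,j}^{n} + \frac{\Delta t}{\Delta x}\bigl(F_{i-1/2,j} - F_{i+1/2,j}\bigr) + \frac{\Delta t}{\Delta y}\bigl(G_{i,j-1/2} - G_{i,j+1/2}\bigr),$$

with edge fluxes of the form $F_{i+1/2,j} = \tfrac{1}{2}\bigl(F(Q_{i,j}) + F(Q_{i+1,j})\bigr) + \frac{\Delta x}{4\Delta t}\bigl(Q_{i,j} - Q_{i+1,j}\bigr)$; see LxF_2D_flux in SWECommon.h, where the factor 1/4 (rather than the one-dimensional 1/2) accounts for the two-dimensional stencil.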
diff --git a/GPUSimulators/cuda/SWE2D_LxF.cu.hip b/GPUSimulators/cuda/SWE2D_LxF.cu.hip
new file mode 100644
index 0000000..588d691
--- /dev/null
+++ b/GPUSimulators/cuda/SWE2D_LxF.cu.hip
@@ -0,0 +1,169 @@
+#include "hip/hip_runtime.h"
+/*
+This HIP kernel implements the classical Lax-Friedrichs scheme
+for the shallow water equations, with edge fluxes.
+
+Copyright (C) 2016 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+
+#include "common.h"
+#include "SWECommon.h"
+
+
+/**
+ * Computes the flux along the x axis for all faces
+ */
+template<int block_width, int block_height>
+__device__
+void computeFluxF(float Q[3][block_height+2][block_width+2],
+ float F[3][block_height][block_width+1],
+ const float g_, const float dx_, const float dt_) {
+ //Index of thread within block
+ const int tx = threadIdx.x;
+ const int ty = threadIdx.y;
+
+ {
+ const int j=ty;
+ const int l = j + 1; //Skip ghost cells
+        for (int i=tx; i<block_width+1; i+=block_width) {
+            const int k = i;
+
+            // Q at interface from the right and left
+            const float3 Qp = make_float3(Q[0][l][k+1],
+                                          Q[1][l][k+1],
+                                          Q[2][l][k+1]);
+            const float3 Qm = make_float3(Q[0][l][k],
+                                          Q[1][l][k],
+                                          Q[2][l][k]);
+
+            // Computed flux
+            const float3 flux = LxF_2D_flux(Qm, Qp, g_, dx_, dt_);
+            F[0][j][i] = flux.x;
+            F[1][j][i] = flux.y;
+            F[2][j][i] = flux.z;
+        }
+    }
+}
+
+/**
+  * Computes the flux along the y axis for all faces
+  */
+template<int block_width, int block_height>
+__device__
+void computeFluxG(float Q[3][block_height+2][block_width+2],
+ float G[3][block_height+1][block_width],
+ const float g_, const float dy_, const float dt_) {
+ //Index of thread within block
+ const int tx = threadIdx.x;
+ const int ty = threadIdx.y;
+
+    for (int j=ty; j<block_height+1; j+=block_height) {
+        const int l = j;
+        for (int i=tx; i<block_width; i+=block_width) {
+            const int k = i + 1; //Skip ghost cells
+
+            // Q at interface from the north and south
+            // Note that hu and hv are swapped ("transposing" the domain)
+            const float3 Qp = make_float3(Q[0][l+1][k],
+                                          Q[2][l+1][k],
+                                          Q[1][l+1][k]);
+            const float3 Qm = make_float3(Q[0][l][k],
+                                          Q[2][l][k],
+                                          Q[1][l][k]);
+
+            // Computed flux, with hu and hv swapped back
+            const float3 flux = LxF_2D_flux(Qm, Qp, g_, dy_, dt_);
+            G[0][j][i] = flux.x;
+            G[1][j][i] = flux.z;
+            G[2][j][i] = flux.y;
+        }
+    }
+}
+
+extern "C" {
+__global__ void LxFKernel(
+        int nx_, int ny_,
+        float dx_, float dy_, float dt_,
+        float g_,
+        int boundary_conditions_,
+        //Input h^n
+        float* h0_ptr_, int h0_pitch_,
+        float* hu0_ptr_, int hu0_pitch_,
+        float* hv0_ptr_, int hv0_pitch_,
+        //Output h^{n+1}
+        float* h1_ptr_, int h1_pitch_,
+        float* hu1_ptr_, int hu1_pitch_,
+        float* hv1_ptr_, int hv1_pitch_,
+        //Output CFL
+        float* cfl_) {
+    const unsigned int w = BLOCK_WIDTH;
+    const unsigned int h = BLOCK_HEIGHT;
+    const unsigned int gc_x = 1;
+    const unsigned int gc_y = 1;
+
+    //Shared memory variables
+    __shared__ float Q[3][h+2][w+2];
+    __shared__ float F[3][h  ][w+1];
+    __shared__ float G[3][h+1][w  ];
+
+    //Read into shared memory
+    readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
+    readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
+    readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
+
+ //Compute fluxes along the x and y axis
+ computeFluxF(Q, F, g_, dx_, dt_);
+ computeFluxG(Q, G, g_, dy_, dt_);
+ __syncthreads();
+
+ //Evolve for all cells
+ const int tx = threadIdx.x;
+ const int ty = threadIdx.y;
+ const int i = tx + 1; //Skip local ghost cells, i.e., +1
+ const int j = ty + 1;
+
+ Q[0][j][i] += (F[0][ty][tx] - F[0][ty ][tx+1]) * dt_ / dx_
+ + (G[0][ty][tx] - G[0][ty+1][tx ]) * dt_ / dy_;
+ Q[1][j][i] += (F[1][ty][tx] - F[1][ty ][tx+1]) * dt_ / dx_
+ + (G[1][ty][tx] - G[1][ty+1][tx ]) * dt_ / dy_;
+ Q[2][j][i] += (F[2][ty][tx] - F[2][ty ][tx+1]) * dt_ / dx_
+ + (G[2][ty][tx] - G[2][ty+1][tx ]) * dt_ / dy_;
+ __syncthreads();
+
+ //Write to main memory
+    writeBlock<w, h, gc_x, gc_y>( h1_ptr_,  h1_pitch_, Q[0], nx_, ny_, 0, 1);
+    writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
+    writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
+
+ //Compute the CFL for this block
+ if (cfl_ != NULL) {
+ writeCfl(Q, Q[0], nx_, ny_, dx_, dy_, g_, cfl_);
+ }
+}
+
+} // extern "C"
+
diff --git a/GPUSimulators/cuda/SWE2D_WAF.cu b/GPUSimulators/cuda/SWE2D_WAF.cu
new file mode 100644
index 0000000..2c38cdf
--- /dev/null
+++ b/GPUSimulators/cuda/SWE2D_WAF.cu
@@ -0,0 +1,178 @@
+/*
+This CUDA kernel implements the Weighted Average Flux (WAF) scheme
+for the shallow water equations, described in
+E. F. Toro, Shock-Capturing Methods for Free-Surface Shallow Flows,
+Wiley, 2001.
+
+Copyright (C) 2016 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+
+
+#include "common.h"
+#include "SWECommon.h"
+
+
+
+/**
+ * Computes the flux along the x axis for all faces
+ */
+__device__
+void computeFluxF(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
+ float F[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
+ const float g_, const float dx_, const float dt_) {
+    for (int j=threadIdx.y; j<BLOCK_HEIGHT+4; j+=BLOCK_HEIGHT) {
+        for (int i=threadIdx.x+1; i<BLOCK_WIDTH+2; i+=BLOCK_WIDTH) {
+            const float3 Q_l2 = make_float3(Q[0][j][i-1], Q[1][j][i-1], Q[2][j][i-1]);
+            const float3 Q_l1 = make_float3(Q[0][j][i  ], Q[1][j][i  ], Q[2][j][i  ]);
+            const float3 Q_r1 = make_float3(Q[0][j][i+1], Q[1][j][i+1], Q[2][j][i+1]);
+            const float3 Q_r2 = make_float3(Q[0][j][i+2], Q[1][j][i+2], Q[2][j][i+2]);
+
+            // Computed flux at interface i+1/2, stored at F[i+1]
+            const float3 flux = WAF_1D_flux(Q_l2, Q_l1, Q_r1, Q_r2, g_, dx_, dt_);
+            F[0][j][i+1] = flux.x;
+            F[1][j][i+1] = flux.y;
+            F[2][j][i+1] = flux.z;
+        }
+    }
+}
+
+/**
+  * Computes the flux along the y axis for all faces
+  */
+__device__
+void computeFluxG(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
+                  float G[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
+                  const float g_, const float dy_, const float dt_) {
+    for (int j=threadIdx.y+1; j<BLOCK_HEIGHT+2; j+=BLOCK_HEIGHT) {
+        for (int i=threadIdx.x; i<BLOCK_WIDTH+4; i+=BLOCK_WIDTH) {
+            // Note that hu and hv are swapped ("transposing" the domain)
+            const float3 Q_l2 = make_float3(Q[0][j-1][i], Q[2][j-1][i], Q[1][j-1][i]);
+            const float3 Q_l1 = make_float3(Q[0][j  ][i], Q[2][j  ][i], Q[1][j  ][i]);
+            const float3 Q_r1 = make_float3(Q[0][j+1][i], Q[2][j+1][i], Q[1][j+1][i]);
+            const float3 Q_r2 = make_float3(Q[0][j+2][i], Q[2][j+2][i], Q[1][j+2][i]);
+
+            // Computed flux at interface j+1/2, with hu and hv swapped back
+            const float3 flux = WAF_1D_flux(Q_l2, Q_l1, Q_r1, Q_r2, g_, dy_, dt_);
+            G[0][j+1][i] = flux.x;
+            G[1][j+1][i] = flux.z;
+            G[2][j+1][i] = flux.y;
+        }
+    }
+}
+
+extern "C" {
+__global__ void WAFKernel(
+        int nx_, int ny_,
+        float dx_, float dy_, float dt_,
+        float g_,
+        int step_,
+        int boundary_conditions_,
+        //Input h^n
+        float* h0_ptr_, int h0_pitch_,
+        float* hu0_ptr_, int hu0_pitch_,
+        float* hv0_ptr_, int hv0_pitch_,
+        //Output h^{n+1}
+        float* h1_ptr_, int h1_pitch_,
+        float* hu1_ptr_, int hu1_pitch_,
+        float* hv1_ptr_, int hv1_pitch_) {
+    const unsigned int w = BLOCK_WIDTH;
+    const unsigned int h = BLOCK_HEIGHT;
+    const unsigned int gc_x = 2;
+    const unsigned int gc_y = 2;
+
+    //Shared memory variables
+    __shared__ float Q[3][h+4][w+4];
+    __shared__ float F[3][h+4][w+4];
+
+    //Read into shared memory Q from global memory
+    readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
+    readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
+    readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
+ __syncthreads();
+
+
+
+ //Step 0 => evolve x first, then y
+ if (step_ == 0) {
+ //Compute fluxes along the x axis and evolve
+ computeFluxF(Q, F, g_, dx_, dt_);
+ __syncthreads();
+ evolveF(Q, F, dx_, dt_);
+ __syncthreads();
+
+ //Compute fluxes along the y axis and evolve
+ computeFluxG(Q, F, g_, dy_, dt_);
+ __syncthreads();
+ evolveG(Q, F, dy_, dt_);
+ __syncthreads();
+ }
+ //Step 1 => evolve y first, then x
+ else {
+ //Compute fluxes along the y axis and evolve
+ computeFluxG(Q, F, g_, dy_, dt_);
+ __syncthreads();
+ evolveG(Q, F, dy_, dt_);
+ __syncthreads();
+
+ //Compute fluxes along the x axis and evolve
+ computeFluxF(Q, F, g_, dx_, dt_);
+ __syncthreads();
+ evolveF(Q, F, dx_, dt_);
+ __syncthreads();
+ }
+
+
+
+ // Write to main memory for all internal cells
+    writeBlock<w, h, gc_x, gc_y>( h1_ptr_,  h1_pitch_, Q[0], nx_, ny_, 0, 1);
+    writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
+    writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
+}
+
+} // extern "C"
\ No newline at end of file
diff --git a/GPUSimulators/cuda/SWE2D_WAF.cu.hip b/GPUSimulators/cuda/SWE2D_WAF.cu.hip
new file mode 100644
index 0000000..ddfad9d
--- /dev/null
+++ b/GPUSimulators/cuda/SWE2D_WAF.cu.hip
@@ -0,0 +1,179 @@
+#include "hip/hip_runtime.h"
+/*
+This HIP kernel implements the Weighted Average Flux (WAF) scheme
+for the shallow water equations, described in
+E. F. Toro, Shock-Capturing Methods for Free-Surface Shallow Flows,
+Wiley, 2001.
+
+Copyright (C) 2016 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+
+
+#include "common.h"
+#include "SWECommon.h"
+
+
+
+/**
+ * Computes the flux along the x axis for all faces
+ */
+__device__
+void computeFluxF(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
+ float F[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
+ const float g_, const float dx_, const float dt_) {
+    for (int j=threadIdx.y; j<BLOCK_HEIGHT+4; j+=BLOCK_HEIGHT) {
+        for (int i=threadIdx.x+1; i<BLOCK_WIDTH+2; i+=BLOCK_WIDTH) {
+            const float3 Q_l2 = make_float3(Q[0][j][i-1], Q[1][j][i-1], Q[2][j][i-1]);
+            const float3 Q_l1 = make_float3(Q[0][j][i  ], Q[1][j][i  ], Q[2][j][i  ]);
+            const float3 Q_r1 = make_float3(Q[0][j][i+1], Q[1][j][i+1], Q[2][j][i+1]);
+            const float3 Q_r2 = make_float3(Q[0][j][i+2], Q[1][j][i+2], Q[2][j][i+2]);
+
+            // Computed flux at interface i+1/2, stored at F[i+1]
+            const float3 flux = WAF_1D_flux(Q_l2, Q_l1, Q_r1, Q_r2, g_, dx_, dt_);
+            F[0][j][i+1] = flux.x;
+            F[1][j][i+1] = flux.y;
+            F[2][j][i+1] = flux.z;
+        }
+    }
+}
+
+/**
+  * Computes the flux along the y axis for all faces
+  */
+__device__
+void computeFluxG(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
+                  float G[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
+                  const float g_, const float dy_, const float dt_) {
+    for (int j=threadIdx.y+1; j<BLOCK_HEIGHT+2; j+=BLOCK_HEIGHT) {
+        for (int i=threadIdx.x; i<BLOCK_WIDTH+4; i+=BLOCK_WIDTH) {
+            // Note that hu and hv are swapped ("transposing" the domain)
+            const float3 Q_l2 = make_float3(Q[0][j-1][i], Q[2][j-1][i], Q[1][j-1][i]);
+            const float3 Q_l1 = make_float3(Q[0][j  ][i], Q[2][j  ][i], Q[1][j  ][i]);
+            const float3 Q_r1 = make_float3(Q[0][j+1][i], Q[2][j+1][i], Q[1][j+1][i]);
+            const float3 Q_r2 = make_float3(Q[0][j+2][i], Q[2][j+2][i], Q[1][j+2][i]);
+
+            // Computed flux at interface j+1/2, with hu and hv swapped back
+            const float3 flux = WAF_1D_flux(Q_l2, Q_l1, Q_r1, Q_r2, g_, dy_, dt_);
+            G[0][j+1][i] = flux.x;
+            G[1][j+1][i] = flux.z;
+            G[2][j+1][i] = flux.y;
+        }
+    }
+}
+
+extern "C" {
+__global__ void WAFKernel(
+        int nx_, int ny_,
+        float dx_, float dy_, float dt_,
+        float g_,
+        int step_,
+        int boundary_conditions_,
+        //Input h^n
+        float* h0_ptr_, int h0_pitch_,
+        float* hu0_ptr_, int hu0_pitch_,
+        float* hv0_ptr_, int hv0_pitch_,
+        //Output h^{n+1}
+        float* h1_ptr_, int h1_pitch_,
+        float* hu1_ptr_, int hu1_pitch_,
+        float* hv1_ptr_, int hv1_pitch_) {
+    const unsigned int w = BLOCK_WIDTH;
+    const unsigned int h = BLOCK_HEIGHT;
+    const unsigned int gc_x = 2;
+    const unsigned int gc_y = 2;
+
+    //Shared memory variables
+    __shared__ float Q[3][h+4][w+4];
+    __shared__ float F[3][h+4][w+4];
+
+    //Read into shared memory Q from global memory
+    readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
+    readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
+    readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
+ __syncthreads();
+
+
+
+ //Step 0 => evolve x first, then y
+ if (step_ == 0) {
+ //Compute fluxes along the x axis and evolve
+ computeFluxF(Q, F, g_, dx_, dt_);
+ __syncthreads();
+ evolveF(Q, F, dx_, dt_);
+ __syncthreads();
+
+ //Compute fluxes along the y axis and evolve
+ computeFluxG(Q, F, g_, dy_, dt_);
+ __syncthreads();
+ evolveG(Q, F, dy_, dt_);
+ __syncthreads();
+ }
+ //Step 1 => evolve y first, then x
+ else {
+ //Compute fluxes along the y axis and evolve
+ computeFluxG(Q, F, g_, dy_, dt_);
+ __syncthreads();
+ evolveG(Q, F, dy_, dt_);
+ __syncthreads();
+
+ //Compute fluxes along the x axis and evolve
+ computeFluxF(Q, F, g_, dx_, dt_);
+ __syncthreads();
+ evolveF(Q, F, dx_, dt_);
+ __syncthreads();
+ }
+
+
+
+ // Write to main memory for all internal cells
+    writeBlock<w, h, gc_x, gc_y>( h1_ptr_,  h1_pitch_, Q[0], nx_, ny_, 0, 1);
+    writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
+    writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
+}
+
+} // extern "C"
\ No newline at end of file
diff --git a/GPUSimulators/cuda/SWECommon.h b/GPUSimulators/cuda/SWECommon.h
new file mode 100644
index 0000000..52f8b31
--- /dev/null
+++ b/GPUSimulators/cuda/SWECommon.h
@@ -0,0 +1,533 @@
+/*
+These CUDA functions implement different types of numerical flux
+functions for the shallow water equations
+
+Copyright (C) 2016, 2017, 2018 SINTEF Digital
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+#include "limiters.h"
+
+
+
+
+
+
+__device__ float3 F_func(const float3 Q, const float g) {
+ float3 F;
+
+ F.x = Q.y; //hu
+ F.y = Q.y*Q.y / Q.x + 0.5f*g*Q.x*Q.x; //hu*hu/h + 0.5f*g*h*h;
+ F.z = Q.y*Q.z / Q.x; //hu*hv/h;
+
+ return F;
+}
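In symbols, F_func above evaluates the x-directed flux of the shallow water system,

$$F(Q) = \begin{pmatrix} hu \\ h u^2 + \tfrac{1}{2} g h^2 \\ h u v \end{pmatrix}, \qquad Q = \begin{pmatrix} h \\ hu \\ hv \end{pmatrix}.$$

The kernels obtain the y-directed flux by swapping hu and hv before and after calling the same one-dimensional flux functions.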
+
+
+
+
+
+
+
+
+
+
+
+
+
+/**
+ * Superbee flux limiter for WAF.
+ * Related to superbee limiter so that WAF_superbee(r, c) = 1 - (1-|c|)*superbee(r)
+ * @param r_ the ratio of upwind change (see Toro 2001, p. 203/204)
+ * @param c_ the courant number for wave k, dt*S_k/dx
+ */
+__device__ float WAF_superbee(float r_, float c_) {
+ // r <= 0.0
+ if (r_ <= 0.0f) {
+ return 1.0f;
+ }
+ // 0.0 <= r <= 1/2
+ else if (r_ <= 0.5f) {
+ return 1.0f - 2.0f*(1.0f - fabsf(c_))*r_;
+ }
+ // 1/2 <= r <= 1
+ else if (r_ <= 1.0f) {
+        return fabsf(c_);
+ }
+ // 1 <= r <= 2
+ else if (r_ <= 2.0f) {
+ return 1.0f - (1.0f - fabsf(c_))*r_;
+ }
+ // r >= 2
+ else {
+ return 2.0f*fabsf(c_) - 1.0f;
+ }
+}
+
+
+
+
+__device__ float WAF_albada(float r_, float c_) {
+ if (r_ <= 0.0f) {
+ return 1.0f;
+ }
+ else {
+ return 1.0f - (1.0f - fabsf(c_)) * r_ * (1.0f + r_) / (1.0f + r_*r_);
+ }
+}
+
+__device__ float WAF_minbee(float r_, float c_) {
+ r_ = fmaxf(-1.0f, fminf(2.0f, r_));
+ if (r_ <= 0.0f) {
+ return 1.0f;
+ }
+ if (r_ >= 0.0f && r_ <= 1.0f) {
+ return 1.0f - (1.0f - fabsf(c_)) * r_;
+ }
+ else {
+ return fabsf(c_);
+ }
+}
+
+__device__ float WAF_minmod(float r_, float c_) {
+ return 1.0f - (1.0f - fabsf(c_)) * fmaxf(0.0f, fminf(1.0f, r_));
+}
+
+__device__ float limiterToWAFLimiter(float r_, float c_) {
+ return 1.0f - (1.0f - fabsf(c_))*r_;
+}
+
+// Compute h in the "star region", h^dagger
+__device__ __inline__ float computeHStar(float h_l, float h_r, float u_l, float u_r, float c_l, float c_r, float g_) {
+
+ //This estimate for the h* gives rise to spurious oscillations.
+ //return 0.5f * (h_l+h_r) - 0.25f * (u_r-u_l)*(h_l+h_r)/(c_l+c_r);
+
+ const float h_tmp = 0.5f * (c_l + c_r) + 0.25f * (u_l - u_r);
+ return h_tmp*h_tmp / g_;
+}
+
+/**
+ * Weighted average flux (Toro 2001, p 200) for interface {i+1/2}
+ * @param r_ The flux limiter parameter (see Toro 2001, p. 203)
+ * @param Q_l2 Q_{i-1}
+ * @param Q_l1 Q_{i}
+ * @param Q_r1 Q_{i+1}
+ * @param Q_r2 Q_{i+2}
+ */
+__device__ float3 WAF_1D_flux(const float3 Q_l2, const float3 Q_l1, const float3 Q_r1, const float3 Q_r2, const float g_, const float dx_, const float dt_) {
+ const float h_l = Q_l1.x;
+ const float h_r = Q_r1.x;
+
+ const float h_l2 = Q_l2.x;
+ const float h_r2 = Q_r2.x;
+
+ // Calculate velocities
+ const float u_l = Q_l1.y / h_l;
+ const float u_r = Q_r1.y / h_r;
+
+ const float u_l2 = Q_l2.y / h_l2;
+ const float u_r2 = Q_r2.y / h_r2;
+
+ const float v_l = Q_l1.z / h_l;
+ const float v_r = Q_r1.z / h_r;
+
+ const float v_l2 = Q_l2.z / h_l2;
+ const float v_r2 = Q_r2.z / h_r2;
+
+ // Estimate the potential wave speeds
+ const float c_l = sqrt(g_*h_l);
+ const float c_r = sqrt(g_*h_r);
+
+ const float c_l2 = sqrt(g_*h_l2);
+ const float c_r2 = sqrt(g_*h_r2);
+
+ // Compute h in the "star region", h^dagger
+ const float h_dag_l = computeHStar(h_l2, h_l, u_l2, u_l, c_l2, c_l, g_);
+ const float h_dag = computeHStar( h_l, h_r, u_l, u_r, c_l, c_r, g_);
+ const float h_dag_r = computeHStar( h_r, h_r2, u_r, u_r2, c_r, c_r2, g_);
+
+ const float q_l_tmp = sqrt(0.5f * ( (h_dag+h_l)*h_dag ) ) / h_l;
+ const float q_r_tmp = sqrt(0.5f * ( (h_dag+h_r)*h_dag ) ) / h_r;
+
+ const float q_l = (h_dag > h_l) ? q_l_tmp : 1.0f;
+ const float q_r = (h_dag > h_r) ? q_r_tmp : 1.0f;
+
+ // Compute wave speed estimates
+ const float S_l = u_l - c_l*q_l;
+ const float S_r = u_r + c_r*q_r;
+ const float S_star = ( S_l*h_r*(u_r - S_r) - S_r*h_l*(u_l - S_l) ) / ( h_r*(u_r - S_r) - h_l*(u_l - S_l) );
+
+ const float3 Q_star_l = h_l * (S_l - u_l) / (S_l - S_star) * make_float3(1.0, S_star, v_l);
+ const float3 Q_star_r = h_r * (S_r - u_r) / (S_r - S_star) * make_float3(1.0, S_star, v_r);
+
+ // Estimate the fluxes in the four regions
+ const float3 F_1 = F_func(Q_l1, g_);
+ const float3 F_4 = F_func(Q_r1, g_);
+
+ const float3 F_2 = F_1 + S_l*(Q_star_l - Q_l1);
+ const float3 F_3 = F_4 + S_r*(Q_star_r - Q_r1);
+ //const float3 F_2 = F_func(Q_star_l, g_);
+ //const float3 F_3 = F_func(Q_star_r, g_);
+
+ // Compute the courant numbers for the waves
+ const float c_1 = S_l * dt_ / dx_;
+ const float c_2 = S_star * dt_ / dx_;
+ const float c_3 = S_r * dt_ / dx_;
+
+ // Compute the "upwind change" vectors for the i-3/2 and i+3/2 interfaces
+ const float eps = 1.0e-6f;
+ const float r_1 = desingularize( (c_1 > 0.0f) ? (h_dag_l - h_l2) : (h_dag_r - h_r), eps) / desingularize((h_dag - h_l), eps);
+ const float r_2 = desingularize( (c_2 > 0.0f) ? (v_l - v_l2) : (v_r2 - v_r), eps ) / desingularize((v_r - v_l), eps);
+ const float r_3 = desingularize( (c_3 > 0.0f) ? (h_l - h_dag_l) : (h_r2 - h_dag_r), eps ) / desingularize((h_r - h_dag), eps);
+
+ // Compute the limiter
+ // We use h for the nonlinear waves, and v for the middle shear wave
+ const float A_1 = copysign(1.0f, c_1) * limiterToWAFLimiter(generalized_minmod(r_1, 1.9f), c_1);
+ const float A_2 = copysign(1.0f, c_2) * limiterToWAFLimiter(generalized_minmod(r_2, 1.9f), c_2);
+ const float A_3 = copysign(1.0f, c_3) * limiterToWAFLimiter(generalized_minmod(r_3, 1.9f), c_3);
+
+ //Average the fluxes
+ const float3 flux = 0.5f*( F_1 + F_4 )
+ - 0.5f*( A_1 * (F_2 - F_1)
+ + A_2 * (F_3 - F_2)
+ + A_3 * (F_4 - F_3) );
+
+ return flux;
+}
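The averaging above is the WAF flux written as a jump sum over the three waves,

$$F^{WAF}_{i+1/2} = \tfrac{1}{2}(F_1 + F_4) - \tfrac{1}{2} \sum_{k=1}^{3} \operatorname{sgn}(c_k)\,\phi_k\,(F_{k+1} - F_k),$$

where the $c_k$ are the per-wave Courant numbers and the $\phi_k$ the limiter values; the code folds $\operatorname{sgn}(c_k)\,\phi_k$ into A_1, A_2 and A_3 via copysign.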
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/**
+ * Central upwind flux function
+ */
+__device__ float3 CentralUpwindFlux(const float3 Qm, float3 Qp, const float g) {
+ const float3 Fp = F_func(Qp, g);
+ const float up = Qp.y / Qp.x; // hu / h
+ const float cp = sqrt(g*Qp.x); // sqrt(g*h)
+
+ const float3 Fm = F_func(Qm, g);
+ const float um = Qm.y / Qm.x; // hu / h
+ const float cm = sqrt(g*Qm.x); // sqrt(g*h)
+
+ const float am = min(min(um-cm, up-cp), 0.0f); // largest negative wave speed
+ const float ap = max(max(um+cm, up+cp), 0.0f); // largest positive wave speed
+
+ return ((ap*Fm - am*Fp) + ap*am*(Qp-Qm))/(ap-am);
+}
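A host-side NumPy sketch of the same central-upwind flux, convenient for spot-checking kernel output on single interface states (helper names here are illustrative, not part of this diff):

    import numpy as np

    def F_func_np(Q, g):
        h, hu, hv = Q
        return np.array([hu, hu*hu/h + 0.5*g*h*h, hu*hv/h])

    def central_upwind_flux(Qm, Qp, g):
        #Mirrors CentralUpwindFlux in SWECommon.h; assumes ap != am
        Fp, up, cp = F_func_np(Qp, g), Qp[1]/Qp[0], np.sqrt(g*Qp[0])
        Fm, um, cm = F_func_np(Qm, g), Qm[1]/Qm[0], np.sqrt(g*Qm[0])
        am = min(um - cm, up - cp, 0.0)  #largest negative wave speed
        ap = max(um + cm, up + cp, 0.0)  #largest positive wave speed
        return ((ap*Fm - am*Fp) + ap*am*(Qp - Qm)) / (ap - am)

    Qm = np.array([1.0, 0.1, 0.0]); Qp = np.array([0.9, 0.0, 0.0])
    print(central_upwind_flux(Qm, Qp, 9.81))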
+
+
+
+
+
+
+
+
+
+/**
+ * Godunovs centered scheme (Toro 2001, p 165)
+ */
+__device__ float3 GodC_1D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_, const float dt_) {
+ const float3 F_l = F_func(Q_l, g_);
+ const float3 F_r = F_func(Q_r, g_);
+
+ const float3 Q_godc = 0.5f*(Q_l + Q_r) + (dt_/dx_)*(F_l - F_r);
+
+ return F_func(Q_godc, g_);
+}
+
+
+
+
+
+
+
+
+
+/**
+ * Harten-Lax-van Leer approximate Riemann solver (Toro 2001, p 180)
+ */
+__device__ float3 HLL_flux(const float3 Q_l, const float3 Q_r, const float g_) {
+ const float h_l = Q_l.x;
+ const float h_r = Q_r.x;
+
+ // Calculate velocities
+ const float u_l = Q_l.y / h_l;
+ const float u_r = Q_r.y / h_r;
+
+ // Estimate the potential wave speeds
+ const float c_l = sqrt(g_*h_l);
+ const float c_r = sqrt(g_*h_r);
+
+ // Compute h in the "star region", h^dagger
+ const float h_dag = 0.5f * (h_l+h_r) - 0.25f * (u_r-u_l)*(h_l+h_r)/(c_l+c_r);
+
+ const float q_l_tmp = sqrt(0.5f * ( (h_dag+h_l)*h_dag / (h_l*h_l) ) );
+ const float q_r_tmp = sqrt(0.5f * ( (h_dag+h_r)*h_dag / (h_r*h_r) ) );
+
+ const float q_l = (h_dag > h_l) ? q_l_tmp : 1.0f;
+ const float q_r = (h_dag > h_r) ? q_r_tmp : 1.0f;
+
+ // Compute wave speed estimates
+ const float S_l = u_l - c_l*q_l;
+ const float S_r = u_r + c_r*q_r;
+
+ //Upwind selection
+ if (S_l >= 0.0f) {
+ return F_func(Q_l, g_);
+ }
+ else if (S_r <= 0.0f) {
+ return F_func(Q_r, g_);
+ }
+ //Or estimate flux in the star region
+ else {
+ const float3 F_l = F_func(Q_l, g_);
+ const float3 F_r = F_func(Q_r, g_);
+ const float3 flux = (S_r*F_l - S_l*F_r + S_r*S_l*(Q_r - Q_l)) / (S_r-S_l);
+ return flux;
+ }
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+/**
+ * Harten-Lax-van Leer with contact discontinuity (Toro 2001, p 181)
+ */
+__device__ float3 HLLC_flux(const float3 Q_l, const float3 Q_r, const float g_) {
+ const float h_l = Q_l.x;
+ const float h_r = Q_r.x;
+
+ // Calculate velocities
+ const float u_l = Q_l.y / h_l;
+ const float u_r = Q_r.y / h_r;
+
+ // Estimate the potential wave speeds
+ const float c_l = sqrt(g_*h_l);
+ const float c_r = sqrt(g_*h_r);
+
+ // Compute h in the "star region", h^dagger
+ const float h_dag = 0.5f * (h_l+h_r) - 0.25f * (u_r-u_l)*(h_l+h_r)/(c_l+c_r);
+
+ const float q_l_tmp = sqrt(0.5f * ( (h_dag+h_l)*h_dag / (h_l*h_l) ) );
+ const float q_r_tmp = sqrt(0.5f * ( (h_dag+h_r)*h_dag / (h_r*h_r) ) );
+
+ const float q_l = (h_dag > h_l) ? q_l_tmp : 1.0f;
+ const float q_r = (h_dag > h_r) ? q_r_tmp : 1.0f;
+
+ // Compute wave speed estimates
+ const float S_l = u_l - c_l*q_l;
+ const float S_r = u_r + c_r*q_r;
+ const float S_star = ( S_l*h_r*(u_r - S_r) - S_r*h_l*(u_l - S_l) ) / ( h_r*(u_r - S_r) - h_l*(u_l - S_l) );
+
+ const float3 F_l = F_func(Q_l, g_);
+ const float3 F_r = F_func(Q_r, g_);
+
+ //Upwind selection
+ if (S_l >= 0.0f) {
+ return F_l;
+ }
+ else if (S_r <= 0.0f) {
+ return F_r;
+ }
+ //Or estimate flux in the "left star" region
+ else if (S_l <= 0.0f && 0.0f <=S_star) {
+ const float v_l = Q_l.z / h_l;
+ const float3 Q_star_l = h_l * (S_l - u_l) / (S_l - S_star) * make_float3(1, S_star, v_l);
+ const float3 flux = F_l + S_l*(Q_star_l - Q_l);
+ return flux;
+ }
+ //Or estimate flux in the "righ star" region
+ else if (S_star <= 0.0f && 0.0f <=S_r) {
+ const float v_r = Q_r.z / h_r;
+ const float3 Q_star_r = h_r * (S_r - u_r) / (S_r - S_star) * make_float3(1, S_star, v_r);
+ const float3 flux = F_r + S_r*(Q_star_r - Q_r);
+ return flux;
+ }
+ else {
+ return make_float3(-99999.9f, -99999.9f, -99999.9f); //Something wrong here
+ }
+}
+
+
+
+
+
+
+
+
+
+
+
+
+
+/**
+ * Lax-Friedrichs flux (Toro 2001, p 163)
+ */
+__device__ float3 LxF_1D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_, const float dt_) {
+ const float3 F_l = F_func(Q_l, g_);
+ const float3 F_r = F_func(Q_r, g_);
+
+ return 0.5f*(F_l + F_r) + (dx_/(2.0f*dt_))*(Q_l - Q_r);
+}
+
+
+
+
+
+
+
+
+/**
+ * Lax-Friedrichs extended to 2D
+ */
+__device__ float3 LxF_2D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_, const float dt_) {
+ const float3 F_l = F_func(Q_l, g_);
+ const float3 F_r = F_func(Q_r, g_);
+
+ //Note numerical diffusion for 2D here (0.25)
+ return 0.5f*(F_l + F_r) + (dx_/(4.0f*dt_))*(Q_l - Q_r);
+}
+
+
+
+
+
+
+
+
+
+
+
+
+/**
+ * Richtmyer / Two-step Lax-Wendroff flux (Toro 2001, p 164)
+ */
+__device__ float3 LxW2_1D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_, const float dt_) {
+ const float3 F_l = F_func(Q_l, g_);
+ const float3 F_r = F_func(Q_r, g_);
+
+ const float3 Q_lw2 = 0.5f*(Q_l + Q_r) + (dt_/(2.0f*dx_))*(F_l - F_r);
+
+ return F_func(Q_lw2, g_);
+}
+
+
+
+
+
+
+
+
+
+
+
+
+/**
+ * First-Order Centred (FORCE) flux (Toro 2001, p 163)
+ */
+__device__ float3 FORCE_1D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_, const float dt_) {
+ const float3 F_lf = LxF_1D_flux(Q_l, Q_r, g_, dx_, dt_);
+ const float3 F_lw2 = LxW2_1D_flux(Q_l, Q_r, g_, dx_, dt_);
+ return 0.5f*(F_lf + F_lw2);
+}
+
+
+
+
+
+
+
+
+
+
+template<int w, int h, int gc_x, int gc_y, int vars>
+__device__ void writeCfl(float Q[vars][h+2*gc_y][w+2*gc_x],
+ float shmem[h+2*gc_y][w+2*gc_x],
+ const int nx_, const int ny_,
+ const float dx_, const float dy_, const float g_,
+ float* output_) {
+ //Index of thread within block
+ const int tx = threadIdx.x + gc_x;
+ const int ty = threadIdx.y + gc_y;
+
+ //Index of cell within domain
+ const int ti = blockDim.x*blockIdx.x + tx;
+ const int tj = blockDim.y*blockIdx.y + ty;
+
+ //Only internal cells
+ if (ti < nx_+gc_x && tj < ny_+gc_y) {
+        //Named depth to avoid shadowing the template parameter h,
+        //which is used as the block height in the reduction below
+        const float depth = Q[0][ty][tx];
+        const float u = Q[1][ty][tx] / depth;
+        const float v = Q[2][ty][tx] / depth;
+
+        const float max_u = dx_ / (fabsf(u) + sqrtf(g_*depth));
+        const float max_v = dy_ / (fabsf(v) + sqrtf(g_*depth));
+
+ shmem[ty][tx] = fminf(max_u, max_v);
+ }
+ __syncthreads();
+
+ //One row of threads loop over all rows
+ if (ti < nx_+gc_x && tj < ny_+gc_y) {
+ if (ty == gc_y) {
+ float min_val = shmem[ty][tx];
+ const int max_y = min(h, ny_+gc_y - tj);
+            for (int j=gc_y; j<max_y+gc_y; j++) {
+                min_val = fminf(min_val, shmem[j][tx]);
+            }
+            shmem[gc_y][tx] = min_val;
+        }
+    }
+    __syncthreads();
+
+    //One thread finds the minimum of the first row for the whole block
+    if (threadIdx.x == 0 && threadIdx.y == 0) {
+        float min_val = shmem[gc_y][gc_x];
+        const int max_x = min(w, nx_+gc_x - ti);
+        for (int i=gc_x; i<max_x+gc_x; i++) {
+            min_val = fminf(min_val, shmem[gc_y][i]);
+        }
+
+        //Write to global memory
+        output_[gridDim.x*blockIdx.y + blockIdx.x] = min_val;
+    }
+}
diff --git a/GPUSimulators/cuda/common.h b/GPUSimulators/cuda/common.h
new file mode 100644
--- /dev/null
+++ b/GPUSimulators/cuda/common.h
+/*
+These CUDA functions implement common helper functions and data
+handling shared by the different simulators
+
+Copyright (C) 2018 SINTEF Digital
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+
+/**
+ * Float3 operators
+ */
+inline __device__ float3 operator*(const float a, const float3 b) {
+ return make_float3(a*b.x, a*b.y, a*b.z);
+}
+
+inline __device__ float3 operator/(const float3 a, const float b) {
+ return make_float3(a.x/b, a.y/b, a.z/b);
+}
+
+inline __device__ float3 operator-(const float3 a, const float3 b) {
+ return make_float3(a.x-b.x, a.y-b.y, a.z-b.z);
+}
+
+inline __device__ float3 operator+(const float3 a, const float3 b) {
+ return make_float3(a.x+b.x, a.y+b.y, a.z+b.z);
+}
+
+/**
+ * Float4 operators
+ */
+inline __device__ float4 operator*(const float a, const float4 b) {
+ return make_float4(a*b.x, a*b.y, a*b.z, a*b.w);
+}
+
+inline __device__ float4 operator/(const float4 a, const float b) {
+ return make_float4(a.x/b, a.y/b, a.z/b, a.w/b);
+}
+
+inline __device__ float4 operator-(const float4 a, const float4 b) {
+ return make_float4(a.x-b.x, a.y-b.y, a.z-b.z, a.w-b.w);
+}
+
+inline __device__ float4 operator+(const float4 a, const float4 b) {
+ return make_float4(a.x+b.x, a.y+b.y, a.z+b.z, a.w+b.w);
+}
+
+
+
+
+inline __device__ __host__ float clamp(const float f, const float a, const float b) {
+ return fmaxf(a, fminf(f, b));
+}
+
+inline __device__ __host__ int clamp(const int f, const int a, const int b) {
+ return (f < b) ? ( (f > a) ? f : a) : b;
+}
+
+
+
+
+
+__device__ float desingularize(float x_, float eps_) {
+ return copysign(1.0f, x_)*fmaxf(fabsf(x_), fminf(x_*x_/(2.0f*eps_)+0.5f*eps_, eps_));
+}
+
+
+
+
+
+
+
+
+/**
+ * Returns the step stored in the leftmost 16 bits
+ * of the 32 bit step-order integer
+ */
+inline __device__ int getStep(int step_order_) {
+ return step_order_ >> 16;
+}
+
+/**
+ * Returns the order stored in the rightmost 16 bits
+ * of the 32 bit step-order integer
+ */
+inline __device__ int getOrder(int step_order_) {
+ return step_order_ & 0x0000FFFF;
+}
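A host-side mirror of the packing that getStep and getOrder above decode (hypothetical helper; the Python side is not part of this diff):

    def pack_step_order(step, order):
        #step in the high 16 bits, order in the low 16 bits
        return (step << 16) | (order & 0xFFFF)

    assert pack_step_order(1, 2) >> 16 == 1         #getStep
    assert pack_step_order(1, 2) & 0x0000FFFF == 2  #getOrder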
+
+
+enum BoundaryCondition {
+ Dirichlet = 0,
+ Neumann = 1,
+ Periodic = 2,
+ Reflective = 3
+};
+
+inline __device__ BoundaryCondition getBCNorth(int bc_) {
+    return static_cast<BoundaryCondition>((bc_ >> 24) & 0x0000000F);
+}
+
+inline __device__ BoundaryCondition getBCSouth(int bc_) {
+    return static_cast<BoundaryCondition>((bc_ >> 16) & 0x0000000F);
+}
+
+inline __device__ BoundaryCondition getBCEast(int bc_) {
+    return static_cast<BoundaryCondition>((bc_ >> 8) & 0x0000000F);
+}
+
+inline __device__ BoundaryCondition getBCWest(int bc_) {
+    return static_cast<BoundaryCondition>((bc_ >> 0) & 0x0000000F);
+}
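And the corresponding host-side packing for the four boundary-condition fields (hypothetical helper, mirroring the shifts used by getBCNorth/South/East/West above):

    def pack_boundary_conditions(north, south, east, west):
        #4 bits per side: north<<24 | south<<16 | east<<8 | west
        return ((north & 0xF) << 24) | ((south & 0xF) << 16) \
             | ((east & 0xF) << 8) | (west & 0xF)

    bc = pack_boundary_conditions(2, 2, 2, 2)  #Periodic on all four sides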
+
+
+// West boundary
+template<int w, int h, int gc_x, int gc_y, int sign>
+__device__ void bcWestReflective(float Q[h+2*gc_y][w+2*gc_x],
+ const int nx_, const int ny_) {
+    for (int j=threadIdx.y; j<h+2*gc_y; j+=h) {
+        const int i = threadIdx.x + gc_x;
+        const int ti = blockDim.x*blockIdx.x + i;
+
+        if (gc_x >= 1 && ti == gc_x) {
+ Q[j][i-1] = sign*Q[j][i];
+ }
+ if (gc_x >= 2 && ti == gc_x + 1) {
+ Q[j][i-3] = sign*Q[j][i];
+ }
+ if (gc_x >= 3 && ti == gc_x + 2) {
+ Q[j][i-5] = sign*Q[j][i];
+ }
+ if (gc_x >= 4 && ti == gc_x + 3) {
+ Q[j][i-7] = sign*Q[j][i];
+ }
+ if (gc_x >= 5 && ti == gc_x + 4) {
+ Q[j][i-9] = sign*Q[j][i];
+ }
+ }
+}
+
+
+// East boundary
+template<int w, int h, int gc_x, int gc_y, int sign>
+__device__ void bcEastReflective(float Q[h+2*gc_y][w+2*gc_x],
+ const int nx_, const int ny_) {
+    for (int j=threadIdx.y; j<h+2*gc_y; j+=h) {
+        const int i = threadIdx.x + gc_x;
+        const int ti = blockDim.x*blockIdx.x + i;
+
+        if (gc_x >= 1 && ti == nx_ + gc_x - 1) {
+ Q[j][i+1] = sign*Q[j][i];
+ }
+ if (gc_x >= 2 && ti == nx_ + gc_x - 2) {
+ Q[j][i+3] = sign*Q[j][i];
+ }
+ if (gc_x >= 3 && ti == nx_ + gc_x - 3) {
+ Q[j][i+5] = sign*Q[j][i];
+ }
+ if (gc_x >= 4 && ti == nx_ + gc_x - 4) {
+ Q[j][i+7] = sign*Q[j][i];
+ }
+ if (gc_x >= 5 && ti == nx_ + gc_x - 5) {
+ Q[j][i+9] = sign*Q[j][i];
+ }
+ }
+}
+
+
+// South boundary
+template<int w, int h, int gc_x, int gc_y, int sign>
+__device__ void bcSouthReflective(float Q[h+2*gc_y][w+2*gc_x],
+ const int nx_, const int ny_) {
+    for (int i=threadIdx.x; i<w+2*gc_x; i+=w) {
+        const int j = threadIdx.y + gc_y;
+        const int tj = blockDim.y*blockIdx.y + j;
+
+        if (gc_y >= 1 && tj == gc_y) {
+ Q[j-1][i] = sign*Q[j][i];
+ }
+ if (gc_y >= 2 && tj == gc_y + 1) {
+ Q[j-3][i] = sign*Q[j][i];
+ }
+ if (gc_y >= 3 && tj == gc_y + 2) {
+ Q[j-5][i] = sign*Q[j][i];
+ }
+ if (gc_y >= 4 && tj == gc_y + 3) {
+ Q[j-7][i] = sign*Q[j][i];
+ }
+ if (gc_y >= 5 && tj == gc_y + 4) {
+ Q[j-9][i] = sign*Q[j][i];
+ }
+ }
+}
+
+
+
+
+// North boundary
+template<int w, int h, int gc_x, int gc_y, int sign>
+__device__ void bcNorthReflective(float Q[h+2*gc_y][w+2*gc_x], const int nx_, const int ny_) {
+    for (int i=threadIdx.x; i<w+2*gc_x; i+=w) {
+        const int j = threadIdx.y + gc_y;
+        const int tj = blockDim.y*blockIdx.y + j;
+
+        if (gc_y >= 1 && tj == ny_ + gc_y - 1) {
+ Q[j+1][i] = sign*Q[j][i];
+ }
+ if (gc_y >= 2 && tj == ny_ + gc_y - 2) {
+ Q[j+3][i] = sign*Q[j][i];
+ }
+ if (gc_y >= 3 && tj == ny_ + gc_y - 3) {
+ Q[j+5][i] = sign*Q[j][i];
+ }
+ if (gc_y >= 4 && tj == ny_ + gc_y - 4) {
+ Q[j+7][i] = sign*Q[j][i];
+ }
+ if (gc_y >= 5 && tj == ny_ + gc_y - 5) {
+ Q[j+9][i] = sign*Q[j][i];
+ }
+ }
+}
+
+
+
+
+/**
+ * Alter the index l so that it gives periodic boundary conditions when reading
+ */
+template<int gc_x>
+inline __device__ int handlePeriodicBoundaryX(int k, int nx_, int boundary_conditions_) {
+ const int gc_pad = gc_x;
+
+ //West boundary: add an offset to read from east of domain
+ if (gc_x > 0) {
+ if ((k < gc_pad)
+ && getBCWest(boundary_conditions_) == Periodic) {
+ k += (nx_+2*gc_x - 2*gc_pad);
+ }
+ //East boundary: subtract an offset to read from west of domain
+ else if ((k >= nx_+2*gc_x-gc_pad)
+ && getBCEast(boundary_conditions_) == Periodic) {
+ k -= (nx_+2*gc_x - 2*gc_pad);
+ }
+ }
+
+ return k;
+}
+
+/**
+ * Alter the index l so that it gives periodic boundary conditions when reading
+ */
+template<int gc_y>
+inline __device__ int handlePeriodicBoundaryY(int l, int ny_, int boundary_conditions_) {
+ const int gc_pad = gc_y;
+
+ //South boundary: add an offset to read from north of domain
+ if (gc_y > 0) {
+ if ((l < gc_pad)
+ && getBCSouth(boundary_conditions_) == Periodic) {
+ l += (ny_+2*gc_y - 2*gc_pad);
+ }
+ //North boundary: subtract an offset to read from south of domain
+ else if ((l >= ny_+2*gc_y-gc_pad)
+ && getBCNorth(boundary_conditions_) == Periodic) {
+ l -= (ny_+2*gc_y - 2*gc_pad);
+ }
+ }
+
+ return l;
+}
+
+
+template<int w, int h, int gc_x, int gc_y, int sign_x, int sign_y>
+inline __device__
+void handleReflectiveBoundary(
+ float Q[h+2*gc_y][w+2*gc_x],
+ const int nx_, const int ny_,
+ const int boundary_conditions_) {
+
+ //Handle reflective boundary conditions
+ if (getBCNorth(boundary_conditions_) == Reflective) {
+        bcNorthReflective<w, h, gc_x, gc_y, sign_y>(Q, nx_, ny_);
+ __syncthreads();
+ }
+ if (getBCSouth(boundary_conditions_) == Reflective) {
+        bcSouthReflective<w, h, gc_x, gc_y, sign_y>(Q, nx_, ny_);
+ __syncthreads();
+ }
+ if (getBCEast(boundary_conditions_) == Reflective) {
+        bcEastReflective<w, h, gc_x, gc_y, sign_x>(Q, nx_, ny_);
+ __syncthreads();
+ }
+ if (getBCWest(boundary_conditions_) == Reflective) {
+        bcWestReflective<w, h, gc_x, gc_y, sign_x>(Q, nx_, ny_);
+ __syncthreads();
+ }
+}
+
+/**
+ * Reads a block of data with ghost cells
+ */
+template<int w, int h, int gc_x, int gc_y, int sign_x, int sign_y>
+inline __device__ void readBlock(float* ptr_, int pitch_,
+ float Q[h+2*gc_y][w+2*gc_x],
+ const int nx_, const int ny_,
+ const int boundary_conditions_,
+ int x0, int y0,
+ int x1, int y1) {
+ //Index of block within domain
+ const int bx = blockDim.x * blockIdx.x;
+ const int by = blockDim.y * blockIdx.y;
+
+ //Read into shared memory
+ //Loop over all variables
+    for (int j=threadIdx.y; j<h+2*gc_y; j+=h) {
+        //Handle periodic boundary conditions here
+        int l = handlePeriodicBoundaryY<gc_y>(by + j + y0, ny_, boundary_conditions_);
+ l = min(l, min(ny_+2*gc_y-1, y1+2*gc_y-1));
+ float* row = (float*) ((char*) ptr_ + pitch_*l);
+
+        for (int i=threadIdx.x; i<w+2*gc_x; i+=w) {
+            //Handle periodic boundary conditions here
+            int k = handlePeriodicBoundaryX<gc_x>(bx + i + x0, nx_, boundary_conditions_);
+ k = min(k, min(nx_+2*gc_x-1, x1+2*gc_x-1));
+
+ //Read from global memory
+ Q[j][i] = row[k];
+ }
+ }
+ __syncthreads();
+
+    handleReflectiveBoundary<w, h, gc_x, gc_y, sign_x, sign_y>(Q, nx_, ny_, boundary_conditions_);
+}
+
+
+
+
+/**
+ * Writes a block of data to global memory for the shallow water equations.
+ */
+template<int w, int h, int gc_x, int gc_y>
+inline __device__ void writeBlock(float* ptr_, int pitch_,
+ float shmem[h+2*gc_y][w+2*gc_x],
+ const int nx_, const int ny_,
+ int rk_step_, int rk_order_,
+ int x0, int y0,
+ int x1, int y1) {
+
+ //Index of cell within domain
+ const int ti = blockDim.x*blockIdx.x + threadIdx.x + gc_x + x0;
+ const int tj = blockDim.y*blockIdx.y + threadIdx.y + gc_y + y0;
+
+ //In case we are writing only to a subarea given by (x0, y0) x (x1, y1)
+ const int max_ti = min(nx_+gc_x, x1+gc_x);
+ const int max_tj = min(ny_+gc_y, y1+gc_y);
+
+ //Only write internal cells
+ if ((x0+gc_x <= ti) && (ti < max_ti) && (y0+gc_y <= tj) && (tj < max_tj)) {
+ //Index of thread within block
+ const int tx = threadIdx.x + gc_x;
+ const int ty = threadIdx.y + gc_y;
+
+ float* const row = (float*) ((char*) ptr_ + pitch_*tj);
+
+ //Handle runge-kutta timestepping here
+ row[ti] = shmem[ty][tx];
+
+
+
+ /**
+ * SSPRK1 (forward Euler)
+ * u^1 = u^n + dt*f(u^n)
+ */
+ if (rk_order_ == 1) {
+ row[ti] = shmem[ty][tx];
+ }
+ /**
+ * SSPRK2
+ * u^1 = u^n + dt*f(u^n)
+ * u^n+1 = 1/2*u^n + 1/2*(u^1 + dt*f(u^1))
+ */
+ else if (rk_order_ == 2) {
+ if (rk_step_ == 0) {
+ row[ti] = shmem[ty][tx];
+ }
+ else if (rk_step_ == 1) {
+ row[ti] = 0.5f*row[ti] + 0.5f*shmem[ty][tx];
+ }
+ }
+ /**
+ * SSPRK3
+ * u^1 = u^n + dt*f(u^n)
+ * u^2 = 3/4 * u^n + 1/4 * (u^1 + dt*f(u^1))
+ * u^n+1 = 1/3 * u^n + 2/3 * (u^2 + dt*f(u^2))
+ * FIXME: This is not correct now, need a temporary to hold intermediate step u^2
+ */
+ else if (rk_order_ == 3) {
+ if (rk_step_ == 0) {
+ row[ti] = shmem[ty][tx];
+ }
+ else if (rk_step_ == 1) {
+ row[ti] = 0.75f*row[ti] + 0.25f*shmem[ty][tx];
+ }
+ else if (rk_step_ == 2) {
+ const float t = 1.0f / 3.0f; //Not representable in base 2
+ row[ti] = t*row[ti] + (1.0f-t)*shmem[ty][tx];
+ }
+ }
+
+ // DEBUG
+ //row[ti] = 99.0;
+ }
+}
+
+
+
+
+
+
+
+
+
+
+
+template<int w, int h, int gc_x, int gc_y, int vars>
+__device__ void evolveF(float Q[vars][h+2*gc_y][w+2*gc_x],
+ float F[vars][h+2*gc_y][w+2*gc_x],
+ const float dx_, const float dt_) {
+ for (int var=0; var < vars; ++var) {
+        for (int j=threadIdx.y; j<h+2*gc_y; j+=h) {
+            for (int i=threadIdx.x+gc_x; i<w+gc_x; i+=w) {
+                Q[var][j][i] = Q[var][j][i] + (F[var][j][i] - F[var][j][i+1]) * dt_ / dx_;
+            }
+        }
+    }
+}
+
+
+
+
+
+template<int w, int h, int gc_x, int gc_y, int vars>
+__device__ void evolveG(float Q[vars][h+2*gc_y][w+2*gc_x],
+ float G[vars][h+2*gc_y][w+2*gc_x],
+ const float dy_, const float dt_) {
+ for (int var=0; var < vars; ++var) {
+        for (int j=threadIdx.y+gc_y; j<h+gc_y; j+=h) {
+            for (int i=threadIdx.x; i<w+2*gc_x; i+=w) {
+                Q[var][j][i] = Q[var][j][i] + (G[var][j][i] - G[var][j+1][i]) * dt_ / dy_;
+            }
+        }
+    }
+}
+
+
+
+
+
+template<int vars, int shmem_height, int shmem_width>
+__device__ void memset(float Q[vars][shmem_height][shmem_width], float value) {
+    for (int k=0; k<vars; k++) {
+        for (int j=threadIdx.y; j<shmem_height; j+=blockDim.y) {
+            for (int i=threadIdx.x; i<shmem_width; i+=blockDim.x) {
+                Q[k][j][i] = value;
+            }
+        }
+    }
+}
+
+template<int threads>
+__device__ float reduce_max(float* data, unsigned int n) {
+ __shared__ float sdata[threads];
+ unsigned int tid = threadIdx.x;
+
+ //Reduce to "threads" elements
+ sdata[tid] = FLT_MIN;
+    for (unsigned int i=tid; i<n; i += threads) {
+        sdata[tid] = max(sdata[tid], data[i]);
+    }
+    __syncthreads();
+
+    //Perform reduction in shared memory
+    if (threads >= 512) {
+ if (tid < 256) {
+ sdata[tid] = max(sdata[tid], sdata[tid + 256]);
+ }
+ __syncthreads();
+ }
+ if (threads >= 256) {
+ if (tid < 128) {
+ sdata[tid] = max(sdata[tid], sdata[tid + 128]);
+ }
+ __syncthreads();
+ }
+ if (threads >= 128) {
+ if (tid < 64) {
+ sdata[tid] = max(sdata[tid], sdata[tid + 64]);
+ }
+ __syncthreads();
+ }
+ if (tid < 32) {
+ volatile float* sdata_volatile = sdata;
+ if (threads >= 64) {
+ sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 32]);
+ }
+ if (tid < 16) {
+ if (threads >= 32) sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 16]);
+ if (threads >= 16) sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 8]);
+ if (threads >= 8) sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 4]);
+ if (threads >= 4) sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 2]);
+ if (threads >= 2) sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 1]);
+ }
+
+ if (tid == 0) {
+ return sdata_volatile[0];
+ }
+ }
+    //Note: only thread 0 is guaranteed to hold the fully reduced maximum here
+    return sdata[0];
+}
+
+
+
+
+
+
+
+
+
+
diff --git a/GPUSimulators/cuda/limiters.h b/GPUSimulators/cuda/limiters.h
new file mode 100644
index 0000000..c2effa7
--- /dev/null
+++ b/GPUSimulators/cuda/limiters.h
@@ -0,0 +1,118 @@
+/*
+This file implements different flux and slope limiters
+
+Copyright (C) 2016, 2017, 2018 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#pragma once
+
+
+
+
+
+
+/**
+ * Reconstructs a slope using the generalized minmod limiter based on three
+ * consecutive values
+ */
+__device__ __inline__ float minmodSlope(float left, float center, float right, float theta) {
+ const float backward = (center - left) * theta;
+ const float central = (right - left) * 0.5f;
+ const float forward = (right - center) * theta;
+
+ return 0.25f
+ *copysign(1.0f, backward)
+ *(copysign(1.0f, backward) + copysign(1.0f, central))
+ *(copysign(1.0f, central) + copysign(1.0f, forward))
+ *min( min(fabs(backward), fabs(central)), fabs(forward) );
+}
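In symbols, minmodSlope returns the generalized minmod of the three one-sided differences,

$$\sigma_i = \operatorname{minmod}\bigl(\theta\,(u_i - u_{i-1}),\; \tfrac{1}{2}(u_{i+1} - u_{i-1}),\; \theta\,(u_{i+1} - u_i)\bigr),$$

which picks the smallest-magnitude argument when all three share a sign and returns zero otherwise; theta trades numerical dissipation (theta = 1) against oscillation risk (theta = 2).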
+
+
+
+
+/**
+ * Reconstructs a minmod slope for a whole block along the abscissa
+ */
+template<int w, int h, int gc_x, int gc_y, int vars>
+__device__ void minmodSlopeX(float Q[vars][h+2*gc_y][w+2*gc_x],
+ float Qx[vars][h+2*gc_y][w+2*gc_x],
+ const float theta_) {
+ //Reconstruct slopes along x axis
+    for (int p=0; p<vars; ++p) {
+        for (int j=threadIdx.y; j<h+2*gc_y; j+=h) {
+            for (int i=threadIdx.x+1; i<w+2*gc_x-1; i+=w) {
+                Qx[p][j][i] = minmodSlope(Q[p][j][i-1], Q[p][j][i], Q[p][j][i+1], theta_);
+            }
+        }
+    }
+}
+
+
+
+
+/**
+  * Reconstructs a minmod slope for a whole block along the ordinate
+  */
+template<int w, int h, int gc_x, int gc_y, int vars>
+__device__ void minmodSlopeY(float Q[vars][h+2*gc_y][w+2*gc_x],
+ float Qy[vars][h+2*gc_y][w+2*gc_x],
+ const float theta_) {
+ //Reconstruct slopes along y axis
+    for (int p=0; p<vars; ++p) {
+        for (int j=threadIdx.y+1; j<h+2*gc_y-1; j+=h) {
+            for (int i=threadIdx.x; i<w+2*gc_x; i+=w) {
+                Qy[p][j][i] = minmodSlope(Q[p][j-1][i], Q[p][j][i], Q[p][j+1][i], theta_);
+            }
+        }
+    }
+}
diff --git a/GPUSimulators/helpers/InitialConditions.py b/GPUSimulators/helpers/InitialConditions.py
new file mode 100644
--- /dev/null
+++ b/GPUSimulators/helpers/InitialConditions.py
+# -*- coding: utf-8 -*-
+
+"""
+This python module implements helpers for creating initial conditions
+
+Copyright (C) 2018 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+
+from GPUSimulators.Simulator import BoundaryCondition
+import numpy as np
+import gc
+
+
+def getExtent(width, height, nx, ny, grid, index=None):
+ if grid is not None:
+ gx = grid.grid[0]
+ gy = grid.grid[1]
+ if index is not None:
+ i, j = grid.getCoordinate(index)
+ else:
+ i, j = grid.getCoordinate()
+
+ dx = (width / gx) / nx
+ dy = (height / gy) / ny
+
+ x0 = width*i/gx + 0.5*dx
+ y0 = height*j/gy + 0.5*dy
+ x1 = width*(i+1)/gx - 0.5*dx
+        y1 = height*(j+1)/gy - 0.5*dy
+
+ else:
+ dx = width / nx
+ dy = height / ny
+
+ x0 = 0.5*dx
+ y0 = 0.5*dy
+ x1 = width-0.5*dx
+ y1 = height-0.5*dy
+
+ return [x0, x1, y0, y1, dx, dy]
+
+
+def downsample(highres_solution, x_factor, y_factor=None):
+    if (y_factor is None):
+ y_factor = x_factor
+
+ assert(highres_solution.shape[1] % x_factor == 0)
+ assert(highres_solution.shape[0] % y_factor == 0)
+
+ if (x_factor*y_factor == 1):
+ return highres_solution
+
+ if (len(highres_solution.shape) == 1):
+ highres_solution = highres_solution.reshape((1, highres_solution.size))
+
+ nx = highres_solution.shape[1] / x_factor
+ ny = highres_solution.shape[0] / y_factor
+
+ return highres_solution.reshape([int(ny), int(y_factor), int(nx), int(x_factor)]).mean(3).mean(1)
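A quick sanity check of downsample (values are illustrative):

    import numpy as np

    hr = np.arange(16, dtype=np.float32).reshape(4, 4)
    lr = downsample(hr, 2)
    assert lr.shape == (2, 2)
    assert np.isclose(lr[0, 0], hr[0:2, 0:2].mean())  #block average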
+
+
+
+
+
+def bump(nx, ny, width, height,
+ bump_size=None,
+ ref_nx=None, ref_ny=None,
+ x_center=0.5, y_center=0.5,
+ h_ref=0.5, h_amp=0.1, u_ref=0.0, u_amp=0.1, v_ref=0.0, v_amp=0.1):
+
+    if (ref_nx is None):
+        ref_nx = nx
+    assert(ref_nx >= nx)
+
+    if (ref_ny is None):
+        ref_ny = ny
+    assert(ref_ny >= ny)
+
+    if (bump_size is None):
+ bump_size = width/5.0
+
+ ref_dx = width / float(ref_nx)
+ ref_dy = height / float(ref_ny)
+
+ x_center = ref_dx*ref_nx*x_center
+ y_center = ref_dy*ref_ny*y_center
+
+ x = ref_dx*(np.arange(0, ref_nx, dtype=np.float32)+0.5) - x_center
+ y = ref_dy*(np.arange(0, ref_ny, dtype=np.float32)+0.5) - y_center
+ xv, yv = np.meshgrid(x, y, sparse=False, indexing='xy')
+ r = np.sqrt(xv**2 + yv**2)
+ xv = None
+ yv = None
+ gc.collect()
+
+ #Generate highres then downsample
+ #h_highres = 0.5 + 0.1*np.exp(-(xv**2/size + yv**2/size))
+ h_highres = h_ref + h_amp*0.5*(1.0 + np.cos(np.pi*r/bump_size)) * (r < bump_size)
+ h = downsample(h_highres, ref_nx/nx, ref_ny/ny)
+ h_highres = None
+ gc.collect()
+
+ #hu_highres = 0.1*np.exp(-(xv**2/size + yv**2/size))
+ u_highres = u_ref + u_amp*0.5*(1.0 + np.cos(np.pi*r/bump_size)) * (r < bump_size)
+ hu = downsample(u_highres, ref_nx/nx, ref_ny/ny)*h
+ u_highres = None
+ gc.collect()
+
+ #hu_highres = 0.1*np.exp(-(xv**2/size + yv**2/size))
+ v_highres = v_ref + v_amp*0.5*(1.0 + np.cos(np.pi*r/bump_size)) * (r < bump_size)
+ hv = downsample(v_highres, ref_nx/nx, ref_ny/ny)*h
+ v_highres = None
+ gc.collect()
+
+ dx = width/nx
+ dy = height/ny
+
+ return h, hu, hv, dx, dy
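bump can then be called directly to build a radially symmetric test state, e.g. (argument values illustrative):

    h, hu, hv, dx, dy = bump(nx=128, ny=128, width=100.0, height=100.0)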
+
+
+def genShockBubble(nx, ny, gamma, grid=None):
+ """
+ Generate Shock-bubble interaction case for the Euler equations
+ """
+
+ width = 4.0
+ height = 1.0
+ g = 0.0
+
+
+ rho = np.ones((ny, nx), dtype=np.float32)
+ u = np.zeros((ny, nx), dtype=np.float32)
+ v = np.zeros((ny, nx), dtype=np.float32)
+ E = np.zeros((ny, nx), dtype=np.float32)
+ p = np.ones((ny, nx), dtype=np.float32)
+
+
+ x0, x1, y0, y1, dx, dy = getExtent(width, height, nx, ny, grid)
+ x = np.linspace(x0, x1, nx, dtype=np.float32)
+ y = np.linspace(y0, y1, ny, dtype=np.float32)
+ xv, yv = np.meshgrid(x, y, sparse=False, indexing='xy')
+
+ #Bubble
+ radius = 0.25
+ x_center = 0.5
+ y_center = 0.5
+ bubble = np.sqrt((xv-x_center)**2+(yv-y_center)**2) <= radius
+ rho = np.where(bubble, 0.1, rho)
+
+ #Left boundary
+ left = (xv < 0.1)
+ rho = np.where(left, 3.81250, rho)
+ u = np.where(left, 2.57669, u)
+
+ #Energy
+ p = np.where(left, 10.0, p)
+ E = 0.5*rho*(u**2+v**2) + p/(gamma-1.0)
+
+
+ bc = BoundaryCondition({
+ 'north': BoundaryCondition.Type.Reflective,
+ 'south': BoundaryCondition.Type.Reflective,
+ 'east': BoundaryCondition.Type.Periodic,
+ 'west': BoundaryCondition.Type.Periodic
+ })
+
+ #Construct simulator
+ arguments = {
+ 'rho': rho, 'rho_u': rho*u, 'rho_v': rho*v, 'E': E,
+ 'nx': nx, 'ny': ny,
+ 'dx': dx, 'dy': dy,
+ 'g': g,
+ 'gamma': gamma,
+ 'boundary_conditions': bc
+ }
+ return arguments
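The returned dict is meant to be splatted into a simulator constructor; a hypothetical sketch (the Euler simulator class itself is not part of this diff):

    arguments = genShockBubble(nx=400, ny=100, gamma=1.4)
    #sim = EE2D_KP07_dimsplit.EE2D_KP07_dimsplit(context=my_context, **arguments)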
+
+
+
+
+
+
+
+def genKelvinHelmholtz(nx, ny, gamma, roughness=0.125, grid=None, index=None):
+ """
+ Roughness parameter in (0, 1.0] determines how "squiggly"
+    the interface between the zones is
+ """
+
+ def genZones(nx, ny, n):
+ """
+ Generates the zones of the two fluids of K-H
+ """
+ zone = np.zeros((ny, nx), dtype=np.int32)
+
+
+ def genSmoothRandom(nx, n):
+ n = max(1, min(n, nx))
+
+ if n == nx:
+ return np.random.random(nx)-0.5
+ else:
+ from scipy.interpolate import interp1d
+
+ #Control points and interpolator
+ xp = np.linspace(0.0, 1.0, n)
+ yp = np.random.random(n) - 0.5
+
+ if (n == 1):
+ kind = 'nearest'
+ elif (n == 2):
+ kind = 'linear'
+ elif (n == 3):
+ kind = 'quadratic'
+ else:
+ kind = 'cubic'
+
+ f = interp1d(xp, yp, kind=kind)
+
+ #Interpolation points
+ x = np.linspace(0.0, 1.0, nx)
+ return f(x)
+
+
+
+ x0, x1, y0, y1, _, dy = getExtent(1.0, 1.0, nx, ny, grid, index)
+ x = np.linspace(x0, x1, nx)
+ y = np.linspace(y0, y1, ny)
+ _, y = np.meshgrid(x, y)
+
+
+ a = genSmoothRandom(nx, n)*dy
+ zone = np.where(y > 0.25+a, zone, 1)
+
+ a = genSmoothRandom(nx, n)*dy
+ zone = np.where(y < 0.75+a, zone, 1)
+
+ return zone
+
+ width = 2.0
+ height = 1.0
+ g = 0.0
+
+ rho = np.empty((ny, nx), dtype=np.float32)
+ u = np.empty((ny, nx), dtype=np.float32)
+ v = np.zeros((ny, nx), dtype=np.float32)
+ p = 2.5*np.ones((ny, nx), dtype=np.float32)
+
+ #Generate the different zones
+ zones = genZones(nx, ny, max(1, min(nx, int(nx*roughness))))
+
+ #Zone 0
+ zone0 = zones == 0
+ rho = np.where(zone0, 1.0, rho)
+ u = np.where(zone0, 0.5, u)
+
+ #Zone 1
+ zone1 = zones == 1
+ rho = np.where(zone1, 2.0, rho)
+ u = np.where(zone1, -0.5, u)
+
+ E = 0.5*rho*(u**2+v**2) + p/(gamma-1.0)
+
+ _, _, _, _, dx, dy = getExtent(width, height, nx, ny, grid, index)
+
+
+ bc = BoundaryCondition({
+ 'north': BoundaryCondition.Type.Periodic,
+ 'south': BoundaryCondition.Type.Periodic,
+ 'east': BoundaryCondition.Type.Periodic,
+ 'west': BoundaryCondition.Type.Periodic
+ })
+
+ #Construct simulator
+ arguments = {
+ 'rho': rho, 'rho_u': rho*u, 'rho_v': rho*v, 'E': E,
+ 'nx': nx, 'ny': ny,
+ 'dx': dx, 'dy': dy,
+ 'g': g,
+ 'gamma': gamma,
+ 'boundary_conditions': bc
+ }
+
+ return arguments
+
+
+
+def genRayleighTaylor(nx, ny, gamma, version=0, grid=None):
+ """
+ Generates Rayleigh-Taylor instability case
+ """
+ width = 0.5
+ height = 1.5
+ g = 0.1
+
+ rho = np.zeros((ny, nx), dtype=np.float32)
+ u = np.zeros((ny, nx), dtype=np.float32)
+ v = np.zeros((ny, nx), dtype=np.float32)
+ p = np.zeros((ny, nx), dtype=np.float32)
+
+
+ x0, x1, y0, y1, dx, dy = getExtent(width, height, nx, ny, grid)
+ x = np.linspace(x0, x1, nx, dtype=np.float32)-width*0.5
+ y = np.linspace(y0, y1, ny, dtype=np.float32)-height*0.5
+ xv, yv = np.meshgrid(x, y, sparse=False, indexing='xy')
+
+    #This gives a squiggly interface
+ if (version == 0):
+ y_threshold = 0.01*np.cos(2*np.pi*np.abs(x)/0.5)
+ rho = np.where(yv <= y_threshold, 1.0, rho)
+ rho = np.where(yv > y_threshold, 2.0, rho)
+ elif (version == 1):
+ rho = np.where(yv <= 0.0, 1.0, rho)
+ rho = np.where(yv > 0.0, 2.0, rho)
+ v = 0.01*(1.0 + np.cos(2*np.pi*xv/0.5))/4
+ else:
+ assert False, "Invalid version"
+
+ p = 2.5 - rho*g*yv
+ E = 0.5*rho*(u**2+v**2) + p/(gamma-1.0)
+
+ bc = BoundaryCondition({
+ 'north': BoundaryCondition.Type.Reflective,
+ 'south': BoundaryCondition.Type.Reflective,
+ 'east': BoundaryCondition.Type.Reflective,
+ 'west': BoundaryCondition.Type.Reflective
+ })
+
+ #Construct simulator
+ arguments = {
+ 'rho': rho, 'rho_u': rho*u, 'rho_v': rho*v, 'E': E,
+ 'nx': nx, 'ny': ny,
+ 'dx': dx, 'dy': dy,
+ 'g': g,
+ 'gamma': gamma,
+ 'boundary_conditions': bc
+ }
+
+ return arguments
\ No newline at end of file
diff --git a/GPUSimulators/helpers/Visualization.py b/GPUSimulators/helpers/Visualization.py
new file mode 100644
index 0000000..a2ff8f1
--- /dev/null
+++ b/GPUSimulators/helpers/Visualization.py
@@ -0,0 +1,61 @@
+# -*- coding: utf-8 -*-
+
+"""
+This python module implements visualization techniques/modes
+
+Copyright (C) 2018 SINTEF ICT
+
+This program is free software: you can redistribute it and/or modify
+it under the terms of the GNU General Public License as published by
+the Free Software Foundation, either version 3 of the License, or
+(at your option) any later version.
+
+This program is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+GNU General Public License for more details.
+
+You should have received a copy of the GNU General Public License
+along with this program. If not, see <http://www.gnu.org/licenses/>.
+"""
+
+
+
+import numpy as np
+
+from matplotlib.colors import Normalize
+
+
+
+def genSchlieren(rho):
+ #Compute length of z-component of normalized gradient vector
+ normal = np.gradient(rho) #[x, y, 1]
+ length = 1.0 / np.sqrt(normal[0]**2 + normal[1]**2 + 1.0)
+ schlieren = np.power(length, 128)
+ return schlieren
+
+
+def genVorticity(rho, rho_u, rho_v):
+    u = rho_u / rho
+    v = rho_v / rho
+
+    du_dy, _ = np.gradient(u)
+    _, dv_dx = np.gradient(v)
+
+    #Scalar vorticity (z component of the curl of the velocity field)
+    curl = dv_dx - du_dy
+    return curl
+
+
+def genColors(rho, rho_u, rho_v, cmap, vmax, vmin):
+ schlieren = genSchlieren(rho)
+ curl = genVorticity(rho, rho_u, rho_v)
+
+ colors = Normalize(vmin, vmax, clip=True)(curl)
+ colors = cmap(colors)
+ for k in range(3):
+ colors[:,:,k] = colors[:,:,k]*schlieren
+
+ return colors
\ No newline at end of file
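A minimal sketch of driving these helpers from matplotlib (colormap and normalization limits are illustrative):

    import numpy as np
    import matplotlib.pyplot as plt

    ny, nx = 64, 64
    rho   = np.ones((ny, nx), dtype=np.float32)
    rho_u = np.zeros((ny, nx), dtype=np.float32)
    rho_v = np.zeros((ny, nx), dtype=np.float32)

    rgba = genColors(rho, rho_u, rho_v, plt.cm.RdBu, vmax=1.0, vmin=-1.0)
    plt.imshow(rgba, origin='lower')
    plt.show()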