Mirror of https://github.com/smyalygames/FiniteVolumeGPU_HIP.git
synced 2025-05-18 06:24:11 +02:00
update streams with hip-python
This commit is contained in:
parent e2b1281f5b
commit 80ffaf9b44
@@ -1,304 +0,0 @@
# -*- coding: utf-8 -*-

"""
This python module implements the different helper functions and
classes

Copyright (C) 2018 SINTEF ICT

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""

import os
import gc
import numpy as np
import logging
from socket import gethostname

#import pycuda.driver as cuda
from hip import hip, hiprtc

from GPUSimulators import Common, Simulator, CudaContext


#Defined at module level (not inside the class) because the static-style helpers
#below call it as a plain function
def hip_check(call_result):
    err = call_result[0]
    result = call_result[1:]
    if len(result) == 1:
        result = result[0]
    if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
        raise RuntimeError(str(err))
    elif (
        isinstance(err, hiprtc.hiprtcResult)
        and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
    ):
        raise RuntimeError(str(err))
    return result


class Autotuner:
    def __init__(self,
                 nx=2048, ny=2048,
                 block_widths=range(8, 32, 1),
                 block_heights=range(8, 32, 1)):
        logger = logging.getLogger(__name__)
        self.filename = "autotuning_data_" + gethostname() + ".npz"
        self.nx = nx
        self.ny = ny
        self.block_widths = block_widths
        self.block_heights = block_heights
        self.performance = {}

    def benchmark(self, simulator, force=False):
        logger = logging.getLogger(__name__)

        #Run through simulators and benchmark
        key = str(simulator.__name__)
        logger.info("Benchmarking %s to %s", key, self.filename)

        #If this simulator has been benchmarked already, skip it
        if (force == False and os.path.isfile(self.filename)):
            with np.load(self.filename) as data:
                if key in data["simulators"]:
                    logger.info("%s already benchmarked - skipping", key)
                    return

        # Set arguments to send to the simulators during construction
        context = CudaContext.CudaContext(autotuning=False)
        g = 9.81
        h0, hu0, hv0, dx, dy, dt = Autotuner.gen_test_data(nx=self.nx, ny=self.ny, g=g)
        arguments = {
            'context': context,
            'h0': h0, 'hu0': hu0, 'hv0': hv0,
            'nx': self.nx, 'ny': self.ny,
            'dx': dx, 'dy': dy, 'dt': 0.9*dt,
            'g': g
        }

        # Load existing data into memory
        benchmark_data = {
            "simulators": [],
        }
        if (os.path.isfile(self.filename)):
            with np.load(self.filename) as data:
                for k, v in data.items():
                    benchmark_data[k] = v

        # Run benchmark
        benchmark_data[key + "_megacells"] = Autotuner.benchmark_single_simulator(simulator, arguments, self.block_widths, self.block_heights)
        benchmark_data[key + "_block_widths"] = self.block_widths
        benchmark_data[key + "_block_heights"] = self.block_heights
        benchmark_data[key + "_arguments"] = str(arguments)

        existing_sims = benchmark_data["simulators"]
        if (isinstance(existing_sims, np.ndarray)):
            existing_sims = existing_sims.tolist()
        if (key not in existing_sims):
            benchmark_data["simulators"] = existing_sims + [key]

        # Save to file
        np.savez_compressed(self.filename, **benchmark_data)

    def get_peak_performance(self, simulator):
        """
        Reads a numpy file with autotuning data
        and reports the maximum performance and block size
        """
        logger = logging.getLogger(__name__)

        assert issubclass(simulator, Simulator.BaseSimulator)
        key = simulator.__name__

        if (key in self.performance):
            return self.performance[key]
        else:
            #Run simulation if required
            if (not os.path.isfile(self.filename)):
                logger.debug("Could not get autotuned peak performance for %s: benchmarking", key)
                self.benchmark(simulator)

            with np.load(self.filename) as data:
                if key not in data['simulators']:
                    logger.debug("Could not get autotuned peak performance for %s: benchmarking", key)
                    data.close()
                    self.benchmark(simulator)
                    data = np.load(self.filename)

                def find_max_index(megacells):
                    max_index = np.nanargmax(megacells)
                    return np.unravel_index(max_index, megacells.shape)

                megacells = data[key + '_megacells']
                block_widths = data[key + '_block_widths']
                block_heights = data[key + '_block_heights']
                j, i = find_max_index(megacells)

                self.performance[key] = {"block_width": block_widths[i],
                                         "block_height": block_heights[j],
                                         "megacells": megacells[j, i]}
                logger.debug("Returning %s as peak performance parameters", self.performance[key])
                return self.performance[key]

        #This should never happen
        raise RuntimeError("Something wrong: Could not get autotuning data!")

    def benchmark_single_simulator(simulator, arguments, block_widths, block_heights):
        """
        Runs a set of benchmarks for a single simulator
        """
        logger = logging.getLogger(__name__)

        megacells = np.empty((len(block_heights), len(block_widths)))
        megacells.fill(np.nan)

        logger.debug("Running %d benchmarks with %s", len(block_heights)*len(block_widths), simulator.__name__)

        sim_arguments = arguments.copy()

        with Common.Timer(simulator.__name__) as t:
            for j, block_height in enumerate(block_heights):
                sim_arguments.update({'block_height': block_height})
                for i, block_width in enumerate(block_widths):
                    sim_arguments.update({'block_width': block_width})
                    megacells[j, i] = Autotuner.run_benchmark(simulator, sim_arguments)

        logger.debug("Completed %s in %f seconds", simulator.__name__, t.secs)

        return megacells

    def run_benchmark(simulator, arguments, timesteps=10, warmup_timesteps=2):
        """
        Runs a benchmark, and returns the number of megacells achieved
        """
        logger = logging.getLogger(__name__)

        #Initialize simulator
        try:
            sim = simulator(**arguments)
        except:
            #An exception raised - not possible to continue
            logger.debug("Failed creating %s with arguments %s", simulator.__name__, str(arguments))
            return np.nan

        #Create timer events
        #start = cuda.Event()
        #end = cuda.Event()
        stream = hip_check(hip.hipStreamCreate())

        start = hip_check(hip.hipEventCreate())
        end = hip_check(hip.hipEventCreate())

        #Warmup
        for i in range(warmup_timesteps):
            sim.stepEuler(sim.dt)

        #Run simulation with timer
        #start.record(sim.stream)
        #start recording
        hip_check(hip.hipEventRecord(start, stream))
        for i in range(timesteps):
            sim.stepEuler(sim.dt)
        #end.record(sim.stream)
        #stop recording and synchronize
        hip_check(hip.hipEventRecord(end, stream))

        #Synchronize end event
        #end.synchronize()
        hip_check(hip.hipEventSynchronize(end))

        #Compute megacells
        #gpu_elapsed = end.time_since(start)*1.0e-3
        #hipEventElapsedTime reports milliseconds - convert to seconds
        gpu_elapsed = hip_check(hip.hipEventElapsedTime(start, end))*1.0e-3

        megacells = (sim.nx*sim.ny*timesteps / (1000*1000)) / gpu_elapsed

        #Sanity check solution
        h, hu, hv = sim.download()
        sane = True
        sane = sane and Autotuner.sanity_check(h, 0.3, 0.7)
        sane = sane and Autotuner.sanity_check(hu, -0.2, 0.2)
        sane = sane and Autotuner.sanity_check(hv, -0.2, 0.2)

        if (sane):
            logger.debug("%s [%d x %d] succeeded: %f megacells, gpu elapsed %f", simulator.__name__, arguments["block_width"], arguments["block_height"], megacells, gpu_elapsed)
            return megacells
        else:
            logger.debug("%s [%d x %d] failed: gpu elapsed %f", simulator.__name__, arguments["block_width"], arguments["block_height"], gpu_elapsed)
            return np.nan

    def gen_test_data(nx, ny, g):
        """
        Generates test dataset
        """
        width = 100.0
        height = 100.0
        dx = width / float(nx)
        dy = height / float(ny)

        x_center = dx*nx/2.0
        y_center = dy*ny/2.0

        #Create a gaussian "dam break" that will not form shocks
        size = width / 5.0
        dt = 10**10

        h = np.zeros((ny, nx), dtype=np.float32)
        hu = np.zeros((ny, nx), dtype=np.float32)
        hv = np.zeros((ny, nx), dtype=np.float32)

        extent = 1.0/np.sqrt(2.0)
        x = (dx*(np.arange(0, nx, dtype=np.float32)+0.5) - x_center) / size
        y = (dy*(np.arange(0, ny, dtype=np.float32)+0.5) - y_center) / size
        xv, yv = np.meshgrid(x, y, sparse=False, indexing='xy')
        r = np.minimum(1.0, np.sqrt(xv**2 + yv**2))
        xv = None
        yv = None
        gc.collect()

        #Generate highres
        cos = np.cos(np.pi*r)
        h = 0.5 + 0.1*0.5*(1.0 + cos)
        hu = 0.1*0.5*(1.0 + cos)
        hv = hu.copy()

        scale = 0.7
        max_h_estimate = 0.6
        max_u_estimate = 0.1*np.sqrt(2.0)
        dx = width/nx
        dy = height/ny
        dt = scale * min(dx, dy) / (max_u_estimate + np.sqrt(g*max_h_estimate))

        return h, hu, hv, dx, dy, dt

    def sanity_check(variable, bound_min, bound_max):
        """
        Checks that a variable is "sane"
        """
        maxval = np.amax(variable)
        minval = np.amin(variable)
        if (np.isnan(maxval)
                or np.isnan(minval)
                or maxval > bound_max
                or minval < bound_min):
            return False
        else:
            return True
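
The run_benchmark function above is the only place in this file that touches hip-python streams and events directly. A condensed, self-contained sketch of that timing pattern (a hedged example, not part of the original file; the timed work is left as a placeholder comment):

from hip import hip

def hip_check(call_result):
    err, result = call_result[0], call_result[1:]
    if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
        raise RuntimeError(str(err))
    return result[0] if len(result) == 1 else result

stream = hip_check(hip.hipStreamCreate())
start = hip_check(hip.hipEventCreate())
end = hip_check(hip.hipEventCreate())

hip_check(hip.hipEventRecord(start, stream))
# ... enqueue kernels / copies on `stream` here ...
hip_check(hip.hipEventRecord(end, stream))
hip_check(hip.hipEventSynchronize(end))

elapsed_ms = hip_check(hip.hipEventElapsedTime(start, end))  # milliseconds
print("elapsed: {:.3f} s".format(elapsed_ms * 1.0e-3))

hip_check(hip.hipEventDestroy(start))
hip_check(hip.hipEventDestroy(end))
hip_check(hip.hipStreamDestroy(stream))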
@@ -1,879 +0,0 @@
# -*- coding: utf-8 -*-

"""
This python module implements the different helper functions and
classes

Copyright (C) 2018 SINTEF ICT

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""

import os

import numpy as np
import time
import signal
import subprocess
import tempfile
import re
import io
import hashlib
import logging
import gc
import netCDF4
import json

#import pycuda.compiler as cuda_compiler
#import pycuda.gpuarray
#import pycuda.driver as cuda
#from pycuda.tools import PageLockedMemoryPool

from hip import hip, hiprtc
from hip import hipblas


def hip_check(call_result):
    err = call_result[0]
    result = call_result[1:]
    if len(result) == 1:
        result = result[0]
    if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
        raise RuntimeError(str(err))
    elif (
        isinstance(err, hiprtc.hiprtcResult)
        and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
    ):
        raise RuntimeError(str(err))
    return result

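Every hip-python runtime call returns a tuple with the error code first and any outputs after it; hip_check above unpacks that tuple and raises on failure. A few illustrative calls (values are hypothetical):

device_count = hip_check(hip.hipGetDeviceCount())   # (err, int)  -> int
hip_check(hip.hipSetDevice(0))                      # (err,)      -> ()
d_buf = hip_check(hip.hipMalloc(1024))              # (err, ptr)  -> device pointer
hip_check(hip.hipFree(d_buf))
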
def safeCall(cmd):
    logger = logging.getLogger(__name__)
    try:
        #git rev-parse HEAD
        current_dir = os.path.dirname(os.path.realpath(__file__))
        params = dict()
        params['stderr'] = subprocess.STDOUT
        params['cwd'] = current_dir
        params['universal_newlines'] = True  #text=True in more recent python
        params['shell'] = False
        if os.name == 'nt':
            params['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
        stdout = subprocess.check_output(cmd, **params)
    except subprocess.CalledProcessError as e:
        output = e.output
        logger.error("Git failed, \nReturn code: " + str(e.returncode) + "\nOutput: " + output)
        raise e

    return stdout


def getGitHash():
    return safeCall(["git", "rev-parse", "HEAD"])


def getGitStatus():
    return safeCall(["git", "status", "--porcelain", "-uno"])


def toJson(in_dict, compressed=True):
    """
    Creates a JSON string from a dictionary
    """
    logger = logging.getLogger(__name__)
    out_dict = in_dict.copy()
    for key in out_dict:
        if isinstance(out_dict[key], np.ndarray):
            out_dict[key] = out_dict[key].tolist()
        else:
            try:
                json.dumps(out_dict[key])
            except TypeError:
                value = str(out_dict[key])
                logger.warning("JSON: Converting {:s} to string ({:s})".format(key, value))
                out_dict[key] = value
    return json.dumps(out_dict)

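toJson above is used further down to store simulator arguments as netCDF attributes. A small illustration with a mixed dictionary (example values only): ndarrays become lists, and values json cannot serialize fall back to strings with a warning.

args = {'nx': 128, 'g': np.float32(9.81), 'h0': np.zeros((2, 2))}
print(toJson(args))
# e.g. {"nx": 128, "g": "9.81", "h0": [[0.0, 0.0], [0.0, 0.0]]}
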
def runSimulation(simulator, simulator_args, outfile, save_times, save_var_names=[], dt=None):
|
||||
"""
|
||||
Runs a simulation, and stores output in netcdf file. Stores the times given in
|
||||
save_times, and saves all of the variables in list save_var_names. Elements in
|
||||
save_var_names can be set to None if you do not want to save them
|
||||
"""
|
||||
profiling_data_sim_runner = { 'start': {}, 'end': {} }
|
||||
profiling_data_sim_runner["start"]["t_sim_init"] = 0
|
||||
profiling_data_sim_runner["end"]["t_sim_init"] = 0
|
||||
profiling_data_sim_runner["start"]["t_nc_write"] = 0
|
||||
profiling_data_sim_runner["end"]["t_nc_write"] = 0
|
||||
profiling_data_sim_runner["start"]["t_full_step"] = 0
|
||||
profiling_data_sim_runner["end"]["t_full_step"] = 0
|
||||
|
||||
profiling_data_sim_runner["start"]["t_sim_init"] = time.time()
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
assert len(save_times) > 0, "Need to specify which times to save"
|
||||
|
||||
with Timer("construct") as t:
|
||||
sim = simulator(**simulator_args)
|
||||
logger.info("Constructed in " + str(t.secs) + " seconds")
|
||||
|
||||
#Create netcdf file and simulate
|
||||
with DataDumper(outfile, mode='w', clobber=False) as outdata:
|
||||
|
||||
#Create attributes (metadata)
|
||||
outdata.ncfile.created = time.ctime(time.time())
|
||||
outdata.ncfile.git_hash = getGitHash()
|
||||
outdata.ncfile.git_status = getGitStatus()
|
||||
outdata.ncfile.simulator = str(simulator)
|
||||
|
||||
# do not write fields to attributes (they are to large)
|
||||
simulator_args_for_ncfile = simulator_args.copy()
|
||||
del simulator_args_for_ncfile["rho"]
|
||||
del simulator_args_for_ncfile["rho_u"]
|
||||
del simulator_args_for_ncfile["rho_v"]
|
||||
del simulator_args_for_ncfile["E"]
|
||||
outdata.ncfile.sim_args = toJson(simulator_args_for_ncfile)
|
||||
|
||||
#Create dimensions
|
||||
outdata.ncfile.createDimension('time', len(save_times))
|
||||
outdata.ncfile.createDimension('x', simulator_args['nx'])
|
||||
outdata.ncfile.createDimension('y', simulator_args['ny'])
|
||||
|
||||
#Create variables for dimensions
|
||||
ncvars = {}
|
||||
ncvars['time'] = outdata.ncfile.createVariable('time', np.dtype('float32').char, 'time')
|
||||
ncvars['x'] = outdata.ncfile.createVariable( 'x', np.dtype('float32').char, 'x')
|
||||
ncvars['y'] = outdata.ncfile.createVariable( 'y', np.dtype('float32').char, 'y')
|
||||
|
||||
#Fill variables with proper values
|
||||
ncvars['time'][:] = save_times
|
||||
extent = sim.getExtent()
|
||||
ncvars['x'][:] = np.linspace(extent[0], extent[1], simulator_args['nx'])
|
||||
ncvars['y'][:] = np.linspace(extent[2], extent[3], simulator_args['ny'])
|
||||
|
||||
#Choose which variables to download (prune None from list, but keep the index)
|
||||
download_vars = []
|
||||
for i, var_name in enumerate(save_var_names):
|
||||
if var_name is not None:
|
||||
download_vars += [i]
|
||||
save_var_names = list(save_var_names[i] for i in download_vars)
|
||||
|
||||
#Create variables
|
||||
for var_name in save_var_names:
|
||||
ncvars[var_name] = outdata.ncfile.createVariable(var_name, np.dtype('float32').char, ('time', 'y', 'x'), zlib=True, least_significant_digit=3)
|
||||
|
||||
#Create step sizes between each save
|
||||
t_steps = np.empty_like(save_times)
|
||||
t_steps[0] = save_times[0]
|
||||
t_steps[1:] = save_times[1:] - save_times[0:-1]
|
||||
|
||||
profiling_data_sim_runner["end"]["t_sim_init"] = time.time()
|
||||
|
||||
#Start simulation loop
|
||||
progress_printer = ProgressPrinter(save_times[-1], print_every=10)
|
||||
for k in range(len(save_times)):
|
||||
#Get target time and step size there
|
||||
t_step = t_steps[k]
|
||||
t_end = save_times[k]
|
||||
|
||||
#Sanity check simulator
|
||||
try:
|
||||
sim.check()
|
||||
except AssertionError as e:
|
||||
logger.error("Error after {:d} steps (t={:f}: {:s}".format(sim.simSteps(), sim.simTime(), str(e)))
|
||||
return outdata.filename
|
||||
|
||||
profiling_data_sim_runner["start"]["t_full_step"] += time.time()
|
||||
|
||||
#Simulate
|
||||
if (t_step > 0.0):
|
||||
sim.simulate(t_step, dt)
|
||||
|
||||
profiling_data_sim_runner["end"]["t_full_step"] += time.time()
|
||||
|
||||
profiling_data_sim_runner["start"]["t_nc_write"] += time.time()
|
||||
|
||||
#Download
|
||||
save_vars = sim.download(download_vars)
|
||||
|
||||
#Save to file
|
||||
for i, var_name in enumerate(save_var_names):
|
||||
ncvars[var_name][k, :] = save_vars[i]
|
||||
|
||||
profiling_data_sim_runner["end"]["t_nc_write"] += time.time()
|
||||
|
||||
#Write progress to screen
|
||||
print_string = progress_printer.getPrintString(t_end)
|
||||
if (print_string):
|
||||
logger.debug(print_string)
|
||||
|
||||
logger.debug("Simulated to t={:f} in {:d} timesteps (average dt={:f})".format(t_end, sim.simSteps(), sim.simTime() / sim.simSteps()))
|
||||
|
||||
return outdata.filename, profiling_data_sim_runner, sim.profiling_data_mpi

class Timer(object):
    """
    Class which keeps track of time spent for a section of code
    """
    def __init__(self, tag, log_level=logging.DEBUG):
        self.tag = tag
        self.log_level = log_level
        self.logger = logging.getLogger(__name__)

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *args):
        self.end = time.time()
        self.secs = self.end - self.start
        self.msecs = self.secs * 1000  # millisecs
        self.logger.log(self.log_level, "%s: %f ms", self.tag, self.msecs)

    def elapsed(self):
        return time.time() - self.start

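Timer is used as a context manager throughout the code base (for example around simulator construction in runSimulation above). A minimal usage sketch, with the timed body left as a placeholder:

with Timer("assemble", log_level=logging.INFO) as t:
    pass  # ... code to be timed ...
print("took {:f} s ({:f} ms)".format(t.secs, t.msecs))
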
class PopenFileBuffer(object):
|
||||
"""
|
||||
Simple class for holding a set of tempfiles
|
||||
for communicating with a subprocess
|
||||
"""
|
||||
def __init__(self):
|
||||
self.stdout = tempfile.TemporaryFile(mode='w+t')
|
||||
self.stderr = tempfile.TemporaryFile(mode='w+t')
|
||||
|
||||
def __del__(self):
|
||||
self.stdout.close()
|
||||
self.stderr.close()
|
||||
|
||||
def read(self):
|
||||
self.stdout.seek(0)
|
||||
cout = self.stdout.read()
|
||||
self.stdout.seek(0, 2)
|
||||
|
||||
self.stderr.seek(0)
|
||||
cerr = self.stderr.read()
|
||||
self.stderr.seek(0, 2)
|
||||
|
||||
return cout, cerr
|
||||
|
||||
class IPEngine(object):
|
||||
"""
|
||||
Class for starting IPEngines for MPI processing in IPython
|
||||
"""
|
||||
def __init__(self, n_engines):
|
||||
self.logger = logging.getLogger(__name__)
|
||||
|
||||
#Start ipcontroller
|
||||
self.logger.info("Starting IPController")
|
||||
self.c_buff = PopenFileBuffer()
|
||||
c_cmd = ["ipcontroller", "--ip='*'"]
|
||||
c_params = dict()
|
||||
c_params['stderr'] = self.c_buff.stderr
|
||||
c_params['stdout'] = self.c_buff.stdout
|
||||
c_params['shell'] = False
|
||||
if os.name == 'nt':
|
||||
c_params['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
|
||||
self.c = subprocess.Popen(c_cmd, **c_params)
|
||||
|
||||
#Wait until controller is running
|
||||
time.sleep(3)
|
||||
|
||||
#Start engines
|
||||
self.logger.info("Starting IPEngines")
|
||||
self.e_buff = PopenFileBuffer()
|
||||
e_cmd = ["mpiexec", "-n", str(n_engines), "ipengine", "--mpi"]
|
||||
e_params = dict()
|
||||
e_params['stderr'] = self.e_buff.stderr
|
||||
e_params['stdout'] = self.e_buff.stdout
|
||||
e_params['shell'] = False
|
||||
if os.name == 'nt':
|
||||
e_params['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
|
||||
self.e = subprocess.Popen(e_cmd, **e_params)
|
||||
|
||||
# attach to a running cluster
|
||||
import ipyparallel
|
||||
self.cluster = ipyparallel.Client()#profile='mpi')
|
||||
time.sleep(3)
|
||||
while(len(self.cluster.ids) != n_engines):
|
||||
time.sleep(0.5)
|
||||
self.logger.info("Waiting for cluster...")
|
||||
self.cluster = ipyparallel.Client()#profile='mpi')
|
||||
|
||||
self.logger.info("Done")
|
||||
|
||||
def __del__(self):
|
||||
self.shutdown()
|
||||
|
||||
def shutdown(self):
|
||||
if (self.e is not None):
|
||||
if (os.name == 'nt'):
|
||||
self.logger.warn("Sending CTRL+C to IPEngine")
|
||||
self.e.send_signal(signal.CTRL_C_EVENT)
|
||||
|
||||
try:
|
||||
self.e.communicate(timeout=3)
|
||||
self.e.kill()
|
||||
except subprocess.TimeoutExpired:
|
||||
self.logger.warn("Killing IPEngine")
|
||||
self.e.kill()
|
||||
self.e.communicate()
|
||||
self.e = None
|
||||
|
||||
cout, cerr = self.e_buff.read()
|
||||
self.logger.info("IPEngine cout: {:s}".format(cout))
|
||||
self.logger.info("IPEngine cerr: {:s}".format(cerr))
|
||||
self.e_buff = None
|
||||
|
||||
gc.collect()
|
||||
|
||||
if (self.c is not None):
|
||||
if (os.name == 'nt'):
|
||||
self.logger.warn("Sending CTRL+C to IPController")
|
||||
self.c.send_signal(signal.CTRL_C_EVENT)
|
||||
|
||||
try:
|
||||
self.c.communicate(timeout=3)
|
||||
self.c.kill()
|
||||
except subprocess.TimeoutExpired:
|
||||
self.logger.warn("Killing IPController")
|
||||
self.c.kill()
|
||||
self.c.communicate()
|
||||
self.c = None
|
||||
|
||||
cout, cerr = self.c_buff.read()
|
||||
self.logger.info("IPController cout: {:s}".format(cout))
|
||||
self.logger.info("IPController cerr: {:s}".format(cerr))
|
||||
self.c_buff = None
|
||||
|
||||
gc.collect()
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
class DataDumper(object):
|
||||
"""
|
||||
Simple class for holding a netCDF4 object
|
||||
(handles opening and closing in a nice way)
|
||||
Use as
|
||||
with DataDumper("filename") as data:
|
||||
...
|
||||
"""
|
||||
def __init__(self, filename, *args, **kwargs):
|
||||
self.logger = logging.getLogger(__name__)
|
||||
|
||||
#Create directory if needed
|
||||
filename = os.path.abspath(filename)
|
||||
dirname = os.path.dirname(filename)
|
||||
if dirname and not os.path.isdir(dirname):
|
||||
self.logger.info("Creating directory " + dirname)
|
||||
os.makedirs(dirname)
|
||||
|
||||
#Get mode of file if we have that
|
||||
mode = None
|
||||
if (args):
|
||||
mode = args[0]
|
||||
elif (kwargs and 'mode' in kwargs.keys()):
|
||||
mode = kwargs['mode']
|
||||
|
||||
#Create new unique file if writing
|
||||
if (mode):
|
||||
if (("w" in mode) or ("+" in mode) or ("a" in mode)):
|
||||
i = 0
|
||||
stem, ext = os.path.splitext(filename)
|
||||
while (os.path.isfile(filename)):
|
||||
filename = "{:s}_{:04d}{:s}".format(stem, i, ext)
|
||||
i = i+1
|
||||
self.filename = os.path.abspath(filename)
|
||||
|
||||
#Save arguments
|
||||
self.args = args
|
||||
self.kwargs = kwargs
|
||||
|
||||
#Log output
|
||||
self.logger.info("Initialized " + self.filename)
|
||||
|
||||
|
||||
def __enter__(self):
|
||||
self.logger.info("Opening " + self.filename)
|
||||
if (self.args):
|
||||
self.logger.info("Arguments: " + str(self.args))
|
||||
if (self.kwargs):
|
||||
self.logger.info("Keyword arguments: " + str(self.kwargs))
|
||||
self.ncfile = netCDF4.Dataset(self.filename, *self.args, **self.kwargs)
|
||||
return self
|
||||
|
||||
def __exit__(self, *args):
|
||||
self.logger.info("Closing " + self.filename)
|
||||
self.ncfile.close()
|
||||
|
||||
|
||||
def toJson(in_dict):
|
||||
out_dict = in_dict.copy()
|
||||
|
||||
for key in out_dict:
|
||||
if isinstance(out_dict[key], np.ndarray):
|
||||
out_dict[key] = out_dict[key].tolist()
|
||||
else:
|
||||
try:
|
||||
json.dumps(out_dict[key])
|
||||
except:
|
||||
out_dict[key] = str(out_dict[key])
|
||||
|
||||
return json.dumps(out_dict)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
class ProgressPrinter(object):
|
||||
"""
|
||||
Small helper class for
|
||||
"""
|
||||
def __init__(self, total_steps, print_every=5):
|
||||
self.logger = logging.getLogger(__name__)
|
||||
self.start = time.time()
|
||||
self.total_steps = total_steps
|
||||
self.print_every = print_every
|
||||
self.next_print_time = self.print_every
|
||||
self.last_step = 0
|
||||
self.secs_per_iter = None
|
||||
|
||||
def getPrintString(self, step):
|
||||
elapsed = time.time() - self.start
|
||||
if (elapsed > self.next_print_time):
|
||||
dt = elapsed - (self.next_print_time - self.print_every)
|
||||
dsteps = step - self.last_step
|
||||
steps_remaining = self.total_steps - step
|
||||
|
||||
if (dsteps == 0):
|
||||
return
|
||||
|
||||
self.last_step = step
|
||||
self.next_print_time = elapsed + self.print_every
|
||||
|
||||
if not self.secs_per_iter:
|
||||
self.secs_per_iter = dt / dsteps
|
||||
self.secs_per_iter = 0.2*self.secs_per_iter + 0.8*(dt / dsteps)
|
||||
|
||||
remaining_time = steps_remaining * self.secs_per_iter
|
||||
|
||||
return "{:s}. Total: {:s}, elapsed: {:s}, remaining: {:s}".format(
|
||||
ProgressPrinter.progressBar(step, self.total_steps),
|
||||
ProgressPrinter.timeString(elapsed + remaining_time),
|
||||
ProgressPrinter.timeString(elapsed),
|
||||
ProgressPrinter.timeString(remaining_time))
|
||||
|
||||
def timeString(seconds):
|
||||
seconds = int(max(seconds, 1))
|
||||
minutes, seconds = divmod(seconds, 60)
|
||||
hours, minutes = divmod(minutes, 60)
|
||||
periods = [('h', hours), ('m', minutes), ('s', seconds)]
|
||||
time_string = ' '.join('{}{}'.format(value, name)
|
||||
for name, value in periods
|
||||
if value)
|
||||
return time_string
|
||||
|
||||
def progressBar(step, total_steps, width=30):
|
||||
progress = np.round(width * step / total_steps).astype(np.int32)
|
||||
progressbar = "0% [" + "#"*(progress) + "="*(width-progress) + "] 100%"
|
||||
return progressbar
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
"""
|
||||
Class that holds 2D data
|
||||
"""
|
||||
class CudaArray2D:
|
||||
"""
|
||||
Uploads initial data to the CUDA device
|
||||
"""
|
||||
def __init__(self, stream, nx, ny, x_halo, y_halo, cpu_data=None, dtype=np.float32):
|
||||
self.logger = logging.getLogger(__name__)
|
||||
self.nx = nx
|
||||
self.ny = ny
|
||||
self.x_halo = x_halo
|
||||
self.y_halo = y_halo
|
||||
|
||||
nx_halo = nx + 2*x_halo
|
||||
ny_halo = ny + 2*y_halo
|
||||
|
||||
#self.logger.debug("Allocating [%dx%d] buffer", self.nx, self.ny)
|
||||
#Should perhaps use pycuda.driver.mem_alloc_data.pitch() here
|
||||
#Initialize an array on GPU with zeros
|
||||
#self.data = pycuda.gpuarray.zeros((ny_halo, nx_halo), dtype)
|
||||
self.data_h = np.zeros((ny_halo, nx_halo), dtype="float32")
|
||||
num_bytes = self.data_h.size * self.data_h.itemsize
|
||||
|
||||
# init device array and upload host data
|
||||
self.data = hip_check(hip.hipMalloc(num_bytes)).configure(
|
||||
typestr="float32",shape=(ny_halo, nx_halo))
|
||||
|
||||
# copy data from host to device
|
||||
hip_check(hip.hipMemcpy(self.data,self.data_h,num_bytes,hip.hipMemcpyKind.hipMemcpyHostToDevice))
|
||||
|
||||
#For returning to download (No counterpart in hip-python)
|
||||
#self.memorypool = PageLockedMemoryPool()
|
||||
|
||||
#If we don't have any data, just allocate and return
|
||||
if cpu_data is None:
|
||||
return
|
||||
|
||||
#Make sure data is in proper format
|
||||
assert cpu_data.shape == (ny_halo, nx_halo) or cpu_data.shape == (self.ny, self.nx), "Wrong shape of data %s vs %s / %s" % (str(cpu_data.shape), str((self.ny, self.nx)), str((ny_halo, nx_halo)))
|
||||
assert cpu_data.itemsize == 4, "Wrong size of data type"
|
||||
assert not np.isfortran(cpu_data), "Wrong datatype (Fortran, expected C)"
|
||||
|
||||
#Create copy object from host to device
|
||||
x = (nx_halo - cpu_data.shape[1]) // 2
|
||||
y = (ny_halo - cpu_data.shape[0]) // 2
|
||||
self.upload(stream, cpu_data, extent=[x, y, cpu_data.shape[1], cpu_data.shape[0]])
|
||||
#self.logger.debug("Buffer <%s> [%dx%d]: Allocated ", int(self.data.gpudata), self.nx, self.ny)
|
||||
|
||||
|
||||
def __del__(self, *args):
|
||||
#self.logger.debug("Buffer <%s> [%dx%d]: Releasing ", int(self.data.gpudata), self.nx, self.ny)
|
||||
self.data.gpudata.free()
|
||||
self.data = None
|
||||
|
||||
"""
|
||||
Enables downloading data from GPU to Python
|
||||
"""
|
||||
def download(self, stream, cpu_data=None, asynch=False, extent=None):
|
||||
if (extent is None):
|
||||
x = self.x_halo
|
||||
y = self.y_halo
|
||||
nx = self.nx
|
||||
ny = self.ny
|
||||
else:
|
||||
x, y, nx, ny = extent
|
||||
|
||||
if (cpu_data is None):
|
||||
#self.logger.debug("Downloading [%dx%d] buffer", self.nx, self.ny)
|
||||
#Allocate host memory
|
||||
#The following fails, don't know why (crashes python)
|
||||
#cpu_data = cuda.pagelocked_empty((int(ny), int(nx)), dtype=np.float32, mem_flags=cuda.host_alloc_flags.PORTABLE)
|
||||
#see here type of memory: https://rocm.docs.amd.com/projects/hip-python/en/latest/python_api/hip.html#hip.hip.hipMemoryType
|
||||
cpu_data = np.empty((ny, nx), dtype=np.float32)
|
||||
num_bytes = cpu_data.size * cpu_data.itemsize
|
||||
#hipHostMalloc allocates pinned host memory which is mapped into the address space of all GPUs in the system, the memory can #be accessed directly by the GPU device
|
||||
#hipHostMallocDefault:Memory is mapped and portable (default allocation)
|
||||
#hipHostMallocPortable: memory is explicitely portable across different devices
|
||||
cpu_data = hip_check(hip.hipHostMalloc(num_bytes,hip.hipHostMallocPortable))
|
||||
#Non-pagelocked: cpu_data = np.empty((ny, nx), dtype=np.float32)
|
||||
#cpu_data = self.memorypool.allocate((ny, nx), dtype=np.float32)
|
||||
|
||||
assert nx == cpu_data.shape[1]
|
||||
assert ny == cpu_data.shape[0]
|
||||
assert x+nx <= self.nx + 2*self.x_halo
|
||||
assert y+ny <= self.ny + 2*self.y_halo
|
||||
|
||||
#Create copy object from device to host
|
||||
#copy = cuda.Memcpy2D()
|
||||
#copy.set_src_device(self.data.gpudata)
|
||||
#copy.set_dst_host(cpu_data)
|
||||
|
||||
#Set offsets and pitch of source
|
||||
#copy.src_x_in_bytes = int(x)*self.data.strides[1]
|
||||
#copy.src_y = int(y)
|
||||
#copy.src_pitch = self.data.strides[0]
|
||||
|
||||
#Set width in bytes to copy for each row and
|
||||
#number of rows to copy
|
||||
#copy.width_in_bytes = int(nx)*cpu_data.itemsize
|
||||
#copy.height = int(ny)
|
||||
|
||||
#The equivalent of cuda.Memcpy2D in hip-python would be: but it fails with an error pointing to cpu_data
|
||||
#and a message: "RuntimeError: hipError_t.hipErrorInvalidValue"
|
||||
#shape = (nx,ny)
|
||||
#num_bytes = cpu_data.size * cpu_data.itemsize
|
||||
#dst_pitch_bytes = cpu_data.strides[0]
|
||||
#src_pitch_bytes = num_bytes // shape[0]
|
||||
#src_pitch_bytes = data.strides[0]
|
||||
#width_bytes = int(nx)*cpu_data.itemsize
|
||||
#height_Nrows = int(ny)
|
||||
#hipMemcpy2D(dst, unsigned long dpitch, src, unsigned long spitch, unsigned long width, unsigned long height, kind)
|
||||
#copy = hip_check(hip.hipMemcpy2D(cpu_data, #pointer to destination
|
||||
# dst_pitch_bytes, #pitch of destination array
|
||||
# data, #pointer to source
|
||||
# src_pitch_bytes, #pitch of source array
|
||||
# width_bytes, #number of bytes in each row
|
||||
# height_Nrows, #number of rows to copy
|
||||
# hip.hipMemcpyKind.hipMemcpyDeviceToHost)) #kind
|
||||
|
||||
#this is an alternative:
|
||||
#copy from device to host
|
||||
cpu_data = np.empty((ny, nx), dtype=np.float32)
|
||||
num_bytes = cpu_data.size * cpu_data.itemsize
|
||||
#hip.hipMemcpy(dst, src, unsigned long sizeBytes, kind)
|
||||
copy = hip_check(hip.hipMemcpy(cpu_data,self.data,num_bytes,hip.hipMemcpyKind.hipMemcpyDeviceToHost))
|
||||
|
||||
copy(stream)
|
||||
if asynch==False:
|
||||
stream.synchronize()
|
||||
|
||||
return cpu_data
|
||||
|
||||
|
||||
def upload(self, stream, cpu_data, extent=None):
|
||||
if (extent is None):
|
||||
x = self.x_halo
|
||||
y = self.y_halo
|
||||
nx = self.nx
|
||||
ny = self.ny
|
||||
else:
|
||||
x, y, nx, ny = extent
|
||||
|
||||
assert(nx == cpu_data.shape[1])
|
||||
assert(ny == cpu_data.shape[0])
|
||||
assert(x+nx <= self.nx + 2*self.x_halo)
|
||||
assert(y+ny <= self.ny + 2*self.y_halo)
|
||||
|
||||
#Create copy object from device to host
|
||||
#Well this copy from src:host to dst:device AND NOT from device to host
|
||||
#copy = cuda.Memcpy2D()
|
||||
#copy.set_dst_device(self.data.gpudata)
|
||||
#copy.set_src_host(cpu_data)
|
||||
|
||||
#Set offsets and pitch of source
|
||||
#copy.dst_x_in_bytes = int(x)*self.data.strides[1]
|
||||
#copy.dst_y = int(y)
|
||||
#copy.dst_pitch = self.data.strides[0]
|
||||
|
||||
#Set width in bytes to copy for each row and
|
||||
#number of rows to copy
|
||||
#copy.width_in_bytes = int(nx)*cpu_data.itemsize
|
||||
#copy.height = int(ny)
|
||||
|
||||
#copy from host de device
|
||||
num_bytes = cpu_data.size * cpu_data.itemsize
|
||||
self.data = hip_check(hip.hipMalloc(num_bytes)).configure(
|
||||
typestr="float32",shape=cpu_data.shape)
|
||||
#hip.hipMemcpy(dst, src, unsigned long sizeBytes, kind)
|
||||
copy = hip_check(hip.hipMemcpy(self.data,cpu_data,num_bytes,hip.hipMemcpyKind.hipMemcpyHostToDevice))
|
||||
|
||||
copy(stream)
|
||||
|
||||
|
||||
|
||||
|
||||
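The download and upload methods above use the blocking hip.hipMemcpy, so their stream argument is effectively unused. A hedged sketch of an asynchronous variant built on hip.hipMemcpyAsync (assuming `stream` was created with hip.hipStreamCreate; copies only overlap with other work when the host buffer is pinned, e.g. allocated with hipHostMalloc):

def download_async(device_buf, ny, nx, stream):
    # pageable host buffer; hipMemcpyAsync degrades to an effectively
    # synchronous copy unless pinned host memory is used
    host_buf = np.empty((ny, nx), dtype=np.float32)
    num_bytes = host_buf.size * host_buf.itemsize
    hip_check(hip.hipMemcpyAsync(host_buf, device_buf, num_bytes,
                                 hip.hipMemcpyKind.hipMemcpyDeviceToHost,
                                 stream))
    hip_check(hip.hipStreamSynchronize(stream))
    return host_buf
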
"""
|
||||
Class that holds 2D data
|
||||
"""
|
||||
class CudaArray3D:
|
||||
"""
|
||||
Uploads initial data to the CL device
|
||||
"""
|
||||
def __init__(self, stream, nx, ny, nz, x_halo, y_halo, z_halo, cpu_data=None, dtype=np.float32):
|
||||
self.logger = logging.getLogger(__name__)
|
||||
self.nx = nx
|
||||
self.ny = ny
|
||||
self.nz = nz
|
||||
self.x_halo = x_halo
|
||||
self.y_halo = y_halo
|
||||
self.z_halo = z_halo
|
||||
|
||||
nx_halo = nx + 2*x_halo
|
||||
ny_halo = ny + 2*y_halo
|
||||
nz_halo = nz + 2*z_halo
|
||||
|
||||
#self.logger.debug("Allocating [%dx%dx%d] buffer", self.nx, self.ny, self.nz)
|
||||
#Should perhaps use pycuda.driver.mem_alloc_data.pitch() here
|
||||
#self.data = pycuda.gpuarray.zeros((nz_halo, ny_halo, nx_halo), dtype)
|
||||
|
||||
self.data_h = np.zeros((nz_halo, ny_halo, nx_halo), dtype="float32")
|
||||
num_bytes = self.data_h.size * self.data_h.itemsize
|
||||
|
||||
# init device array and upload host data
|
||||
self.data = hip_check(hip.hipMalloc(num_bytes)).configure(
|
||||
typestr="float32",shape=(nz_halo, ny_halo, nx_halo))
|
||||
|
||||
# copy data from host to device
|
||||
hip_check(hip.hipMemcpy(self.data,self.data_h,num_bytes,hip.hipMemcpyKind.hipMemcpyHostToDevice))
|
||||
|
||||
#For returning to download
|
||||
#self.memorypool = PageLockedMemoryPool()
|
||||
|
||||
#If we don't have any data, just allocate and return
|
||||
if cpu_data is None:
|
||||
return
|
||||
|
||||
#Make sure data is in proper format
|
||||
assert cpu_data.shape == (nz_halo, ny_halo, nx_halo) or cpu_data.shape == (self.nz, self.ny, self.nx), "Wrong shape of data %s vs %s / %s" % (str(cpu_data.shape), str((self.nz, self.ny, self.nx)), str((nz_halo, ny_halo, nx_halo)))
|
||||
assert cpu_data.itemsize == 4, "Wrong size of data type"
|
||||
assert not np.isfortran(cpu_data), "Wrong datatype (Fortran, expected C)"
|
||||
|
||||
#Create copy object from host to device
|
||||
#copy = cuda.Memcpy3D()
|
||||
#copy.set_src_host(cpu_data)
|
||||
#copy.set_dst_device(self.data.gpudata)
|
||||
|
||||
#Set offsets of destination
|
||||
#x_offset = (nx_halo - cpu_data.shape[2]) // 2
|
||||
#y_offset = (ny_halo - cpu_data.shape[1]) // 2
|
||||
#z_offset = (nz_halo - cpu_data.shape[0]) // 2
|
||||
#copy.dst_x_in_bytes = x_offset*self.data.strides[1]
|
||||
#copy.dst_y = y_offset
|
||||
#copy.dst_z = z_offset
|
||||
|
||||
#Set pitch of destination
|
||||
#copy.dst_pitch = self.data.strides[0]
|
||||
|
||||
#Set width in bytes to copy for each row and
|
||||
#number of rows to copy
|
||||
#width = max(self.nx, cpu_data.shape[2])
|
||||
#height = max(self.ny, cpu_data.shape[1])
|
||||
#depth = max(self.nz, cpu-data.shape[0])
|
||||
#copy.width_in_bytes = width*cpu_data.itemsize
|
||||
#copy.height = height
|
||||
#copy.depth = depth
|
||||
|
||||
#copy from host to device
|
||||
num_bytes = cpu_data.size * cpu_data.itemsize
|
||||
self.data = hip_check(hip.hipMalloc(num_bytes)).configure(
|
||||
typestr="float32",shape=cpu_data.shape)
|
||||
#hip.hipMemcpy(dst, src, unsigned long sizeBytes, kind)
|
||||
copy = hip_check(hip.hipMemcpy(self.data,cpu_data,num_bytes,hip.hipMemcpyKind.hipMemcpyHostToDevice))
|
||||
|
||||
#Perform the copy
|
||||
copy(stream)
|
||||
|
||||
#self.logger.debug("Buffer <%s> [%dx%d]: Allocated ", int(self.data.gpudata), self.nx, self.ny)
|
||||
|
||||
|
||||
def __del__(self, *args):
|
||||
#self.logger.debug("Buffer <%s> [%dx%d]: Releasing ", int(self.data.gpudata), self.nx, self.ny)
|
||||
self.data.gpudata.free()
|
||||
self.data = None
|
||||
|
||||
"""
|
||||
Enables downloading data from GPU to Python
|
||||
"""
|
||||
def download(self, stream, asynch=False):
|
||||
#self.logger.debug("Downloading [%dx%d] buffer", self.nx, self.ny)
|
||||
#Allocate host memory
|
||||
#cpu_data = cuda.pagelocked_empty((self.ny, self.nx), np.float32)
|
||||
cpu_data = np.empty((self.nz, self.ny, self.nx), dtype=np.float32)
|
||||
#cpu_data = self.memorypool.allocate((self.nz, self.ny, self.nx), dtype=np.float32)
|
||||
|
||||
#Create copy object from device to host
|
||||
#copy = cuda.Memcpy2D()
|
||||
#copy.set_src_device(self.data.gpudata)
|
||||
#copy.set_dst_host(cpu_data)
|
||||
|
||||
#Set offsets and pitch of source
|
||||
#copy.src_x_in_bytes = self.x_halo*self.data.strides[1]
|
||||
#copy.src_y = self.y_halo
|
||||
#copy.src_z = self.z_halo
|
||||
#copy.src_pitch = self.data.strides[0]
|
||||
|
||||
#Set width in bytes to copy for each row and
|
||||
#number of rows to copy
|
||||
#copy.width_in_bytes = self.nx*cpu_data.itemsize
|
||||
#copy.height = self.ny
|
||||
#copy.depth = self.nz
|
||||
|
||||
#copy from device to host
|
||||
num_bytes = cpu_data.size * cpu_data.itemsize
|
||||
#hip.hipMemcpy(dst, src, unsigned long sizeBytes, kind)
|
||||
copy = hip_check(hip.hipMemcpy(cpu_data,self.data,num_bytes,hip.hipMemcpyKind.hipMemcpyDeviceToHost))
|
||||
|
||||
copy(stream)
|
||||
if asynch==False:
|
||||
stream.synchronize()
|
||||
|
||||
return cpu_data
|
||||
|
||||
|
||||
"""
|
||||
A class representing an Arakawa A type (unstaggered, logically Cartesian) grid
|
||||
"""
|
||||
class ArakawaA2D:
|
||||
def __init__(self, stream, nx, ny, halo_x, halo_y, cpu_variables):
|
||||
"""
|
||||
Uploads initial data to the GPU device
|
||||
"""
|
||||
self.logger = logging.getLogger(__name__)
|
||||
self.gpu_variables = []
|
||||
for cpu_variable in cpu_variables:
|
||||
self.gpu_variables += [CudaArray2D(stream, nx, ny, halo_x, halo_y, cpu_variable)]
|
||||
|
||||
def __getitem__(self, key):
|
||||
assert type(key) == int, "Indexing is int based"
|
||||
if (key > len(self.gpu_variables) or key < 0):
|
||||
raise IndexError("Out of bounds")
|
||||
return self.gpu_variables[key]
|
||||
|
||||
def download(self, stream, variables=None):
|
||||
"""
|
||||
Enables downloading data from the GPU device to Python
|
||||
"""
|
||||
if variables is None:
|
||||
variables=range(len(self.gpu_variables))
|
||||
|
||||
cpu_variables = []
|
||||
for i in variables:
|
||||
assert i < len(self.gpu_variables), "Variable {:d} is out of range".format(i)
|
||||
cpu_variables += [self.gpu_variables[i].download(stream, asynch=True)]
|
||||
|
||||
#stream.synchronize()
|
||||
return cpu_variables
|
||||
|
||||
#hipblas
|
||||
def sum_hipblas(self, num_elements, data):
|
||||
num_bytes_r = np.dtype(np.float32).itemsize
|
||||
result_d = hip_check(hip.hipMalloc(num_bytes_r))
|
||||
result_h = np.zeros(1, dtype=np.float32)
|
||||
print("--bytes:", num_bytes_r)
|
||||
|
||||
# call hipblasSaxpy + initialization
|
||||
handle = hip_check(hipblas.hipblasCreate())
|
||||
#hip_check(hipblas.hipblasSaxpy(handle, num_elements, ctypes.addressof(alpha), x_d, 1, y_d, 1))
|
||||
#"incx" [int] specifies the increment for the elements of x. incx must be > 0.
|
||||
hip_check(hipblas.hipblasSasum(handle, num_elements, data, 1, result_d))
|
||||
|
||||
# destruction of handle
|
||||
hip_check(hipblas.hipblasDestroy(handle))
|
||||
|
||||
# copy result (stored in result_d) back to host (store in result_h)
|
||||
hip_check(hip.hipMemcpy(result_h,result_d,num_bytes_r,hip.hipMemcpyKind.hipMemcpyDeviceToHost))
|
||||
|
||||
# clean up
|
||||
hip_check(hip.hipFree(data))
|
||||
return result_h
|
||||
|
||||
def check(self):
|
||||
"""
|
||||
Checks that data is still sane
|
||||
"""
|
||||
for i, gpu_variable in enumerate(self.gpu_variables):
|
||||
#compute sum with hipblas
|
||||
#var_sum = pycuda.gpuarray.sum(gpu_variable.data).get()
|
||||
var_sum = self.sum_hipblas(gpu_variable.ny,gpu_variable.data)
|
||||
|
||||
self.logger.debug("Data %d with size [%d x %d] has average %f", i, gpu_variable.nx, gpu_variable.ny, var_sum / (gpu_variable.nx * gpu_variable.ny))
|
||||
assert np.isnan(var_sum) == False, "Data contains NaN values!"
|
||||
|
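sum_hipblas above drives hipBLAS directly from hip-python. The same call sequence written out end to end for a standalone device array (all names are local to this example; as in the class above, the result is stored on the device and copied back afterwards):

x_h = np.random.rand(1000).astype(np.float32)
x_d = hip_check(hip.hipMalloc(x_h.nbytes))
hip_check(hip.hipMemcpy(x_d, x_h, x_h.nbytes, hip.hipMemcpyKind.hipMemcpyHostToDevice))

result_d = hip_check(hip.hipMalloc(np.dtype(np.float32).itemsize))
result_h = np.zeros(1, dtype=np.float32)

handle = hip_check(hipblas.hipblasCreate())
# Sasum computes sum(|x_i|) over the first n elements with stride 1
hip_check(hipblas.hipblasSasum(handle, x_h.size, x_d, 1, result_d))
hip_check(hipblas.hipblasDestroy(handle))

hip_check(hip.hipMemcpy(result_h, result_d, result_h.nbytes, hip.hipMemcpyKind.hipMemcpyDeviceToHost))
hip_check(hip.hipFree(result_d))
hip_check(hip.hipFree(x_d))
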
@@ -1,328 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
This python module implements Cuda context handling
|
||||
|
||||
Copyright (C) 2018 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
|
||||
|
||||
import os
|
||||
|
||||
import numpy as np
|
||||
import time
|
||||
import re
|
||||
import io
|
||||
import hashlib
|
||||
import logging
|
||||
import gc
|
||||
|
||||
#import pycuda.compiler as cuda_compiler
|
||||
#import pycuda.gpuarray
|
||||
#import pycuda.driver as cuda
|
||||
|
||||
from hip import hip,hiprtc
|
||||
from hip import rccl
|
||||
|
||||
from GPUSimulators import Autotuner, Common
|
||||
|
||||
|
||||
def hip_check(call_result):
|
||||
err = call_result[0]
|
||||
result = call_result[1:]
|
||||
if len(result) == 1:
|
||||
result = result[0]
|
||||
if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
|
||||
raise RuntimeError(str(err))
|
||||
elif (
|
||||
isinstance(err, hiprtc.hiprtcResult)
|
||||
and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
|
||||
):
|
||||
raise RuntimeError(str(err))
|
||||
return result
|
||||
|
||||
"""
|
||||
Class which keeps track of the CUDA context and some helper functions
|
||||
"""
|
||||
class CudaContext(object):
|
||||
|
||||
def __init__(self, device=None, context_flags=None, use_cache=True, autotuning=True):
|
||||
"""
|
||||
Create a new CUDA context
|
||||
Set device to an id or pci_bus_id to select a specific GPU
|
||||
Set context_flags to cuda.ctx_flags.SCHED_BLOCKING_SYNC for a blocking context
|
||||
"""
|
||||
self.use_cache = use_cache
|
||||
self.logger = logging.getLogger(__name__)
|
||||
self.modules = {}
|
||||
|
||||
self.module_path = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
#Initialize cuda (must be first call to PyCUDA)
|
||||
##cuda.init(flags=0)
|
||||
|
||||
##self.logger.info("PyCUDA version %s", str(pycuda.VERSION_TEXT))
|
||||
|
||||
#Print some info about CUDA
|
||||
##self.logger.info("CUDA version %s", str(cuda.get_version()))
|
||||
#self.logger.info("Driver version %s", str(cuda.get_driver_version()))
|
||||
self.logger.info("Driver version %s", str(hip_check(hip.hipDriverGetVersion())))
|
||||
|
||||
if device is None:
|
||||
device = 0
|
||||
|
||||
self.cuda_device = hip.Device(device)
|
||||
#self.logger.info("Using device %d/%d '%s' (%s) GPU", device, cuda.Device.count(), self.cuda_device.name(), self.cuda_device.pci_bus_id())
|
||||
self.logger.info("Using device %d/%d '%s' (%s) GPU", device, hip_check(hip.hipGetDeviceCount()))
|
||||
#self.logger.debug(" => compute capability: %s", str(self.cuda_device.compute_capability()))
|
||||
self.logger.debug(" => compute capability: %s", str(self.hip.hipDeviceComputeCapability(device)))
|
||||
|
||||
# Create the CUDA context
|
||||
#In HIP there is no need to specify a scheduling policy (it is abstracted). Here the HIP runtime system manages the workload to fit a specifc target architecture
|
||||
#if context_flags is None:
|
||||
# context_flags=cuda.ctx_flags.SCHED_AUTO
|
||||
|
||||
#self.cuda_context = self.cuda_device.make_context(flags=context_flags)
|
||||
|
||||
#free, total = cuda.mem_get_info()
|
||||
total = hip_check(hip.hipDeviceTotalMem(device))
|
||||
#self.logger.debug(" => memory: %d / %d MB available", int(free/(1024*1024)), int(total/(1024*1024)))
|
||||
self.logger.debug(" => memory: %d / %d MB available", int(total/(1024*1024)))
|
||||
|
||||
#self.logger.info("Created context handle <%s>", str(self.cuda_context.handle))
|
||||
|
||||
#Create cache dir for cubin files
|
||||
self.cache_path = os.path.join(self.module_path, "cuda_cache")
|
||||
if (self.use_cache):
|
||||
if not os.path.isdir(self.cache_path):
|
||||
os.mkdir(self.cache_path)
|
||||
self.logger.info("Using CUDA cache dir %s", self.cache_path)
|
||||
|
||||
self.autotuner = None
|
||||
if (autotuning):
|
||||
self.logger.info("Autotuning enabled. It may take several minutes to run the code the first time: have patience")
|
||||
self.autotuner = Autotuner.Autotuner()
|
||||
|
||||
|
||||
# def __del__(self, *args):
|
||||
# self.logger.info("Cleaning up CUDA context handle <%s>", str(self.cuda_context.handle))
|
||||
|
||||
# Loop over all contexts in stack, and remove "this"
|
||||
# other_contexts = []
|
||||
# while (cuda.Context.get_current() != None):
|
||||
# context = cuda.Context.get_current()
|
||||
# if (context.handle != self.cuda_context.handle):
|
||||
# self.logger.debug("<%s> Popping <%s> (*not* ours)", str(self.cuda_context.handle), str(context.handle))
|
||||
# other_contexts = [context] + other_contexts
|
||||
# cuda.Context.pop()
|
||||
# else:
|
||||
# self.logger.debug("<%s> Popping <%s> (ours)", str(self.cuda_context.handle), str(context.handle))
|
||||
# cuda.Context.pop()
|
||||
|
||||
# Add all the contexts we popped that were not our own
|
||||
# for context in other_contexts:
|
||||
# self.logger.debug("<%s> Pushing <%s>", str(self.cuda_context.handle), str(context.handle))
|
||||
# cuda.Context.push(context)
|
||||
|
||||
# self.logger.debug("<%s> Detaching", str(self.cuda_context.handle))
|
||||
# self.cuda_context.detach()
|
||||
|
||||
|
||||
# def __str__(self):
|
||||
# return "CudaContext id " + str(self.cuda_context.handle)
|
||||
|
||||
|
||||
def hash_kernel(kernel_filename, include_dirs):
|
||||
# Generate a kernel ID for our caches
|
||||
num_includes = 0
|
||||
max_includes = 100
|
||||
kernel_hasher = hashlib.md5()
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Loop over file and includes, and check if something has changed
|
||||
files = [kernel_filename]
|
||||
while len(files):
|
||||
|
||||
if (num_includes > max_includes):
|
||||
raise("Maximum number of includes reached - circular include in {:}?".format(kernel_filename))
|
||||
|
||||
filename = files.pop()
|
||||
|
||||
#logger.debug("Hashing %s", filename)
|
||||
|
||||
modified = os.path.getmtime(filename)
|
||||
|
||||
# Open the file
|
||||
with io.open(filename, "r") as file:
|
||||
|
||||
# Search for #inclue <something> and also hash the file
|
||||
file_str = file.read()
|
||||
kernel_hasher.update(file_str.encode('utf-8'))
|
||||
kernel_hasher.update(str(modified).encode('utf-8'))
|
||||
|
||||
#Find all includes
|
||||
includes = re.findall('^\W*#include\W+(.+?)\W*$', file_str, re.M)
|
||||
|
||||
# Loop over everything that looks like an include
|
||||
for include_file in includes:
|
||||
|
||||
#Search through include directories for the file
|
||||
file_path = os.path.dirname(filename)
|
||||
for include_path in [file_path] + include_dirs:
|
||||
|
||||
# If we find it, add it to list of files to check
|
||||
temp_path = os.path.join(include_path, include_file)
|
||||
if (os.path.isfile(temp_path)):
|
||||
files = files + [temp_path]
|
||||
num_includes = num_includes + 1 #For circular includes...
|
||||
break
|
||||
|
||||
return kernel_hasher.hexdigest()
|
||||
|
||||
|
||||
"""
|
||||
Reads a text file and creates an OpenCL kernel from that
|
||||
"""
|
||||
def get_module(self, kernel_filename,
|
||||
include_dirs=[], \
|
||||
defines={}, \
|
||||
compile_args={'no_extern_c', True}, jit_compile_args={}):
|
||||
"""
|
||||
Helper function to print compilation output
|
||||
"""
|
||||
def cuda_compile_message_handler(compile_success_bool, info_str, error_str):
|
||||
self.logger.debug("Compilation returned %s", str(compile_success_bool))
|
||||
if info_str:
|
||||
self.logger.debug("Info: %s", info_str)
|
||||
if error_str:
|
||||
self.logger.debug("Error: %s", error_str)
|
||||
|
||||
kernel_filename = os.path.normpath(kernel_filename)
|
||||
kernel_path = os.path.abspath(os.path.join(self.module_path, kernel_filename))
|
||||
#self.logger.debug("Getting %s", kernel_filename)
|
||||
|
||||
# Create a hash of the kernel options
|
||||
options_hasher = hashlib.md5()
|
||||
options_hasher.update(str(defines).encode('utf-8') + str(compile_args).encode('utf-8'));
|
||||
options_hash = options_hasher.hexdigest()
|
||||
|
||||
# Create hash of kernel souce
|
||||
source_hash = CudaContext.hash_kernel( \
|
||||
kernel_path, \
|
||||
include_dirs=[self.module_path] + include_dirs)
|
||||
|
||||
# Create final hash
|
||||
root, ext = os.path.splitext(kernel_filename)
|
||||
kernel_hash = root \
|
||||
+ "_" + source_hash \
|
||||
+ "_" + options_hash \
|
||||
+ ext
|
||||
cached_kernel_filename = os.path.join(self.cache_path, kernel_hash)
|
||||
|
||||
# If we have the kernel in our hashmap, return it
|
||||
if (kernel_hash in self.modules.keys()):
|
||||
self.logger.debug("Found kernel %s cached in hashmap (%s)", kernel_filename, kernel_hash)
|
||||
return self.modules[kernel_hash]
|
||||
|
||||
# If we have it on disk, return it
|
||||
elif (self.use_cache and os.path.isfile(cached_kernel_filename)):
|
||||
self.logger.debug("Found kernel %s cached on disk (%s)", kernel_filename, kernel_hash)
|
||||
|
||||
with io.open(cached_kernel_filename, "rb") as file:
|
||||
file_str = file.read()
|
||||
#No hip counterpart of module_from_buffer
|
||||
module = cuda.module_from_buffer(file_str, message_handler=cuda_compile_message_handler, **jit_compile_args)
|
||||
|
||||
self.modules[kernel_hash] = module
|
||||
return module
|
||||
|
||||
# Otherwise, compile it from source
|
||||
else:
|
||||
self.logger.debug("Compiling %s (%s)", kernel_filename, kernel_hash)
|
||||
|
||||
#Create kernel string
|
||||
kernel_string = ""
|
||||
for key, value in defines.items():
|
||||
kernel_string += "#define {:s} {:s}\n".format(str(key), str(value))
|
||||
kernel_string += '#include "{:s}"'.format(os.path.join(self.module_path, kernel_filename))
|
||||
if (self.use_cache):
|
||||
cached_kernel_dir = os.path.dirname(cached_kernel_filename)
|
||||
if not os.path.isdir(cached_kernel_dir):
|
||||
os.mkdir(cached_kernel_dir)
|
||||
with io.open(cached_kernel_filename + ".txt", "w") as file:
|
||||
file.write(kernel_string)
|
||||
|
||||
|
||||
with Common.Timer("compiler") as timer:
|
||||
import warnings
|
||||
with warnings.catch_warnings():
|
||||
warnings.filterwarnings("ignore", message="The CUDA compiler succeeded, but said the following:\nkernel.cu", category=UserWarning)
|
||||
|
||||
#cubin = cuda_compiler.compile(kernel_string, include_dirs=include_dirs, cache_dir=False, **compile_args)
|
||||
#module = cuda.module_from_buffer(cubin, message_handler=cuda_compile_message_handler, **jit_compile_args)
|
||||
|
||||
#HIP version of compilation: but "name_of_fct" needs to be defined. e.g.
|
||||
#source = b"""\
|
||||
#extern "C" __global__ void name_of_fct(float factor, int n, short unused1, int unused2, float unused3, float *x) {
|
||||
#int tid = threadIdx.x + blockIdx.x * blockDim.x;
|
||||
#if (tid < n) {
|
||||
#x[tid] *= factor;
|
||||
# }
|
||||
#}
|
||||
#"""
|
||||
|
||||
prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_string, b"name_of_fct", 0, [], []))
|
||||
|
||||
props = hip.hipDeviceProp_t()
|
||||
hip_check(hip.hipGetDeviceProperties(props,0))
|
||||
arch = props.gcnArchName
|
||||
|
||||
print(f"Compiling kernel for {arch}")
|
||||
|
||||
cflags = [b"--offload-arch="+arch]
|
||||
err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
|
||||
if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
|
||||
log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
|
||||
log = bytearray(log_size)
|
||||
hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
|
||||
raise RuntimeError(log.decode())
|
||||
code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
|
||||
code = bytearray(code_size)
|
||||
hip_check(hiprtc.hiprtcGetCode(prog, code))
|
||||
module = hip_check(hip.hipModuleLoadData(code))
|
||||
#kernel = hip_check(hip.hipModuleGetFunction(module, b"name_of_fct"))
|
||||
|
||||
if (self.use_cache):
|
||||
with io.open(cached_kernel_filename, "wb") as file:
|
||||
file.write(cubin)
|
||||
|
||||
self.modules[kernel_hash] = module
|
||||
return module
|
||||
|
||||
"""
|
||||
Clears the kernel cache (useful for debugging & development)
|
||||
"""
|
||||
def clear_kernel_cache(self):
|
||||
self.logger.debug("Clearing cache")
|
||||
self.modules = {}
|
||||
gc.collect()
|
||||
|
||||
"""
|
||||
Synchronizes all streams etc
|
||||
"""
|
||||
def synchronize(self):
|
||||
self.cuda_context.synchronize()
|
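
get_module above compiles kernel source with hiprtc and loads it with hipModuleLoadData, but never shows a launch. A minimal end-to-end sketch of that path, reusing hip_check and the hip/hiprtc imports from this file (the kernel name "scale", its body and all sizes are illustrative assumptions, not part of the original code):

import ctypes
import numpy as np

source = b"""
extern "C" __global__ void scale(float factor, int n, float *x) {
    int tid = threadIdx.x + blockIdx.x * blockDim.x;
    if (tid < n) {
        x[tid] *= factor;
    }
}
"""

prog = hip_check(hiprtc.hiprtcCreateProgram(source, b"scale", 0, [], []))
props = hip.hipDeviceProp_t()
hip_check(hip.hipGetDeviceProperties(props, 0))
cflags = [b"--offload-arch=" + props.gcnArchName]
err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
    log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
    log = bytearray(log_size)
    hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
    raise RuntimeError(log.decode())
code = bytearray(hip_check(hiprtc.hiprtcGetCodeSize(prog)))
hip_check(hiprtc.hiprtcGetCode(prog, code))
module = hip_check(hip.hipModuleLoadData(code))
kernel = hip_check(hip.hipModuleGetFunction(module, b"scale"))

n = 1024
x_h = np.ones(n, dtype=np.float32)
x_d = hip_check(hip.hipMalloc(x_h.nbytes))
hip_check(hip.hipMemcpy(x_d, x_h, x_h.nbytes, hip.hipMemcpyKind.hipMemcpyHostToDevice))

block = hip.dim3(x=256)
grid = hip.dim3(x=(n + block.x - 1) // block.x)
hip_check(hip.hipModuleLaunchKernel(kernel, *grid, *block,
                                    sharedMemBytes=0, stream=None, kernelParams=None,
                                    extra=(ctypes.c_float(2.0), ctypes.c_int(n), x_d)))
hip_check(hip.hipDeviceSynchronize())
hip_check(hip.hipMemcpy(x_h, x_d, x_h.nbytes, hip.hipMemcpyKind.hipMemcpyDeviceToHost))
hip_check(hip.hipFree(x_d))
hip_check(hip.hipModuleUnload(module))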
@@ -1,272 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
This python module implements Cuda context handling
|
||||
|
||||
Copyright (C) 2018 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
|
||||
|
||||
import os
|
||||
|
||||
import numpy as np
|
||||
import time
|
||||
import re
|
||||
import io
|
||||
import hashlib
|
||||
import logging
|
||||
import gc
|
||||
|
||||
import pycuda.compiler as cuda_compiler
|
||||
import pycuda.gpuarray
|
||||
import pycuda.driver as cuda
|
||||
|
||||
from GPUSimulators import Autotuner, Common
|
||||
|
||||
|
||||
|
||||
"""
|
||||
Class which keeps track of the CUDA context and some helper functions
|
||||
"""
|
||||
class CudaContext(object):
|
||||
|
||||
def __init__(self, device=None, context_flags=None, use_cache=True, autotuning=True):
|
||||
"""
|
||||
Create a new CUDA context
|
||||
Set device to an id or pci_bus_id to select a specific GPU
|
||||
Set context_flags to cuda.ctx_flags.SCHED_BLOCKING_SYNC for a blocking context
|
||||
"""
|
||||
self.use_cache = use_cache
|
||||
self.logger = logging.getLogger(__name__)
|
||||
self.modules = {}
|
||||
|
||||
self.module_path = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
#Initialize cuda (must be first call to PyCUDA)
|
||||
cuda.init(flags=0)
|
||||
|
||||
self.logger.info("PyCUDA version %s", str(pycuda.VERSION_TEXT))
|
||||
|
||||
#Print some info about CUDA
|
||||
self.logger.info("CUDA version %s", str(cuda.get_version()))
|
||||
self.logger.info("Driver version %s", str(cuda.get_driver_version()))
|
||||
|
||||
if device is None:
|
||||
device = 0
|
||||
|
||||
self.cuda_device = cuda.Device(device)
|
||||
self.logger.info("Using device %d/%d '%s' (%s) GPU", device, cuda.Device.count(), self.cuda_device.name(), self.cuda_device.pci_bus_id())
|
||||
self.logger.debug(" => compute capability: %s", str(self.cuda_device.compute_capability()))
|
||||
|
||||
# Create the CUDA context
|
||||
if context_flags is None:
|
||||
context_flags=cuda.ctx_flags.SCHED_AUTO
|
||||
|
||||
self.cuda_context = self.cuda_device.make_context(flags=context_flags)
|
||||
|
||||
free, total = cuda.mem_get_info()
|
||||
self.logger.debug(" => memory: %d / %d MB available", int(free/(1024*1024)), int(total/(1024*1024)))
|
||||
|
||||
self.logger.info("Created context handle <%s>", str(self.cuda_context.handle))
|
||||
|
||||
#Create cache dir for cubin files
|
||||
self.cache_path = os.path.join(self.module_path, "cuda_cache")
|
||||
if (self.use_cache):
|
||||
if not os.path.isdir(self.cache_path):
|
||||
os.mkdir(self.cache_path)
|
||||
self.logger.info("Using CUDA cache dir %s", self.cache_path)
|
||||
|
||||
self.autotuner = None
|
||||
if (autotuning):
|
||||
self.logger.info("Autotuning enabled. It may take several minutes to run the code the first time: have patience")
|
||||
self.autotuner = Autotuner.Autotuner()
|
||||
|
||||
|
||||
def __del__(self, *args):
|
||||
self.logger.info("Cleaning up CUDA context handle <%s>", str(self.cuda_context.handle))
|
||||
|
||||
# Loop over all contexts in stack, and remove "this"
|
||||
other_contexts = []
|
||||
while (cuda.Context.get_current() != None):
|
||||
context = cuda.Context.get_current()
|
||||
if (context.handle != self.cuda_context.handle):
|
||||
self.logger.debug("<%s> Popping <%s> (*not* ours)", str(self.cuda_context.handle), str(context.handle))
|
||||
other_contexts = [context] + other_contexts
|
||||
cuda.Context.pop()
|
||||
else:
|
||||
self.logger.debug("<%s> Popping <%s> (ours)", str(self.cuda_context.handle), str(context.handle))
|
||||
cuda.Context.pop()
|
||||
|
||||
# Add all the contexts we popped that were not our own
|
||||
for context in other_contexts:
|
||||
self.logger.debug("<%s> Pushing <%s>", str(self.cuda_context.handle), str(context.handle))
|
||||
cuda.Context.push(context)
|
||||
|
||||
self.logger.debug("<%s> Detaching", str(self.cuda_context.handle))
|
||||
self.cuda_context.detach()
|
||||
|
||||
|
||||
def __str__(self):
|
||||
return "CudaContext id " + str(self.cuda_context.handle)
|
||||
|
||||
|
||||
def hash_kernel(kernel_filename, include_dirs):
|
||||
# Generate a kernel ID for our caches
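# Example (sketch): the hash covers the kernel file, every #include it pulls in
# from the given directories, and their modification times, e.g.
#   key = CudaContext.hash_kernel("cuda/SWE2D_HLL.cu", include_dirs=["cuda"])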
|
||||
num_includes = 0
|
||||
max_includes = 100
|
||||
kernel_hasher = hashlib.md5()
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Loop over file and includes, and check if something has changed
|
||||
files = [kernel_filename]
|
||||
while len(files):
|
||||
|
||||
if (num_includes > max_includes):
|
||||
raise("Maximum number of includes reached - circular include in {:}?".format(kernel_filename))
|
||||
|
||||
filename = files.pop()
|
||||
|
||||
#logger.debug("Hashing %s", filename)
|
||||
|
||||
modified = os.path.getmtime(filename)
|
||||
|
||||
# Open the file
|
||||
with io.open(filename, "r") as file:
|
||||
|
||||
# Search for #include <something> and also hash the file
|
||||
file_str = file.read()
|
||||
kernel_hasher.update(file_str.encode('utf-8'))
|
||||
kernel_hasher.update(str(modified).encode('utf-8'))
|
||||
|
||||
#Find all includes
|
||||
includes = re.findall(r'^\W*#include\W+(.+?)\W*$', file_str, re.M)
|
||||
|
||||
# Loop over everything that looks like an include
|
||||
for include_file in includes:
|
||||
|
||||
#Search through include directories for the file
|
||||
file_path = os.path.dirname(filename)
|
||||
for include_path in [file_path] + include_dirs:
|
||||
|
||||
# If we find it, add it to list of files to check
|
||||
temp_path = os.path.join(include_path, include_file)
|
||||
if (os.path.isfile(temp_path)):
|
||||
files = files + [temp_path]
|
||||
num_includes = num_includes + 1 #For circular includes...
|
||||
break
|
||||
|
||||
return kernel_hasher.hexdigest()
|
||||
|
||||
|
||||
"""
|
||||
Reads a text file and creates a CUDA/HIP kernel module from it
|
||||
"""
|
||||
def get_module(self, kernel_filename,
|
||||
include_dirs=[], \
|
||||
defines={}, \
|
||||
compile_args={'no_extern_c': True}, jit_compile_args={}):
|
||||
"""
|
||||
Helper function to print compilation output
|
||||
"""
|
||||
def cuda_compile_message_handler(compile_success_bool, info_str, error_str):
|
||||
self.logger.debug("Compilation returned %s", str(compile_success_bool))
|
||||
if info_str:
|
||||
self.logger.debug("Info: %s", info_str)
|
||||
if error_str:
|
||||
self.logger.debug("Error: %s", error_str)
|
||||
|
||||
kernel_filename = os.path.normpath(kernel_filename)
|
||||
kernel_path = os.path.abspath(os.path.join(self.module_path, kernel_filename))
|
||||
#self.logger.debug("Getting %s", kernel_filename)
|
||||
|
||||
# Create a hash of the kernel options
|
||||
options_hasher = hashlib.md5()
|
||||
options_hasher.update(str(defines).encode('utf-8') + str(compile_args).encode('utf-8'))
|
||||
options_hash = options_hasher.hexdigest()
|
||||
|
||||
# Create hash of kernel source
|
||||
source_hash = CudaContext.hash_kernel( \
|
||||
kernel_path, \
|
||||
include_dirs=[self.module_path] + include_dirs)
|
||||
|
||||
# Create final hash
|
||||
root, ext = os.path.splitext(kernel_filename)
|
||||
kernel_hash = root \
|
||||
+ "_" + source_hash \
|
||||
+ "_" + options_hash \
|
||||
+ ext
|
||||
cached_kernel_filename = os.path.join(self.cache_path, kernel_hash)
|
||||
|
||||
# If we have the kernel in our hashmap, return it
|
||||
if (kernel_hash in self.modules.keys()):
|
||||
self.logger.debug("Found kernel %s cached in hashmap (%s)", kernel_filename, kernel_hash)
|
||||
return self.modules[kernel_hash]
|
||||
|
||||
# If we have it on disk, return it
|
||||
elif (self.use_cache and os.path.isfile(cached_kernel_filename)):
|
||||
self.logger.debug("Found kernel %s cached on disk (%s)", kernel_filename, kernel_hash)
|
||||
|
||||
with io.open(cached_kernel_filename, "rb") as file:
|
||||
file_str = file.read()
|
||||
module = cuda.module_from_buffer(file_str, message_handler=cuda_compile_message_handler, **jit_compile_args)
|
||||
|
||||
self.modules[kernel_hash] = module
|
||||
return module
|
||||
|
||||
# Otherwise, compile it from source
|
||||
else:
|
||||
self.logger.debug("Compiling %s (%s)", kernel_filename, kernel_hash)
|
||||
|
||||
#Create kernel string
|
||||
kernel_string = ""
|
||||
for key, value in defines.items():
|
||||
kernel_string += "#define {:s} {:s}\n".format(str(key), str(value))
|
||||
kernel_string += '#include "{:s}"'.format(os.path.join(self.module_path, kernel_filename))
|
||||
if (self.use_cache):
|
||||
cached_kernel_dir = os.path.dirname(cached_kernel_filename)
|
||||
if not os.path.isdir(cached_kernel_dir):
|
||||
os.mkdir(cached_kernel_dir)
|
||||
with io.open(cached_kernel_filename + ".txt", "w") as file:
|
||||
file.write(kernel_string)
|
||||
|
||||
|
||||
with Common.Timer("compiler") as timer:
|
||||
import warnings
|
||||
with warnings.catch_warnings():
|
||||
warnings.filterwarnings("ignore", message="The CUDA compiler succeeded, but said the following:\nkernel.cu", category=UserWarning)
|
||||
cubin = cuda_compiler.compile(kernel_string, include_dirs=include_dirs, cache_dir=False, **compile_args)
|
||||
module = cuda.module_from_buffer(cubin, message_handler=cuda_compile_message_handler, **jit_compile_args)
|
||||
if (self.use_cache):
|
||||
with io.open(cached_kernel_filename, "wb") as file:
|
||||
file.write(cubin)
|
||||
|
||||
self.modules[kernel_hash] = module
|
||||
return module
|
||||
|
||||
"""
|
||||
Clears the kernel cache (useful for debugging & development)
|
||||
"""
|
||||
def clear_kernel_cache(self):
|
||||
self.logger.debug("Clearing cache")
|
||||
self.modules = {}
|
||||
gc.collect()
|
||||
|
||||
"""
|
||||
Synchronizes all streams etc
|
||||
"""
|
||||
def synchronize(self):
|
||||
self.cuda_context.synchronize()
|
@ -1,575 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
This python module implements the dimensionally split 2nd order Kurganov-Petrova (KP07) scheme for the Euler equations
|
||||
|
||||
Copyright (C) 2016 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
#Import packages we need
|
||||
from GPUSimulators import Simulator, Common
|
||||
from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
|
||||
import numpy as np
import ctypes
import os

#from pycuda import gpuarray
from hip import hip, hiprtc, hipblas  # hipblas needed by min_hipblas() below (assumes hip-python exposes it)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
"""
|
||||
Class that solves the Euler equations using the dimensionally split 2nd order Kurganov-Petrova (KP07) scheme
|
||||
"""
|
||||
class EE2D_KP07_dimsplit (BaseSimulator):
|
||||
|
||||
"""
|
||||
Initialization routine
|
||||
rho: Density
|
||||
rho_u: Momentum along x-axis
|
||||
rho_v: Momentum along y-axis
|
||||
E: energy
|
||||
nx: Number of cells along x-axis
|
||||
ny: Number of cells along y-axis
|
||||
dx: Grid cell spacing along x-axis
|
||||
dy: Grid cell spacing along y-axis
|
||||
dt: Size of each timestep
|
||||
g: Gravitational acceleration
gamma: Adiabatic index (ratio of specific heats)
|
||||
p: pressure
|
||||
"""
|
||||
|
||||
def hip_check(call_result):
|
||||
err = call_result[0]
|
||||
result = call_result[1:]
|
||||
if len(result) == 1:
|
||||
result = result[0]
|
||||
if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
|
||||
raise RuntimeError(str(err))
|
||||
elif (
|
||||
isinstance(err, hiprtc.hiprtcResult)
|
||||
and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
|
||||
):
|
||||
raise RuntimeError(str(err))
|
||||
return result
|
||||
|
||||
def __init__(self,
|
||||
context,
|
||||
rho, rho_u, rho_v, E,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
g,
|
||||
gamma,
|
||||
theta=1.3,
|
||||
cfl_scale=0.9,
|
||||
boundary_conditions=BoundaryCondition(),
|
||||
block_width=16, block_height=8):
|
||||
|
||||
# Call super constructor
|
||||
super().__init__(context,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
boundary_conditions,
|
||||
cfl_scale,
|
||||
2,
|
||||
block_width, block_height)
|
||||
self.g = np.float32(g)
|
||||
self.gamma = np.float32(gamma)
|
||||
self.theta = np.float32(theta)
|
||||
|
||||
#Get kernels
|
||||
#module = context.get_module("cuda/EE2D_KP07_dimsplit.cu",
|
||||
# defines={
|
||||
# 'BLOCK_WIDTH': self.block_size[0],
|
||||
# 'BLOCK_HEIGHT': self.block_size[1]
|
||||
# },
|
||||
# compile_args={
|
||||
# 'no_extern_c': True,
|
||||
# 'options': ["--use_fast_math"],
|
||||
# },
|
||||
# jit_compile_args={})
|
||||
#self.kernel = module.get_function("KP07DimsplitKernel")
|
||||
#self.kernel.prepare("iiffffffiiPiPiPiPiPiPiPiPiPiiii")
|
||||
#
|
||||
kernel_file_path = os.path.abspath(os.path.join('cuda', 'EE2D_KP07_dimsplit.cu.hip'))
|
||||
with open(kernel_file_path, 'r') as file:
|
||||
kernel_source = file.read()
|
||||
|
||||
prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"KP07DimsplitKernel", 0, [], []))
|
||||
|
||||
props = hip.hipDeviceProp_t()
|
||||
hip_check(hip.hipGetDeviceProperties(props,0))
|
||||
arch = props.gcnArchName
|
||||
|
||||
print(f"Compiling kernel for {arch}")
|
||||
|
||||
cflags = [b"--offload-arch="+arch]
|
||||
err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
|
||||
if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
|
||||
log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
|
||||
log = bytearray(log_size)
|
||||
hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
|
||||
raise RuntimeError(log.decode())
|
||||
code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
|
||||
code = bytearray(code_size)
|
||||
hip_check(hiprtc.hiprtcGetCode(prog, code))
|
||||
module = hip_check(hip.hipModuleLoadData(code))
|
||||
|
||||
kernel = hip_check(hip.hipModuleGetFunction(module, b"KP07DimsplitKernel"))
|
||||
|
||||
#Create data by uploading to device
|
||||
self.u0 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
2, 2,
|
||||
[rho, rho_u, rho_v, E])
|
||||
self.u1 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
2, 2,
|
||||
[None, None, None, None])
|
||||
#self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
|
||||
# init device array cfl_data
|
||||
data_h = np.empty(self.grid_size, dtype=np.float32)
|
||||
num_bytes = data_h.size * data_h.itemsize
|
||||
self.cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
|
||||
typestr="float32",shape=self.grid_size)
|
||||
|
||||
dt_x = np.min(self.dx / (np.abs(rho_u/rho) + np.sqrt(gamma*rho)))
|
||||
dt_y = np.min(self.dy / (np.abs(rho_v/rho) + np.sqrt(gamma*rho)))
|
||||
self.dt = min(dt_x, dt_y)
|
||||
self.cfl_data.fill(self.dt, stream=self.stream)
|
||||
|
||||
|
||||
def substep(self, dt, step_number, external=True, internal=True):
|
||||
self.substepDimsplit(0.5*dt, step_number, external, internal)
|
||||
|
||||
def substepDimsplit(self, dt, substep, external, internal):
|
||||
if external and internal:
|
||||
#print("COMPLETE DOMAIN (dt=" + str(dt) + ")")
|
||||
|
||||
# self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
|
||||
# self.nx, self.ny,
|
||||
# self.dx, self.dy, dt,
|
||||
# self.g,
|
||||
# self.gamma,
|
||||
# self.theta,
|
||||
# substep,
|
||||
# self.boundary_conditions,
|
||||
# self.u0[0].data.gpudata, self.u0[0].data.strides[0],
|
||||
# self.u0[1].data.gpudata, self.u0[1].data.strides[0],
|
||||
# self.u0[2].data.gpudata, self.u0[2].data.strides[0],
|
||||
# self.u0[3].data.gpudata, self.u0[3].data.strides[0],
|
||||
# self.u1[0].data.gpudata, self.u1[0].data.strides[0],
|
||||
# self.u1[1].data.gpudata, self.u1[1].data.strides[0],
|
||||
# self.u1[2].data.gpudata, self.u1[2].data.strides[0],
|
||||
# self.u1[3].data.gpudata, self.u1[3].data.strides[0],
|
||||
# self.cfl_data.gpudata,
|
||||
# 0, 0,
|
||||
# self.nx, self.ny)
|
||||
|
||||
#launch kernel
|
||||
hip_check(
|
||||
hip.hipModuleLaunchKernel(
|
||||
kernel,
|
||||
*self.grid_size,
|
||||
*self.block_size,
|
||||
sharedMemBytes=0,
|
||||
stream=self.stream,
|
||||
kernelParams=None,
|
||||
extra=( # pass kernel's arguments
|
||||
ctypes.c_int(self.nx), ctypes.c_int(self.ny),
|
||||
ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(self.dt),
|
||||
ctypes.c_float(self.g),
|
||||
ctypes.c_float(self.gamma),
|
||||
ctypes.c_float(self.theta),
|
||||
ctypes.c_int(substep),
|
||||
ctypes.c_int(self.boundary_conditions),
|
||||
ctypes.c_float(self.u0[0].data), ctypes.c_float(self.u0[0].data.strides[0]),
|
||||
ctypes.c_float(self.u0[1].data), ctypes.c_float(self.u0[1].data.strides[0]),
|
||||
ctypes.c_float(self.u0[2].data), ctypes.c_float(self.u0[2].data.strides[0]),
|
||||
ctypes.c_float(self.u0[3].data), ctypes.c_float(self.u0[3].data.strides[0]),
|
||||
ctypes.c_float(self.u1[0].data), ctypes.c_float(self.u1[0].data.strides[0]),
|
||||
ctypes.c_float(self.u1[1].data), ctypes.c_float(self.u1[1].data.strides[0]),
|
||||
ctypes.c_float(self.u1[2].data), ctypes.c_float(self.u1[2].data.strides[0]),
|
||||
ctypes.c_float(self.u1[3].data), ctypes.c_float(self.u1[3].data.strides[0]),
|
||||
self.cfl_data,
|
||||
0, 0,
|
||||
ctypes.c_int(self.nx), ctypes.c_int(self.ny)
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
hip_check(hip.hipDeviceSynchronize())
|
||||
hip_check(hip.hipModuleUnload(module))
|
||||
|
||||
hip_check(hip.hipFree(cfl_data))
|
||||
|
||||
print("--External & Internal: Launching Kernel is ok")
|
||||
|
||||
return
|
||||
|
||||
if external and not internal:
|
||||
###################################
|
||||
# XXX: Corners are treated twice! #
|
||||
###################################
|
||||
|
||||
ns_grid_size = (self.grid_size[0], 1)
|
||||
|
||||
# NORTH
|
||||
# (x0, y0) x (x1, y1)
|
||||
# (0, ny-y_halo) x (nx, ny)
|
||||
# self.kernel.prepared_async_call(ns_grid_size, self.block_size, self.stream,
|
||||
# self.nx, self.ny,
|
||||
# self.dx, self.dy, dt,
|
||||
# self.g,
|
||||
# self.gamma,
|
||||
# self.theta,
|
||||
# substep,
|
||||
# self.boundary_conditions,
|
||||
# self.u0[0].data.gpudata, self.u0[0].data.strides[0],
|
||||
# self.u0[1].data.gpudata, self.u0[1].data.strides[0],
|
||||
# self.u0[2].data.gpudata, self.u0[2].data.strides[0],
|
||||
# self.u0[3].data.gpudata, self.u0[3].data.strides[0],
|
||||
# self.u1[0].data.gpudata, self.u1[0].data.strides[0],
|
||||
# self.u1[1].data.gpudata, self.u1[1].data.strides[0],
|
||||
# self.u1[2].data.gpudata, self.u1[2].data.strides[0],
|
||||
# self.u1[3].data.gpudata, self.u1[3].data.strides[0],
|
||||
# self.cfl_data.gpudata,
|
||||
# 0, self.ny - int(self.u0[0].y_halo),
|
||||
# self.nx, self.ny)
|
||||
|
||||
hip_check(
|
||||
hip.hipModuleLaunchKernel(
|
||||
kernel,
|
||||
*ns_grid_size,
|
||||
*self.block_size,
|
||||
sharedMemBytes=0,
|
||||
stream=self.stream,
|
||||
kernelParams=None,
|
||||
extra=( # pass kernel's arguments
|
||||
ctypes.c_int(self.nx), ctypes.c_int(self.ny),
|
||||
ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(self.dt),
|
||||
ctypes.c_float(self.g),
|
||||
ctypes.c_float(self.gamma),
|
||||
ctypes.c_float(self.theta),
|
||||
ctypes.c_int(substep),
|
||||
ctypes.c_int(self.boundary_conditions),
|
||||
ctypes.c_float(self.u0[0].data), ctypes.c_float(self.u0[0].data.strides[0]),
|
||||
ctypes.c_float(self.u0[1].data), ctypes.c_float(self.u0[1].data.strides[0]),
|
||||
ctypes.c_float(self.u0[2].data), ctypes.c_float(self.u0[2].data.strides[0]),
|
||||
ctypes.c_float(self.u0[3].data), ctypes.c_float(self.u0[3].data.strides[0]),
|
||||
ctypes.c_float(self.u1[0].data), ctypes.c_float(self.u1[0].data.strides[0]),
|
||||
ctypes.c_float(self.u1[1].data), ctypes.c_float(self.u1[1].data.strides[0]),
|
||||
ctypes.c_float(self.u1[2].data), ctypes.c_float(self.u1[2].data.strides[0]),
|
||||
ctypes.c_float(self.u1[3].data), ctypes.c_float(self.u1[3].data.strides[0]),
|
||||
self.cfl_data,
|
||||
0, ctypes.c_int(self.ny - int(self.u0[0].y_halo)),
|
||||
ctypes.c_int(self.nx), ctypes.c_int(self.ny)
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
# SOUTH
|
||||
# (x0, y0) x (x1, y1)
|
||||
# (0, 0) x (nx, y_halo)
|
||||
# self.kernel.prepared_async_call(ns_grid_size, self.block_size, self.stream,
|
||||
# self.nx, self.ny,
|
||||
# self.dx, self.dy, dt,
|
||||
# self.g,
|
||||
# self.gamma,
|
||||
# self.theta,
|
||||
# substep,
|
||||
# self.boundary_conditions,
|
||||
# self.u0[0].data.gpudata, self.u0[0].data.strides[0],
|
||||
# self.u0[1].data.gpudata, self.u0[1].data.strides[0],
|
||||
# self.u0[2].data.gpudata, self.u0[2].data.strides[0],
|
||||
# self.u0[3].data.gpudata, self.u0[3].data.strides[0],
|
||||
# self.u1[0].data.gpudata, self.u1[0].data.strides[0],
|
||||
# self.u1[1].data.gpudata, self.u1[1].data.strides[0],
|
||||
# self.u1[2].data.gpudata, self.u1[2].data.strides[0],
|
||||
# self.u1[3].data.gpudata, self.u1[3].data.strides[0],
|
||||
# self.cfl_data.gpudata,
|
||||
# 0, 0,
|
||||
# self.nx, int(self.u0[0].y_halo))
|
||||
|
||||
hip_check(
|
||||
hip.hipModuleLaunchKernel(
|
||||
kernel,
|
||||
*ns_grid_size,
|
||||
*self.block_size,
|
||||
sharedMemBytes=0,
|
||||
stream=self.stream,
|
||||
kernelParams=None,
|
||||
extra=( # pass kernel's arguments
|
||||
ctypes.c_int(self.nx), ctypes.c_int(self.ny),
|
||||
ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(self.dt),
|
||||
ctypes.c_float(self.g),
|
||||
ctypes.c_float(self.gamma),
|
||||
ctypes.c_float(self.theta),
|
||||
ctypes.c_int(substep),
|
||||
ctypes.c_int(self.boundary_conditions),
|
||||
ctypes.c_float(self.u0[0].data), ctypes.c_float(self.u0[0].data.strides[0]),
|
||||
ctypes.c_float(self.u0[1].data), ctypes.c_float(self.u0[1].data.strides[0]),
|
||||
ctypes.c_float(self.u0[2].data), ctypes.c_float(self.u0[2].data.strides[0]),
|
||||
ctypes.c_float(self.u0[3].data), ctypes.c_float(self.u0[3].data.strides[0]),
|
||||
ctypes.c_float(self.u1[0].data), ctypes.c_float(self.u1[0].data.strides[0]),
|
||||
ctypes.c_float(self.u1[1].data), ctypes.c_float(self.u1[1].data.strides[0]),
|
||||
ctypes.c_float(self.u1[2].data), ctypes.c_float(self.u1[2].data.strides[0]),
|
||||
ctypes.c_float(self.u1[3].data), ctypes.c_float(self.u1[3].data.strides[0]),
|
||||
self.cfl_data,
|
||||
0, 0,
|
||||
ctypes.c_int(self.nx), ctypes.c_int(self.u0[0].y_halo)
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
we_grid_size = (1, self.grid_size[1])
|
||||
|
||||
# WEST
|
||||
# (x0, y0) x (x1, y1)
|
||||
# (0, 0) x (x_halo, ny)
|
||||
# self.kernel.prepared_async_call(we_grid_size, self.block_size, self.stream,
|
||||
# self.nx, self.ny,
|
||||
# self.dx, self.dy, dt,
|
||||
# self.g,
|
||||
# self.gamma,
|
||||
# self.theta,
|
||||
# substep,
|
||||
# self.boundary_conditions,
|
||||
# self.u0[0].data.gpudata, self.u0[0].data.strides[0],
|
||||
# self.u0[1].data.gpudata, self.u0[1].data.strides[0],
|
||||
# self.u0[2].data.gpudata, self.u0[2].data.strides[0],
|
||||
# self.u0[3].data.gpudata, self.u0[3].data.strides[0],
|
||||
# self.u1[0].data.gpudata, self.u1[0].data.strides[0],
|
||||
# self.u1[1].data.gpudata, self.u1[1].data.strides[0],
|
||||
# self.u1[2].data.gpudata, self.u1[2].data.strides[0],
|
||||
# self.u1[3].data.gpudata, self.u1[3].data.strides[0],
|
||||
# self.cfl_data.gpudata,
|
||||
# 0, 0,
|
||||
# int(self.u0[0].x_halo), self.ny)
|
||||
|
||||
hip_check(
|
||||
hip.hipModuleLaunchKernel(
|
||||
kernel,
|
||||
*we_grid_size,
|
||||
*self.block_size,
|
||||
sharedMemBytes=0,
|
||||
stream=self.stream,
|
||||
kernelParams=None,
|
||||
extra=( # pass kernel's arguments
|
||||
ctypes.c_int(self.nx), ctypes.c_int(self.ny),
|
||||
ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(self.dt),
|
||||
ctypes.c_float(self.g),
|
||||
ctypes.c_float(self.gamma),
|
||||
ctypes.c_float(self.theta),
|
||||
ctypes.c_int(substep),
|
||||
ctypes.c_int(self.boundary_conditions),
|
||||
ctypes.c_float(self.u0[0].data), ctypes.c_float(self.u0[0].data.strides[0]),
|
||||
ctypes.c_float(self.u0[1].data), ctypes.c_float(self.u0[1].data.strides[0]),
|
||||
ctypes.c_float(self.u0[2].data), ctypes.c_float(self.u0[2].data.strides[0]),
|
||||
ctypes.c_float(self.u0[3].data), ctypes.c_float(self.u0[3].data.strides[0]),
|
||||
ctypes.c_float(self.u1[0].data), ctypes.c_float(self.u1[0].data.strides[0]),
|
||||
ctypes.c_float(self.u1[1].data), ctypes.c_float(self.u1[1].data.strides[0]),
|
||||
ctypes.c_float(self.u1[2].data), ctypes.c_float(self.u1[2].data.strides[0]),
|
||||
ctypes.c_float(self.u1[3].data), ctypes.c_float(self.u1[3].data.strides[0]),
|
||||
self.cfl_data,
|
||||
0, 0,
|
||||
ctypes.c_int(self.u0[0].x_halo), ctypes.c_int(self.ny)
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
# EAST
|
||||
# (x0, y0) x (x1, y1)
|
||||
# (nx-x_halo, 0) x (nx, ny)
|
||||
# self.kernel.prepared_async_call(we_grid_size, self.block_size, self.stream,
|
||||
# self.nx, self.ny,
|
||||
# self.dx, self.dy, dt,
|
||||
# self.g,
|
||||
# self.gamma,
|
||||
# self.theta,
|
||||
# substep,
|
||||
# self.boundary_conditions,
|
||||
# self.u0[0].data.gpudata, self.u0[0].data.strides[0],
|
||||
# self.u0[1].data.gpudata, self.u0[1].data.strides[0],
|
||||
# self.u0[2].data.gpudata, self.u0[2].data.strides[0],
|
||||
# self.u0[3].data.gpudata, self.u0[3].data.strides[0],
|
||||
# self.u1[0].data.gpudata, self.u1[0].data.strides[0],
|
||||
# self.u1[1].data.gpudata, self.u1[1].data.strides[0],
|
||||
# self.u1[2].data.gpudata, self.u1[2].data.strides[0],
|
||||
# self.u1[3].data.gpudata, self.u1[3].data.strides[0],
|
||||
# self.cfl_data.gpudata,
|
||||
# self.nx - int(self.u0[0].x_halo), 0,
|
||||
# self.nx, self.ny)
|
||||
|
||||
hip_check(
|
||||
hip.hipModuleLaunchKernel(
|
||||
kernel,
|
||||
*we_grid_size,
|
||||
*self.block_size,
|
||||
sharedMemBytes=0,
|
||||
stream=self.stream,
|
||||
kernelParams=None,
|
||||
extra=( # pass kernel's arguments
|
||||
ctypes.c_int(self.nx), ctypes.c_int(self.ny),
|
||||
ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(self.dt),
|
||||
ctypes.c_float(self.g),
|
||||
ctypes.c_float(self.gamma),
|
||||
ctypes.c_float(self.theta),
|
||||
ctypes.c_int(substep),
|
||||
ctypes.c_int(self.boundary_conditions),
|
||||
ctypes.c_float(self.u0[0].data), ctypes.c_float(self.u0[0].data.strides[0]),
|
||||
ctypes.c_float(self.u0[1].data), ctypes.c_float(self.u0[1].data.strides[0]),
|
||||
ctypes.c_float(self.u0[2].data), ctypes.c_float(self.u0[2].data.strides[0]),
|
||||
ctypes.c_float(self.u0[3].data), ctypes.c_float(self.u0[3].data.strides[0]),
|
||||
ctypes.c_float(self.u1[0].data), ctypes.c_float(self.u1[0].data.strides[0]),
|
||||
ctypes.c_float(self.u1[1].data), ctypes.c_float(self.u1[1].data.strides[0]),
|
||||
ctypes.c_float(self.u1[2].data), ctypes.c_float(self.u1[2].data.strides[0]),
|
||||
ctypes.c_float(self.u1[3].data), ctypes.c_float(self.u1[3].data.strides[0]),
|
||||
self.cfl_data,
|
||||
ctypes.c_int(self.nx - int(self.u0[0].x_halo)), 0,
|
||||
ctypes.c_int(self.nx), ctypes.c_int(self.ny)
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
hip_check(hip.hipDeviceSynchronize())
|
||||
hip_check(hip.hipModuleUnload(module))
|
||||
|
||||
hip_check(hip.hipFree(cfl_data))
|
||||
|
||||
print("--External and not Internal: Launching Kernel is ok")
|
||||
|
||||
return
|
||||
|
||||
if internal and not external:
|
||||
|
||||
# INTERNAL DOMAIN
|
||||
# (x0, y0) x (x1, y1)
|
||||
# (x_halo, y_halo) x (nx - x_halo, ny - y_halo)
|
||||
# self.kernel.prepared_async_call(self.grid_size, self.block_size, self.internal_stream,
# self.nx, self.ny,
# self.dx, self.dy, dt,
# self.g,
# self.gamma,
# self.theta,
# substep,
# self.boundary_conditions,
# self.u0[0].data.gpudata, self.u0[0].data.strides[0],
# self.u0[1].data.gpudata, self.u0[1].data.strides[0],
# self.u0[2].data.gpudata, self.u0[2].data.strides[0],
# self.u0[3].data.gpudata, self.u0[3].data.strides[0],
# self.u1[0].data.gpudata, self.u1[0].data.strides[0],
# self.u1[1].data.gpudata, self.u1[1].data.strides[0],
# self.u1[2].data.gpudata, self.u1[2].data.strides[0],
# self.u1[3].data.gpudata, self.u1[3].data.strides[0],
# self.cfl_data.gpudata,
# int(self.u0[0].x_halo), int(self.u0[0].y_halo),
# self.nx - int(self.u0[0].x_halo), self.ny - int(self.u0[0].y_halo))
|
||||
|
||||
|
||||
hip_check(
|
||||
hip.hipModuleLaunchKernel(
|
||||
kernel,
|
||||
*self.grid_size,
|
||||
*self.block_size,
|
||||
sharedMemBytes=0,
|
||||
stream=self.internal_stream,
|
||||
kernelParams=None,
|
||||
extra=( # pass kernel's arguments
|
||||
ctypes.c_int(self.nx), ctypes.c_int(self.ny),
|
||||
ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(self.dt),
|
||||
ctypes.c_float(self.g),
|
||||
ctypes.c_float(self.gamma),
|
||||
ctypes.c_float(self.theta),
|
||||
ctypes.c_int(substep),
|
||||
ctypes.c_int(self.boundary_conditions),
|
||||
ctypes.c_float(self.u0[0].data), ctypes.c_float(self.u0[0].data.strides[0]),
|
||||
ctypes.c_float(self.u0[1].data), ctypes.c_float(self.u0[1].data.strides[0]),
|
||||
ctypes.c_float(self.u0[2].data), ctypes.c_float(self.u0[2].data.strides[0]),
|
||||
ctypes.c_float(self.u0[3].data), ctypes.c_float(self.u0[3].data.strides[0]),
|
||||
ctypes.c_float(self.u1[0].data), ctypes.c_float(self.u1[0].data.strides[0]),
|
||||
ctypes.c_float(self.u1[1].data), ctypes.c_float(self.u1[1].data.strides[0]),
|
||||
ctypes.c_float(self.u1[2].data), ctypes.c_float(self.u1[2].data.strides[0]),
|
||||
ctypes.c_float(self.u1[3].data), ctypes.c_float(self.u1[3].data.strides[0]),
|
||||
self.cfl_data,
|
||||
ctypes.c_int(self.u0[0].x_halo), ctypes.c_int(self.u0[0].y_halo),
|
||||
ctypes.c_int(self.nx - int(self.u0[0].x_halo)), ctypes.c_int(self.ny - int(self.u0[0].y_halo))
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
hip_check(hip.hipDeviceSynchronize())
|
||||
hip_check(hip.hipModuleUnload(module))
|
||||
|
||||
hip_check(hip.hipFree(cfl_data))
|
||||
|
||||
print("--Internal and not External: Launching Kernel is ok")
|
||||
return
|
||||
|
||||
def swapBuffers(self):
|
||||
self.u0, self.u1 = self.u1, self.u0
|
||||
return
|
||||
|
||||
def getOutput(self):
|
||||
return self.u0
|
||||
|
||||
def check(self):
|
||||
self.u0.check()
|
||||
self.u1.check()
|
||||
return
|
||||
|
||||
# computing min with hipblas: the output is an index
|
||||
def min_hipblas(self, num_elements, cfl_data, stream):
|
||||
num_bytes = num_elements * np.dtype(np.float32).itemsize
|
||||
num_bytes_i = np.dtype(np.int32).itemsize
|
||||
indx_d = hip_check(hip.hipMalloc(num_bytes_i))
|
||||
indx_h = np.zeros(1, dtype=np.int32)
|
||||
x_temp = np.zeros(num_elements, dtype=np.float32)
|
||||
|
||||
#print("--size.data:", cfl_data.size)
|
||||
handle = hip_check(hipblas.hipblasCreate())
|
||||
|
||||
#hip_check(hipblas.hipblasGetStream(handle, stream))
|
||||
#"incx" [int] specifies the increment for the elements of x. incx must be > 0.
|
||||
hip_check(hipblas.hipblasIsamin(handle, num_elements, cfl_data, 1, indx_d))
|
||||
|
||||
# destruction of handle
|
||||
hip_check(hipblas.hipblasDestroy(handle))
|
||||
|
||||
# copy result (stored in indx_d) back to the host (store in indx_h)
|
||||
hip_check(hip.hipMemcpyAsync(indx_h,indx_d,num_bytes_i,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
|
||||
hip_check(hip.hipMemcpyAsync(x_temp,cfl_data,num_bytes,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
|
||||
#hip_check(hip.hipMemsetAsync(cfl_data,0,num_bytes,self.stream))
|
||||
hip_check(hip.hipStreamSynchronize(stream))
|
||||
|
||||
min_value = x_temp.flatten()[indx_h[0]-1]
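# hipblasIsamin returns a 1-based (Fortran style) index, hence the -1 when
# indexing the host copy of the data.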
|
||||
|
||||
# clean up
|
||||
hip_check(hip.hipStreamDestroy(stream))
|
||||
hip_check(hip.hipFree(cfl_data))
|
||||
return min_value
|
||||
|
||||
def computeDt(self):
|
||||
#max_dt = gpuarray.min(self.cfl_data, stream=self.stream).get();
|
||||
max_dt = self.min_hipblas(self.cfl_data.size, self.cfl_data, self.stream)
|
||||
return max_dt*0.5
|
@ -1,242 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
This python module implements the FORCE flux
|
||||
for the shallow water equations
|
||||
|
||||
Copyright (C) 2016 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
#Import packages we need
|
||||
from GPUSimulators import Simulator, Common
|
||||
from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
|
||||
import numpy as np
import ctypes
import os

#from pycuda import gpuarray
from hip import hip, hiprtc, hipblas  # hipblas needed by min_hipblas() below (assumes hip-python exposes it)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
"""
|
||||
Class that solves the SW equations
|
||||
"""
|
||||
class FORCE (Simulator.BaseSimulator):
|
||||
|
||||
"""
|
||||
Initialization routine
|
||||
h0: Water depth incl ghost cells, (nx+1)*(ny+1) cells
|
||||
hu0: Initial momentum along x-axis incl ghost cells, (nx+1)*(ny+1) cells
|
||||
hv0: Initial momentum along y-axis incl ghost cells, (nx+1)*(ny+1) cells
|
||||
nx: Number of cells along x-axis
|
||||
ny: Number of cells along y-axis
|
||||
dx: Grid cell spacing along x-axis (20 000 m)
|
||||
dy: Grid cell spacing along y-axis (20 000 m)
|
||||
dt: Size of each timestep (90 s)
|
||||
g: Gravitational acceleration (9.81 m/s^2)
|
||||
"""
|
||||
def hip_check(call_result):
|
||||
err = call_result[0]
|
||||
result = call_result[1:]
|
||||
if len(result) == 1:
|
||||
result = result[0]
|
||||
if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
|
||||
raise RuntimeError(str(err))
|
||||
elif (
|
||||
isinstance(err, hiprtc.hiprtcResult)
|
||||
and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
|
||||
):
|
||||
raise RuntimeError(str(err))
|
||||
return result
|
||||
|
||||
def __init__(self,
|
||||
context,
|
||||
h0, hu0, hv0,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
g,
|
||||
cfl_scale=0.9,
|
||||
boundary_conditions=BoundaryCondition(),
|
||||
block_width=16, block_height=16):
|
||||
|
||||
# Call super constructor
|
||||
super().__init__(context,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
boundary_conditions,
|
||||
cfl_scale,
|
||||
1,
|
||||
block_width, block_height)
|
||||
self.g = np.float32(g)
|
||||
|
||||
#Get kernels
|
||||
# module = context.get_module("cuda/SWE2D_FORCE.cu.hip",
|
||||
# defines={
|
||||
# 'BLOCK_WIDTH': self.block_size[0],
|
||||
# 'BLOCK_HEIGHT': self.block_size[1]
|
||||
# },
|
||||
# compile_args={
|
||||
# 'no_extern_c': True,
|
||||
# 'options': ["--use_fast_math"],
|
||||
# },
|
||||
# jit_compile_args={})
|
||||
# self.kernel = module.get_function("FORCEKernel")
|
||||
# self.kernel.prepare("iiffffiPiPiPiPiPiPiP")
|
||||
|
||||
kernel_file_path = os.path.abspath(os.path.join('cuda', 'SWE2D_FORCE.cu.hip'))
|
||||
with open(kernel_file_path, 'r') as file:
|
||||
kernel_source = file.read()
|
||||
|
||||
prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"FORCEKernel", 0, [], []))
|
||||
|
||||
props = hip.hipDeviceProp_t()
|
||||
hip_check(hip.hipGetDeviceProperties(props,0))
|
||||
arch = props.gcnArchName
|
||||
|
||||
print(f"Compiling kernel .FORCEKernel. for {arch}")
|
||||
|
||||
cflags = [b"--offload-arch="+arch]
|
||||
err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
|
||||
if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
|
||||
log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
|
||||
log = bytearray(log_size)
|
||||
hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
|
||||
raise RuntimeError(log.decode())
|
||||
code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
|
||||
code = bytearray(code_size)
|
||||
hip_check(hiprtc.hiprtcGetCode(prog, code))
|
||||
module = hip_check(hip.hipModuleLoadData(code))
|
||||
|
||||
kernel = hip_check(hip.hipModuleGetFunction(module, b"FORCEKernel"))
|
||||
|
||||
|
||||
#Create data by uploading to device
|
||||
self.u0 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
1, 1,
|
||||
[h0, hu0, hv0])
|
||||
self.u1 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
1, 1,
|
||||
[None, None, None])
|
||||
#self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
|
||||
data_h = np.empty(self.grid_size, dtype=np.float32)
|
||||
num_bytes = data_h.size * data_h.itemsize
|
||||
self.cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
|
||||
typestr="float32",shape=self.grid_size)
|
||||
|
||||
dt_x = np.min(self.dx / (np.abs(hu0/h0) + np.sqrt(g*h0)))
|
||||
dt_y = np.min(self.dy / (np.abs(hv0/h0) + np.sqrt(g*h0)))
|
||||
dt = min(dt_x, dt_y)
|
||||
self.cfl_data.fill(dt, stream=self.stream)
|
||||
|
||||
def substep(self, dt, step_number):
|
||||
# self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
|
||||
# self.nx, self.ny,
|
||||
# self.dx, self.dy, dt,
|
||||
# self.g,
|
||||
# self.boundary_conditions,
|
||||
# self.u0[0].data.gpudata, self.u0[0].data.strides[0],
|
||||
# self.u0[1].data.gpudata, self.u0[1].data.strides[0],
|
||||
# self.u0[2].data.gpudata, self.u0[2].data.strides[0],
|
||||
# self.u1[0].data.gpudata, self.u1[0].data.strides[0],
|
||||
# self.u1[1].data.gpudata, self.u1[1].data.strides[0],
|
||||
# self.u1[2].data.gpudata, self.u1[2].data.strides[0],
|
||||
# self.cfl_data.gpudata)
|
||||
# self.u0, self.u1 = self.u1, self.u0
|
||||
|
||||
#launch kernel
|
||||
hip_check(
|
||||
hip.hipModuleLaunchKernel(
|
||||
kernel,
|
||||
*self.grid_size,
|
||||
*self.block_size,
|
||||
sharedMemBytes=0,
|
||||
stream=self.stream,
|
||||
kernelParams=None,
|
||||
extra=( # pass kernel's arguments
|
||||
ctypes.c_int(self.nx), ctypes.c_int(self.ny),
|
||||
ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(self.dt),
|
||||
ctypes.c_float(self.g),
|
||||
ctypes.c_int(self.boundary_conditions),
|
||||
ctypes.c_float(self.u0[0].data), ctypes.c_float(self.u0[0].data.strides[0]),
|
||||
ctypes.c_float(self.u0[1].data), ctypes.c_float(self.u0[1].data.strides[0]),
|
||||
ctypes.c_float(self.u0[2].data), ctypes.c_float(self.u0[2].data.strides[0]),
|
||||
ctypes.c_float(self.u1[0].data), ctypes.c_float(self.u1[0].data.strides[0]),
|
||||
ctypes.c_float(self.u1[1].data), ctypes.c_float(self.u1[1].data.strides[0]),
|
||||
ctypes.c_float(self.u1[2].data), ctypes.c_float(self.u1[2].data.strides[0]),
|
||||
self.cfl_data
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
hip_check(hip.hipDeviceSynchronize())
|
||||
self.u0, self.u1 = self.u1, self.u0
|
||||
|
||||
|
||||
hip_check(hip.hipModuleUnload(module))
|
||||
|
||||
hip_check(hip.hipFree(cfl_data))
|
||||
|
||||
print("--Launching Kernel .FORCEKernel. is ok")
|
||||
|
||||
def getOutput(self):
|
||||
return self.u0
|
||||
|
||||
def check(self):
|
||||
self.u0.check()
|
||||
self.u1.check()
|
||||
|
||||
# computing min with hipblas: the output is an index
|
||||
def min_hipblas(self, num_elements, cfl_data, stream):
|
||||
num_bytes = num_elements * np.dtype(np.float32).itemsize
|
||||
num_bytes_i = np.dtype(np.int32).itemsize
|
||||
indx_d = hip_check(hip.hipMalloc(num_bytes_i))
|
||||
indx_h = np.zeros(1, dtype=np.int32)
|
||||
x_temp = np.zeros(num_elements, dtype=np.float32)
|
||||
|
||||
#print("--size.data:", cfl_data.size)
|
||||
handle = hip_check(hipblas.hipblasCreate())
|
||||
|
||||
#hip_check(hipblas.hipblasGetStream(handle, stream))
|
||||
#"incx" [int] specifies the increment for the elements of x. incx must be > 0.
|
||||
hip_check(hipblas.hipblasIsamin(handle, num_elements, cfl_data, 1, indx_d))
|
||||
|
||||
# destruction of handle
|
||||
hip_check(hipblas.hipblasDestroy(handle))
|
||||
|
||||
# copy result (stored in indx_d) back to the host (store in indx_h)
|
||||
hip_check(hip.hipMemcpyAsync(indx_h,indx_d,num_bytes_i,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
|
||||
hip_check(hip.hipMemcpyAsync(x_temp,cfl_data,num_bytes,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
|
||||
#hip_check(hip.hipMemsetAsync(cfl_data,0,num_bytes,self.stream))
|
||||
hip_check(hip.hipStreamSynchronize(stream))
|
||||
|
||||
min_value = x_temp.flatten()[indx_h[0]-1]
|
||||
|
||||
# clean up
|
||||
hip_check(hip.hipStreamDestroy(stream))
|
||||
hip_check(hip.hipFree(cfl_data))
|
||||
return min_value
|
||||
|
||||
def computeDt(self):
|
||||
#max_dt = gpuarray.min(self.cfl_data, stream=self.stream).get();
|
||||
max_dt = self.min_hipblas(self.cfl_data.size, self.cfl_data, self.stream)
|
||||
return max_dt
|
@ -1,235 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
This python module implements the HLL flux
|
||||
|
||||
Copyright (C) 2016 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
#Import packages we need
|
||||
from GPUSimulators import Simulator, Common
|
||||
from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
|
||||
import numpy as np
import ctypes
import os

#from pycuda import gpuarray
from hip import hip, hiprtc, hipblas  # hipblas needed by min_hipblas() below (assumes hip-python exposes it)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
"""
|
||||
Class that solves the SW equations using the Harten-Lax-van Leer approximate Riemann solver
|
||||
"""
|
||||
class HLL (Simulator.BaseSimulator):
|
||||
|
||||
"""
|
||||
Initialization routine
|
||||
h0: Water depth incl ghost cells, (nx+1)*(ny+1) cells
|
||||
hu0: Initial momentum along x-axis incl ghost cells, (nx+1)*(ny+1) cells
|
||||
hv0: Initial momentum along y-axis incl ghost cells, (nx+1)*(ny+1) cells
|
||||
nx: Number of cells along x-axis
|
||||
ny: Number of cells along y-axis
|
||||
dx: Grid cell spacing along x-axis (20 000 m)
|
||||
dy: Grid cell spacing along y-axis (20 000 m)
|
||||
dt: Size of each timestep (90 s)
|
||||
g: Gravitational acceleration (9.81 m/s^2)
|
||||
"""
|
||||
def hip_check(call_result):
|
||||
err = call_result[0]
|
||||
result = call_result[1:]
|
||||
if len(result) == 1:
|
||||
result = result[0]
|
||||
if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
|
||||
raise RuntimeError(str(err))
|
||||
elif (
|
||||
isinstance(err, hiprtc.hiprtcResult)
|
||||
and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
|
||||
):
|
||||
raise RuntimeError(str(err))
|
||||
return result
|
||||
|
||||
def __init__(self,
|
||||
context,
|
||||
h0, hu0, hv0,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
g,
|
||||
cfl_scale=0.9,
|
||||
boundary_conditions=BoundaryCondition(),
|
||||
block_width=16, block_height=16):
|
||||
|
||||
# Call super constructor
|
||||
super().__init__(context,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
boundary_conditions,
|
||||
cfl_scale,
|
||||
1,
|
||||
block_width, block_height)
|
||||
self.g = np.float32(g)
|
||||
|
||||
#Get kernels
|
||||
# module = context.get_module("cuda/SWE2D_HLL.cu",
|
||||
# defines={
|
||||
# 'BLOCK_WIDTH': self.block_size[0],
|
||||
# 'BLOCK_HEIGHT': self.block_size[1]
|
||||
# },
|
||||
# compile_args={
|
||||
# 'no_extern_c': True,
|
||||
# 'options': ["--use_fast_math"],
|
||||
# },
|
||||
# jit_compile_args={})
|
||||
# self.kernel = module.get_function("HLLKernel")
|
||||
# self.kernel.prepare("iiffffiPiPiPiPiPiPiP")
|
||||
|
||||
kernel_file_path = os.path.abspath(os.path.join('cuda', 'SWE2D_HLL.cu.hip'))
|
||||
with open(kernel_file_path, 'r') as file:
|
||||
kernel_source = file.read()
|
||||
|
||||
prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"HLLKernel", 0, [], []))
|
||||
|
||||
props = hip.hipDeviceProp_t()
|
||||
hip_check(hip.hipGetDeviceProperties(props,0))
|
||||
arch = props.gcnArchName
|
||||
|
||||
print(f"Compiling kernel .HLLKernel. for {arch}")
|
||||
|
||||
cflags = [b"--offload-arch="+arch]
|
||||
err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
|
||||
if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
|
||||
log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
|
||||
log = bytearray(log_size)
|
||||
hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
|
||||
raise RuntimeError(log.decode())
|
||||
code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
|
||||
code = bytearray(code_size)
|
||||
hip_check(hiprtc.hiprtcGetCode(prog, code))
|
||||
module = hip_check(hip.hipModuleLoadData(code))
|
||||
|
||||
kernel = hip_check(hip.hipModuleGetFunction(module, b"HLLKernel"))
|
||||
|
||||
#Create data by uploading to device
|
||||
self.u0 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
1, 1,
|
||||
[h0, hu0, hv0])
|
||||
self.u1 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
1, 1,
|
||||
[None, None, None])
|
||||
#self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
|
||||
data_h = np.empty(self.grid_size, dtype=np.float32)
|
||||
num_bytes = data_h.size * data_h.itemsize
|
||||
self.cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
|
||||
typestr="float32",shape=self.grid_size)
|
||||
|
||||
dt_x = np.min(self.dx / (np.abs(hu0/h0) + np.sqrt(g*h0)))
|
||||
dt_y = np.min(self.dy / (np.abs(hv0/h0) + np.sqrt(g*h0)))
|
||||
dt = min(dt_x, dt_y)
|
||||
self.cfl_data.fill(dt, stream=self.stream)
|
||||
|
||||
def substep(self, dt, step_number):
|
||||
# self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
|
||||
# self.nx, self.ny,
|
||||
# self.dx, self.dy, dt,
|
||||
# self.g,
|
||||
# self.boundary_conditions,
|
||||
# self.u0[0].data.gpudata, self.u0[0].data.strides[0],
|
||||
# self.u0[1].data.gpudata, self.u0[1].data.strides[0],
|
||||
# self.u0[2].data.gpudata, self.u0[2].data.strides[0],
|
||||
# self.u1[0].data.gpudata, self.u1[0].data.strides[0],
|
||||
# self.u1[1].data.gpudata, self.u1[1].data.strides[0],
|
||||
# self.u1[2].data.gpudata, self.u1[2].data.strides[0],
|
||||
# self.cfl_data.gpudata)
|
||||
#launch kernel
|
||||
hip_check(
|
||||
hip.hipModuleLaunchKernel(
|
||||
kernel,
|
||||
*self.grid_size,
|
||||
*self.block_size,
|
||||
sharedMemBytes=0,
|
||||
stream=self.stream,
|
||||
kernelParams=None,
|
||||
extra=( # pass kernel's arguments
|
||||
ctypes.c_int(self.nx), ctypes.c_int(self.ny),
|
||||
ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(self.dt),
|
||||
ctypes.c_float(self.g),
|
||||
ctypes.c_int(self.boundary_conditions),
|
||||
ctypes.c_float(self.u0[0].data), ctypes.c_float(self.u0[0].data.strides[0]),
|
||||
ctypes.c_float(self.u0[1].data), ctypes.c_float(self.u0[1].data.strides[0]),
|
||||
ctypes.c_float(self.u0[2].data), ctypes.c_float(self.u0[2].data.strides[0]),
|
||||
ctypes.c_float(self.u1[0].data), ctypes.c_float(self.u1[0].data.strides[0]),
|
||||
ctypes.c_float(self.u1[1].data), ctypes.c_float(self.u1[1].data.strides[0]),
|
||||
ctypes.c_float(self.u1[2].data), ctypes.c_float(self.u1[2].data.strides[0]),
|
||||
self.cfl_data
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
hip_check(hip.hipDeviceSynchronize())
|
||||
|
||||
self.u0, self.u1 = self.u1, self.u0
|
||||
|
||||
hip_check(hip.hipModuleUnload(module))
|
||||
|
||||
hip_check(hip.hipFree(cfl_data))
|
||||
|
||||
print("--Launching Kernel .HLLKernel. is ok")
|
||||
|
||||
def getOutput(self):
|
||||
return self.u0
|
||||
|
||||
def check(self):
|
||||
self.u0.check()
|
||||
self.u1.check()
|
||||
|
||||
# computing min with hipblas: the output is an index
|
||||
def min_hipblas(self, num_elements, cfl_data, stream):
|
||||
num_bytes = num_elements * np.dtype(np.float32).itemsize
|
||||
num_bytes_i = np.dtype(np.int32).itemsize
|
||||
indx_d = hip_check(hip.hipMalloc(num_bytes_i))
|
||||
indx_h = np.zeros(1, dtype=np.int32)
|
||||
x_temp = np.zeros(num_elements, dtype=np.float32)
|
||||
|
||||
#print("--size.data:", cfl_data.size)
|
||||
handle = hip_check(hipblas.hipblasCreate())
|
||||
|
||||
#hip_check(hipblas.hipblasGetStream(handle, stream))
|
||||
#"incx" [int] specifies the increment for the elements of x. incx must be > 0.
|
||||
hip_check(hipblas.hipblasIsamin(handle, num_elements, cfl_data, 1, indx_d))
|
||||
|
||||
# destruction of handle
|
||||
hip_check(hipblas.hipblasDestroy(handle))
|
||||
|
||||
# copy result (stored in indx_d) back to the host (store in indx_h)
|
||||
hip_check(hip.hipMemcpyAsync(indx_h,indx_d,num_bytes_i,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
|
||||
hip_check(hip.hipMemcpyAsync(x_temp,cfl_data,num_bytes,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
|
||||
#hip_check(hip.hipMemsetAsync(cfl_data,0,num_bytes,self.stream))
|
||||
hip_check(hip.hipStreamSynchronize(stream))
|
||||
|
||||
min_value = x_temp.flatten()[indx_h[0]-1]
|
||||
|
||||
# clean up
|
||||
hip_check(hip.hipStreamDestroy(stream))
|
||||
hip_check(hip.hipFree(cfl_data))
|
||||
return min_value
|
||||
|
||||
def computeDt(self):
|
||||
#max_dt = gpuarray.min(self.cfl_data, stream=self.stream).get();
|
||||
max_dt = self.min_hipblas(self.cfl_data.size, self.cfl_data, self.stream)
|
||||
return max_dt*0.5
|
@ -1,247 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
This python module implements the 2nd order HLL flux
|
||||
|
||||
Copyright (C) 2016 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
#Import packages we need
|
||||
from GPUSimulators import Simulator, Common
|
||||
from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
|
||||
import numpy as np
import ctypes
import os

#from pycuda import gpuarray
from hip import hip, hiprtc, hipblas  # hipblas needed by min_hipblas() below (assumes hip-python exposes it)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
"""
|
||||
Class that solves the SW equations using the 2nd order HLL flux
|
||||
"""
|
||||
class HLL2 (Simulator.BaseSimulator):
|
||||
|
||||
"""
|
||||
Initialization routine
|
||||
h0: Water depth incl ghost cells, (nx+1)*(ny+1) cells
|
||||
hu0: Initial momentum along x-axis incl ghost cells, (nx+1)*(ny+1) cells
|
||||
hv0: Initial momentum along y-axis incl ghost cells, (nx+1)*(ny+1) cells
|
||||
nx: Number of cells along x-axis
|
||||
ny: Number of cells along y-axis
|
||||
dx: Grid cell spacing along x-axis (20 000 m)
|
||||
dy: Grid cell spacing along y-axis (20 000 m)
|
||||
dt: Size of each timestep (90 s)
|
||||
g: Gravitational acceleration (9.81 m/s^2)
|
||||
"""
|
||||
def hip_check(call_result):
|
||||
err = call_result[0]
|
||||
result = call_result[1:]
|
||||
if len(result) == 1:
|
||||
result = result[0]
|
||||
if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
|
||||
raise RuntimeError(str(err))
|
||||
elif (
|
||||
isinstance(err, hiprtc.hiprtcResult)
|
||||
and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
|
||||
):
|
||||
raise RuntimeError(str(err))
|
||||
return result
|
||||
|
||||
def __init__(self,
|
||||
context,
|
||||
h0, hu0, hv0,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
g,
|
||||
theta=1.8,
|
||||
cfl_scale=0.9,
|
||||
boundary_conditions=BoundaryCondition(),
|
||||
block_width=16, block_height=16):
|
||||
|
||||
# Call super constructor
|
||||
super().__init__(context,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
boundary_conditions,
|
||||
cfl_scale,
|
||||
2,
|
||||
block_width, block_height)
|
||||
self.g = np.float32(g)
|
||||
self.theta = np.float32(theta)
|
||||
|
||||
#Get kernels
|
||||
# module = context.get_module("cuda/SWE2D_HLL2.cu",
|
||||
# defines={
|
||||
# 'BLOCK_WIDTH': self.block_size[0],
|
||||
# 'BLOCK_HEIGHT': self.block_size[1]
|
||||
# },
|
||||
# compile_args={
|
||||
# 'no_extern_c': True,
|
||||
# 'options': ["--use_fast_math"],
|
||||
# },
|
||||
# jit_compile_args={})
|
||||
# self.kernel = module.get_function("HLL2Kernel")
|
||||
# self.kernel.prepare("iifffffiiPiPiPiPiPiPiP")
|
||||
|
||||
kernel_file_path = os.path.abspath(os.path.join('cuda', 'SWE2D_HLL2.cu.hip'))
|
||||
with open(kernel_file_path, 'r') as file:
|
||||
kernel_source = file.read()
|
||||
|
||||
prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"HLL2Kernel", 0, [], []))
|
||||
|
||||
props = hip.hipDeviceProp_t()
|
||||
hip_check(hip.hipGetDeviceProperties(props,0))
|
||||
arch = props.gcnArchName
|
||||
|
||||
print(f"Compiling kernel .HLL2Kernel. for {arch}")
|
||||
|
||||
cflags = [b"--offload-arch="+arch]
|
||||
err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
|
||||
if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
|
||||
log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
|
||||
log = bytearray(log_size)
|
||||
hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
|
||||
raise RuntimeError(log.decode())
|
||||
code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
|
||||
code = bytearray(code_size)
|
||||
hip_check(hiprtc.hiprtcGetCode(prog, code))
|
||||
module = hip_check(hip.hipModuleLoadData(code))
|
||||
|
||||
kernel = hip_check(hip.hipModuleGetFunction(module, b"HLL2Kernel"))
|
||||
|
||||
#Create data by uploading to device
|
||||
self.u0 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
2, 2,
|
||||
[h0, hu0, hv0])
|
||||
self.u1 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
2, 2,
|
||||
[None, None, None])
|
||||
#self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
|
||||
data_h = np.empty(self.grid_size, dtype=np.float32)
|
||||
num_bytes = data_h.size * data_h.itemsize
|
||||
self.cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
|
||||
typestr="float32",shape=self.grid_size)
|
||||
|
||||
dt_x = np.min(self.dx / (np.abs(hu0/h0) + np.sqrt(g*h0)))
|
||||
dt_y = np.min(self.dy / (np.abs(hv0/h0) + np.sqrt(g*h0)))
|
||||
dt = min(dt_x, dt_y)
|
||||
self.cfl_data.fill(dt, stream=self.stream)
|
||||
|
||||
def substep(self, dt, step_number):
|
||||
self.substepDimsplit(dt*0.5, step_number)
|
||||
|
||||
def substepDimsplit(self, dt, substep):
|
||||
# self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
|
||||
# self.nx, self.ny,
|
||||
# self.dx, self.dy, dt,
|
||||
# self.g,
|
||||
# self.theta,
|
||||
# substep,
|
||||
# self.boundary_conditions,
|
||||
# self.u0[0].data.gpudata, self.u0[0].data.strides[0],
|
||||
# self.u0[1].data.gpudata, self.u0[1].data.strides[0],
|
||||
# self.u0[2].data.gpudata, self.u0[2].data.strides[0],
|
||||
# self.u1[0].data.gpudata, self.u1[0].data.strides[0],
|
||||
# self.u1[1].data.gpudata, self.u1[1].data.strides[0],
|
||||
# self.u1[2].data.gpudata, self.u1[2].data.strides[0],
|
||||
# self.cfl_data.gpudata)
|
||||
|
||||
#launch kernel
|
||||
hip_check(
|
||||
hip.hipModuleLaunchKernel(
|
||||
kernel,
|
||||
*self.grid_size,
|
||||
*self.block_size,
|
||||
sharedMemBytes=0,
|
||||
stream=self.stream,
|
||||
kernelParams=None,
|
||||
extra=( # pass kernel's arguments
|
||||
ctypes.c_int(self.nx), ctypes.c_int(self.ny),
|
||||
ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(self.dt),
|
||||
ctypes.c_float(self.g),
|
||||
ctypes.c_float(self.theta),
|
||||
ctypes.c_int(substep),
|
||||
ctypes.c_int(self.boundary_conditions),
|
||||
ctypes.c_float(self.u0[0].data), ctypes.c_float(self.u0[0].data.strides[0]),
|
||||
ctypes.c_float(self.u0[1].data), ctypes.c_float(self.u0[1].data.strides[0]),
|
||||
ctypes.c_float(self.u0[2].data), ctypes.c_float(self.u0[2].data.strides[0]),
|
||||
ctypes.c_float(self.u1[0].data), ctypes.c_float(self.u1[0].data.strides[0]),
|
||||
ctypes.c_float(self.u1[1].data), ctypes.c_float(self.u1[1].data.strides[0]),
|
||||
ctypes.c_float(self.u1[2].data), ctypes.c_float(self.u1[2].data.strides[0]),
|
||||
self.cfl_data
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
hip_check(hip.hipDeviceSynchronize())
|
||||
self.u0, self.u1 = self.u1, self.u0
|
||||
|
||||
hip_check(hip.hipModuleUnload(module))
|
||||
|
||||
hip_check(hip.hipFree(cfl_data))
|
||||
|
||||
print("--Launching Kernel .HLL2Kernel. is ok")
|
||||
|
||||
def getOutput(self):
|
||||
return self.u0
|
||||
|
||||
def check(self):
|
||||
self.u0.check()
|
||||
self.u1.check()
|
||||
|
||||
# computing min with hipblas: the output is an index
|
||||
def min_hipblas(self, num_elements, cfl_data, stream):
|
||||
num_bytes = num_elements * np.dtype(np.float32).itemsize
|
||||
num_bytes_i = np.dtype(np.int32).itemsize
|
||||
indx_d = hip_check(hip.hipMalloc(num_bytes_i))
|
||||
indx_h = np.zeros(1, dtype=np.int32)
|
||||
x_temp = np.zeros(num_elements, dtype=np.float32)
|
||||
|
||||
#print("--size.data:", cfl_data.size)
|
||||
handle = hip_check(hipblas.hipblasCreate())
|
||||
|
||||
#hip_check(hipblas.hipblasGetStream(handle, stream))
|
||||
#"incx" [int] specifies the increment for the elements of x. incx must be > 0.
|
||||
hip_check(hipblas.hipblasIsamin(handle, num_elements, cfl_data, 1, indx_d))
|
||||
|
||||
# destruction of handle
|
||||
hip_check(hipblas.hipblasDestroy(handle))
|
||||
|
||||
# copy result (stored in indx_d) back to the host (store in indx_h)
|
||||
hip_check(hip.hipMemcpyAsync(indx_h,indx_d,num_bytes_i,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
|
||||
hip_check(hip.hipMemcpyAsync(x_temp,cfl_data,num_bytes,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
|
||||
#hip_check(hip.hipMemsetAsync(cfl_data,0,num_bytes,self.stream))
|
||||
hip_check(hip.hipStreamSynchronize(stream))
|
||||
|
||||
min_value = x_temp.flatten()[indx_h[0]-1]
|
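#Added note: hipblasIsamin follows the BLAS convention and returns a 1-based
#(Fortran-style) index, hence the "-1" when indexing the host copy above.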
||||
|
||||
# clean up
|
||||
hip_check(hip.hipStreamDestroy(stream))
|
||||
hip_check(hip.hipFree(cfl_data))
|
||||
return min_value
|
||||
|
||||
def computeDt(self):
|
||||
#max_dt = gpuarray.min(self.cfl_data, stream=self.stream).get();
|
||||
max_dt = self.min_hipblas(self.cfl_data.size, self.cfl_data, self.stream)
|
||||
return max_dt*0.5
|
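#Illustrative driver sketch (not part of the original file; num_steps and the
#stepping pattern are hypothetical). Assuming `sim` is a constructed simulator:
#
#    for i in range(num_steps):
#        dt = sim.computeDt()
#        sim.substep(dt, step_number=i % 2)
#    sim.check()
#    result = sim.getOutput()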
||||
|
@@ -1,193 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
This python module implements helpers for IPython / Jupyter and CUDA
|
||||
|
||||
Copyright (C) 2018 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import gc
|
||||
|
||||
from IPython.core import magic_arguments
|
||||
from IPython.core.magic import line_magic, Magics, magics_class
|
||||
import pycuda.driver as cuda
|
||||
|
||||
from GPUSimulators import Common, CudaContext
|
||||
|
||||
|
||||
@magics_class
|
||||
class MagicCudaContext(Magics):
|
||||
@line_magic
|
||||
@magic_arguments.magic_arguments()
|
||||
@magic_arguments.argument(
|
||||
'name', type=str, help='Name of context to create')
|
||||
@magic_arguments.argument(
|
||||
'--blocking', '-b', action="store_true", help='Enable blocking context')
|
||||
@magic_arguments.argument(
|
||||
'--no_cache', '-nc', action="store_true", help='Disable caching of kernels')
|
||||
@magic_arguments.argument(
|
||||
'--no_autotuning', '-na', action="store_true", help='Disable autotuning of kernels')
|
||||
def cuda_context_handler(self, line):
|
||||
args = magic_arguments.parse_argstring(self.cuda_context_handler, line)
|
||||
self.logger = logging.getLogger(__name__)
|
||||
|
||||
self.logger.info("Registering %s in user workspace", args.name)
|
||||
|
||||
context_flags = None
|
||||
if (args.blocking):
|
||||
context_flags = cuda.ctx_flags.SCHED_BLOCKING_SYNC
|
||||
|
||||
if args.name in self.shell.user_ns.keys():
|
||||
self.logger.debug("Context already registered! Ignoring")
|
||||
return
|
||||
else:
|
||||
self.logger.debug("Creating context")
|
||||
use_cache = False if args.no_cache else True
|
||||
use_autotuning = False if args.no_autotuning else True
|
||||
self.shell.user_ns[args.name] = CudaContext.CudaContext(context_flags=context_flags, use_cache=use_cache, autotuning=use_autotuning)
|
||||
|
||||
# this function will be called on exceptions in any cell
|
||||
def custom_exc(shell, etype, evalue, tb, tb_offset=None):
|
||||
self.logger.exception("Exception caught: Resetting to CUDA context %s", args.name)
|
||||
while (cuda.Context.get_current() != None):
|
||||
context = cuda.Context.get_current()
|
||||
self.logger.info("Popping <%s>", str(context.handle))
|
||||
cuda.Context.pop()
|
||||
|
||||
if args.name in self.shell.user_ns.keys():
|
||||
self.logger.info("Pushing <%s>", str(self.shell.user_ns[args.name].cuda_context.handle))
|
||||
self.shell.user_ns[args.name].cuda_context.push()
|
||||
else:
|
||||
self.logger.error("No CUDA context called %s found (something is wrong)", args.name)
|
||||
self.logger.error("CUDA will not work now")
|
||||
|
||||
self.logger.debug("==================================================================")
|
||||
|
||||
# still show the error within the notebook, don't just swallow it
|
||||
shell.showtraceback((etype, evalue, tb), tb_offset=tb_offset)
|
||||
|
||||
# this registers a custom exception handler for the whole current notebook
|
||||
get_ipython().set_custom_exc((Exception,), custom_exc)
|
||||
|
||||
|
||||
# Handle CUDA context when exiting python
|
||||
import atexit
|
||||
def exitfunc():
|
||||
self.logger.info("Exitfunc: Resetting CUDA context stack")
|
||||
while (cuda.Context.get_current() != None):
|
||||
context = cuda.Context.get_current()
|
||||
self.logger.info("`-> Popping <%s>", str(context.handle))
|
||||
cuda.Context.pop()
|
||||
self.logger.debug("==================================================================")
|
||||
atexit.register(exitfunc)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@magics_class
|
||||
class MagicLogger(Magics):
|
||||
logger_initialized = False
|
||||
|
||||
@line_magic
|
||||
@magic_arguments.magic_arguments()
|
||||
@magic_arguments.argument(
|
||||
'name', type=str, help='Name of context to create')
|
||||
@magic_arguments.argument(
|
||||
'--out', '-o', type=str, default='output.log', help='The filename to store the log to')
|
||||
@magic_arguments.argument(
|
||||
'--level', '-l', type=int, default=20, help='The level of logging to screen [0, 50]')
|
||||
@magic_arguments.argument(
|
||||
'--file_level', '-f', type=int, default=10, help='The level of logging to file [0, 50]')
|
||||
def setup_logging(self, line):
|
||||
if (self.logger_initialized):
|
||||
logging.getLogger('GPUSimulators').info("Global logger already initialized!")
|
||||
return;
|
||||
else:
|
||||
self.logger_initialized = True
|
||||
|
||||
args = magic_arguments.parse_argstring(self.setup_logging, line)
|
||||
import sys
|
||||
|
||||
#Get root logger
|
||||
logger = logging.getLogger('GPUSimulators')
|
||||
logger.setLevel(min(args.level, args.file_level))
|
||||
|
||||
#Add log to screen
|
||||
ch = logging.StreamHandler()
|
||||
ch.setLevel(args.level)
|
||||
logger.addHandler(ch)
|
||||
logger.log(args.level, "Console logger using level %s", logging.getLevelName(args.level))
|
||||
|
||||
#Get the outfilename (try to evaluate if Python expression...)
|
||||
try:
|
||||
outfile = eval(args.out, self.shell.user_global_ns, self.shell.user_ns)
|
||||
except:
|
||||
outfile = args.out
|
||||
|
||||
#Add log to file
|
||||
logger.log(args.level, "File logger using level %s to %s", logging.getLevelName(args.file_level), outfile)
|
||||
|
||||
fh = logging.FileHandler(outfile)
|
||||
formatter = logging.Formatter('%(asctime)s:%(name)s:%(levelname)s: %(message)s')
|
||||
fh.setFormatter(formatter)
|
||||
fh.setLevel(args.file_level)
|
||||
logger.addHandler(fh)
|
||||
|
||||
logger.info("Python version %s", sys.version)
|
||||
self.shell.user_ns[args.name] = logger
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@magics_class
|
||||
class MagicMPI(Magics):
|
||||
|
||||
@line_magic
|
||||
@magic_arguments.magic_arguments()
|
||||
@magic_arguments.argument(
|
||||
'name', type=str, help='Name of context to create')
|
||||
@magic_arguments.argument(
|
||||
'--num_engines', '-n', type=int, default=4, help='Number of engines to start')
|
||||
def setup_mpi(self, line):
|
||||
args = magic_arguments.parse_argstring(self.setup_mpi, line)
|
||||
logger = logging.getLogger('GPUSimulators')
|
||||
if args.name in self.shell.user_ns.keys():
|
||||
logger.warning("MPI alreay set up, resetting")
|
||||
self.shell.user_ns[args.name].shutdown()
|
||||
self.shell.user_ns[args.name] = None
|
||||
gc.collect()
|
||||
self.shell.user_ns[args.name] = Common.IPEngine(args.num_engines)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
# Register
|
||||
ip = get_ipython()
|
||||
ip.register_magics(MagicCudaContext)
|
||||
ip.register_magics(MagicLogger)
|
||||
ip.register_magics(MagicMPI)
|
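# Example notebook usage of the magics above (illustrative sketch; the
# argument values are hypothetical):
#
#   %cuda_context_handler my_context --blocking
#   %setup_logging logger --out run.log --level 20
#   %setup_mpi cluster --num_engines 4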
||||
|
@@ -1,252 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
This python module implements the Kurganov-Petrova numerical scheme
|
||||
for the shallow water equations, described in
|
||||
A. Kurganov & Guergana Petrova
|
||||
A Second-Order Well-Balanced Positivity Preserving Central-Upwind
|
||||
Scheme for the Saint-Venant System Communications in Mathematical
|
||||
Sciences, 5 (2007), 133-160.
|
||||
|
||||
Copyright (C) 2016 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
#Import packages we need
|
||||
from GPUSimulators import Simulator, Common
|
||||
from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
|
||||
import numpy as np
|
||||
import ctypes
|
||||
|
||||
#from pycuda import gpuarray
|
||||
from hip import hip,hiprtc
|
||||
|
||||
|
||||
|
||||
"""
|
||||
Class that solves the SW equations using the Kurganov-Petrova (KP07) scheme
|
||||
"""
|
||||
class KP07 (Simulator.BaseSimulator):
|
||||
|
||||
"""
|
||||
Initialization routine
|
||||
h0: Water depth incl ghost cells, (nx+1)*(ny+1) cells
|
||||
hu0: Initial momentum along x-axis incl ghost cells, (nx+1)*(ny+1) cells
|
||||
hv0: Initial momentum along y-axis incl ghost cells, (nx+1)*(ny+1) cells
|
||||
nx: Number of cells along x-axis
|
||||
ny: Number of cells along y-axis
|
||||
dx: Grid cell spacing along x-axis (20 000 m)
|
||||
dy: Grid cell spacing along y-axis (20 000 m)
|
||||
dt: Size of each timestep (90 s)
|
||||
g: Gravitational acceleration (9.81 m/s^2)
|
||||
"""
|
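# Construction sketch (illustrative only; the context object and the initial
# condition arrays h0, hu0, hv0 are assumed to be prepared by the caller):
#
#   from GPUSimulators import CudaContext
#   ctx = CudaContext.CudaContext(autotuning=False)
#   sim = KP07(ctx, h0, hu0, hv0, nx=128, ny=128,
#              dx=200.0, dy=200.0, g=9.81, theta=1.3)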
||||
def hip_check(call_result):
|
||||
err = call_result[0]
|
||||
result = call_result[1:]
|
||||
if len(result) == 1:
|
||||
result = result[0]
|
||||
if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
|
||||
raise RuntimeError(str(err))
|
||||
elif (
|
||||
isinstance(err, hiprtc.hiprtcResult)
|
||||
and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
|
||||
):
|
||||
raise RuntimeError(str(err))
|
||||
return result
|
||||
|
||||
def __init__(self,
|
||||
context,
|
||||
h0, hu0, hv0,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
g,
|
||||
theta=1.3,
|
||||
cfl_scale=0.9,
|
||||
order=2,
|
||||
boundary_conditions=BoundaryCondition(),
|
||||
block_width=16, block_height=16):
|
||||
|
||||
# Call super constructor
|
||||
super().__init__(context,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
boundary_conditions,
|
||||
cfl_scale,
|
||||
order,
|
||||
block_width, block_height);
|
||||
self.g = np.float32(g)
|
||||
self.theta = np.float32(theta)
|
||||
self.order = np.int32(order)
|
||||
|
||||
#Get kernels
|
||||
# module = context.get_module("cuda/SWE2D_KP07.cu",
|
||||
# defines={
|
||||
# 'BLOCK_WIDTH': self.block_size[0],
|
||||
# 'BLOCK_HEIGHT': self.block_size[1]
|
||||
# },
|
||||
# compile_args={
|
||||
# 'no_extern_c': True,
|
||||
# 'options': ["--use_fast_math"],
|
||||
# },
|
||||
# jit_compile_args={})
|
||||
# self.kernel = module.get_function("KP07Kernel")
|
||||
# self.kernel.prepare("iifffffiiPiPiPiPiPiPiP")
|
||||
|
||||
kernel_file_path = os.path.abspath(os.path.join('cuda', 'SWE2D_KP07.cu.hip'))
|
||||
with open(kernel_file_path, 'r') as file:
|
||||
kernel_source = file.read()
|
||||
|
||||
prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"KP07Kernel", 0, [], []))
|
||||
|
||||
props = hip.hipDeviceProp_t()
|
||||
hip_check(hip.hipGetDeviceProperties(props,0))
|
||||
arch = props.gcnArchName
|
||||
|
||||
print(f"Compiling kernel .KP07Kernel. for {arch}")
|
||||
|
||||
cflags = [b"--offload-arch="+arch]
|
||||
err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
|
||||
if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
|
||||
log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
|
||||
log = bytearray(log_size)
|
||||
hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
|
||||
raise RuntimeError(log.decode())
|
||||
code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
|
||||
code = bytearray(code_size)
|
||||
hip_check(hiprtc.hiprtcGetCode(prog, code))
|
||||
module = hip_check(hip.hipModuleLoadData(code))
|
||||
|
||||
kernel = hip_check(hip.hipModuleGetFunction(module, b"KP07Kernel"))
|
||||
|
||||
#Create data by uploading to device
|
||||
self.u0 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
2, 2,
|
||||
[h0, hu0, hv0])
|
||||
self.u1 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
2, 2,
|
||||
[None, None, None])
|
||||
#self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
|
||||
data_h = np.empty(self.grid_size, dtype=np.float32)
|
||||
num_bytes = data_h.size * data_h.itemsize
|
||||
self.cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
|
||||
typestr="float32",shape=self.grid_size)
|
||||
|
||||
dt_x = np.min(self.dx / (np.abs(hu0/h0) + np.sqrt(g*h0)))
|
||||
dt_y = np.min(self.dy / (np.abs(hv0/h0) + np.sqrt(g*h0)))
|
||||
dt = min(dt_x, dt_y)
|
||||
self.cfl_data.fill(dt, stream=self.stream)
|
||||
|
||||
|
||||
def substep(self, dt, step_number):
|
||||
self.substepRK(dt, step_number)
|
||||
|
||||
|
||||
def substepRK(self, dt, substep):
|
||||
# self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
|
||||
# self.nx, self.ny,
|
||||
# self.dx, self.dy, dt,
|
||||
# self.g,
|
||||
# self.theta,
|
||||
# Simulator.stepOrderToCodedInt(step=substep, order=self.order),
|
||||
# self.boundary_conditions,
|
||||
# self.u0[0].data.gpudata, self.u0[0].data.strides[0],
|
||||
# self.u0[1].data.gpudata, self.u0[1].data.strides[0],
|
||||
# self.u0[2].data.gpudata, self.u0[2].data.strides[0],
|
||||
# self.u1[0].data.gpudata, self.u1[0].data.strides[0],
|
||||
# self.u1[1].data.gpudata, self.u1[1].data.strides[0],
|
||||
# self.u1[2].data.gpudata, self.u1[2].data.strides[0],
|
||||
# self.cfl_data.gpudata)
|
||||
|
||||
#launch kernel
|
||||
hip_check(
|
||||
hip.hipModuleLaunchKernel(
|
||||
kernel,
|
||||
*self.grid_size,
|
||||
*self.block_size,
|
||||
sharedMemBytes=0,
|
||||
stream=self.stream,
|
||||
kernelParams=None,
|
||||
extra=( # pass kernel's arguments
|
||||
ctypes.c_int(self.nx), ctypes.c_int(self.ny),
|
||||
ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
|
||||
ctypes.c_float(self.g),
|
||||
ctypes.c_float(self.theta),
|
||||
ctypes.c_int(Simulator.stepOrderToCodedInt(step=substep, order=self.order)),
|
||||
ctypes.c_int(self.boundary_conditions),
|
||||
ctypes.c_float(self.u0[0].data), ctypes.c_float(self.u0[0].data.strides[0]),
|
||||
ctypes.c_float(self.u0[1].data), ctypes.c_float(self.u0[1].data.strides[0]),
|
||||
ctypes.c_float(self.u0[2].data), ctypes.c_float(self.u0[2].data.strides[0]),
|
||||
ctypes.c_float(self.u1[0].data), ctypes.c_float(self.u1[0].data.strides[0]),
|
||||
ctypes.c_float(self.u1[1].data), ctypes.c_float(self.u1[1].data.strides[0]),
|
||||
ctypes.c_float(self.u1[2].data), ctypes.c_float(self.u1[2].data.strides[0]),
|
||||
self.cfl_data
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
hip_check(hip.hipDeviceSynchronize())
|
||||
|
||||
self.u0, self.u1 = self.u1, self.u0
|
||||
|
||||
hip_check(hip.hipModuleUnload(module))
|
||||
|
||||
hip_check(hip.hipFree(self.cfl_data))
|
||||
|
||||
print("--Launching Kernel .KP07Kernel. is ok")
|
||||
|
||||
def getOutput(self):
|
||||
return self.u0
|
||||
|
||||
def check(self):
|
||||
self.u0.check()
|
||||
self.u1.check()
|
||||
|
||||
# computing min with hipblas: the output is an index
|
||||
def min_hipblas(self, num_elements, cfl_data, stream):
|
||||
num_bytes = num_elements * np.dtype(np.float32).itemsize
|
||||
num_bytes_i = np.dtype(np.int32).itemsize
|
||||
indx_d = hip_check(hip.hipMalloc(num_bytes_i))
|
||||
indx_h = np.zeros(1, dtype=np.int32)
|
||||
x_temp = np.zeros(num_elements, dtype=np.float32)
|
||||
|
||||
#print("--size.data:", cfl_data.size)
|
||||
handle = hip_check(hipblas.hipblasCreate())
|
||||
|
||||
#hip_check(hipblas.hipblasGetStream(handle, stream))
|
||||
#"incx" [int] specifies the increment for the elements of x. incx must be > 0.
|
||||
hip_check(hipblas.hipblasIsamin(handle, num_elements, cfl_data, 1, indx_d))
|
||||
|
||||
# destruction of handle
|
||||
hip_check(hipblas.hipblasDestroy(handle))
|
||||
|
||||
# copy result (stored in indx_d) back to the host (store in indx_h)
|
||||
hip_check(hip.hipMemcpyAsync(indx_h,indx_d,num_bytes_i,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
|
||||
hip_check(hip.hipMemcpyAsync(x_temp,cfl_data,num_bytes,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
|
||||
#hip_check(hip.hipMemsetAsync(cfl_data,0,num_bytes,self.stream))
|
||||
hip_check(hip.hipStreamSynchronize(stream))
|
||||
|
||||
min_value = x_temp.flatten()[indx_h[0]-1]
|
||||
|
||||
# clean up
|
||||
hip_check(hip.hipStreamDestroy(stream))
|
||||
hip_check(hip.hipFree(cfl_data))
|
||||
return min_value
|
||||
|
||||
def computeDt(self):
|
||||
max_dt = self.min_hipblas(self.cfl_data.size, self.cfl_data, self.stream)
|
||||
#max_dt = gpuarray.min(self.cfl_data, stream=self.stream).get();
|
||||
return max_dt*0.5**(self.order-1)
|
@@ -1,251 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
This python module implements the Kurganov-Petrova numerical scheme
|
||||
for the shallow water equations, described in
|
||||
A. Kurganov & Guergana Petrova
|
||||
A Second-Order Well-Balanced Positivity Preserving Central-Upwind
|
||||
Scheme for the Saint-Venant System Communications in Mathematical
|
||||
Sciences, 5 (2007), 133-160.
|
||||
|
||||
Copyright (C) 2016 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
#Import packages we need
|
||||
from GPUSimulators import Simulator, Common
|
||||
from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
|
||||
import numpy as np
|
||||
import ctypes
|
||||
|
||||
#from pycuda import gpuarray
|
||||
from hip import hip,hiprtc
|
||||
|
||||
|
||||
|
||||
|
||||
"""
|
||||
Class that solves the SW equations using the dimensionally split KP07 scheme
|
||||
"""
|
||||
class KP07_dimsplit(Simulator.BaseSimulator):
|
||||
|
||||
"""
|
||||
Initialization routine
|
||||
h0: Water depth incl ghost cells, (nx+1)*(ny+1) cells
|
||||
hu0: Initial momentum along x-axis incl ghost cells, (nx+1)*(ny+1) cells
|
||||
hv0: Initial momentum along y-axis incl ghost cells, (nx+1)*(ny+1) cells
|
||||
nx: Number of cells along x-axis
|
||||
ny: Number of cells along y-axis
|
||||
dx: Grid cell spacing along x-axis (20 000 m)
|
||||
dy: Grid cell spacing along y-axis (20 000 m)
|
||||
dt: Size of each timestep (90 s)
|
||||
g: Gravitational acceleration (9.81 m/s^2)
|
||||
"""
|
||||
|
||||
def hip_check(call_result):
|
||||
err = call_result[0]
|
||||
result = call_result[1:]
|
||||
if len(result) == 1:
|
||||
result = result[0]
|
||||
if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
|
||||
raise RuntimeError(str(err))
|
||||
elif (
|
||||
isinstance(err, hiprtc.hiprtcResult)
|
||||
and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
|
||||
):
|
||||
raise RuntimeError(str(err))
|
||||
return result
|
||||
|
||||
def __init__(self,
|
||||
context,
|
||||
h0, hu0, hv0,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
g,
|
||||
theta=1.3,
|
||||
cfl_scale=0.9,
|
||||
boundary_conditions=BoundaryCondition(),
|
||||
block_width=16, block_height=16):
|
||||
|
||||
# Call super constructor
|
||||
super().__init__(context,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
boundary_conditions,
|
||||
cfl_scale,
|
||||
2,
|
||||
block_width, block_height)
|
||||
self.gc_x = 2
|
||||
self.gc_y = 2
|
||||
self.g = np.float32(g)
|
||||
self.theta = np.float32(theta)
|
||||
|
||||
#Get kernels
|
||||
# module = context.get_module("cuda/SWE2D_KP07_dimsplit.cu",
|
||||
# defines={
|
||||
# 'BLOCK_WIDTH': self.block_size[0],
|
||||
# 'BLOCK_HEIGHT': self.block_size[1]
|
||||
# },
|
||||
# compile_args={
|
||||
# 'no_extern_c': True,
|
||||
# 'options': ["--use_fast_math"],
|
||||
# },
|
||||
# jit_compile_args={})
|
||||
# self.kernel = module.get_function("KP07DimsplitKernel")
|
||||
# self.kernel.prepare("iifffffiiPiPiPiPiPiPiP")
|
||||
|
||||
kernel_file_path = os.path.abspath(os.path.join('cuda', 'SWE2D_KP07_dimsplit.cu.hip'))
|
||||
with open(kernel_file_path, 'r') as file:
|
||||
kernel_source = file.read()
|
||||
|
||||
prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"KP07DimsplitKernel", 0, [], []))
|
||||
|
||||
props = hip.hipDeviceProp_t()
|
||||
hip_check(hip.hipGetDeviceProperties(props,0))
|
||||
arch = props.gcnArchName
|
||||
|
||||
print(f"Compiling kernel .KP07DimsplitKernel. for {arch}")
|
||||
|
||||
cflags = [b"--offload-arch="+arch]
|
||||
err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
|
||||
if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
|
||||
log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
|
||||
log = bytearray(log_size)
|
||||
hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
|
||||
raise RuntimeError(log.decode())
|
||||
code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
|
||||
code = bytearray(code_size)
|
||||
hip_check(hiprtc.hiprtcGetCode(prog, code))
|
||||
module = hip_check(hip.hipModuleLoadData(code))
|
||||
|
||||
kernel = hip_check(hip.hipModuleGetFunction(module, b"KP07DimsplitKernel"))
|
||||
|
||||
#Create data by uploading to device
|
||||
self.u0 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
self.gc_x, self.gc_y,
|
||||
[h0, hu0, hv0])
|
||||
self.u1 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
self.gc_x, self.gc_y,
|
||||
[None, None, None])
|
||||
#self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
|
||||
data_h = np.empty(self.grid_size, dtype=np.float32)
|
||||
num_bytes = data_h.size * data_h.itemsize
|
||||
self.cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
|
||||
typestr="float32",shape=self.grid_size)
|
||||
|
||||
dt_x = np.min(self.dx / (np.abs(hu0/h0) + np.sqrt(g*h0)))
|
||||
dt_y = np.min(self.dy / (np.abs(hv0/h0) + np.sqrt(g*h0)))
|
||||
dt = min(dt_x, dt_y)
|
||||
self.cfl_data.fill(dt, stream=self.stream)
|
||||
|
||||
def substep(self, dt, step_number):
|
||||
self.substepDimsplit(dt*0.5, step_number)
|
||||
|
||||
def substepDimsplit(self, dt, substep):
|
||||
# self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
|
||||
# self.nx, self.ny,
|
||||
# self.dx, self.dy, dt,
|
||||
# self.g,
|
||||
# self.theta,
|
||||
# substep,
|
||||
# self.boundary_conditions,
|
||||
# self.u0[0].data.gpudata, self.u0[0].data.strides[0],
|
||||
# self.u0[1].data.gpudata, self.u0[1].data.strides[0],
|
||||
# self.u0[2].data.gpudata, self.u0[2].data.strides[0],
|
||||
# self.u1[0].data.gpudata, self.u1[0].data.strides[0],
|
||||
# self.u1[1].data.gpudata, self.u1[1].data.strides[0],
|
||||
# self.u1[2].data.gpudata, self.u1[2].data.strides[0],
|
||||
# self.cfl_data.gpudata)
|
||||
|
||||
#launch kernel
|
||||
hip_check(
|
||||
hip.hipModuleLaunchKernel(
|
||||
kernel,
|
||||
*self.grid_size,
|
||||
*self.block_size,
|
||||
sharedMemBytes=0,
|
||||
stream=self.stream,
|
||||
kernelParams=None,
|
||||
extra=( # pass kernel's arguments
|
||||
ctypes.c_int(self.nx), ctypes.c_int(self.ny),
|
||||
ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
|
||||
ctypes.c_float(self.g),
|
||||
ctypes.c_float(self.theta),
|
||||
ctypes.c_int(substep),
|
||||
ctypes.c_int(self.boundary_conditions),
|
||||
ctypes.c_float(self.u0[0].data), ctypes.c_float(self.u0[0].data.strides[0]),
|
||||
ctypes.c_float(self.u0[1].data), ctypes.c_float(self.u0[1].data.strides[0]),
|
||||
ctypes.c_float(self.u0[2].data), ctypes.c_float(self.u0[2].data.strides[0]),
|
||||
ctypes.c_float(self.u1[0].data), ctypes.c_float(self.u1[0].data.strides[0]),
|
||||
ctypes.c_float(self.u1[1].data), ctypes.c_float(self.u1[1].data.strides[0]),
|
||||
ctypes.c_float(self.u1[2].data), ctypes.c_float(self.u1[2].data.strides[0]),
|
||||
self.cfl_data
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
hip_check(hip.hipDeviceSynchronize())
|
||||
|
||||
self.u0, self.u1 = self.u1, self.u0
|
||||
hip_check(hip.hipModuleUnload(module))
|
||||
|
||||
hip_check(hip.hipFree(self.cfl_data))
|
||||
|
||||
print("--Launching Kernel .KP07DimsplitKernel. is ok")
|
||||
|
||||
def getOutput(self):
|
||||
return self.u0
|
||||
|
||||
def check(self):
|
||||
self.u0.check()
|
||||
self.u1.check()
|
||||
|
||||
# computing min with hipblas: the output is an index
|
||||
def min_hipblas(self, num_elements, cfl_data, stream):
|
||||
num_bytes = num_elements * np.dtype(np.float32).itemsize
|
||||
num_bytes_i = np.dtype(np.int32).itemsize
|
||||
indx_d = hip_check(hip.hipMalloc(num_bytes_i))
|
||||
indx_h = np.zeros(1, dtype=np.int32)
|
||||
x_temp = np.zeros(num_elements, dtype=np.float32)
|
||||
|
||||
#print("--size.data:", cfl_data.size)
|
||||
handle = hip_check(hipblas.hipblasCreate())
|
||||
|
||||
#hip_check(hipblas.hipblasGetStream(handle, stream))
|
||||
#"incx" [int] specifies the increment for the elements of x. incx must be > 0.
|
||||
hip_check(hipblas.hipblasIsamin(handle, num_elements, cfl_data, 1, indx_d))
|
||||
|
||||
# destruction of handle
|
||||
hip_check(hipblas.hipblasDestroy(handle))
|
||||
|
||||
# copy result (stored in indx_d) back to the host (store in indx_h)
|
||||
hip_check(hip.hipMemcpyAsync(indx_h,indx_d,num_bytes_i,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
|
||||
hip_check(hip.hipMemcpyAsync(x_temp,cfl_data,num_bytes,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
|
||||
#hip_check(hip.hipMemsetAsync(cfl_data,0,num_bytes,self.stream))
|
||||
hip_check(hip.hipStreamSynchronize(stream))
|
||||
|
||||
min_value = x_temp.flatten()[indx_h[0]-1]
|
||||
|
||||
# clean up
|
||||
hip_check(hip.hipStreamDestroy(stream))
|
||||
hip_check(hip.hipFree(cfl_data))
|
||||
return min_value
|
||||
|
||||
def computeDt(self):
|
||||
#max_dt = gpuarray.min(self.cfl_data, stream=self.stream).get();
|
||||
max_dt = self.min_hipblas(self.cfl_data.size, self.cfl_data, self.stream)
|
||||
return max_dt*0.5
|
@@ -1,238 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
This python module implements the classical Lax-Friedrichs numerical
|
||||
scheme for the shallow water equations
|
||||
|
||||
Copyright (C) 2016 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
#Import packages we need
|
||||
from GPUSimulators import Simulator, Common
|
||||
from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
|
||||
import numpy as np
|
||||
import ctypes
|
||||
|
||||
#from pycuda import gpuarray
|
||||
from hip import hip,hiprtc
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
"""
|
||||
Class that solves the SW equations using the Lax-Friedrichs scheme
|
||||
"""
|
||||
class LxF (Simulator.BaseSimulator):
|
||||
|
||||
"""
|
||||
Initialization routine
|
||||
h0: Water depth incl ghost cells, (nx+1)*(ny+1) cells
|
||||
hu0: Initial momentum along x-axis incl ghost cells, (nx+1)*(ny+1) cells
|
||||
hv0: Initial momentum along y-axis incl ghost cells, (nx+1)*(ny+1) cells
|
||||
nx: Number of cells along x-axis
|
||||
ny: Number of cells along y-axis
|
||||
dx: Grid cell spacing along x-axis (20 000 m)
|
||||
dy: Grid cell spacing along y-axis (20 000 m)
|
||||
dt: Size of each timestep (90 s)
|
||||
g: Gravitational acceleration (9.81 m/s^2)
|
||||
"""
|
||||
|
||||
def hip_check(call_result):
|
||||
err = call_result[0]
|
||||
result = call_result[1:]
|
||||
if len(result) == 1:
|
||||
result = result[0]
|
||||
if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
|
||||
raise RuntimeError(str(err))
|
||||
elif (
|
||||
isinstance(err, hiprtc.hiprtcResult)
|
||||
and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
|
||||
):
|
||||
raise RuntimeError(str(err))
|
||||
return result
|
||||
|
||||
def __init__(self,
|
||||
context,
|
||||
h0, hu0, hv0,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
g,
|
||||
cfl_scale=0.9,
|
||||
boundary_conditions=BoundaryCondition(),
|
||||
block_width=16, block_height=16):
|
||||
|
||||
# Call super constructor
|
||||
super().__init__(context,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
boundary_conditions,
|
||||
cfl_scale,
|
||||
1,
|
||||
block_width, block_height);
|
||||
self.g = np.float32(g)
|
||||
|
||||
# Get kernels
|
||||
# module = context.get_module("cuda/SWE2D_LxF.cu",
|
||||
# defines={
|
||||
# 'BLOCK_WIDTH': self.block_size[0],
|
||||
# 'BLOCK_HEIGHT': self.block_size[1]
|
||||
# },
|
||||
# compile_args={
|
||||
# 'no_extern_c': True,
|
||||
# 'options': ["--use_fast_math"],
|
||||
# },
|
||||
# jit_compile_args={})
|
||||
# self.kernel = module.get_function("LxFKernel")
|
||||
# self.kernel.prepare("iiffffiPiPiPiPiPiPiP")
|
||||
|
||||
kernel_file_path = os.path.abspath(os.path.join('cuda', 'SWE2D_LxF.cu.hip'))
|
||||
with open(kernel_file_path, 'r') as file:
|
||||
kernel_source = file.read()
|
||||
|
||||
prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"LxFKernel", 0, [], []))
|
||||
|
||||
props = hip.hipDeviceProp_t()
|
||||
hip_check(hip.hipGetDeviceProperties(props,0))
|
||||
arch = props.gcnArchName
|
||||
|
||||
print(f"Compiling kernel .LxFKernel. for {arch}")
|
||||
|
||||
cflags = [b"--offload-arch="+arch]
|
||||
err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
|
||||
if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
|
||||
log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
|
||||
log = bytearray(log_size)
|
||||
hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
|
||||
raise RuntimeError(log.decode())
|
||||
code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
|
||||
code = bytearray(code_size)
|
||||
hip_check(hiprtc.hiprtcGetCode(prog, code))
|
||||
module = hip_check(hip.hipModuleLoadData(code))
|
||||
|
||||
kernel = hip_check(hip.hipModuleGetFunction(module, b"LxFKernel"))
|
||||
|
||||
#Create data by uploading to device
|
||||
self.u0 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
1, 1,
|
||||
[h0, hu0, hv0])
|
||||
self.u1 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
1, 1,
|
||||
[None, None, None])
|
||||
#self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
|
||||
data_h = np.empty(self.grid_size, dtype=np.float32)
|
||||
num_bytes = data_h.size * data_h.itemsize
|
||||
self.cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
|
||||
typestr="float32",shape=self.grid_size)
|
||||
|
||||
dt_x = np.min(self.dx / (np.abs(hu0/h0) + np.sqrt(g*h0)))
|
||||
dt_y = np.min(self.dy / (np.abs(hv0/h0) + np.sqrt(g*h0)))
|
||||
dt = min(dt_x, dt_y)
|
||||
self.cfl_data.fill(dt, stream=self.stream)
|
||||
|
||||
def substep(self, dt, step_number):
|
||||
# self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
|
||||
# self.nx, self.ny,
|
||||
# self.dx, self.dy, dt,
|
||||
# self.g,
|
||||
# self.boundary_conditions,
|
||||
# self.u0[0].data.gpudata, self.u0[0].data.strides[0],
|
||||
# self.u0[1].data.gpudata, self.u0[1].data.strides[0],
|
||||
# self.u0[2].data.gpudata, self.u0[2].data.strides[0],
|
||||
# self.u1[0].data.gpudata, self.u1[0].data.strides[0],
|
||||
# self.u1[1].data.gpudata, self.u1[1].data.strides[0],
|
||||
# self.u1[2].data.gpudata, self.u1[2].data.strides[0],
|
||||
# self.cfl_data.gpudata)
|
||||
|
||||
#launch kernel
|
||||
hip_check(
|
||||
hip.hipModuleLaunchKernel(
|
||||
kernel,
|
||||
*self.grid_size,
|
||||
*self.block_size,
|
||||
sharedMemBytes=0,
|
||||
stream=self.stream,
|
||||
kernelParams=None,
|
||||
extra=( # pass kernel's arguments
|
||||
ctypes.c_int(self.nx), ctypes.c_int(self.ny),
|
||||
ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
|
||||
ctypes.c_float(self.g),
|
||||
ctypes.c_int(self.boundary_conditions),
|
||||
ctypes.c_float(self.u0[0].data), ctypes.c_float(self.u0[0].data.strides[0]),
|
||||
ctypes.c_float(self.u0[1].data), ctypes.c_float(self.u0[1].data.strides[0]),
|
||||
ctypes.c_float(self.u0[2].data), ctypes.c_float(self.u0[2].data.strides[0]),
|
||||
ctypes.c_float(self.u1[0].data), ctypes.c_float(self.u1[0].data.strides[0]),
|
||||
ctypes.c_float(self.u1[1].data), ctypes.c_float(self.u1[1].data.strides[0]),
|
||||
ctypes.c_float(self.u1[2].data), ctypes.c_float(self.u1[2].data.strides[0]),
|
||||
self.cfl_data
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
hip_check(hip.hipDeviceSynchronize())
|
||||
|
||||
self.u0, self.u1 = self.u1, self.u0
|
||||
|
||||
hip_check(hip.hipModuleUnload(module))
|
||||
|
||||
hip_check(hip.hipFree(self.cfl_data))
|
||||
|
||||
print("--Launching Kernel .LxFKernel. is ok")
|
||||
|
||||
def getOutput(self):
|
||||
return self.u0
|
||||
|
||||
def check(self):
|
||||
self.u0.check()
|
||||
self.u1.check()
|
||||
|
||||
# computing min with hipblas: the output is an index
|
||||
def min_hipblas(self, num_elements, cfl_data, stream):
|
||||
num_bytes = num_elements * np.dtype(np.float32).itemsize
|
||||
num_bytes_i = np.dtype(np.int32).itemsize
|
||||
indx_d = hip_check(hip.hipMalloc(num_bytes_i))
|
||||
indx_h = np.zeros(1, dtype=np.int32)
|
||||
x_temp = np.zeros(num_elements, dtype=np.float32)
|
||||
|
||||
#print("--size.data:", cfl_data.size)
|
||||
handle = hip_check(hipblas.hipblasCreate())
|
||||
|
||||
#hip_check(hipblas.hipblasGetStream(handle, stream))
|
||||
#"incx" [int] specifies the increment for the elements of x. incx must be > 0.
|
||||
hip_check(hipblas.hipblasIsamin(handle, num_elements, cfl_data, 1, indx_d))
|
||||
|
||||
# destruction of handle
|
||||
hip_check(hipblas.hipblasDestroy(handle))
|
||||
|
||||
# copy result (stored in indx_d) back to the host (store in indx_h)
|
||||
hip_check(hip.hipMemcpyAsync(indx_h,indx_d,num_bytes_i,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
|
||||
hip_check(hip.hipMemcpyAsync(x_temp,cfl_data,num_bytes,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
|
||||
#hip_check(hip.hipMemsetAsync(cfl_data,0,num_bytes,self.stream))
|
||||
hip_check(hip.hipStreamSynchronize(stream))
|
||||
|
||||
min_value = x_temp.flatten()[indx_h[0]-1]
|
||||
|
||||
# clean up
|
||||
hip_check(hip.hipStreamDestroy(stream))
|
||||
hip_check(hip.hipFree(cfl_data))
|
||||
return min_value
|
||||
|
||||
def computeDt(self):
|
||||
#max_dt = gpuarray.min(self.cfl_data, stream=self.stream).get();
|
||||
max_dt = self.min_hipblas(self.cfl_data.size, self.cfl_data, self.stream)
|
||||
return max_dt*0.5
|
@@ -1,535 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
This python module implements MPI simulator class
|
||||
|
||||
Copyright (C) 2018 SINTEF Digital
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
|
||||
import logging
|
||||
from GPUSimulators import Simulator
|
||||
import numpy as np
|
||||
from mpi4py import MPI
|
||||
import time
|
||||
|
||||
#import pycuda.driver as cuda
|
||||
#import nvtx
|
||||
from hip import hip, hiprtc
|
||||
|
||||
|
||||
class MPIGrid(object):
|
||||
"""
|
||||
Class which represents an MPI grid of nodes. Facilitates easy communication between
|
||||
neighboring nodes
|
||||
"""
|
||||
def __init__(self, comm, ndims=2):
|
||||
self.logger = logging.getLogger(__name__)
|
||||
|
||||
assert ndims == 2, "Unsupported number of dimensions. Must be two at the moment"
|
||||
assert comm.size >= 1, "Must have at least one node"
|
||||
|
||||
self.grid = MPIGrid.getGrid(comm.size, ndims)
|
||||
self.comm = comm
|
||||
|
||||
self.logger.debug("Created MPI grid: {:}. Rank {:d} has coordinate {:}".format(
|
||||
self.grid, self.comm.rank, self.getCoordinate()))
|
||||
|
||||
def getCoordinate(self, rank=None):
|
||||
if (rank is None):
|
||||
rank = self.comm.rank
|
||||
i = (rank % self.grid[0])
|
||||
j = (rank // self.grid[0])
|
||||
return i, j
|
||||
|
||||
def getRank(self, i, j):
|
||||
return j*self.grid[0] + i
|
||||
|
||||
def getEast(self):
|
||||
i, j = self.getCoordinate(self.comm.rank)
|
||||
i = (i+1) % self.grid[0]
|
||||
return self.getRank(i, j)
|
||||
|
||||
def getWest(self):
|
||||
i, j = self.getCoordinate(self.comm.rank)
|
||||
i = (i+self.grid[0]-1) % self.grid[0]
|
||||
return self.getRank(i, j)
|
||||
|
||||
def getNorth(self):
|
||||
i, j = self.getCoordinate(self.comm.rank)
|
||||
j = (j+1) % self.grid[1]
|
||||
return self.getRank(i, j)
|
||||
|
||||
def getSouth(self):
|
||||
i, j = self.getCoordinate(self.comm.rank)
|
||||
j = (j+self.grid[1]-1) % self.grid[1]
|
||||
return self.getRank(i, j)
|
||||
|
||||
def getGrid(num_nodes, num_dims):
|
||||
assert(isinstance(num_nodes, int))
|
||||
assert(isinstance(num_dims, int))
|
||||
|
||||
# Adapted from https://stackoverflow.com/questions/28057307/factoring-a-number-into-roughly-equal-factors
|
||||
# Original code by https://stackoverflow.com/users/3928385/ishamael
|
||||
# Factorizes a number into n roughly equal factors
|
||||
|
||||
#Dictionary to remember already computed permutations
|
||||
memo = {}
|
||||
def dp(n, left): # returns tuple (cost, [factors])
|
||||
"""
|
||||
Recursively searches through all factorizations
|
||||
"""
|
||||
|
||||
#Already tried: return existing result
|
||||
if (n, left) in memo:
|
||||
return memo[(n, left)]
|
||||
|
||||
#Spent all factors: return number itself
|
||||
if left == 1:
|
||||
return (n, [n])
|
||||
|
||||
#Find new factor
|
||||
i = 2
|
||||
best = n
|
||||
bestTuple = [n]
|
||||
while i * i < n:
|
||||
#If factor found
|
||||
if n % i == 0:
|
||||
#Factorize remainder
|
||||
rem = dp(n // i, left - 1)
|
||||
|
||||
#If new permutation better, save it
|
||||
if rem[0] + i < best:
|
||||
best = rem[0] + i
|
||||
bestTuple = [i] + rem[1]
|
||||
i += 1
|
||||
|
||||
#Store calculation
|
||||
memo[(n, left)] = (best, bestTuple)
|
||||
return memo[(n, left)]
|
||||
|
||||
|
||||
grid = dp(num_nodes, num_dims)[1]
|
||||
|
||||
if (len(grid) < num_dims):
|
||||
#Split problematic 4
|
||||
if (4 in grid):
|
||||
grid.remove(4)
|
||||
grid.append(2)
|
||||
grid.append(2)
|
||||
|
||||
#Pad with ones to guarantee num_dims
|
||||
grid = grid + [1]*(num_dims - len(grid))
|
||||
|
||||
#Sort in descending order
|
||||
grid = np.sort(grid)
|
||||
grid = grid[::-1]
|
||||
|
||||
# XXX: We only use vertical (north-south) partitioning for now
|
||||
grid[0] = 1
|
||||
grid[1] = num_nodes
|
||||
|
||||
return grid
|
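#Illustrative example (added comment): with the vertical-only partitioning
#enforced above, getGrid(4, 2) is expected to return [1, 4], i.e. a single
#column of four rows.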
||||
|
||||
|
||||
def gather(self, data, root=0):
|
||||
out_data = None
|
||||
if (self.comm.rank == root):
|
||||
out_data = np.empty([self.comm.size] + list(data.shape), dtype=data.dtype)
|
||||
self.comm.Gather(data, out_data, root)
|
||||
return out_data
|
||||
|
||||
def getLocalRank(self):
|
||||
"""
|
||||
Returns the local rank on this node for this MPI process
|
||||
"""
|
||||
|
||||
# This function has been adapted from
|
||||
# https://github.com/SheffieldML/PyDeepGP/blob/master/deepgp/util/parallel.py
|
||||
# by Zhenwen Dai released under BSD 3-Clause "New" or "Revised" License:
|
||||
#
|
||||
# Copyright (c) 2016, Zhenwen Dai
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# * Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# * Neither the name of DGP nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#Get this ranks unique (physical) node name
|
||||
node_name = MPI.Get_processor_name()
|
||||
|
||||
#Gather the list of all node names on all nodes
|
||||
node_names = self.comm.allgather(node_name)
|
||||
|
||||
#Loop over all node names up until our rank
|
||||
#and count how many duplicates of our nodename we find
|
||||
local_rank = len([0 for name in node_names[:self.comm.rank] if name==node_name])
|
||||
|
||||
return local_rank
|
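#Illustrative example (added comment): with two MPI ranks placed on the same
#physical node, the first of them gets local_rank 0 and the second local_rank 1.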
||||
|
||||
|
||||
class MPISimulator(Simulator.BaseSimulator):
|
||||
"""
|
||||
Class which handles communication between simulators on different MPI nodes
|
||||
"""
|
||||
def hip_check(call_result):
|
||||
err = call_result[0]
|
||||
result = call_result[1:]
|
||||
if len(result) == 1:
|
||||
result = result[0]
|
||||
if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
|
||||
raise RuntimeError(str(err))
|
||||
elif (
|
||||
isinstance(err, hiprtc.hiprtcResult)
|
||||
and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
|
||||
):
|
||||
raise RuntimeError(str(err))
|
||||
return result
|
||||
|
||||
def __init__(self, sim, grid):
|
||||
self.profiling_data_mpi = { 'start': {}, 'end': {} }
|
||||
self.profiling_data_mpi["start"]["t_mpi_halo_exchange"] = 0
|
||||
self.profiling_data_mpi["end"]["t_mpi_halo_exchange"] = 0
|
||||
self.profiling_data_mpi["start"]["t_mpi_halo_exchange_download"] = 0
|
||||
self.profiling_data_mpi["end"]["t_mpi_halo_exchange_download"] = 0
|
||||
self.profiling_data_mpi["start"]["t_mpi_halo_exchange_upload"] = 0
|
||||
self.profiling_data_mpi["end"]["t_mpi_halo_exchange_upload"] = 0
|
||||
self.profiling_data_mpi["start"]["t_mpi_halo_exchange_sendreceive"] = 0
|
||||
self.profiling_data_mpi["end"]["t_mpi_halo_exchange_sendreceive"] = 0
|
||||
self.profiling_data_mpi["start"]["t_mpi_step"] = 0
|
||||
self.profiling_data_mpi["end"]["t_mpi_step"] = 0
|
||||
self.profiling_data_mpi["n_time_steps"] = 0
|
||||
self.logger = logging.getLogger(__name__)
|
||||
|
||||
autotuner = sim.context.autotuner
|
||||
sim.context.autotuner = None;
|
||||
boundary_conditions = sim.getBoundaryConditions()
|
||||
super().__init__(sim.context,
|
||||
sim.nx, sim.ny,
|
||||
sim.dx, sim.dy,
|
||||
boundary_conditions,
|
||||
sim.cfl_scale,
|
||||
sim.num_substeps,
|
||||
sim.block_size[0], sim.block_size[1])
|
||||
sim.context.autotuner = autotuner
|
||||
|
||||
self.sim = sim
|
||||
self.grid = grid
|
||||
|
||||
#Get neighbor node ids
|
||||
self.east = grid.getEast()
|
||||
self.west = grid.getWest()
|
||||
self.north = grid.getNorth()
|
||||
self.south = grid.getSouth()
|
||||
|
||||
#Get coordinate of this node
|
||||
#and handle global boundary conditions
|
||||
new_boundary_conditions = Simulator.BoundaryCondition({
|
||||
'north': Simulator.BoundaryCondition.Type.Dirichlet,
|
||||
'south': Simulator.BoundaryCondition.Type.Dirichlet,
|
||||
'east': Simulator.BoundaryCondition.Type.Dirichlet,
|
||||
'west': Simulator.BoundaryCondition.Type.Dirichlet
|
||||
})
|
||||
gi, gj = grid.getCoordinate()
|
||||
#print("gi: " + str(gi) + ", gj: " + str(gj))
|
||||
if (gi == 0 and boundary_conditions.west != Simulator.BoundaryCondition.Type.Periodic):
|
||||
self.west = None
|
||||
new_boundary_conditions.west = boundary_conditions.west;
|
||||
if (gj == 0 and boundary_conditions.south != Simulator.BoundaryCondition.Type.Periodic):
|
||||
self.south = None
|
||||
new_boundary_conditions.south = boundary_conditions.south;
|
||||
if (gi == grid.grid[0]-1 and boundary_conditions.east != Simulator.BoundaryCondition.Type.Periodic):
|
||||
self.east = None
|
||||
new_boundary_conditions.east = boundary_conditions.east;
|
||||
if (gj == grid.grid[1]-1 and boundary_conditions.north != Simulator.BoundaryCondition.Type.Periodic):
|
||||
self.north = None
|
||||
new_boundary_conditions.north = boundary_conditions.north;
|
||||
sim.setBoundaryConditions(new_boundary_conditions)
|
||||
|
||||
#Get number of variables
|
||||
self.nvars = len(self.getOutput().gpu_variables)
|
||||
|
||||
#Shorthands for computing extents and sizes
|
||||
gc_x = int(self.sim.getOutput()[0].x_halo)
|
||||
gc_y = int(self.sim.getOutput()[0].y_halo)
|
||||
nx = int(self.sim.nx)
|
||||
ny = int(self.sim.ny)
|
||||
|
||||
#Set regions for ghost cells to read from
|
||||
#These have the format [x0, y0, width, height]
|
||||
self.read_e = np.array([ nx, 0, gc_x, ny + 2*gc_y])
|
||||
self.read_w = np.array([gc_x, 0, gc_x, ny + 2*gc_y])
|
||||
self.read_n = np.array([gc_x, ny, nx, gc_y])
|
||||
self.read_s = np.array([gc_x, gc_y, nx, gc_y])
|
||||
|
||||
#Set regions for ghost cells to write to
|
||||
self.write_e = self.read_e + np.array([gc_x, 0, 0, 0])
|
||||
self.write_w = self.read_w - np.array([gc_x, 0, 0, 0])
|
||||
self.write_n = self.read_n + np.array([0, gc_y, 0, 0])
|
||||
self.write_s = self.read_s - np.array([0, gc_y, 0, 0])
|
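#Added note: each write extent is the matching read extent shifted by one halo
#width, so it addresses this rank's ghost cells, which are filled with the data
#received from the corresponding neighbour.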
||||
|
||||
#Allocate data for receiving
|
||||
#Note that east and west also transfer ghost cells
|
||||
#whilst north/south only transfer internal cells
|
||||
#Reuses the width/height defined in the read-extents above
|
||||
##self.in_e = cuda.pagelocked_empty((int(self.nvars), int(self.read_e[3]), int(self.read_e[2])), dtype=np.float32) #np.empty((self.nvars, self.read_e[3], self.read_e[2]), dtype=np.float32)
|
||||
|
||||
##self.in_w = cuda.pagelocked_empty((int(self.nvars), int(self.read_w[3]), int(self.read_w[2])), dtype=np.float32) #np.empty((self.nvars, self.read_w[3], self.read_w[2]), dtype=np.float32)
|
||||
##self.in_n = cuda.pagelocked_empty((int(self.nvars), int(self.read_n[3]), int(self.read_n[2])), dtype=np.float32) #np.empty((self.nvars, self.read_n[3], self.read_n[2]), dtype=np.float32)
|
||||
##self.in_s = cuda.pagelocked_empty((int(self.nvars), int(self.read_s[3]), int(self.read_s[2])), dtype=np.float32) #np.empty((self.nvars, self.read_s[3], self.read_s[2]), dtype=np.float32)
|
||||
|
||||
self.in_e = np.empty((int(self.nvars), int(self.read_e[3]), int(self.read_e[2])), dtype=np.float32)
|
||||
num_bytes_e = self.in_e.size * self.in_e.itemsize
|
||||
#hipHostMalloc allocates pinned host memory which is mapped into the address space of all GPUs in the system; the memory can be accessed directly by the GPU device
|
||||
#hipHostMallocDefault: memory is mapped and portable (default allocation)
|
||||
#hipHostMallocPortable: memory is explicitly portable across different devices
|
||||
self.in_e = hip_check(hip.hipHostMalloc(num_bytes_e,hip.hipHostMallocPortable))
|
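#Hedged note (added comment): hipHostMalloc returns a pinned buffer object that
#replaces the np.empty array created just above; the MPI Isend/Irecv calls and
#the slicing in full_exchange assume array-like behaviour, so a typed view of
#the pinned buffer (same shape/dtype as the np.empty call) is likely intended.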
||||
|
||||
self.in_w = np.empty((int(self.nvars), int(self.read_w[3]), int(self.read_w[2])), dtype=np.float32)
|
||||
num_bytes_w = self.in_w.size * self.in_w.itemsize
|
||||
self.in_w = hip_check(hip.hipHostMalloc(num_bytes_w,hip.hipHostMallocPortable))
|
||||
|
||||
self.in_n = np.empty((int(self.nvars), int(self.read_n[3]), int(self.read_n[2])), dtype=np.float32)
|
||||
num_bytes_n = self.in_n.size * self.in_n.itemsize
|
||||
self.in_n = hip_check(hip.hipHostMalloc(num_bytes_n,hip.hipHostMallocPortable))
|
||||
|
||||
self.in_s = np.empty((int(self.nvars), int(self.read_s[3]), int(self.read_s[2])), dtype=np.float32)
|
||||
num_bytes_s = self.in_s.size * self.in_s.itemsize
|
||||
self.in_s = hip_check(hip.hipHostMalloc(num_bytes_s,hip.hipHostMallocPortable))
|
||||
|
||||
#Allocate data for sending
|
||||
#self.out_e = cuda.pagelocked_empty((int(self.nvars), int(self.read_e[3]), int(self.read_e[2])), dtype=np.float32) #np.empty_like(self.in_e)
|
||||
#self.out_w = cuda.pagelocked_empty((int(self.nvars), int(self.read_w[3]), int(self.read_w[2])), dtype=np.float32) #np.empty_like(self.in_w)
|
||||
#self.out_n = cuda.pagelocked_empty((int(self.nvars), int(self.read_n[3]), int(self.read_n[2])), dtype=np.float32) #np.empty_like(self.in_n)
|
||||
#self.out_s = cuda.pagelocked_empty((int(self.nvars), int(self.read_s[3]), int(self.read_s[2])), dtype=np.float32) #np.empty_like(self.in_s)
|
||||
|
||||
self.out_e = np.empty((int(self.nvars), int(self.read_e[3]), int(self.read_e[2])), dtype=np.float32)
|
||||
num_bytes_e = self.out_e.size * self.out_e.itemsize
|
||||
self.out_e = hip_check(hip.hipHostMalloc(num_bytes_e,hip.hipHostMallocPortable))
|
||||
|
||||
self.out_w = np.empty((int(self.nvars), int(self.read_w[3]), int(self.read_w[2])), dtype=np.float32)
|
||||
num_bytes_w = self.out_w.size * self.out_w.itemsize
|
||||
self.out_w = hip_check(hip.hipHostMalloc(num_bytes_w,hip.hipHostMallocPortable))
|
||||
|
||||
self.out_n = np.empty((int(self.nvars), int(self.read_n[3]), int(self.read_n[2])), dtype=np.float32)
|
||||
num_bytes_n = self.out_n.size * self.out_n.itemsize
|
||||
self.out_n = hip_check(hip.hipHostMalloc(num_bytes_n,hip.hipHostMallocPortable))
|
||||
|
||||
self.out_s = np.empty((int(self.nvars), int(self.read_s[3]), int(self.read_s[2])), dtype=np.float32)
|
||||
num_bytes_s = self.out_s.size * self.out_s.itemsize
|
||||
self.out_s = hip_check(hip.hipHostMalloc(num_bytes_s,hip.hipHostMallocPortable))
|
||||
|
||||
|
||||
self.logger.debug("Simlator rank {:d} initialized on {:s}".format(self.grid.comm.rank, MPI.Get_processor_name()))
|
||||
|
||||
self.full_exchange()
|
||||
sim.context.synchronize()
|
||||
|
||||
def substep(self, dt, step_number):
|
||||
|
||||
#nvtx.mark("substep start", color="yellow")
|
||||
|
||||
self.profiling_data_mpi["start"]["t_mpi_step"] += time.time()
|
||||
|
||||
#nvtx.mark("substep external", color="blue")
|
||||
self.sim.substep(dt, step_number, external=True, internal=False) # only "internal ghost cells"
|
||||
|
||||
#nvtx.mark("substep internal", color="red")
|
||||
self.sim.substep(dt, step_number, internal=True, external=False) # "internal ghost cells" excluded
|
||||
|
||||
#nvtx.mark("substep full", color="blue")
|
||||
#self.sim.substep(dt, step_number, external=True, internal=True)
|
||||
|
||||
self.sim.swapBuffers()
|
||||
|
||||
self.profiling_data_mpi["end"]["t_mpi_step"] += time.time()
|
||||
|
||||
#nvtx.mark("exchange", color="blue")
|
||||
self.full_exchange()
|
||||
|
||||
#nvtx.mark("sync start", color="blue")
|
||||
#self.sim.stream.synchronize()
|
||||
#self.sim.internal_stream.synchronize()
|
||||
hip_check(hip.hipStreamSynchronize(self.sim.stream))
|
||||
hip_check(hip.hipStreamSynchronize(self.sim.internal_stream))
|
||||
#nvtx.mark("sync end", color="blue")
|
||||
|
||||
self.profiling_data_mpi["n_time_steps"] += 1
|
||||
|
||||
def getOutput(self):
|
||||
return self.sim.getOutput()
|
||||
|
||||
def synchronize(self):
|
||||
self.sim.synchronize()
|
||||
|
||||
def check(self):
|
||||
return self.sim.check()
|
||||
|
||||
def computeDt(self):
|
||||
local_dt = np.array([np.float32(self.sim.computeDt())]);
|
||||
global_dt = np.empty(1, dtype=np.float32)
|
||||
self.grid.comm.Allreduce(local_dt, global_dt, op=MPI.MIN)
|
||||
self.logger.debug("Local dt: {:f}, global dt: {:f}".format(local_dt[0], global_dt[0]))
|
||||
return global_dt[0]
|
||||
|
||||
|
||||
def getExtent(self):
|
||||
"""
|
||||
Function which returns the extent of node with rank
|
||||
rank in the grid
|
||||
"""
|
||||
width = self.sim.nx*self.sim.dx
|
||||
height = self.sim.ny*self.sim.dy
|
||||
i, j = self.grid.getCoordinate()
|
||||
x0 = i * width
|
||||
y0 = j * height
|
||||
x1 = x0 + width
|
||||
y1 = y0 + height
|
||||
return [x0, x1, y0, y1]
|
||||
|
||||
def full_exchange(self):
|
||||
####
|
||||
# First transfer internal cells north-south
|
||||
####
|
||||
|
||||
#Download from the GPU
|
||||
self.profiling_data_mpi["start"]["t_mpi_halo_exchange_download"] += time.time()
|
||||
|
||||
if self.north is not None:
|
||||
for k in range(self.nvars):
|
||||
self.sim.u0[k].download(self.sim.stream, cpu_data=self.out_n[k,:,:], asynch=True, extent=self.read_n)
|
||||
if self.south is not None:
|
||||
for k in range(self.nvars):
|
||||
self.sim.u0[k].download(self.sim.stream, cpu_data=self.out_s[k,:,:], asynch=True, extent=self.read_s)
|
||||
#self.sim.stream.synchronize()
|
||||
hip_check(hip.hipStreamSynchronize(self.sim.stream))
|
||||
|
||||
self.profiling_data_mpi["end"]["t_mpi_halo_exchange_download"] += time.time()
|
||||
|
||||
#Send/receive to north/south neighbours
|
||||
self.profiling_data_mpi["start"]["t_mpi_halo_exchange_sendreceive"] += time.time()
|
||||
|
||||
comm_send = []
|
||||
comm_recv = []
|
||||
if self.north is not None:
|
||||
comm_send += [self.grid.comm.Isend(self.out_n, dest=self.north, tag=4*self.nt + 0)]
|
||||
comm_recv += [self.grid.comm.Irecv(self.in_n, source=self.north, tag=4*self.nt + 1)]
|
||||
if self.south is not None:
|
||||
comm_send += [self.grid.comm.Isend(self.out_s, dest=self.south, tag=4*self.nt + 1)]
|
||||
comm_recv += [self.grid.comm.Irecv(self.in_s, source=self.south, tag=4*self.nt + 0)]
|
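#Added note (hedged reading): the tags pair each Isend with the matching Irecv
#on the neighbour (north/south use offsets 0 and 1, east/west below use 2 and 3),
#offset by 4*self.nt, where self.nt is assumed to be the step counter from the
#base class, so tags stay distinct across time steps.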
||||
|
||||
#Wait for incoming transfers to complete
|
||||
for comm in comm_recv:
|
||||
comm.wait()
|
||||
|
||||
self.profiling_data_mpi["end"]["t_mpi_halo_exchange_sendreceive"] += time.time()
|
||||
|
||||
#Upload to the GPU
|
||||
self.profiling_data_mpi["start"]["t_mpi_halo_exchange_upload"] += time.time()
|
||||
|
||||
if self.north is not None:
|
||||
for k in range(self.nvars):
|
||||
self.sim.u0[k].upload(self.sim.stream, self.in_n[k,:,:], extent=self.write_n)
|
||||
if self.south is not None:
|
||||
for k in range(self.nvars):
|
||||
self.sim.u0[k].upload(self.sim.stream, self.in_s[k,:,:], extent=self.write_s)
|
||||
|
||||
self.profiling_data_mpi["end"]["t_mpi_halo_exchange_upload"] += time.time()
|
||||
|
||||
#Wait for sending to complete
|
||||
self.profiling_data_mpi["start"]["t_mpi_halo_exchange_sendreceive"] += time.time()
|
||||
|
||||
for comm in comm_send:
|
||||
comm.wait()
|
||||
|
||||
self.profiling_data_mpi["end"]["t_mpi_halo_exchange_sendreceive"] += time.time()
|
||||
|
||||
####
|
||||
# Then transfer east-west including ghost cells that have been filled in by north-south transfer above
|
||||
####
|
||||
|
||||
#Download from the GPU
|
||||
self.profiling_data_mpi["start"]["t_mpi_halo_exchange_download"] += time.time()
|
||||
|
||||
if self.east is not None:
|
||||
for k in range(self.nvars):
|
||||
self.sim.u0[k].download(self.sim.stream, cpu_data=self.out_e[k,:,:], asynch=True, extent=self.read_e)
|
||||
if self.west is not None:
|
||||
for k in range(self.nvars):
|
||||
self.sim.u0[k].download(self.sim.stream, cpu_data=self.out_w[k,:,:], asynch=True, extent=self.read_w)
|
||||
#self.sim.stream.synchronize()
|
||||
hip_check(hip.hipStreamSynchronize(self.sim.stream))
|
||||
|
||||
self.profiling_data_mpi["end"]["t_mpi_halo_exchange_download"] += time.time()
|
||||
|
||||
#Send/receive to east/west neighbours
|
||||
self.profiling_data_mpi["start"]["t_mpi_halo_exchange_sendreceive"] += time.time()
|
||||
|
||||
comm_send = []
|
||||
comm_recv = []
|
||||
if self.east is not None:
|
||||
comm_send += [self.grid.comm.Isend(self.out_e, dest=self.east, tag=4*self.nt + 2)]
|
||||
comm_recv += [self.grid.comm.Irecv(self.in_e, source=self.east, tag=4*self.nt + 3)]
|
||||
if self.west is not None:
|
||||
comm_send += [self.grid.comm.Isend(self.out_w, dest=self.west, tag=4*self.nt + 3)]
|
||||
comm_recv += [self.grid.comm.Irecv(self.in_w, source=self.west, tag=4*self.nt + 2)]
|
||||
|
||||
#Wait for incoming transfers to complete
|
||||
for comm in comm_recv:
|
||||
comm.wait()
|
||||
|
||||
self.profiling_data_mpi["end"]["t_mpi_halo_exchange_sendreceive"] += time.time()
|
||||
|
||||
#Upload to the GPU
|
||||
self.profiling_data_mpi["start"]["t_mpi_halo_exchange_upload"] += time.time()
|
||||
|
||||
if self.east is not None:
|
||||
for k in range(self.nvars):
|
||||
self.sim.u0[k].upload(self.sim.stream, self.in_e[k,:,:], extent=self.write_e)
|
||||
if self.west is not None:
|
||||
for k in range(self.nvars):
|
||||
self.sim.u0[k].upload(self.sim.stream, self.in_w[k,:,:], extent=self.write_w)
|
||||
|
||||
self.profiling_data_mpi["end"]["t_mpi_halo_exchange_upload"] += time.time()
|
||||
|
||||
#Wait for sending to complete
|
||||
self.profiling_data_mpi["start"]["t_mpi_halo_exchange_sendreceive"] += time.time()
|
||||
|
||||
for comm in comm_send:
|
||||
comm.wait()
|
||||
|
||||
self.profiling_data_mpi["end"]["t_mpi_halo_exchange_sendreceive"] += time.time()
|
@ -1,264 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
This python module implements the SHMEM simulator class
|
||||
|
||||
Copyright (C) 2020 Norwegian Meteorological Institute
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
|
||||
import logging
|
||||
from GPUSimulators import Simulator, CudaContext
|
||||
import numpy as np
|
||||
|
||||
#import pycuda.driver as cuda
|
||||
from hip import hip, hiprtc
|
||||
|
||||
import time
|
||||
|
||||
class SHMEMSimulator(Simulator.BaseSimulator):
|
||||
"""
|
||||
Class which handles communication and synchronization between simulators in different
|
||||
contexts (presumably on different GPUs)
|
||||
"""
|
||||
def __init__(self, sims, grid):
|
||||
self.logger = logging.getLogger(__name__)
|
||||
|
||||
assert(len(sims) > 1)
|
||||
|
||||
self.sims = sims
|
||||
|
||||
# XXX: This is not what was intended. Do we need extra wrapper class SHMEMSimulator?
|
||||
# See also getOutput() and check().
|
||||
#
|
||||
# SHMEMSimulatorGroup would then not have any superclass, but manage a collection of
|
||||
# SHMEMSimulators that have BaseSimulator as a superclass.
|
||||
#
|
||||
# This would also eliminate the need for all the array bookkeeping in this class.
|
||||
autotuner = sims[0].context.autotuner
|
||||
sims[0].context.autotuner = None
|
||||
boundary_conditions = sims[0].getBoundaryConditions()
|
||||
super().__init__(sims[0].context,
|
||||
sims[0].nx, sims[0].ny,
|
||||
sims[0].dx, sims[0].dy,
|
||||
boundary_conditions,
|
||||
sims[0].cfl_scale,
|
||||
sims[0].num_substeps,
|
||||
sims[0].block_size[0], sims[0].block_size[1])
|
||||
sims[0].context.autotuner = autotuner
|
||||
|
||||
self.sims = sims
|
||||
self.grid = grid
|
||||
|
||||
self.east = [None] * len(self.sims)
|
||||
self.west = [None] * len(self.sims)
|
||||
self.north = [None] * len(self.sims)
|
||||
self.south = [None] * len(self.sims)
|
||||
|
||||
self.nvars = [None] * len(self.sims)
|
||||
|
||||
self.read_e = [None] * len(self.sims)
|
||||
self.read_w = [None] * len(self.sims)
|
||||
self.read_n = [None] * len(self.sims)
|
||||
self.read_s = [None] * len(self.sims)
|
||||
|
||||
self.write_e = [None] * len(self.sims)
|
||||
self.write_w = [None] * len(self.sims)
|
||||
self.write_n = [None] * len(self.sims)
|
||||
self.write_s = [None] * len(self.sims)
|
||||
|
||||
self.e = [None] * len(self.sims)
|
||||
self.w = [None] * len(self.sims)
|
||||
self.n = [None] * len(self.sims)
|
||||
self.s = [None] * len(self.sims)
|
||||
|
||||
for i, sim in enumerate(self.sims):
|
||||
#Get neighbor subdomain ids
|
||||
self.east[i] = grid.getEast(i)
|
||||
self.west[i] = grid.getWest(i)
|
||||
self.north[i] = grid.getNorth(i)
|
||||
self.south[i] = grid.getSouth(i)
|
||||
|
||||
#Get coordinate of this subdomain
|
||||
#and handle global boundary conditions
|
||||
new_boundary_conditions = Simulator.BoundaryCondition({
|
||||
'north': Simulator.BoundaryCondition.Type.Dirichlet,
|
||||
'south': Simulator.BoundaryCondition.Type.Dirichlet,
|
||||
'east': Simulator.BoundaryCondition.Type.Dirichlet,
|
||||
'west': Simulator.BoundaryCondition.Type.Dirichlet
|
||||
})
|
||||
gi, gj = grid.getCoordinate(i)
|
||||
if (gi == 0 and boundary_conditions.west != Simulator.BoundaryCondition.Type.Periodic):
    self.west[i] = None
    new_boundary_conditions.west = boundary_conditions.west
if (gj == 0 and boundary_conditions.south != Simulator.BoundaryCondition.Type.Periodic):
    self.south[i] = None
    new_boundary_conditions.south = boundary_conditions.south
if (gi == grid.grid[0]-1 and boundary_conditions.east != Simulator.BoundaryCondition.Type.Periodic):
    self.east[i] = None
    new_boundary_conditions.east = boundary_conditions.east
if (gj == grid.grid[1]-1 and boundary_conditions.north != Simulator.BoundaryCondition.Type.Periodic):
    self.north[i] = None
    new_boundary_conditions.north = boundary_conditions.north
|
||||
sim.setBoundaryConditions(new_boundary_conditions)
|
||||
|
||||
#Get number of variables
|
||||
self.nvars[i] = len(sim.getOutput().gpu_variables)
|
||||
|
||||
#Shorthands for computing extents and sizes
|
||||
gc_x = int(sim.getOutput()[0].x_halo)
|
||||
gc_y = int(sim.getOutput()[0].y_halo)
|
||||
nx = int(sim.nx)
|
||||
ny = int(sim.ny)
|
||||
|
||||
#Set regions for ghost cells to read from
|
||||
#These have the format [x0, y0, width, height]
|
||||
self.read_e[i] = np.array([ nx, 0, gc_x, ny + 2*gc_y])
|
||||
self.read_w[i] = np.array([gc_x, 0, gc_x, ny + 2*gc_y])
|
||||
self.read_n[i] = np.array([gc_x, ny, nx, gc_y])
|
||||
self.read_s[i] = np.array([gc_x, gc_y, nx, gc_y])
|
||||
|
||||
#Set regions for ghost cells to write to
|
||||
self.write_e[i] = self.read_e[i] + np.array([gc_x, 0, 0, 0])
|
||||
self.write_w[i] = self.read_w[i] - np.array([gc_x, 0, 0, 0])
|
||||
self.write_n[i] = self.read_n[i] + np.array([0, gc_y, 0, 0])
|
||||
self.write_s[i] = self.read_s[i] - np.array([0, gc_y, 0, 0])
|
||||
|
||||
#Allocate host data
|
||||
#Note that east and west also transfer ghost cells
|
||||
#whilst north/south only transfer internal cells
|
||||
#Reuses the width/height defined in the read-extents above
|
||||
self.e[i] = np.empty((self.nvars[i], self.read_e[i][3], self.read_e[i][2]), dtype=np.float32)
|
||||
self.w[i] = np.empty((self.nvars[i], self.read_w[i][3], self.read_w[i][2]), dtype=np.float32)
|
||||
self.n[i] = np.empty((self.nvars[i], self.read_n[i][3], self.read_n[i][2]), dtype=np.float32)
|
||||
self.s[i] = np.empty((self.nvars[i], self.read_s[i][3], self.read_s[i][2]), dtype=np.float32)
|
||||
|
||||
self.logger.debug("Initialized {:d} subdomains".format(len(self.sims)))
|
||||
|
||||
|
||||
def substep(self, dt, step_number):
|
||||
self.exchange()
|
||||
|
||||
for i, sim in enumerate(self.sims):
|
||||
sim.substep(dt, step_number)
|
||||
|
||||
def getOutput(self):
|
||||
# XXX: Does not return what we would expect.
|
||||
# Returns first subdomain, but we want the whole domain.
|
||||
return self.sims[0].getOutput()
|
||||
|
||||
def synchronize(self):
|
||||
for sim in self.sims:
|
||||
sim.synchronize()
|
||||
|
||||
def check(self):
|
||||
# XXX: Does not return what we would expect.
|
||||
# Checks only first subdomain, but we want to check the whole domain.
|
||||
return self.sims[0].check()
|
||||
|
||||
def computeDt(self):
|
||||
global_dt = float("inf")
|
||||
|
||||
for sim in self.sims:
|
||||
sim.context.synchronize()
|
||||
|
||||
for sim in self.sims:
|
||||
local_dt = sim.computeDt()
|
||||
if local_dt < global_dt:
|
||||
global_dt = local_dt
|
||||
self.logger.debug("Local dt: {:f}".format(local_dt))
|
||||
|
||||
self.logger.debug("Global dt: {:f}".format(global_dt))
|
||||
return global_dt
|
||||
|
||||
def getExtent(self, index=0):
|
||||
"""
|
||||
Function which returns the extent of the subdomain with index
|
||||
index in the grid
|
||||
"""
|
||||
width = self.sims[index].nx*self.sims[index].dx
|
||||
height = self.sims[index].ny*self.sims[index].dy
|
||||
i, j = self.grid.getCoordinate(index)
|
||||
x0 = i * width
|
||||
y0 = j * height
|
||||
x1 = x0 + width
|
||||
y1 = y0 + height
|
||||
return [x0, x1, y0, y1]
|
||||
|
||||
def exchange(self):
|
||||
####
|
||||
# First transfer internal cells north-south
|
||||
####
|
||||
for i in range(len(self.sims)):
|
||||
self.ns_download(i)
|
||||
|
||||
for i in range(len(self.sims)):
|
||||
self.ns_upload(i)
|
||||
|
||||
####
|
||||
# Then transfer east-west including ghost cells that have been filled in by north-south transfer above
|
||||
####
|
||||
for i in range(len(self.sims)):
|
||||
self.ew_download(i)
|
||||
|
||||
for i in range(len(self.sims)):
|
||||
self.ew_upload(i)
|
||||
|
||||
def ns_download(self, i):
|
||||
#Download from the GPU
|
||||
if self.north[i] is not None:
|
||||
for k in range(self.nvars[i]):
|
||||
# XXX: Unnecessary global sync (only need to sync with neighboring subdomain to the north)
|
||||
self.sims[i].u0[k].download(self.sims[i].stream, cpu_data=self.n[i][k,:,:], extent=self.read_n[i])
|
||||
if self.south[i] is not None:
|
||||
for k in range(self.nvars[i]):
|
||||
# XXX: Unnecessary global sync (only need to sync with neighboring subdomain to the south)
|
||||
self.sims[i].u0[k].download(self.sims[i].stream, cpu_data=self.s[i][k,:,:], extent=self.read_s[i])
|
||||
self.sims[i].stream.synchronize()
|
||||
|
||||
def ns_upload(self, i):
|
||||
#Upload to the GPU
|
||||
if self.north[i] is not None:
|
||||
for k in range(self.nvars[i]):
|
||||
self.sims[i].u0[k].upload(self.sims[i].stream, self.s[self.north[i]][k,:,:], extent=self.write_n[i])
|
||||
if self.south[i] is not None:
|
||||
for k in range(self.nvars[i]):
|
||||
self.sims[i].u0[k].upload(self.sims[i].stream, self.n[self.south[i]][k,:,:], extent=self.write_s[i])
|
||||
|
||||
def ew_download(self, i):
|
||||
#Download from the GPU
|
||||
if self.east[i] is not None:
|
||||
for k in range(self.nvars[i]):
|
||||
# XXX: Unnecessary global sync (only need to sync with neighboring subdomain to the east)
|
||||
self.sims[i].u0[k].download(self.sims[i].stream, cpu_data=self.e[i][k,:,:], extent=self.read_e[i])
|
||||
if self.west[i] is not None:
|
||||
for k in range(self.nvars[i]):
|
||||
# XXX: Unnecessary global sync (only need to sync with neighboring subdomain to the west)
|
||||
self.sims[i].u0[k].download(self.sims[i].stream, cpu_data=self.w[i][k,:,:], extent=self.read_w[i])
|
||||
self.sims[i].stream.synchronize()
|
||||
|
||||
def ew_upload(self, i):
|
||||
#Upload to the GPU
|
||||
if self.east[i] is not None:
|
||||
for k in range(self.nvars[i]):
|
||||
self.sims[i].u0[k].upload(self.sims[i].stream, self.w[self.east[i]][k,:,:], extent=self.write_e[i])
|
||||
#test_east = np.ones_like(self.e[self.east[i]][k,:,:])
|
||||
#self.sims[i].u0[k].upload(self.sims[i].stream, test_east, extent=self.write_e[i])
|
||||
if self.west[i] is not None:
|
||||
for k in range(self.nvars[i]):
|
||||
self.sims[i].u0[k].upload(self.sims[i].stream, self.e[self.west[i]][k,:,:], extent=self.write_w[i])
|
||||
#test_west = np.ones_like(self.e[self.west[i]][k,:,:])
|
||||
#self.sims[i].u0[k].upload(self.sims[i].stream, test_west, extent=self.write_w[i])
|
@ -1,413 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
This python module implements SHMEM simulator group class
|
||||
|
||||
Copyright (C) 2020 Norwegian Meteorological Institute
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
|
||||
import logging
|
||||
from GPUSimulators import Simulator, CudaContext
|
||||
import numpy as np
|
||||
|
||||
#import pycuda.driver as cuda
|
||||
from hip import hip, hiprtc
|
||||
|
||||
import time
|
||||
|
||||
class SHMEMGrid(object):
|
||||
"""
|
||||
Class which represents an SHMEM grid of GPUs. Facilitates easy communication between
|
||||
neighboring subdomains in the grid. Contains one CUDA context per subdomain.
|
||||
"""
|
||||
def hip_check(call_result):
|
||||
err = call_result[0]
|
||||
result = call_result[1:]
|
||||
if len(result) == 1:
|
||||
result = result[0]
|
||||
if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
|
||||
raise RuntimeError(str(err))
|
||||
elif (
|
||||
isinstance(err, hiprtc.hiprtcResult)
|
||||
and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
|
||||
):
|
||||
raise RuntimeError(str(err))
|
||||
return result
|
||||
|
||||
def __init__(self, ngpus=None, ndims=2):
|
||||
self.logger = logging.getLogger(__name__)
|
||||
|
||||
#cuda.init(flags=0)
|
||||
self.logger.info("Initializing HIP")
|
||||
#num_cuda_devices = cuda.Device.count()
|
||||
num_cuda_devices = hip_check(hip.hipGetDeviceCount())
|
||||
|
||||
if ngpus is None:
|
||||
ngpus = num_cuda_devices
|
||||
|
||||
# XXX: disabled for testing on single-GPU system
|
||||
#assert ngpus <= num_cuda_devices, "Trying to allocate more GPUs than are available in the system."
|
||||
#assert ngpus >= 2, "Must have at least two GPUs available to run multi-GPU simulations."
|
||||
|
||||
assert ndims == 2, "Unsupported number of dimensions. Must be two at the moment"
|
||||
|
||||
self.ngpus = ngpus
|
||||
self.ndims = ndims
|
||||
|
||||
self.grid = SHMEMGrid.getGrid(self.ngpus, self.ndims)
|
||||
|
||||
self.logger.debug("Created {:}-dimensional SHMEM grid, using {:} GPUs".format(
|
||||
self.ndims, self.ngpus))
|
||||
|
||||
# XXX: Is this a natural place to store the contexts? Consider moving contexts out of this
|
||||
# class, into notebook / calling script (shmemTesting.py)
|
||||
self.cuda_contexts = []
|
||||
|
||||
for i in range(self.ngpus):
|
||||
# XXX: disabled for testing on single-GPU system
|
||||
#self.cuda_contexts.append(CudaContext.CudaContext(device=i, autotuning=False))
|
||||
self.cuda_contexts.append(CudaContext.CudaContext(device=0, autotuning=False))
|
||||
|
||||
def getCoordinate(self, index):
|
||||
i = (index % self.grid[0])
|
||||
j = (index // self.grid[0])
|
||||
return i, j
|
||||
|
||||
def getIndex(self, i, j):
|
||||
return j*self.grid[0] + i
|
||||
|
||||
def getEast(self, index):
|
||||
i, j = self.getCoordinate(index)
|
||||
i = (i+1) % self.grid[0]
|
||||
return self.getIndex(i, j)
|
||||
|
||||
def getWest(self, index):
|
||||
i, j = self.getCoordinate(index)
|
||||
i = (i+self.grid[0]-1) % self.grid[0]
|
||||
return self.getIndex(i, j)
|
||||
|
||||
def getNorth(self, index):
|
||||
i, j = self.getCoordinate(index)
|
||||
j = (j+1) % self.grid[1]
|
||||
return self.getIndex(i, j)
|
||||
|
||||
def getSouth(self, index):
|
||||
i, j = self.getCoordinate(index)
|
||||
j = (j+self.grid[1]-1) % self.grid[1]
|
||||
return self.getIndex(i, j)
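# Quick illustration of the neighbour indexing above, assuming a hypothetical 4x2 grid
# (grid = [4, 2]); subdomain indices are row-major, so index = j*grid[0] + i.
#   getCoordinate(3) -> (3, 0)
#   getEast(3)       -> getIndex((3+1) % 4, 0) = 0    (periodic wrap in x)
#   getNorth(3)      -> getIndex(3, (0+1) % 2) = 7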
|
||||
|
||||
def getGrid(num_gpus, num_dims):
|
||||
assert(isinstance(num_gpus, int))
|
||||
assert(isinstance(num_dims, int))
|
||||
|
||||
# Adapted from https://stackoverflow.com/questions/28057307/factoring-a-number-into-roughly-equal-factors
|
||||
# Original code by https://stackoverflow.com/users/3928385/ishamael
|
||||
# Factorizes a number into n roughly equal factors
|
||||
|
||||
#Dictionary to remember already computed permutations
|
||||
memo = {}
|
||||
def dp(n, left): # returns tuple (cost, [factors])
|
||||
"""
|
||||
Recursively searches through all factorizations
|
||||
"""
|
||||
|
||||
#Already tried: return existing result
|
||||
if (n, left) in memo:
|
||||
return memo[(n, left)]
|
||||
|
||||
#Spent all factors: return number itself
|
||||
if left == 1:
|
||||
return (n, [n])
|
||||
|
||||
#Find new factor
|
||||
i = 2
|
||||
best = n
|
||||
bestTuple = [n]
|
||||
while i * i < n:
|
||||
#If factor found
|
||||
if n % i == 0:
|
||||
#Factorize remainder
|
||||
rem = dp(n // i, left - 1)
|
||||
|
||||
#If new permutation better, save it
|
||||
if rem[0] + i < best:
|
||||
best = rem[0] + i
|
||||
bestTuple = [i] + rem[1]
|
||||
i += 1
|
||||
|
||||
#Store calculation
|
||||
memo[(n, left)] = (best, bestTuple)
|
||||
return memo[(n, left)]
|
||||
|
||||
|
||||
grid = dp(num_gpus, num_dims)[1]
|
||||
|
||||
if (len(grid) < num_dims):
|
||||
#Split problematic 4
|
||||
if (4 in grid):
|
||||
grid.remove(4)
|
||||
grid.append(2)
|
||||
grid.append(2)
|
||||
|
||||
#Pad with ones to guarantee num_dims
|
||||
grid = grid + [1]*(num_dims - len(grid))
|
||||
|
||||
#Sort in descending order
|
||||
grid = np.sort(grid)
|
||||
grid = grid[::-1]
|
||||
|
||||
return grid
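# A few illustrative values for the factorization above (worked out by hand from the dp
# routine; the sorted, descending numpy array it returns is written here as a plain list):
#   SHMEMGrid.getGrid(4, 2) -> [2, 2]    (the "problematic 4" is split into 2 x 2)
#   SHMEMGrid.getGrid(6, 2) -> [3, 2]
#   SHMEMGrid.getGrid(8, 2) -> [4, 2]
#   SHMEMGrid.getGrid(7, 2) -> [7, 1]    (primes can only be padded with ones)
print(SHMEMGrid.getGrid(8, 2))    # -> [4 2]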
|
||||
|
||||
class SHMEMSimulatorGroup(object):
|
||||
"""
|
||||
Class which handles communication and synchronization between simulators in different
|
||||
contexts (typically on different GPUs)
|
||||
"""
|
||||
def __init__(self, sims, grid):
|
||||
self.logger = logging.getLogger(__name__)
|
||||
|
||||
assert(len(sims) > 1)
|
||||
|
||||
self.sims = sims
|
||||
|
||||
# XXX: This is not what was intended. Do we need extra wrapper class SHMEMSimulator?
|
||||
# See also getOutput() and check().
|
||||
#
|
||||
# SHMEMSimulatorGroup would then not have any superclass, but manage a collection of
|
||||
# SHMEMSimulators that have BaseSimulator as a superclass.
|
||||
#
|
||||
# This would also eliminate the need for all the array bookkeeping in this class.
|
||||
#
|
||||
# TODO: CONT HERE! Model shmemTesting after mpiTesting and divide existing functionality between SHMEMSimulatorGroup and SHMEMSimulator
|
||||
|
||||
autotuner = sims[0].context.autotuner
|
||||
sims[0].context.autotuner = None
|
||||
boundary_conditions = sims[0].getBoundaryConditions()
|
||||
super().__init__(sims[0].context,
|
||||
sims[0].nx, sims[0].ny,
|
||||
sims[0].dx, sims[0].dy,
|
||||
boundary_conditions,
|
||||
sims[0].cfl_scale,
|
||||
sims[0].num_substeps,
|
||||
sims[0].block_size[0], sims[0].block_size[1])
|
||||
sims[0].context.autotuner = autotuner
|
||||
|
||||
self.sims = sims
|
||||
self.grid = grid
|
||||
|
||||
self.east = [None] * len(self.sims)
|
||||
self.west = [None] * len(self.sims)
|
||||
self.north = [None] * len(self.sims)
|
||||
self.south = [None] * len(self.sims)
|
||||
|
||||
self.nvars = [None] * len(self.sims)
|
||||
|
||||
self.read_e = [None] * len(self.sims)
|
||||
self.read_w = [None] * len(self.sims)
|
||||
self.read_n = [None] * len(self.sims)
|
||||
self.read_s = [None] * len(self.sims)
|
||||
|
||||
self.write_e = [None] * len(self.sims)
|
||||
self.write_w = [None] * len(self.sims)
|
||||
self.write_n = [None] * len(self.sims)
|
||||
self.write_s = [None] * len(self.sims)
|
||||
|
||||
self.e = [None] * len(self.sims)
|
||||
self.w = [None] * len(self.sims)
|
||||
self.n = [None] * len(self.sims)
|
||||
self.s = [None] * len(self.sims)
|
||||
|
||||
for i, sim in enumerate(self.sims):
|
||||
#Get neighbor subdomain ids
|
||||
self.east[i] = grid.getEast(i)
|
||||
self.west[i] = grid.getWest(i)
|
||||
self.north[i] = grid.getNorth(i)
|
||||
self.south[i] = grid.getSouth(i)
|
||||
|
||||
#Get coordinate of this subdomain
|
||||
#and handle global boundary conditions
|
||||
new_boundary_conditions = Simulator.BoundaryCondition({
|
||||
'north': Simulator.BoundaryCondition.Type.Dirichlet,
|
||||
'south': Simulator.BoundaryCondition.Type.Dirichlet,
|
||||
'east': Simulator.BoundaryCondition.Type.Dirichlet,
|
||||
'west': Simulator.BoundaryCondition.Type.Dirichlet
|
||||
})
|
||||
gi, gj = grid.getCoordinate(i)
|
||||
if (gi == 0 and boundary_conditions.west != Simulator.BoundaryCondition.Type.Periodic):
    self.west[i] = None
    new_boundary_conditions.west = boundary_conditions.west
if (gj == 0 and boundary_conditions.south != Simulator.BoundaryCondition.Type.Periodic):
    self.south[i] = None
    new_boundary_conditions.south = boundary_conditions.south
if (gi == grid.grid[0]-1 and boundary_conditions.east != Simulator.BoundaryCondition.Type.Periodic):
    self.east[i] = None
    new_boundary_conditions.east = boundary_conditions.east
if (gj == grid.grid[1]-1 and boundary_conditions.north != Simulator.BoundaryCondition.Type.Periodic):
    self.north[i] = None
    new_boundary_conditions.north = boundary_conditions.north
|
||||
sim.setBoundaryConditions(new_boundary_conditions)
|
||||
|
||||
#Get number of variables
|
||||
self.nvars[i] = len(sim.getOutput().gpu_variables)
|
||||
|
||||
#Shorthands for computing extents and sizes
|
||||
gc_x = int(sim.getOutput()[0].x_halo)
|
||||
gc_y = int(sim.getOutput()[0].y_halo)
|
||||
nx = int(sim.nx)
|
||||
ny = int(sim.ny)
|
||||
|
||||
#Set regions for ghost cells to read from
|
||||
#These have the format [x0, y0, width, height]
|
||||
self.read_e[i] = np.array([ nx, 0, gc_x, ny + 2*gc_y])
|
||||
self.read_w[i] = np.array([gc_x, 0, gc_x, ny + 2*gc_y])
|
||||
self.read_n[i] = np.array([gc_x, ny, nx, gc_y])
|
||||
self.read_s[i] = np.array([gc_x, gc_y, nx, gc_y])
|
||||
|
||||
#Set regions for ghost cells to write to
|
||||
self.write_e[i] = self.read_e[i] + np.array([gc_x, 0, 0, 0])
|
||||
self.write_w[i] = self.read_w[i] - np.array([gc_x, 0, 0, 0])
|
||||
self.write_n[i] = self.read_n[i] + np.array([0, gc_y, 0, 0])
|
||||
self.write_s[i] = self.read_s[i] - np.array([0, gc_y, 0, 0])
|
||||
|
||||
#Allocate host data
|
||||
#Note that east and west also transfer ghost cells
|
||||
#whilst north/south only transfer internal cells
|
||||
#Reuses the width/height defined in the read-extents above
|
||||
self.e[i] = np.empty((self.nvars[i], self.read_e[i][3], self.read_e[i][2]), dtype=np.float32)
|
||||
self.w[i] = np.empty((self.nvars[i], self.read_w[i][3], self.read_w[i][2]), dtype=np.float32)
|
||||
self.n[i] = np.empty((self.nvars[i], self.read_n[i][3], self.read_n[i][2]), dtype=np.float32)
|
||||
self.s[i] = np.empty((self.nvars[i], self.read_s[i][3], self.read_s[i][2]), dtype=np.float32)
|
||||
|
||||
self.logger.debug("Initialized {:d} subdomains".format(len(self.sims)))
|
||||
|
||||
|
||||
def substep(self, dt, step_number):
|
||||
self.exchange()
|
||||
|
||||
for i, sim in enumerate(self.sims):
|
||||
sim.substep(dt, step_number)
|
||||
|
||||
def getOutput(self):
|
||||
# XXX: Does not return what we would expect.
|
||||
# Returns first subdomain, but we want the whole domain.
|
||||
return self.sims[0].getOutput()
|
||||
|
||||
def synchronize(self):
|
||||
for sim in self.sims:
|
||||
sim.synchronize()
|
||||
|
||||
def check(self):
|
||||
# XXX: Does not return what we would expect.
|
||||
# Checks only first subdomain, but we want to check the whole domain.
|
||||
return self.sims[0].check()
|
||||
|
||||
def computeDt(self):
|
||||
global_dt = float("inf")
|
||||
|
||||
for sim in self.sims:
|
||||
sim.context.synchronize()
|
||||
|
||||
for sim in self.sims:
|
||||
local_dt = sim.computeDt()
|
||||
if local_dt < global_dt:
|
||||
global_dt = local_dt
|
||||
self.logger.debug("Local dt: {:f}".format(local_dt))
|
||||
|
||||
self.logger.debug("Global dt: {:f}".format(global_dt))
|
||||
return global_dt
|
||||
|
||||
def getExtent(self, index=0):
|
||||
"""
|
||||
Function which returns the extent of the subdomain with index
|
||||
index in the grid
|
||||
"""
|
||||
width = self.sims[index].nx*self.sims[index].dx
|
||||
height = self.sims[index].ny*self.sims[index].dy
|
||||
i, j = self.grid.getCoordinate(index)
|
||||
x0 = i * width
|
||||
y0 = j * height
|
||||
x1 = x0 + width
|
||||
y1 = y0 + height
|
||||
return [x0, x1, y0, y1]
|
||||
|
||||
def exchange(self):
|
||||
####
|
||||
# First transfer internal cells north-south
|
||||
####
|
||||
for i in range(len(self.sims)):
|
||||
self.ns_download(i)
|
||||
|
||||
for i in range(len(self.sims)):
|
||||
self.ns_upload(i)
|
||||
|
||||
####
|
||||
# Then transfer east-west including ghost cells that have been filled in by north-south transfer above
|
||||
####
|
||||
for i in range(len(self.sims)):
|
||||
self.ew_download(i)
|
||||
|
||||
for i in range(len(self.sims)):
|
||||
self.ew_upload(i)
|
||||
|
||||
def ns_download(self, i):
|
||||
#Download from the GPU
|
||||
if self.north[i] is not None:
|
||||
for k in range(self.nvars[i]):
|
||||
# XXX: Unnecessary global sync (only need to sync with neighboring subdomain to the north)
|
||||
self.sims[i].u0[k].download(self.sims[i].stream, cpu_data=self.n[i][k,:,:], extent=self.read_n[i])
|
||||
if self.south[i] is not None:
|
||||
for k in range(self.nvars[i]):
|
||||
# XXX: Unnecessary global sync (only need to sync with neighboring subdomain to the south)
|
||||
self.sims[i].u0[k].download(self.sims[i].stream, cpu_data=self.s[i][k,:,:], extent=self.read_s[i])
|
||||
#self.sims[i].stream.synchronize()
|
||||
hip_check(hip.hipStreamSynchronize(self.sims[i].stream))
|
||||
|
||||
|
||||
def ns_upload(self, i):
|
||||
#Upload to the GPU
|
||||
if self.north[i] is not None:
|
||||
for k in range(self.nvars[i]):
|
||||
self.sims[i].u0[k].upload(self.sims[i].stream, self.s[self.north[i]][k,:,:], extent=self.write_n[i])
|
||||
if self.south[i] is not None:
|
||||
for k in range(self.nvars[i]):
|
||||
self.sims[i].u0[k].upload(self.sims[i].stream, self.n[self.south[i]][k,:,:], extent=self.write_s[i])
|
||||
|
||||
def ew_download(self, i):
|
||||
#Download from the GPU
|
||||
if self.east[i] is not None:
|
||||
for k in range(self.nvars[i]):
|
||||
# XXX: Unnecessary global sync (only need to sync with neighboring subdomain to the east)
|
||||
self.sims[i].u0[k].download(self.sims[i].stream, cpu_data=self.e[i][k,:,:], extent=self.read_e[i])
|
||||
if self.west[i] is not None:
|
||||
for k in range(self.nvars[i]):
|
||||
# XXX: Unnecessary global sync (only need to sync with neighboring subdomain to the west)
|
||||
self.sims[i].u0[k].download(self.sims[i].stream, cpu_data=self.w[i][k,:,:], extent=self.read_w[i])
|
||||
#self.sims[i].stream.synchronize()
|
||||
hip_check(hip.hipStreamSynchronize(self.sims[i].stream))
|
||||
|
||||
def ew_upload(self, i):
|
||||
#Upload to the GPU
|
||||
if self.east[i] is not None:
|
||||
for k in range(self.nvars[i]):
|
||||
self.sims[i].u0[k].upload(self.sims[i].stream, self.w[self.east[i]][k,:,:], extent=self.write_e[i])
|
||||
#test_east = np.ones_like(self.e[self.east[i]][k,:,:])
|
||||
#self.sims[i].u0[k].upload(self.sims[i].stream, test_east, extent=self.write_e[i])
|
||||
if self.west[i] is not None:
|
||||
for k in range(self.nvars[i]):
|
||||
self.sims[i].u0[k].upload(self.sims[i].stream, self.e[self.west[i]][k,:,:], extent=self.write_w[i])
|
||||
#test_west = np.ones_like(self.e[self.west[i]][k,:,:])
|
||||
#self.sims[i].u0[k].upload(self.sims[i].stream, test_west, extent=self.write_w[i])
|
@ -1,286 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
This python module implements the classical Lax-Friedrichs numerical
|
||||
scheme for the shallow water equations
|
||||
|
||||
Copyright (C) 2016 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
#Import packages we need
|
||||
import numpy as np
|
||||
import logging
|
||||
from enum import IntEnum
|
||||
|
||||
#import pycuda.compiler as cuda_compiler
|
||||
#import pycuda.gpuarray
|
||||
#import pycuda.driver as cuda
|
||||
|
||||
from hip import hip, hiprtc
|
||||
|
||||
from GPUSimulators import Common
|
||||
|
||||
|
||||
class BoundaryCondition(object):
|
||||
"""
|
||||
Class for holding boundary conditions for global boundaries
|
||||
"""
|
||||
|
||||
|
||||
class Type(IntEnum):
|
||||
"""
|
||||
Enum that describes the different types of boundary conditions
|
||||
WARNING: MUST MATCH THAT OF common.h IN CUDA
|
||||
"""
|
||||
Dirichlet = 0
Neumann = 1
Periodic = 2
Reflective = 3
|
||||
|
||||
def __init__(self, types={
|
||||
'north': Type.Reflective,
|
||||
'south': Type.Reflective,
|
||||
'east': Type.Reflective,
|
||||
'west': Type.Reflective
|
||||
}):
|
||||
"""
|
||||
Constructor
|
||||
"""
|
||||
self.north = types['north']
|
||||
self.south = types['south']
|
||||
self.east = types['east']
|
||||
self.west = types['west']
|
||||
|
||||
if (self.north == BoundaryCondition.Type.Neumann \
|
||||
or self.south == BoundaryCondition.Type.Neumann \
|
||||
or self.east == BoundaryCondition.Type.Neumann \
|
||||
or self.west == BoundaryCondition.Type.Neumann):
|
||||
raise(NotImplementedError("Neumann boundary condition not supported"))
|
||||
|
||||
def __str__(self):
|
||||
return '[north={:s}, south={:s}, east={:s}, west={:s}]'.format(str(self.north), str(self.south), str(self.east), str(self.west))
|
||||
|
||||
|
||||
def asCodedInt(self):
|
||||
"""
|
||||
Helper function which packs four boundary conditions into one integer
|
||||
"""
|
||||
bc = 0
|
||||
bc = bc | (self.north & 0x0000000F) << 24
|
||||
bc = bc | (self.south & 0x0000000F) << 16
|
||||
bc = bc | (self.east & 0x0000000F) << 8
|
||||
bc = bc | (self.west & 0x0000000F) << 0
|
||||
|
||||
#for t in types:
|
||||
# print("{0:s}, {1:d}, {1:032b}, {1:08b}".format(t, types[t]))
|
||||
#print("bc: {0:032b}".format(bc))
|
||||
|
||||
return np.int32(bc)
|
||||
|
||||
def getTypes(bc):
|
||||
types = {}
|
||||
types['north'] = BoundaryCondition.Type((bc >> 24) & 0x0000000F)
|
||||
types['south'] = BoundaryCondition.Type((bc >> 16) & 0x0000000F)
|
||||
types['east'] = BoundaryCondition.Type((bc >> 8) & 0x0000000F)
|
||||
types['west'] = BoundaryCondition.Type((bc >> 0) & 0x0000000F)
|
||||
return types
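# Worked example of the packing above (Type values: Dirichlet=0, Neumann=1, Periodic=2,
# Reflective=3). Reflective north/south with periodic east/west round-trips as follows;
# the concrete numbers are for illustration only.
bc = BoundaryCondition({
    'north': BoundaryCondition.Type.Reflective,    # 3 -> bits 24-27
    'south': BoundaryCondition.Type.Reflective,    # 3 -> bits 16-19
    'east':  BoundaryCondition.Type.Periodic,      # 2 -> bits  8-11
    'west':  BoundaryCondition.Type.Periodic       # 2 -> bits  0-3
})
coded = bc.asCodedInt()                    # np.int32(0x03030202) == 50528770
types = BoundaryCondition.getTypes(coded)
assert types['east'] == BoundaryCondition.Type.Periodic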
|
||||
|
||||
|
||||
|
||||
class BaseSimulator(object):
|
||||
|
||||
def hip_check(call_result):
|
||||
err = call_result[0]
|
||||
result = call_result[1:]
|
||||
if len(result) == 1:
|
||||
result = result[0]
|
||||
if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
|
||||
raise RuntimeError(str(err))
|
||||
return result
|
||||
|
||||
def __init__(self,
|
||||
context,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
boundary_conditions,
|
||||
cfl_scale,
|
||||
num_substeps,
|
||||
block_width, block_height):
|
||||
"""
|
||||
Initialization routine
|
||||
context: GPU context to use
|
||||
kernel_wrapper: wrapper function of GPU kernel
|
||||
h0: Water depth incl ghost cells, (nx+1)*(ny+1) cells
|
||||
hu0: Initial momentum along x-axis incl ghost cells, (nx+1)*(ny+1) cells
|
||||
hv0: Initial momentum along y-axis incl ghost cells, (nx+1)*(ny+1) cells
|
||||
nx: Number of cells along x-axis
|
||||
ny: Number of cells along y-axis
|
||||
dx: Grid cell spacing along x-axis (20 000 m)
|
||||
dy: Grid cell spacing along y-axis (20 000 m)
|
||||
dt: Size of each timestep (90 s)
|
||||
cfl_scale: Courant number
|
||||
num_substeps: Number of substeps to perform for a full step
|
||||
"""
|
||||
#Get logger
|
||||
self.logger = logging.getLogger(__name__ + "." + self.__class__.__name__)
|
||||
|
||||
#Save input parameters
|
||||
#Notice that we need to specify them in the correct data format for the
|
||||
#GPU kernel
|
||||
self.context = context
|
||||
self.nx = np.int32(nx)
|
||||
self.ny = np.int32(ny)
|
||||
self.dx = np.float32(dx)
|
||||
self.dy = np.float32(dy)
|
||||
self.setBoundaryConditions(boundary_conditions)
|
||||
self.cfl_scale = cfl_scale
|
||||
self.num_substeps = num_substeps
|
||||
|
||||
#Handle autotuning block size
|
||||
if (self.context.autotuner):
|
||||
peak_configuration = self.context.autotuner.get_peak_performance(self.__class__)
|
||||
block_width = int(peak_configuration["block_width"])
|
||||
block_height = int(peak_configuration["block_height"])
|
||||
self.logger.debug("Used autotuning to get block size [%d x %d]", block_width, block_height)
|
||||
|
||||
#Compute kernel launch parameters
|
||||
self.block_size = (block_width, block_height, 1)
|
||||
self.grid_size = (
|
||||
int(np.ceil(self.nx / float(self.block_size[0]))),
|
||||
int(np.ceil(self.ny / float(self.block_size[1])))
|
||||
)
|
||||
|
||||
#Create a CUDA stream
|
||||
#self.stream = cuda.Stream()
|
||||
#self.internal_stream = cuda.Stream()
|
||||
self.stream = hip_check(hip.hipStreamCreate())
|
||||
self.internal_stream = hip_check(hip.hipStreamCreate())
|
||||
|
||||
#Keep track of simulation time and number of timesteps
|
||||
self.t = 0.0
|
||||
self.nt = 0
|
||||
|
||||
|
||||
def __str__(self):
|
||||
return "{:s} [{:d}x{:d}]".format(self.__class__.__name__, self.nx, self.ny)
|
||||
|
||||
|
||||
def simulate(self, t, dt=None):
|
||||
"""
|
||||
Function which simulates t_end seconds using the step function
|
||||
Requires that the step() function is implemented in the subclasses
|
||||
"""
|
||||
|
||||
printer = Common.ProgressPrinter(t)
|
||||
|
||||
t_start = self.simTime()
|
||||
t_end = t_start + t
|
||||
|
||||
update_dt = True
|
||||
if (dt is not None):
|
||||
update_dt = False
|
||||
self.dt = dt
|
||||
|
||||
while(self.simTime() < t_end):
|
||||
# Update dt every 100 timesteps and cross your fingers it works
|
||||
# for the next 100
|
||||
if (update_dt and (self.simSteps() % 100 == 0)):
|
||||
self.dt = self.computeDt()*self.cfl_scale
|
||||
|
||||
# Compute timestep for "this" iteration (i.e., shorten last timestep)
|
||||
current_dt = np.float32(min(self.dt, t_end-self.simTime()))
|
||||
|
||||
# Stop if end reached (should not happen)
|
||||
if (current_dt <= 0.0):
|
||||
self.logger.warning("Timestep size {:d} is less than or equal to zero!".format(self.simSteps()))
|
||||
break
|
||||
|
||||
# Step forward in time
|
||||
self.step(current_dt)
|
||||
|
||||
#Print info
|
||||
print_string = printer.getPrintString(self.simTime() - t_start)
|
||||
if (print_string):
|
||||
self.logger.info("%s: %s", self, print_string)
|
||||
try:
|
||||
self.check()
|
||||
except AssertionError as e:
|
||||
e.args += ("Step={:d}, time={:f}".format(self.simSteps(), self.simTime()),)
|
||||
raise
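# The adaptive stepping above is driven entirely from simulate(); a typical driver for a
# concrete subclass (such as the WAF simulator further down) looks roughly like this.
# All argument values below are placeholders, not taken from the repository.
sim = WAF(context, h0, hu0, hv0,
          nx=256, ny=256, dx=100.0, dy=100.0, g=9.81)
sim.simulate(3600.0)          # advance one hour of simulation time; dt is re-estimated every 100 steps
results = sim.download()      # copy the conserved variables back to the host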
|
||||
|
||||
|
||||
def step(self, dt):
|
||||
"""
|
||||
Function which performs one single timestep of size dt
|
||||
"""
|
||||
for i in range(self.num_substeps):
|
||||
self.substep(dt, i)
|
||||
|
||||
self.t += dt
|
||||
self.nt += 1
|
||||
|
||||
def download(self, variables=None):
|
||||
return self.getOutput().download(self.stream, variables)
|
||||
|
||||
def synchronize(self):
|
||||
#self.stream.synchronize()
|
||||
#Synchronize the stream to ensure all operations in the stream are complete
|
||||
hip_check(hip.hipStreamSynchronize(self.stream))
|
||||
|
||||
def simTime(self):
|
||||
return self.t
|
||||
|
||||
def simSteps(self):
|
||||
return self.nt
|
||||
|
||||
def getExtent(self):
|
||||
return [0, 0, self.nx*self.dx, self.ny*self.dy]
|
||||
|
||||
def setBoundaryConditions(self, boundary_conditions):
|
||||
self.logger.debug("Boundary conditions set to {:s}".format(str(boundary_conditions)))
|
||||
self.boundary_conditions = boundary_conditions.asCodedInt()
|
||||
|
||||
def getBoundaryConditions(self):
|
||||
return BoundaryCondition(BoundaryCondition.getTypes(self.boundary_conditions))
|
||||
|
||||
def substep(self, dt, step_number):
|
||||
"""
|
||||
Function which performs one single substep with stepsize dt
|
||||
"""
|
||||
raise(NotImplementedError("Needs to be implemented in subclass"))
|
||||
|
||||
def getOutput(self):
|
||||
raise(NotImplementedError("Needs to be implemented in subclass"))
|
||||
|
||||
def check(self):
|
||||
self.logger.warning("check() is not implemented - please implement")
|
||||
#raise(NotImplementedError("Needs to be implemented in subclass"))
|
||||
|
||||
def computeDt(self):
|
||||
raise(NotImplementedError("Needs to be implemented in subclass"))
|
||||
|
||||
|
||||
|
||||
|
||||
def stepOrderToCodedInt(step, order):
|
||||
"""
|
||||
Helper function which packs the step and order into a single integer
|
||||
"""
|
||||
step_order = (step << 16) | (order & 0x0000ffff)
|
||||
#print("Step: {0:032b}".format(step))
|
||||
#print("Order: {0:032b}".format(order))
|
||||
#print("Mix: {0:032b}".format(step_order))
|
||||
return np.int32(step_order)
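# For example, step 1 of a second-order scheme packs to (1 << 16) | 2 = 0x00010002 = 65538,
# and a kernel can unpack it again with a shift and a mask:
step_order = stepOrderToCodedInt(step=1, order=2)    # np.int32(65538)
step = int(step_order) >> 16              # -> 1
order = int(step_order) & 0x0000ffff      # -> 2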
|
@ -1,241 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
This python module implements the Weighted average flux (WAF) described in
|
||||
E. Toro, Shock-Capturing methods for free-surface shallow flows, 2001
|
||||
|
||||
Copyright (C) 2016 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
#Import packages we need
|
||||
from GPUSimulators import Simulator, Common
|
||||
from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
|
||||
import numpy as np
|
||||
import os
import ctypes
|
||||
|
||||
#from pycuda import gpuarray
|
||||
from hip import hip, hiprtc, hipblas
|
||||
|
||||
|
||||
|
||||
"""
|
||||
Class that solves the SW equations using the Weighted Average Flux (WAF) scheme
|
||||
"""
|
||||
class WAF (Simulator.BaseSimulator):
|
||||
|
||||
"""
|
||||
Initialization routine
|
||||
h0: Water depth incl ghost cells, (nx+1)*(ny+1) cells
|
||||
hu0: Initial momentum along x-axis incl ghost cells, (nx+1)*(ny+1) cells
|
||||
hv0: Initial momentum along y-axis incl ghost cells, (nx+1)*(ny+1) cells
|
||||
nx: Number of cells along x-axis
|
||||
ny: Number of cells along y-axis
|
||||
dx: Grid cell spacing along x-axis (20 000 m)
|
||||
dy: Grid cell spacing along y-axis (20 000 m)
|
||||
dt: Size of each timestep (90 s)
|
||||
g: Gravitational acceleration (9.81 m/s^2)
|
||||
"""
|
||||
|
||||
def hip_check(call_result):
|
||||
err = call_result[0]
|
||||
result = call_result[1:]
|
||||
if len(result) == 1:
|
||||
result = result[0]
|
||||
if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
|
||||
raise RuntimeError(str(err))
|
||||
elif (
|
||||
isinstance(err, hiprtc.hiprtcResult)
|
||||
and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
|
||||
):
|
||||
raise RuntimeError(str(err))
|
||||
return result
|
||||
|
||||
def __init__(self,
|
||||
context,
|
||||
h0, hu0, hv0,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
g,
|
||||
cfl_scale=0.9,
|
||||
boundary_conditions=BoundaryCondition(),
|
||||
block_width=16, block_height=16):
|
||||
|
||||
# Call super constructor
|
||||
super().__init__(context,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
boundary_conditions,
|
||||
cfl_scale,
|
||||
2,
|
||||
block_width, block_height);
|
||||
self.g = np.float32(g)
|
||||
|
||||
#Get kernels
|
||||
# module = context.get_module("cuda/SWE2D_WAF.cu",
|
||||
# defines={
|
||||
# 'BLOCK_WIDTH': self.block_size[0],
|
||||
# 'BLOCK_HEIGHT': self.block_size[1]
|
||||
# },
|
||||
# compile_args={
|
||||
# 'no_extern_c': True,
|
||||
# 'options': ["--use_fast_math"],
|
||||
# },
|
||||
# jit_compile_args={})
|
||||
# self.kernel = module.get_function("WAFKernel")
|
||||
# self.kernel.prepare("iiffffiiPiPiPiPiPiPiP")
|
||||
|
||||
kernel_file_path = os.path.abspath(os.path.join('cuda', 'SWE2D_WAF.cu.hip'))
|
||||
with open(kernel_file_path, 'r') as file:
|
||||
kernel_source = file.read()
|
||||
|
||||
prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"WAFKernel", 0, [], []))
|
||||
|
||||
props = hip.hipDeviceProp_t()
|
||||
hip_check(hip.hipGetDeviceProperties(props,0))
|
||||
arch = props.gcnArchName
|
||||
|
||||
print(f"Compiling kernel .WAFKernel. for {arch}")
|
||||
|
||||
cflags = [b"--offload-arch="+arch]
|
||||
err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
|
||||
if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
|
||||
log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
|
||||
log = bytearray(log_size)
|
||||
hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
|
||||
raise RuntimeError(log.decode())
|
||||
code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
|
||||
code = bytearray(code_size)
|
||||
hip_check(hiprtc.hiprtcGetCode(prog, code))
|
||||
#Keep the module and kernel handles alive on the instance so substepDimsplit() can launch the kernel
self.module = hip_check(hip.hipModuleLoadData(code))

self.kernel = hip_check(hip.hipModuleGetFunction(self.module, b"WAFKernel"))
|
||||
|
||||
#Create data by uploading to device
|
||||
self.u0 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
2, 2,
|
||||
[h0, hu0, hv0])
|
||||
self.u1 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
2, 2,
|
||||
[None, None, None])
|
||||
#self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
|
||||
data_h = np.empty(self.grid_size, dtype=np.float32)
|
||||
num_bytes = data_h.size * data_h.itemsize
|
||||
self.cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
|
||||
typestr="float32",shape=self.grid_size)
|
||||
|
||||
dt_x = np.min(self.dx / (np.abs(hu0/h0) + np.sqrt(g*h0)))
|
||||
dt_y = np.min(self.dy / (np.abs(hv0/h0) + np.sqrt(g*h0)))
|
||||
dt = min(dt_x, dt_y)
|
||||
self.cfl_data.fill(dt, stream=self.stream)
|
||||
|
||||
def substep(self, dt, step_number):
|
||||
self.substepDimsplit(dt*0.5, step_number)
|
||||
|
||||
def substepDimsplit(self, dt, substep):
|
||||
# self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
|
||||
# self.nx, self.ny,
|
||||
# self.dx, self.dy, dt,
|
||||
# self.g,
|
||||
# substep,
|
||||
# self.boundary_conditions,
|
||||
# self.u0[0].data.gpudata, self.u0[0].data.strides[0],
|
||||
# self.u0[1].data.gpudata, self.u0[1].data.strides[0],
|
||||
# self.u0[2].data.gpudata, self.u0[2].data.strides[0],
|
||||
# self.u1[0].data.gpudata, self.u1[0].data.strides[0],
|
||||
# self.u1[1].data.gpudata, self.u1[1].data.strides[0],
|
||||
# self.u1[2].data.gpudata, self.u1[2].data.strides[0],
|
||||
# self.cfl_data.gpudata)
|
||||
|
||||
#launch kernel
|
||||
hip_check(
|
||||
hip.hipModuleLaunchKernel(
|
||||
self.kernel,
|
||||
*self.grid_size,
|
||||
*self.block_size,
|
||||
sharedMemBytes=0,
|
||||
stream=self.stream,
|
||||
kernelParams=None,
|
||||
extra=( # pass kernel's arguments
|
||||
ctypes.c_int(self.nx), ctypes.c_int(self.ny),
|
||||
ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
|
||||
ctypes.c_float(self.g),
|
||||
ctypes.c_int(substep),
|
||||
ctypes.c_int(self.boundary_conditions),
|
||||
ctypes.c_float(self.u0[0].data), ctypes.c_float(self.u0[0].data.strides[0]),
|
||||
ctypes.c_float(self.u0[1].data), ctypes.c_float(self.u0[1].data.strides[0]),
|
||||
ctypes.c_float(self.u0[2].data), ctypes.c_float(self.u0[2].data.strides[0]),
|
||||
ctypes.c_float(self.u1[0].data), ctypes.c_float(self.u1[0].data.strides[0]),
|
||||
ctypes.c_float(self.u1[1].data), ctypes.c_float(self.u1[1].data.strides[0]),
|
||||
ctypes.c_float(self.u1[2].data), ctypes.c_float(self.u1[2].data.strides[0]),
|
||||
self.cfl_data
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
hip_check(hip.hipDeviceSynchronize())

self.u0, self.u1 = self.u1, self.u0

print("--Launching Kernel WAFKernel is ok")
|
||||
|
||||
def getOutput(self):
|
||||
return self.u0
|
||||
|
||||
def check(self):
|
||||
self.u0.check()
|
||||
self.u1.check()
|
||||
|
||||
# computing min with hipblas: the output is an index
|
||||
def min_hipblas(self, num_elements, cfl_data, stream):
|
||||
num_bytes = num_elements * np.dtype(np.float32).itemsize
|
||||
num_bytes_i = np.dtype(np.int32).itemsize
|
||||
indx_d = hip_check(hip.hipMalloc(num_bytes_i))
|
||||
indx_h = np.zeros(1, dtype=np.int32)
|
||||
x_temp = np.zeros(num_elements, dtype=np.float32)
|
||||
|
||||
#print("--size.data:", cfl_data.size)
|
||||
handle = hip_check(hipblas.hipblasCreate())
|
||||
|
||||
#hip_check(hipblas.hipblasGetStream(handle, stream))
|
||||
#"incx" [int] specifies the increment for the elements of x. incx must be > 0.
|
||||
hip_check(hipblas.hipblasIsamin(handle, num_elements, cfl_data, 1, indx_d))
|
||||
|
||||
# destruction of handle
|
||||
hip_check(hipblas.hipblasDestroy(handle))
|
||||
|
||||
# copy result (stored in indx_d) back to the host (store in indx_h)
|
||||
hip_check(hip.hipMemcpyAsync(indx_h,indx_d,num_bytes_i,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
|
||||
hip_check(hip.hipMemcpyAsync(x_temp,cfl_data,num_bytes,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))
|
||||
#hip_check(hip.hipMemsetAsync(cfl_data,0,num_bytes,self.stream))
|
||||
hip_check(hip.hipStreamSynchronize(stream))
|
||||
|
||||
min_value = x_temp.flatten()[indx_h[0]-1]
|
||||
|
||||
# clean up the temporary index buffer; the stream and cfl_data belong to the caller and are reused
hip_check(hip.hipFree(indx_d))

return min_value
|
||||
|
||||
def computeDt(self):
|
||||
#max_dt = gpuarray.min(self.cfl_data, stream=self.stream).get();
|
||||
max_dt = self.min_hipblas(self.cfl_data.size, self.cfl_data, self.stream)
|
||||
return max_dt*0.5
|
@ -1,5 +0,0 @@
|
||||
#!/bin/env python
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
|
||||
# Nothing general to do
|
Binary file not shown.
Binary file not shown.
Binary file not shown.
@ -1,250 +0,0 @@
|
||||
/*
|
||||
This kernel implements the Central Upwind flux function to
|
||||
solve the Euler equations
|
||||
|
||||
Copyright (C) 2018 SINTEF Digital
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
|
||||
|
||||
#include "common.h"
|
||||
#include "EulerCommon.h"
|
||||
#include "limiters.h"
|
||||
|
||||
|
||||
__device__
|
||||
void computeFluxF(float Q[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float Qx[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float F[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
const float gamma_, const float dx_, const float dt_) {
|
||||
for (int j=threadIdx.y; j<BLOCK_HEIGHT+4; j+=BLOCK_HEIGHT) {
|
||||
for (int i=threadIdx.x+1; i<BLOCK_WIDTH+2; i+=BLOCK_WIDTH) {
|
||||
// Reconstruct point values of Q at the left and right hand side
|
||||
// of the cell for both the left (i) and right (i+1) cell
|
||||
const float4 Q_rl = make_float4(Q[0][j][i+1] - 0.5f*Qx[0][j][i+1],
|
||||
Q[1][j][i+1] - 0.5f*Qx[1][j][i+1],
|
||||
Q[2][j][i+1] - 0.5f*Qx[2][j][i+1],
|
||||
Q[3][j][i+1] - 0.5f*Qx[3][j][i+1]);
|
||||
const float4 Q_rr = make_float4(Q[0][j][i+1] + 0.5f*Qx[0][j][i+1],
|
||||
Q[1][j][i+1] + 0.5f*Qx[1][j][i+1],
|
||||
Q[2][j][i+1] + 0.5f*Qx[2][j][i+1],
|
||||
Q[3][j][i+1] + 0.5f*Qx[3][j][i+1]);
|
||||
|
||||
const float4 Q_ll = make_float4(Q[0][j][i] - 0.5f*Qx[0][j][i],
|
||||
Q[1][j][i] - 0.5f*Qx[1][j][i],
|
||||
Q[2][j][i] - 0.5f*Qx[2][j][i],
|
||||
Q[3][j][i] - 0.5f*Qx[3][j][i]);
|
||||
const float4 Q_lr = make_float4(Q[0][j][i] + 0.5f*Qx[0][j][i],
|
||||
Q[1][j][i] + 0.5f*Qx[1][j][i],
|
||||
Q[2][j][i] + 0.5f*Qx[2][j][i],
|
||||
Q[3][j][i] + 0.5f*Qx[3][j][i]);
|
||||
|
||||
|
||||
//Evolve half a timestep (predictor step)
|
||||
const float4 Q_r_bar = Q_rl + dt_/(2.0f*dx_) * (F_func(Q_rl, gamma_) - F_func(Q_rr, gamma_));
|
||||
const float4 Q_l_bar = Q_lr + dt_/(2.0f*dx_) * (F_func(Q_ll, gamma_) - F_func(Q_lr, gamma_));
|
||||
|
||||
// Compute flux based on prediction
|
||||
//const float4 flux = CentralUpwindFlux(Q_l_bar, Q_r_bar, gamma_);
|
||||
const float4 flux = HLL_flux(Q_l_bar, Q_r_bar, gamma_);
|
||||
|
||||
//Write to shared memory
|
||||
F[0][j][i] = flux.x;
|
||||
F[1][j][i] = flux.y;
|
||||
F[2][j][i] = flux.z;
|
||||
F[3][j][i] = flux.w;
|
||||
}
|
||||
}
|
||||
}
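// The half-timestep evolution above is a MUSCL-Hancock-type predictor applied to the
// reconstructed face values (illustration only, not part of the original kernel source):
//   Qbar_L = Q_L + dt/(2*dx) * (F(Q_L) - F(Q_R))
//   Qbar_R = Q_R + dt/(2*dx) * (F(Q_L) - F(Q_R))
// where Q_L and Q_R are the west- and east-face values of a single cell. The interface flux
// is then computed from Qbar_R of the left cell and Qbar_L of the right cell with the
// HLL (or central-upwind) solver.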
|
||||
|
||||
__device__
|
||||
void computeFluxG(float Q[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float Qy[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float G[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
const float gamma_, const float dy_, const float dt_) {
|
||||
for (int j=threadIdx.y+1; j<BLOCK_HEIGHT+2; j+=BLOCK_HEIGHT) {
|
||||
for (int i=threadIdx.x; i<BLOCK_WIDTH+4; i+=BLOCK_WIDTH) {
|
||||
// Reconstruct point values of Q at the left and right hand side
|
||||
// of the cell for both the left (i) and right (i+1) cell
|
||||
//Note that hu and hv are swapped ("transposing" the domain)!
|
||||
const float4 Q_rl = make_float4(Q[0][j+1][i] - 0.5f*Qy[0][j+1][i],
|
||||
Q[2][j+1][i] - 0.5f*Qy[2][j+1][i],
|
||||
Q[1][j+1][i] - 0.5f*Qy[1][j+1][i],
|
||||
Q[3][j+1][i] - 0.5f*Qy[3][j+1][i]);
|
||||
const float4 Q_rr = make_float4(Q[0][j+1][i] + 0.5f*Qy[0][j+1][i],
|
||||
Q[2][j+1][i] + 0.5f*Qy[2][j+1][i],
|
||||
Q[1][j+1][i] + 0.5f*Qy[1][j+1][i],
|
||||
Q[3][j+1][i] + 0.5f*Qy[3][j+1][i]);
|
||||
|
||||
const float4 Q_ll = make_float4(Q[0][j][i] - 0.5f*Qy[0][j][i],
|
||||
Q[2][j][i] - 0.5f*Qy[2][j][i],
|
||||
Q[1][j][i] - 0.5f*Qy[1][j][i],
|
||||
Q[3][j][i] - 0.5f*Qy[3][j][i]);
|
||||
const float4 Q_lr = make_float4(Q[0][j][i] + 0.5f*Qy[0][j][i],
|
||||
Q[2][j][i] + 0.5f*Qy[2][j][i],
|
||||
Q[1][j][i] + 0.5f*Qy[1][j][i],
|
||||
Q[3][j][i] + 0.5f*Qy[3][j][i]);
|
||||
|
||||
//Evolve half a timestep (predictor step)
|
||||
const float4 Q_r_bar = Q_rl + dt_/(2.0f*dy_) * (F_func(Q_rl, gamma_) - F_func(Q_rr, gamma_));
|
||||
const float4 Q_l_bar = Q_lr + dt_/(2.0f*dy_) * (F_func(Q_ll, gamma_) - F_func(Q_lr, gamma_));
|
||||
|
||||
// Compute flux based on prediction
|
||||
const float4 flux = CentralUpwindFlux(Q_l_bar, Q_r_bar, gamma_);
|
||||
//const float4 flux = HLL_flux(Q_l_bar, Q_r_bar, gamma_);
|
||||
|
||||
//Write to shared memory
|
||||
//Note that we here swap hu and hv back to the original
|
||||
G[0][j][i] = flux.x;
|
||||
G[1][j][i] = flux.z;
|
||||
G[2][j][i] = flux.y;
|
||||
G[3][j][i] = flux.w;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* This unsplit kernel computes the 2D numerical scheme with a TVD RK2 time integration scheme
|
||||
*/
|
||||
extern "C" {
|
||||
|
||||
__global__ void KP07DimsplitKernel(
|
||||
int nx_, int ny_,
|
||||
float dx_, float dy_, float dt_,
|
||||
float g_,
|
||||
float gamma_,
|
||||
|
||||
float theta_,
|
||||
|
||||
int step_,
|
||||
int boundary_conditions_,
|
||||
|
||||
//Input h^n
|
||||
float* rho0_ptr_, int rho0_pitch_,
|
||||
float* rho_u0_ptr_, int rho_u0_pitch_,
|
||||
float* rho_v0_ptr_, int rho_v0_pitch_,
|
||||
float* E0_ptr_, int E0_pitch_,
|
||||
|
||||
//Output h^{n+1}
|
||||
float* rho1_ptr_, int rho1_pitch_,
|
||||
float* rho_u1_ptr_, int rho_u1_pitch_,
|
||||
float* rho_v1_ptr_, int rho_v1_pitch_,
|
||||
float* E1_ptr_, int E1_pitch_,
|
||||
|
||||
//Output CFL
|
||||
float* cfl_,
|
||||
|
||||
//Subarea of internal domain to compute
|
||||
int x0=0, int y0=0,
|
||||
int x1=0, int y1=0) {
|
||||
|
||||
if(x1 == 0)
|
||||
x1 = nx_;
|
||||
|
||||
if(y1 == 0)
|
||||
y1 = ny_;
|
||||
|
||||
const unsigned int w = BLOCK_WIDTH;
|
||||
const unsigned int h = BLOCK_HEIGHT;
|
||||
const unsigned int gc_x = 2;
|
||||
const unsigned int gc_y = 2;
|
||||
const unsigned int vars = 4;
|
||||
|
||||
//Shared memory variables
|
||||
__shared__ float Q[4][h+2*gc_y][w+2*gc_x];
|
||||
__shared__ float Qx[4][h+2*gc_y][w+2*gc_x];
|
||||
__shared__ float F[4][h+2*gc_y][w+2*gc_x];
|
||||
|
||||
//Read into shared memory
|
||||
readBlock<w, h, gc_x, gc_y, 1, 1>( rho0_ptr_, rho0_pitch_, Q[0], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
|
||||
readBlock<w, h, gc_x, gc_y, -1, 1>(rho_u0_ptr_, rho_u0_pitch_, Q[1], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
|
||||
readBlock<w, h, gc_x, gc_y, 1, -1>(rho_v0_ptr_, rho_v0_pitch_, Q[2], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
|
||||
readBlock<w, h, gc_x, gc_y, 1, 1>( E0_ptr_, E0_pitch_, Q[3], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
|
||||
|
||||
//Step 0 => evolve x first, then y
|
||||
if (step_ == 0) {
|
||||
//Compute fluxes along the x axis and evolve
|
||||
minmodSlopeX<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxF(Q, Qx, F, gamma_, dx_, dt_);
|
||||
__syncthreads();
|
||||
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Compute fluxes along the y axis and evolve
|
||||
minmodSlopeY<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxG(Q, Qx, F, gamma_, dy_, dt_);
|
||||
__syncthreads();
|
||||
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Gravity source term
|
||||
if (g_ > 0.0f) {
|
||||
const int i = threadIdx.x + gc_x;
|
||||
const int j = threadIdx.y + gc_y;
|
||||
const float rho_v = Q[2][j][i];
|
||||
Q[2][j][i] -= g_*Q[0][j][i]*dt_;
|
||||
Q[3][j][i] -= g_*rho_v*dt_;
|
||||
__syncthreads();
|
||||
}
|
||||
}
|
||||
//Step 1 => evolve y first, then x
|
||||
else {
|
||||
//Compute fluxes along the y axis and evolve
|
||||
minmodSlopeY<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxG(Q, Qx, F, gamma_, dy_, dt_);
|
||||
__syncthreads();
|
||||
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Compute fluxes along the x axis and evolve
|
||||
minmodSlopeX<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxF(Q, Qx, F, gamma_, dx_, dt_);
|
||||
__syncthreads();
|
||||
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Gravity source term
|
||||
if (g_ > 0.0f) {
|
||||
const int i = threadIdx.x + gc_x;
|
||||
const int j = threadIdx.y + gc_y;
|
||||
const float rho_v = Q[2][j][i];
|
||||
Q[2][j][i] -= g_*Q[0][j][i]*dt_;
|
||||
Q[3][j][i] -= g_*rho_v*dt_;
|
||||
__syncthreads();
|
||||
}
|
||||
}
// Write to main memory for all internal cells
|
||||
writeBlock<w, h, gc_x, gc_y>( rho1_ptr_, rho1_pitch_, Q[0], nx_, ny_, 0, 1, x0, y0, x1, y1);
|
||||
writeBlock<w, h, gc_x, gc_y>(rho_u1_ptr_, rho_u1_pitch_, Q[1], nx_, ny_, 0, 1, x0, y0, x1, y1);
|
||||
writeBlock<w, h, gc_x, gc_y>(rho_v1_ptr_, rho_v1_pitch_, Q[2], nx_, ny_, 0, 1, x0, y0, x1, y1);
|
||||
writeBlock<w, h, gc_x, gc_y>( E1_ptr_, E1_pitch_, Q[3], nx_, ny_, 0, 1, x0, y0, x1, y1);
|
||||
|
||||
//Compute the CFL for this block
|
||||
if (cfl_ != NULL) {
|
||||
writeCfl<w, h, gc_x, gc_y, vars>(Q, F[0], nx_, ny_, dx_, dy_, gamma_, cfl_);
|
||||
}
|
||||
}
} // extern "C"
|
@ -1,251 +0,0 @@
|
||||
#include "hip/hip_runtime.h"
|
||||
/*
|
||||
This kernel implements the Central Upwind flux function to
|
||||
solve the Euler equations
|
||||
|
||||
Copyright (C) 2018 SINTEF Digital
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
#include "common.h"
|
||||
#include "EulerCommon.h"
|
||||
#include "limiters.h"
__device__
|
||||
void computeFluxF(float Q[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float Qx[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float F[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
const float gamma_, const float dx_, const float dt_) {
|
||||
for (int j=threadIdx.y; j<BLOCK_HEIGHT+4; j+=BLOCK_HEIGHT) {
|
||||
for (int i=threadIdx.x+1; i<BLOCK_WIDTH+2; i+=BLOCK_WIDTH) {
|
||||
// Reconstruct point values of Q at the left and right hand side
|
||||
// of the cell for both the left (i) and right (i+1) cell
|
||||
const float4 Q_rl = make_float4(Q[0][j][i+1] - 0.5f*Qx[0][j][i+1],
|
||||
Q[1][j][i+1] - 0.5f*Qx[1][j][i+1],
|
||||
Q[2][j][i+1] - 0.5f*Qx[2][j][i+1],
|
||||
Q[3][j][i+1] - 0.5f*Qx[3][j][i+1]);
|
||||
const float4 Q_rr = make_float4(Q[0][j][i+1] + 0.5f*Qx[0][j][i+1],
|
||||
Q[1][j][i+1] + 0.5f*Qx[1][j][i+1],
|
||||
Q[2][j][i+1] + 0.5f*Qx[2][j][i+1],
|
||||
Q[3][j][i+1] + 0.5f*Qx[3][j][i+1]);
|
||||
|
||||
const float4 Q_ll = make_float4(Q[0][j][i] - 0.5f*Qx[0][j][i],
|
||||
Q[1][j][i] - 0.5f*Qx[1][j][i],
|
||||
Q[2][j][i] - 0.5f*Qx[2][j][i],
|
||||
Q[3][j][i] - 0.5f*Qx[3][j][i]);
|
||||
const float4 Q_lr = make_float4(Q[0][j][i] + 0.5f*Qx[0][j][i],
|
||||
Q[1][j][i] + 0.5f*Qx[1][j][i],
|
||||
Q[2][j][i] + 0.5f*Qx[2][j][i],
|
||||
Q[3][j][i] + 0.5f*Qx[3][j][i]);
|
||||
|
||||
|
||||
//Evolve half a timestep (predictor step)
|
||||
const float4 Q_r_bar = Q_rl + dt_/(2.0f*dx_) * (F_func(Q_rl, gamma_) - F_func(Q_rr, gamma_));
|
||||
const float4 Q_l_bar = Q_lr + dt_/(2.0f*dx_) * (F_func(Q_ll, gamma_) - F_func(Q_lr, gamma_));
|
||||
|
||||
// Compute flux based on prediction
|
||||
//const float4 flux = CentralUpwindFlux(Q_l_bar, Q_r_bar, gamma_);
|
||||
const float4 flux = HLL_flux(Q_l_bar, Q_r_bar, gamma_);
|
||||
|
||||
//Write to shared memory
|
||||
F[0][j][i] = flux.x;
|
||||
F[1][j][i] = flux.y;
|
||||
F[2][j][i] = flux.z;
|
||||
F[3][j][i] = flux.w;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
__device__
|
||||
void computeFluxG(float Q[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float Qy[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float G[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
const float gamma_, const float dy_, const float dt_) {
|
||||
for (int j=threadIdx.y+1; j<BLOCK_HEIGHT+2; j+=BLOCK_HEIGHT) {
|
||||
for (int i=threadIdx.x; i<BLOCK_WIDTH+4; i+=BLOCK_WIDTH) {
|
||||
// Reconstruct point values of Q at the left and right hand side
|
||||
// of the cell for both the lower (j) and upper (j+1) cell
|
||||
//Note that rho_u and rho_v are swapped ("transposing" the domain)!
|
||||
const float4 Q_rl = make_float4(Q[0][j+1][i] - 0.5f*Qy[0][j+1][i],
|
||||
Q[2][j+1][i] - 0.5f*Qy[2][j+1][i],
|
||||
Q[1][j+1][i] - 0.5f*Qy[1][j+1][i],
|
||||
Q[3][j+1][i] - 0.5f*Qy[3][j+1][i]);
|
||||
const float4 Q_rr = make_float4(Q[0][j+1][i] + 0.5f*Qy[0][j+1][i],
|
||||
Q[2][j+1][i] + 0.5f*Qy[2][j+1][i],
|
||||
Q[1][j+1][i] + 0.5f*Qy[1][j+1][i],
|
||||
Q[3][j+1][i] + 0.5f*Qy[3][j+1][i]);
|
||||
|
||||
const float4 Q_ll = make_float4(Q[0][j][i] - 0.5f*Qy[0][j][i],
|
||||
Q[2][j][i] - 0.5f*Qy[2][j][i],
|
||||
Q[1][j][i] - 0.5f*Qy[1][j][i],
|
||||
Q[3][j][i] - 0.5f*Qy[3][j][i]);
|
||||
const float4 Q_lr = make_float4(Q[0][j][i] + 0.5f*Qy[0][j][i],
|
||||
Q[2][j][i] + 0.5f*Qy[2][j][i],
|
||||
Q[1][j][i] + 0.5f*Qy[1][j][i],
|
||||
Q[3][j][i] + 0.5f*Qy[3][j][i]);
|
||||
|
||||
//Evolve half a timestep (predictor step)
|
||||
const float4 Q_r_bar = Q_rl + dt_/(2.0f*dy_) * (F_func(Q_rl, gamma_) - F_func(Q_rr, gamma_));
|
||||
const float4 Q_l_bar = Q_lr + dt_/(2.0f*dy_) * (F_func(Q_ll, gamma_) - F_func(Q_lr, gamma_));
|
||||
|
||||
// Compute flux based on prediction
|
||||
const float4 flux = CentralUpwindFlux(Q_l_bar, Q_r_bar, gamma_);
|
||||
//const float4 flux = HLL_flux(Q_l_bar, Q_r_bar, gamma_);
|
||||
|
||||
//Write to shared memory
|
||||
//Note that we here swap rho_u and rho_v back to the original order
|
||||
G[0][j][i] = flux.x;
|
||||
G[1][j][i] = flux.z;
|
||||
G[2][j][i] = flux.y;
|
||||
G[3][j][i] = flux.w;
|
||||
}
|
||||
}
|
||||
}
/**
|
||||
* This dimensionally split kernel computes the 2D numerical scheme, alternating the order of the x and y sweeps between even and odd steps
|
||||
*/
|
||||
extern "C" {
|
||||
|
||||
__global__ void KP07DimsplitKernel(
|
||||
int nx_, int ny_,
|
||||
float dx_, float dy_, float dt_,
|
||||
float g_,
|
||||
float gamma_,
|
||||
|
||||
float theta_,
|
||||
|
||||
int step_,
|
||||
int boundary_conditions_,
|
||||
|
||||
//Input h^n
|
||||
float* rho0_ptr_, int rho0_pitch_,
|
||||
float* rho_u0_ptr_, int rho_u0_pitch_,
|
||||
float* rho_v0_ptr_, int rho_v0_pitch_,
|
||||
float* E0_ptr_, int E0_pitch_,
|
||||
|
||||
//Output h^{n+1}
|
||||
float* rho1_ptr_, int rho1_pitch_,
|
||||
float* rho_u1_ptr_, int rho_u1_pitch_,
|
||||
float* rho_v1_ptr_, int rho_v1_pitch_,
|
||||
float* E1_ptr_, int E1_pitch_,
|
||||
|
||||
//Output CFL
|
||||
float* cfl_,
|
||||
|
||||
//Subarea of internal domain to compute
|
||||
int x0=0, int y0=0,
|
||||
int x1=0, int y1=0) {
|
||||
|
||||
if(x1 == 0)
|
||||
x1 = nx_;
|
||||
|
||||
if(y1 == 0)
|
||||
y1 = ny_;
|
||||
|
||||
const unsigned int w = BLOCK_WIDTH;
|
||||
const unsigned int h = BLOCK_HEIGHT;
|
||||
const unsigned int gc_x = 2;
|
||||
const unsigned int gc_y = 2;
|
||||
const unsigned int vars = 4;
|
||||
|
||||
//Shared memory variables
|
||||
__shared__ float Q[4][h+2*gc_y][w+2*gc_x];
|
||||
__shared__ float Qx[4][h+2*gc_y][w+2*gc_x];
|
||||
__shared__ float F[4][h+2*gc_y][w+2*gc_x];
|
||||
|
||||
//Read into shared memory
|
||||
readBlock<w, h, gc_x, gc_y, 1, 1>( rho0_ptr_, rho0_pitch_, Q[0], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
|
||||
readBlock<w, h, gc_x, gc_y, -1, 1>(rho_u0_ptr_, rho_u0_pitch_, Q[1], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
|
||||
readBlock<w, h, gc_x, gc_y, 1, -1>(rho_v0_ptr_, rho_v0_pitch_, Q[2], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
|
||||
readBlock<w, h, gc_x, gc_y, 1, 1>( E0_ptr_, E0_pitch_, Q[3], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
|
||||
|
||||
//Step 0 => evolve x first, then y
|
||||
if (step_ == 0) {
|
||||
//Compute fluxes along the x axis and evolve
|
||||
minmodSlopeX<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxF(Q, Qx, F, gamma_, dx_, dt_);
|
||||
__syncthreads();
|
||||
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Compute fluxes along the y axis and evolve
|
||||
minmodSlopeY<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxG(Q, Qx, F, gamma_, dy_, dt_);
|
||||
__syncthreads();
|
||||
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Gravity source term
|
||||
if (g_ > 0.0f) {
|
||||
const int i = threadIdx.x + gc_x;
|
||||
const int j = threadIdx.y + gc_y;
|
||||
const float rho_v = Q[2][j][i];
|
||||
Q[2][j][i] -= g_*Q[0][j][i]*dt_;
|
||||
Q[3][j][i] -= g_*rho_v*dt_;
|
||||
__syncthreads();
|
||||
}
|
||||
}
|
||||
//Step 1 => evolve y first, then x
|
||||
else {
|
||||
//Compute fluxes along the y axis and evolve
|
||||
minmodSlopeY<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxG(Q, Qx, F, gamma_, dy_, dt_);
|
||||
__syncthreads();
|
||||
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Compute fluxes along the x axis and evolve
|
||||
minmodSlopeX<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxF(Q, Qx, F, gamma_, dx_, dt_);
|
||||
__syncthreads();
|
||||
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Gravity source term
|
||||
if (g_ > 0.0f) {
|
||||
const int i = threadIdx.x + gc_x;
|
||||
const int j = threadIdx.y + gc_y;
|
||||
const float rho_v = Q[2][j][i];
|
||||
Q[2][j][i] -= g_*Q[0][j][i]*dt_;
|
||||
Q[3][j][i] -= g_*rho_v*dt_;
|
||||
__syncthreads();
|
||||
}
|
||||
}
// Write to main memory for all internal cells
|
||||
writeBlock<w, h, gc_x, gc_y>( rho1_ptr_, rho1_pitch_, Q[0], nx_, ny_, 0, 1, x0, y0, x1, y1);
|
||||
writeBlock<w, h, gc_x, gc_y>(rho_u1_ptr_, rho_u1_pitch_, Q[1], nx_, ny_, 0, 1, x0, y0, x1, y1);
|
||||
writeBlock<w, h, gc_x, gc_y>(rho_v1_ptr_, rho_v1_pitch_, Q[2], nx_, ny_, 0, 1, x0, y0, x1, y1);
|
||||
writeBlock<w, h, gc_x, gc_y>( E1_ptr_, E1_pitch_, Q[3], nx_, ny_, 0, 1, x0, y0, x1, y1);
|
||||
|
||||
//Compute the CFL for this block
|
||||
if (cfl_ != NULL) {
|
||||
writeCfl<w, h, gc_x, gc_y, vars>(Q, F[0], nx_, ny_, dx_, dy_, gamma_, cfl_);
|
||||
}
|
||||
}
} // extern "C"
|
@ -1,187 +0,0 @@
|
||||
/*
|
||||
These CUDA functions implement different types of numerical flux
|
||||
functions for the Euler equations
|
||||
|
||||
Copyright (C) 2016, 2017, 2018 SINTEF Digital
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
#include "limiters.h"
template<int w, int h, int gc_x, int gc_y, int vars>
|
||||
__device__ void writeCfl(float Q[vars][h+2*gc_y][w+2*gc_x],
|
||||
float shmem[h+2*gc_y][w+2*gc_x],
|
||||
const int nx_, const int ny_,
|
||||
const float dx_, const float dy_, const float gamma_,
|
||||
float* output_) {
|
||||
//Index of thread within block
|
||||
const int tx = threadIdx.x + gc_x;
|
||||
const int ty = threadIdx.y + gc_y;
|
||||
|
||||
//Index of cell within domain
|
||||
const int ti = blockDim.x*blockIdx.x + tx;
|
||||
const int tj = blockDim.y*blockIdx.y + ty;
|
||||
|
||||
//Only internal cells
|
||||
if (ti < nx_+gc_x && tj < ny_+gc_y) {
|
||||
const float rho = Q[0][ty][tx];
|
||||
const float u = Q[1][ty][tx] / rho;
|
||||
const float v = Q[2][ty][tx] / rho;
|
||||
|
||||
const float max_u = dx_ / (fabsf(u) + sqrtf(gamma_*rho));
|
||||
const float max_v = dy_ / (fabsf(v) + sqrtf(gamma_*rho));
|
||||
|
||||
shmem[ty][tx] = fminf(max_u, max_v);
|
||||
}
|
||||
__syncthreads();
|
||||
|
||||
//One row of threads loops over all rows to find the minimum of each column
|
||||
if (ti < nx_+gc_x && tj < ny_+gc_y) {
|
||||
if (ty == gc_y) {
|
||||
float min_val = shmem[ty][tx];
|
||||
const int max_y = min(h, ny_+gc_y - tj);
|
||||
for (int j=gc_y; j<max_y+gc_y; j++) {
|
||||
min_val = fminf(min_val, shmem[j][tx]);
|
||||
}
|
||||
shmem[ty][tx] = min_val;
|
||||
}
|
||||
}
|
||||
__syncthreads();
|
||||
|
||||
//One thread loops over the first row to find the global minimum for the block
|
||||
if (tx == gc_x && ty == gc_y) {
|
||||
float min_val = shmem[ty][tx];
|
||||
const int max_x = min(w, nx_+gc_x - ti);
|
||||
for (int i=gc_x; i<max_x+gc_x; ++i) {
|
||||
min_val = fminf(min_val, shmem[ty][i]);
|
||||
}
|
||||
|
||||
const int idx = gridDim.x*blockIdx.y + blockIdx.x;
|
||||
output_[idx] = min_val;
|
||||
}
|
||||
}
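//Ideal gas equation of state, p = (gamma - 1)*(E - 0.5*rho*(u*u + v*v)),
//written below in terms of the conserved variables (rho, rho*u, rho*v, E).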
inline __device__ float pressure(float4 Q, float gamma) {
|
||||
const float rho = Q.x;
|
||||
const float rho_u = Q.y;
|
||||
const float rho_v = Q.z;
|
||||
const float E = Q.w;
|
||||
|
||||
return (gamma-1.0f)*(E-0.5f*(rho_u*rho_u + rho_v*rho_v)/rho);
|
||||
}
__device__ float4 F_func(const float4 Q, float P) {
|
||||
const float rho = Q.x;
|
||||
const float rho_u = Q.y;
|
||||
const float rho_v = Q.z;
|
||||
const float E = Q.w;
|
||||
|
||||
const float u = rho_u/rho;
|
||||
|
||||
float4 F;
|
||||
|
||||
F.x = rho_u;
|
||||
F.y = rho_u*u + P;
|
||||
F.z = rho_v*u;
|
||||
F.w = u*(E+P);
|
||||
|
||||
return F;
|
||||
}
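//F_func above is the x-directed Euler flux
//  F(Q) = [rho*u, rho*u*u + p, rho*u*v, u*(E + p)],
//with the pressure P supplied by the caller.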
/**
|
||||
* Harten-Lax-van Leer approximate Riemann solver (Toro 2001, p 180)
|
||||
*/
|
||||
__device__ float4 HLL_flux(const float4 Q_l, const float4 Q_r, const float gamma) {
|
||||
const float h_l = Q_l.x;
|
||||
const float h_r = Q_r.x;
|
||||
|
||||
// Calculate velocities
|
||||
const float u_l = Q_l.y / h_l;
|
||||
const float u_r = Q_r.y / h_r;
|
||||
|
||||
// Calculate pressures
|
||||
const float P_l = pressure(Q_l, gamma);
|
||||
const float P_r = pressure(Q_r, gamma);
|
||||
|
||||
// Estimate the potential wave speeds
|
||||
const float c_l = sqrt(gamma*P_l/Q_l.x);
|
||||
const float c_r = sqrt(gamma*P_r/Q_r.x);
|
||||
|
||||
// Compute h in the "star region", h^dagger
|
||||
const float h_dag = 0.5f * (h_l+h_r) - 0.25f * (u_r-u_l)*(h_l+h_r)/(c_l+c_r);
|
||||
|
||||
const float q_l_tmp = sqrt(0.5f * ( (h_dag+h_l)*h_dag / (h_l*h_l) ) );
|
||||
const float q_r_tmp = sqrt(0.5f * ( (h_dag+h_r)*h_dag / (h_r*h_r) ) );
|
||||
|
||||
const float q_l = (h_dag > h_l) ? q_l_tmp : 1.0f;
|
||||
const float q_r = (h_dag > h_r) ? q_r_tmp : 1.0f;
|
||||
|
||||
// Compute wave speed estimates
|
||||
const float S_l = u_l - c_l*q_l;
|
||||
const float S_r = u_r + c_r*q_r;
|
||||
|
||||
//Upwind selection
|
||||
if (S_l >= 0.0f) {
|
||||
return F_func(Q_l, P_l);
|
||||
}
|
||||
else if (S_r <= 0.0f) {
|
||||
return F_func(Q_r, P_r);
|
||||
}
|
||||
//Or estimate flux in the star region
|
||||
else {
|
||||
const float4 F_l = F_func(Q_l, P_l);
|
||||
const float4 F_r = F_func(Q_r, P_r);
|
||||
const float4 flux = (S_r*F_l - S_l*F_r + S_r*S_l*(Q_r - Q_l)) / (S_r-S_l);
|
||||
return flux;
|
||||
}
|
||||
}
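//The HLL flux above has the usual three branches: if the slowest wave speed
//S_l is non-negative, all waves travel right and the left flux is used; if
//S_r <= 0 the right flux is used; otherwise the star-region flux
//  (S_r*F_l - S_l*F_r + S_l*S_r*(Q_r - Q_l)) / (S_r - S_l)
//is returned.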
/**
|
||||
* Central upwind flux function
|
||||
*/
|
||||
__device__ float4 CentralUpwindFlux(const float4 Qm, const float4 Qp, const float gamma) {
|
||||
|
||||
const float Pp = pressure(Qp, gamma);
|
||||
const float4 Fp = F_func(Qp, Pp);
|
||||
const float up = Qp.y / Qp.x; // rho*u / rho
|
||||
const float cp = sqrt(gamma*Pp/Qp.x); // sqrt(gamma*P/rho)
|
||||
|
||||
const float Pm = pressure(Qm, gamma);
|
||||
const float4 Fm = F_func(Qm, Pm);
|
||||
const float um = Qm.y / Qm.x; // rho*u / rho
|
||||
const float cm = sqrt(gamma*Pm/Qm.x); // sqrt(gamma*P/rho)
|
||||
|
||||
const float am = min(min(um-cm, up-cp), 0.0f); // largest negative wave speed
|
||||
const float ap = max(max(um+cm, up+cp), 0.0f); // largest positive wave speed
|
||||
|
||||
return ((ap*Fm - am*Fp) + ap*am*(Qp-Qm))/(ap-am);
|
||||
}
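//CentralUpwindFlux above uses the one-sided wave speed estimates
//  am = min(u - c, 0) and ap = max(u + c, 0) (taken over both states)
//and returns the central-upwind flux
//  ((ap*Fm - am*Fp) + ap*am*(Qp - Qm)) / (ap - am).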
@ -1,143 +0,0 @@
|
||||
/*
|
||||
This GPU kernel implements the first-order centred (FORCE) scheme
|
||||
for the shallow water equations, with edge fluxes.
|
||||
|
||||
Copyright (C) 2016 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
#include "common.h"
|
||||
#include "SWECommon.h"
|
||||
|
||||
|
||||
/**
|
||||
* Computes the flux along the x axis for all faces
|
||||
*/
|
||||
__device__
|
||||
void computeFluxF(float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
float F[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
const float g_, const float dx_, const float dt_) {
|
||||
//Compute fluxes along the x axis
|
||||
for (int j=threadIdx.y; j<BLOCK_HEIGHT+2; j+=BLOCK_HEIGHT) {
|
||||
for (int i=threadIdx.x; i<BLOCK_WIDTH+1; i+=BLOCK_WIDTH) {
|
||||
// Q at interface from the right and left
|
||||
const float3 Qp = make_float3(Q[0][j][i+1],
|
||||
Q[1][j][i+1],
|
||||
Q[2][j][i+1]);
|
||||
const float3 Qm = make_float3(Q[0][j][i],
|
||||
Q[1][j][i],
|
||||
Q[2][j][i]);
|
||||
|
||||
// Computed flux
|
||||
const float3 flux = FORCE_1D_flux(Qm, Qp, g_, dx_, dt_);
|
||||
F[0][j][i] = flux.x;
|
||||
F[1][j][i] = flux.y;
|
||||
F[2][j][i] = flux.z;
|
||||
}
|
||||
}
|
||||
}
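//FORCE_1D_flux is defined in SWECommon.h (not shown in this diff); in the usual
//formulation the FORCE flux is the average of the Lax-Friedrichs and the
//Richtmyer (two-step Lax-Wendroff) fluxes, which is presumably what is used here.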
/**
|
||||
* Computes the flux along the y axis for all faces
|
||||
*/
|
||||
__device__
|
||||
void computeFluxG(float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
float G[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
const float g_, const float dy_, const float dt_) {
|
||||
//Compute fluxes along the y axis
|
||||
for (int j=threadIdx.y; j<BLOCK_HEIGHT+1; j+=BLOCK_HEIGHT) {
|
||||
for (int i=threadIdx.x; i<BLOCK_WIDTH+2; i+=BLOCK_WIDTH) {
|
||||
// Q at interface from the right and left
|
||||
// Note that we swap hu and hv
|
||||
const float3 Qp = make_float3(Q[0][j+1][i],
|
||||
Q[2][j+1][i],
|
||||
Q[1][j+1][i]);
|
||||
const float3 Qm = make_float3(Q[0][j][i],
|
||||
Q[2][j][i],
|
||||
Q[1][j][i]);
|
||||
|
||||
// Computed flux
|
||||
// Note that we swap back
|
||||
const float3 flux = FORCE_1D_flux(Qm, Qp, g_, dy_, dt_);
|
||||
G[0][j][i] = flux.x;
|
||||
G[1][j][i] = flux.z;
|
||||
G[2][j][i] = flux.y;
|
||||
}
|
||||
}
|
||||
}
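//The hu/hv swap above lets the same 1D flux routine, written for the x
//direction, be reused for the y direction; the momentum components are swapped
//back when the result is stored in G.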
extern "C" {
|
||||
__global__ void FORCEKernel(
|
||||
int nx_, int ny_,
|
||||
float dx_, float dy_, float dt_,
|
||||
float g_,
|
||||
|
||||
int boundary_conditions_,
|
||||
|
||||
//Input h^n
|
||||
float* h0_ptr_, int h0_pitch_,
|
||||
float* hu0_ptr_, int hu0_pitch_,
|
||||
float* hv0_ptr_, int hv0_pitch_,
|
||||
|
||||
//Output h^{n+1}
|
||||
float* h1_ptr_, int h1_pitch_,
|
||||
float* hu1_ptr_, int hu1_pitch_,
|
||||
float* hv1_ptr_, int hv1_pitch_,
|
||||
|
||||
//Output CFL
|
||||
float* cfl_) {
|
||||
|
||||
const unsigned int w = BLOCK_WIDTH;
|
||||
const unsigned int h = BLOCK_HEIGHT;
|
||||
const unsigned int gc_x = 1;
|
||||
const unsigned int gc_y = 1;
|
||||
const unsigned int vars = 3;
|
||||
|
||||
__shared__ float Q[vars][h+2*gc_y][w+2*gc_x];
|
||||
__shared__ float F[vars][h+2*gc_y][w+2*gc_x];
|
||||
|
||||
//Read into shared memory
|
||||
readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
|
||||
__syncthreads();
|
||||
|
||||
//Compute flux along x, and evolve
|
||||
computeFluxF(Q, F, g_, dx_, dt_);
|
||||
__syncthreads();
|
||||
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Compute flux along y, and evolve
|
||||
computeFluxG(Q, F, g_, dy_, dt_);
|
||||
__syncthreads();
|
||||
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Write to main memory
|
||||
writeBlock<w, h, gc_x, gc_y>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
|
||||
|
||||
//Compute the CFL for this block
|
||||
if (cfl_ != NULL) {
|
||||
writeCfl<w, h, gc_x, gc_y, vars>(Q, F[0], nx_, ny_, dx_, dy_, g_, cfl_);
|
||||
}
|
||||
}
|
||||
|
||||
} // extern "C"
|
@ -1,144 +0,0 @@
|
||||
#include "hip/hip_runtime.h"
|
||||
/*
|
||||
This GPU kernel implements the first-order centred (FORCE) scheme
|
||||
for the shallow water equations, with edge fluxes.
|
||||
|
||||
Copyright (C) 2016 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
|
||||
#include "common.h"
|
||||
#include "SWECommon.h"
|
||||
|
||||
|
||||
/**
|
||||
* Computes the flux along the x axis for all faces
|
||||
*/
|
||||
__device__
|
||||
void computeFluxF(float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
float F[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
const float g_, const float dx_, const float dt_) {
|
||||
//Compute fluxes along the x axis
|
||||
for (int j=threadIdx.y; j<BLOCK_HEIGHT+2; j+=BLOCK_HEIGHT) {
|
||||
for (int i=threadIdx.x; i<BLOCK_WIDTH+1; i+=BLOCK_WIDTH) {
|
||||
// Q at interface from the right and left
|
||||
const float3 Qp = make_float3(Q[0][j][i+1],
|
||||
Q[1][j][i+1],
|
||||
Q[2][j][i+1]);
|
||||
const float3 Qm = make_float3(Q[0][j][i],
|
||||
Q[1][j][i],
|
||||
Q[2][j][i]);
|
||||
|
||||
// Computed flux
|
||||
const float3 flux = FORCE_1D_flux(Qm, Qp, g_, dx_, dt_);
|
||||
F[0][j][i] = flux.x;
|
||||
F[1][j][i] = flux.y;
|
||||
F[2][j][i] = flux.z;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Computes the flux along the y axis for all faces
|
||||
*/
|
||||
__device__
|
||||
void computeFluxG(float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
float G[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
const float g_, const float dy_, const float dt_) {
|
||||
//Compute fluxes along the y axis
|
||||
for (int j=threadIdx.y; j<BLOCK_HEIGHT+1; j+=BLOCK_HEIGHT) {
|
||||
for (int i=threadIdx.x; i<BLOCK_WIDTH+2; i+=BLOCK_WIDTH) {
|
||||
// Q at interface from the right and left
|
||||
// Note that we swap hu and hv
|
||||
const float3 Qp = make_float3(Q[0][j+1][i],
|
||||
Q[2][j+1][i],
|
||||
Q[1][j+1][i]);
|
||||
const float3 Qm = make_float3(Q[0][j][i],
|
||||
Q[2][j][i],
|
||||
Q[1][j][i]);
|
||||
|
||||
// Computed flux
|
||||
// Note that we swap back
|
||||
const float3 flux = FORCE_1D_flux(Qm, Qp, g_, dy_, dt_);
|
||||
G[0][j][i] = flux.x;
|
||||
G[1][j][i] = flux.z;
|
||||
G[2][j][i] = flux.y;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
extern "C" {
|
||||
__global__ void FORCEKernel(
|
||||
int nx_, int ny_,
|
||||
float dx_, float dy_, float dt_,
|
||||
float g_,
|
||||
|
||||
int boundary_conditions_,
|
||||
|
||||
//Input h^n
|
||||
float* h0_ptr_, int h0_pitch_,
|
||||
float* hu0_ptr_, int hu0_pitch_,
|
||||
float* hv0_ptr_, int hv0_pitch_,
|
||||
|
||||
//Output h^{n+1}
|
||||
float* h1_ptr_, int h1_pitch_,
|
||||
float* hu1_ptr_, int hu1_pitch_,
|
||||
float* hv1_ptr_, int hv1_pitch_,
|
||||
|
||||
//Output CFL
|
||||
float* cfl_) {
|
||||
|
||||
const unsigned int w = BLOCK_WIDTH;
|
||||
const unsigned int h = BLOCK_HEIGHT;
|
||||
const unsigned int gc_x = 1;
|
||||
const unsigned int gc_y = 1;
|
||||
const unsigned int vars = 3;
|
||||
|
||||
__shared__ float Q[vars][h+2*gc_y][w+2*gc_x];
|
||||
__shared__ float F[vars][h+2*gc_y][w+2*gc_x];
|
||||
|
||||
//Read into shared memory
|
||||
readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
|
||||
__syncthreads();
|
||||
|
||||
//Compute flux along x, and evolve
|
||||
computeFluxF(Q, F, g_, dx_, dt_);
|
||||
__syncthreads();
|
||||
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Compute flux along y, and evolve
|
||||
computeFluxG(Q, F, g_, dy_, dt_);
|
||||
__syncthreads();
|
||||
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Write to main memory
|
||||
writeBlock<w, h, gc_x, gc_y>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
|
||||
|
||||
//Compute the CFL for this block
|
||||
if (cfl_ != NULL) {
|
||||
writeCfl<w, h, gc_x, gc_y, vars>(Q, F[0], nx_, ny_, dx_, dy_, g_, cfl_);
|
||||
}
|
||||
}
|
||||
|
||||
} // extern "C"
|
@ -1,161 +0,0 @@
|
||||
/*
|
||||
This GPU kernel implements the HLL flux
|
||||
|
||||
Copyright (C) 2016 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
|
||||
|
||||
#include "common.h"
|
||||
#include "SWECommon.h"
/**
|
||||
* Computes the flux along the x axis for all faces
|
||||
*/
|
||||
__device__
|
||||
void computeFluxF(float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
float F[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
const float g_) {
|
||||
for (int j=threadIdx.y; j<BLOCK_HEIGHT+2; j+=BLOCK_HEIGHT) {
|
||||
for (int i=threadIdx.x; i<BLOCK_WIDTH+1; i+=BLOCK_WIDTH) {
|
||||
// Q at interface from the right and left
|
||||
const float3 Q_r = make_float3(Q[0][j][i+1],
|
||||
Q[1][j][i+1],
|
||||
Q[2][j][i+1]);
|
||||
const float3 Q_l = make_float3(Q[0][j][i],
|
||||
Q[1][j][i],
|
||||
Q[2][j][i]);
|
||||
|
||||
// Computed flux
|
||||
const float3 flux = HLL_flux(Q_l, Q_r, g_);
|
||||
F[0][j][i] = flux.x;
|
||||
F[1][j][i] = flux.y;
|
||||
F[2][j][i] = flux.z;
|
||||
}
|
||||
}
|
||||
}
/**
|
||||
* Computes the flux along the y axis for all faces
|
||||
*/
|
||||
__device__
|
||||
void computeFluxG(float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
float G[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
const float g_) {
|
||||
//Compute fluxes along the y axis
|
||||
for (int j=threadIdx.y; j<BLOCK_HEIGHT+1; j+=BLOCK_HEIGHT) {
|
||||
for (int i=threadIdx.x; i<BLOCK_WIDTH+2; i+=BLOCK_WIDTH) {
|
||||
// Q at interface from the right and left
|
||||
// Note that we swap hu and hv
|
||||
const float3 Q_r = make_float3(Q[0][j+1][i],
|
||||
Q[2][j+1][i],
|
||||
Q[1][j+1][i]);
|
||||
const float3 Q_l = make_float3(Q[0][j][i],
|
||||
Q[2][j][i],
|
||||
Q[1][j][i]);
|
||||
|
||||
// Computed flux
|
||||
//Note that we here swap hu and hv back to the original
|
||||
const float3 flux = HLL_flux(Q_l, Q_r, g_);
|
||||
G[0][j][i] = flux.x;
|
||||
G[1][j][i] = flux.z;
|
||||
G[2][j][i] = flux.y;
|
||||
}
|
||||
}
|
||||
}
extern "C" {
|
||||
|
||||
__global__ void HLLKernel(
|
||||
int nx_, int ny_,
|
||||
float dx_, float dy_, float dt_,
|
||||
float g_,
|
||||
|
||||
int boundary_conditions_,
|
||||
|
||||
//Input h^n
|
||||
float* h0_ptr_, int h0_pitch_,
|
||||
float* hu0_ptr_, int hu0_pitch_,
|
||||
float* hv0_ptr_, int hv0_pitch_,
|
||||
|
||||
//Output h^{n+1}
|
||||
float* h1_ptr_, int h1_pitch_,
|
||||
float* hu1_ptr_, int hu1_pitch_,
|
||||
float* hv1_ptr_, int hv1_pitch_,
|
||||
|
||||
//Output CFL
|
||||
float* cfl_) {
|
||||
|
||||
const unsigned int w = BLOCK_WIDTH;
|
||||
const unsigned int h = BLOCK_HEIGHT;
|
||||
const unsigned int gc_x = 1;
|
||||
const unsigned int gc_y = 1;
|
||||
const unsigned int vars = 3;
|
||||
|
||||
//Shared memory variables
|
||||
__shared__ float Q[vars][h+2*gc_y][w+2*gc_x];
|
||||
__shared__ float F[vars][h+2*gc_y][w+2*gc_x];
|
||||
|
||||
//Read into shared memory
|
||||
readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
|
||||
|
||||
//Compute F flux
|
||||
computeFluxF(Q, F, g_);
|
||||
__syncthreads();
|
||||
|
||||
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Compute G flux
|
||||
computeFluxG(Q, F, g_);
|
||||
__syncthreads();
|
||||
|
||||
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
// Write to main memory for all internal cells
|
||||
writeBlock<w, h, gc_x, gc_y>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
|
||||
|
||||
//Compute the CFL for this block
|
||||
if (cfl_ != NULL) {
|
||||
writeCfl<w, h, gc_x, gc_y, vars>(Q, F[0], nx_, ny_, dx_, dy_, g_, cfl_);
|
||||
}
|
||||
}
|
||||
|
||||
} // extern "C"
|
@ -1,162 +0,0 @@
|
||||
#include "hip/hip_runtime.h"
|
||||
/*
|
||||
This GPU kernel implements the HLL flux
|
||||
|
||||
Copyright (C) 2016 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
#include "common.h"
|
||||
#include "SWECommon.h"
/**
|
||||
* Computes the flux along the x axis for all faces
|
||||
*/
|
||||
__device__
|
||||
void computeFluxF(float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
float F[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
const float g_) {
|
||||
for (int j=threadIdx.y; j<BLOCK_HEIGHT+2; j+=BLOCK_HEIGHT) {
|
||||
for (int i=threadIdx.x; i<BLOCK_WIDTH+1; i+=BLOCK_WIDTH) {
|
||||
// Q at interface from the right and left
|
||||
const float3 Q_r = make_float3(Q[0][j][i+1],
|
||||
Q[1][j][i+1],
|
||||
Q[2][j][i+1]);
|
||||
const float3 Q_l = make_float3(Q[0][j][i],
|
||||
Q[1][j][i],
|
||||
Q[2][j][i]);
|
||||
|
||||
// Computed flux
|
||||
const float3 flux = HLL_flux(Q_l, Q_r, g_);
|
||||
F[0][j][i] = flux.x;
|
||||
F[1][j][i] = flux.y;
|
||||
F[2][j][i] = flux.z;
|
||||
}
|
||||
}
|
||||
}
/**
|
||||
* Computes the flux along the y axis for all faces
|
||||
*/
|
||||
__device__
|
||||
void computeFluxG(float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
float G[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
const float g_) {
|
||||
//Compute fluxes along the y axis
|
||||
for (int j=threadIdx.y; j<BLOCK_HEIGHT+1; j+=BLOCK_HEIGHT) {
|
||||
for (int i=threadIdx.x; i<BLOCK_WIDTH+2; i+=BLOCK_WIDTH) {
|
||||
// Q at interface from the right and left
|
||||
// Note that we swap hu and hv
|
||||
const float3 Q_r = make_float3(Q[0][j+1][i],
|
||||
Q[2][j+1][i],
|
||||
Q[1][j+1][i]);
|
||||
const float3 Q_l = make_float3(Q[0][j][i],
|
||||
Q[2][j][i],
|
||||
Q[1][j][i]);
|
||||
|
||||
// Computed flux
|
||||
//Note that we here swap hu and hv back to the original
|
||||
const float3 flux = HLL_flux(Q_l, Q_r, g_);
|
||||
G[0][j][i] = flux.x;
|
||||
G[1][j][i] = flux.z;
|
||||
G[2][j][i] = flux.y;
|
||||
}
|
||||
}
|
||||
}
extern "C" {
|
||||
|
||||
__global__ void HLLKernel(
|
||||
int nx_, int ny_,
|
||||
float dx_, float dy_, float dt_,
|
||||
float g_,
|
||||
|
||||
int boundary_conditions_,
|
||||
|
||||
//Input h^n
|
||||
float* h0_ptr_, int h0_pitch_,
|
||||
float* hu0_ptr_, int hu0_pitch_,
|
||||
float* hv0_ptr_, int hv0_pitch_,
|
||||
|
||||
//Output h^{n+1}
|
||||
float* h1_ptr_, int h1_pitch_,
|
||||
float* hu1_ptr_, int hu1_pitch_,
|
||||
float* hv1_ptr_, int hv1_pitch_,
|
||||
|
||||
//Output CFL
|
||||
float* cfl_) {
|
||||
|
||||
const unsigned int w = BLOCK_WIDTH;
|
||||
const unsigned int h = BLOCK_HEIGHT;
|
||||
const unsigned int gc_x = 1;
|
||||
const unsigned int gc_y = 1;
|
||||
const unsigned int vars = 3;
|
||||
|
||||
//Shared memory variables
|
||||
__shared__ float Q[vars][h+2*gc_y][w+2*gc_x];
|
||||
__shared__ float F[vars][h+2*gc_y][w+2*gc_x];
|
||||
|
||||
//Read into shared memory
|
||||
readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
|
||||
|
||||
//Compute F flux
|
||||
computeFluxF(Q, F, g_);
|
||||
__syncthreads();
|
||||
|
||||
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Compute G flux
|
||||
computeFluxG(Q, F, g_);
|
||||
__syncthreads();
|
||||
|
||||
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
// Write to main memory for all internal cells
|
||||
writeBlock<w, h, gc_x, gc_y>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
|
||||
|
||||
//Compute the CFL for this block
|
||||
if (cfl_ != NULL) {
|
||||
writeCfl<w, h, gc_x, gc_y, vars>(Q, F[0], nx_, ny_, dx_, dy_, g_, cfl_);
|
||||
}
|
||||
}
|
||||
|
||||
} // extern "C"
|
@ -1,216 +0,0 @@
|
||||
/*
|
||||
This GPU kernel implements the second order HLL flux
|
||||
|
||||
Copyright (C) 2016 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
#include "common.h"
|
||||
#include "SWECommon.h"
|
||||
#include "limiters.h"
/**
|
||||
* Computes the flux along the x axis for all faces
|
||||
*/
|
||||
__device__
|
||||
void computeFluxF(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float Qx[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float F[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
const float g_, const float dx_, const float dt_) {
|
||||
for (int j=threadIdx.y; j<BLOCK_HEIGHT+4; j+=BLOCK_HEIGHT) {
|
||||
for (int i=threadIdx.x+1; i<BLOCK_WIDTH+2; i+=BLOCK_WIDTH) {
|
||||
// Reconstruct point values of Q at the left and right hand side
|
||||
// of the cell for both the left (i) and right (i+1) cell
|
||||
const float3 Q_rl = make_float3(Q[0][j][i+1] - 0.5f*Qx[0][j][i+1],
|
||||
Q[1][j][i+1] - 0.5f*Qx[1][j][i+1],
|
||||
Q[2][j][i+1] - 0.5f*Qx[2][j][i+1]);
|
||||
const float3 Q_rr = make_float3(Q[0][j][i+1] + 0.5f*Qx[0][j][i+1],
|
||||
Q[1][j][i+1] + 0.5f*Qx[1][j][i+1],
|
||||
Q[2][j][i+1] + 0.5f*Qx[2][j][i+1]);
|
||||
|
||||
const float3 Q_ll = make_float3(Q[0][j][i] - 0.5f*Qx[0][j][i],
|
||||
Q[1][j][i] - 0.5f*Qx[1][j][i],
|
||||
Q[2][j][i] - 0.5f*Qx[2][j][i]);
|
||||
const float3 Q_lr = make_float3(Q[0][j][i] + 0.5f*Qx[0][j][i],
|
||||
Q[1][j][i] + 0.5f*Qx[1][j][i],
|
||||
Q[2][j][i] + 0.5f*Qx[2][j][i]);
|
||||
|
||||
//Evolve half a timestep (predictor step)
|
||||
const float3 Q_r_bar = Q_rl + dt_/(2.0f*dx_) * (F_func(Q_rl, g_) - F_func(Q_rr, g_));
|
||||
const float3 Q_l_bar = Q_lr + dt_/(2.0f*dx_) * (F_func(Q_ll, g_) - F_func(Q_lr, g_));
|
||||
|
||||
// Compute flux based on prediction
|
||||
const float3 flux = HLL_flux(Q_l_bar, Q_r_bar, g_);
|
||||
|
||||
//Write to shared memory
|
||||
F[0][j][i] = flux.x;
|
||||
F[1][j][i] = flux.y;
|
||||
F[2][j][i] = flux.z;
|
||||
}
|
||||
}
|
||||
}
/**
|
||||
* Computes the flux along the y axis for all faces
|
||||
*/
|
||||
__device__
|
||||
void computeFluxG(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float Qy[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float G[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
const float g_, const float dy_, const float dt_) {
|
||||
for (int j=threadIdx.y+1; j<BLOCK_HEIGHT+2; j+=BLOCK_HEIGHT) {
|
||||
for (int i=threadIdx.x; i<BLOCK_WIDTH+4; i+=BLOCK_WIDTH) {
|
||||
// Reconstruct point values of Q at the left and right hand side
|
||||
// of the cell for both the lower (j) and upper (j+1) cell
|
||||
//Note that hu and hv are swapped ("transposing" the domain)!
|
||||
const float3 Q_rl = make_float3(Q[0][j+1][i] - 0.5f*Qy[0][j+1][i],
|
||||
Q[2][j+1][i] - 0.5f*Qy[2][j+1][i],
|
||||
Q[1][j+1][i] - 0.5f*Qy[1][j+1][i]);
|
||||
const float3 Q_rr = make_float3(Q[0][j+1][i] + 0.5f*Qy[0][j+1][i],
|
||||
Q[2][j+1][i] + 0.5f*Qy[2][j+1][i],
|
||||
Q[1][j+1][i] + 0.5f*Qy[1][j+1][i]);
|
||||
|
||||
const float3 Q_ll = make_float3(Q[0][j][i] - 0.5f*Qy[0][j][i],
|
||||
Q[2][j][i] - 0.5f*Qy[2][j][i],
|
||||
Q[1][j][i] - 0.5f*Qy[1][j][i]);
|
||||
const float3 Q_lr = make_float3(Q[0][j][i] + 0.5f*Qy[0][j][i],
|
||||
Q[2][j][i] + 0.5f*Qy[2][j][i],
|
||||
Q[1][j][i] + 0.5f*Qy[1][j][i]);
|
||||
|
||||
//Evolve half a timestep (predictor step)
|
||||
const float3 Q_r_bar = Q_rl + dt_/(2.0f*dy_) * (F_func(Q_rl, g_) - F_func(Q_rr, g_));
|
||||
const float3 Q_l_bar = Q_lr + dt_/(2.0f*dy_) * (F_func(Q_ll, g_) - F_func(Q_lr, g_));
|
||||
|
||||
// Compute flux based on prediction
|
||||
const float3 flux = HLL_flux(Q_l_bar, Q_r_bar, g_);
|
||||
|
||||
//Write to shared memory
|
||||
//Note that we here swap hu and hv back to the original
|
||||
G[0][j][i] = flux.x;
|
||||
G[1][j][i] = flux.z;
|
||||
G[2][j][i] = flux.y;
|
||||
}
|
||||
}
|
||||
}
extern "C" {
|
||||
__global__ void HLL2Kernel(
|
||||
int nx_, int ny_,
|
||||
float dx_, float dy_, float dt_,
|
||||
float g_,
|
||||
|
||||
float theta_,
|
||||
|
||||
int step_,
|
||||
int boundary_conditions_,
|
||||
|
||||
//Input h^n
|
||||
float* h0_ptr_, int h0_pitch_,
|
||||
float* hu0_ptr_, int hu0_pitch_,
|
||||
float* hv0_ptr_, int hv0_pitch_,
|
||||
|
||||
//Output h^{n+1}
|
||||
float* h1_ptr_, int h1_pitch_,
|
||||
float* hu1_ptr_, int hu1_pitch_,
|
||||
float* hv1_ptr_, int hv1_pitch_,
|
||||
|
||||
//Output CFL
|
||||
float* cfl_) {
|
||||
|
||||
const unsigned int w = BLOCK_WIDTH;
|
||||
const unsigned int h = BLOCK_HEIGHT;
|
||||
const unsigned int gc_x = 2;
|
||||
const unsigned int gc_y = 2;
|
||||
const unsigned int vars = 3;
|
||||
|
||||
//Shared memory variables
|
||||
__shared__ float Q[3][h+4][w+4];
|
||||
__shared__ float Qx[3][h+4][w+4];
|
||||
__shared__ float F[3][h+4][w+4];
|
||||
|
||||
//Read into shared memory
|
||||
readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
|
||||
|
||||
//Step 0 => evolve x first, then y
|
||||
if (step_ == 0) {
|
||||
//Compute fluxes along the x axis and evolve
|
||||
minmodSlopeX<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxF(Q, Qx, F, g_, dx_, dt_);
|
||||
__syncthreads();
|
||||
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Compute fluxes along the y axis and evolve
|
||||
minmodSlopeY<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxG(Q, Qx, F, g_, dy_, dt_);
|
||||
__syncthreads();
|
||||
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
|
||||
__syncthreads();
|
||||
}
|
||||
//Step 1 => evolve y first, then x
|
||||
else {
|
||||
//Compute fluxes along the y axis and evolve
|
||||
minmodSlopeY<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxG(Q, Qx, F, g_, dy_, dt_);
|
||||
__syncthreads();
|
||||
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Compute fluxes along the x axis and evolve
|
||||
minmodSlopeX<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxF(Q, Qx, F, g_, dx_, dt_);
|
||||
__syncthreads();
|
||||
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
|
||||
__syncthreads();
|
||||
}
// Write to main memory for all internal cells
|
||||
writeBlock<w, h, gc_x, gc_y>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
|
||||
|
||||
//Compute the CFL for this block
|
||||
if (cfl_ != NULL) {
|
||||
writeCfl<w, h, gc_x, gc_y, vars>(Q, F[0], nx_, ny_, dx_, dy_, g_, cfl_);
|
||||
}
|
||||
}
|
||||
|
||||
} // extern "C"
|
@ -1,217 +0,0 @@
|
||||
#include "hip/hip_runtime.h"
|
||||
/*
|
||||
This GPU kernel implements the second order HLL flux
|
||||
|
||||
Copyright (C) 2016 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
#include "common.h"
|
||||
#include "SWECommon.h"
|
||||
#include "limiters.h"
/**
|
||||
* Computes the flux along the x axis for all faces
|
||||
*/
|
||||
__device__
|
||||
void computeFluxF(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float Qx[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float F[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
const float g_, const float dx_, const float dt_) {
|
||||
for (int j=threadIdx.y; j<BLOCK_HEIGHT+4; j+=BLOCK_HEIGHT) {
|
||||
for (int i=threadIdx.x+1; i<BLOCK_WIDTH+2; i+=BLOCK_WIDTH) {
|
||||
// Reconstruct point values of Q at the left and right hand side
|
||||
// of the cell for both the left (i) and right (i+1) cell
|
||||
const float3 Q_rl = make_float3(Q[0][j][i+1] - 0.5f*Qx[0][j][i+1],
|
||||
Q[1][j][i+1] - 0.5f*Qx[1][j][i+1],
|
||||
Q[2][j][i+1] - 0.5f*Qx[2][j][i+1]);
|
||||
const float3 Q_rr = make_float3(Q[0][j][i+1] + 0.5f*Qx[0][j][i+1],
|
||||
Q[1][j][i+1] + 0.5f*Qx[1][j][i+1],
|
||||
Q[2][j][i+1] + 0.5f*Qx[2][j][i+1]);
|
||||
|
||||
const float3 Q_ll = make_float3(Q[0][j][i] - 0.5f*Qx[0][j][i],
|
||||
Q[1][j][i] - 0.5f*Qx[1][j][i],
|
||||
Q[2][j][i] - 0.5f*Qx[2][j][i]);
|
||||
const float3 Q_lr = make_float3(Q[0][j][i] + 0.5f*Qx[0][j][i],
|
||||
Q[1][j][i] + 0.5f*Qx[1][j][i],
|
||||
Q[2][j][i] + 0.5f*Qx[2][j][i]);
|
||||
|
||||
//Evolve half a timestep (predictor step)
|
||||
const float3 Q_r_bar = Q_rl + dt_/(2.0f*dx_) * (F_func(Q_rl, g_) - F_func(Q_rr, g_));
|
||||
const float3 Q_l_bar = Q_lr + dt_/(2.0f*dx_) * (F_func(Q_ll, g_) - F_func(Q_lr, g_));
|
||||
|
||||
// Compute flux based on prediction
|
||||
const float3 flux = HLL_flux(Q_l_bar, Q_r_bar, g_);
|
||||
|
||||
//Write to shared memory
|
||||
F[0][j][i] = flux.x;
|
||||
F[1][j][i] = flux.y;
|
||||
F[2][j][i] = flux.z;
|
||||
}
|
||||
}
|
||||
}
/**
|
||||
* Computes the flux along the y axis for all faces
|
||||
*/
|
||||
__device__
|
||||
void computeFluxG(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float Qy[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float G[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
const float g_, const float dy_, const float dt_) {
|
||||
for (int j=threadIdx.y+1; j<BLOCK_HEIGHT+2; j+=BLOCK_HEIGHT) {
|
||||
for (int i=threadIdx.x; i<BLOCK_WIDTH+4; i+=BLOCK_WIDTH) {
|
||||
// Reconstruct point values of Q at the left and right hand side
|
||||
// of the cell for both the lower (j) and upper (j+1) cell
|
||||
//Note that hu and hv are swapped ("transposing" the domain)!
|
||||
const float3 Q_rl = make_float3(Q[0][j+1][i] - 0.5f*Qy[0][j+1][i],
|
||||
Q[2][j+1][i] - 0.5f*Qy[2][j+1][i],
|
||||
Q[1][j+1][i] - 0.5f*Qy[1][j+1][i]);
|
||||
const float3 Q_rr = make_float3(Q[0][j+1][i] + 0.5f*Qy[0][j+1][i],
|
||||
Q[2][j+1][i] + 0.5f*Qy[2][j+1][i],
|
||||
Q[1][j+1][i] + 0.5f*Qy[1][j+1][i]);
|
||||
|
||||
const float3 Q_ll = make_float3(Q[0][j][i] - 0.5f*Qy[0][j][i],
|
||||
Q[2][j][i] - 0.5f*Qy[2][j][i],
|
||||
Q[1][j][i] - 0.5f*Qy[1][j][i]);
|
||||
const float3 Q_lr = make_float3(Q[0][j][i] + 0.5f*Qy[0][j][i],
|
||||
Q[2][j][i] + 0.5f*Qy[2][j][i],
|
||||
Q[1][j][i] + 0.5f*Qy[1][j][i]);
|
||||
|
||||
//Evolve half a timestep (predictor step)
|
||||
const float3 Q_r_bar = Q_rl + dt_/(2.0f*dy_) * (F_func(Q_rl, g_) - F_func(Q_rr, g_));
|
||||
const float3 Q_l_bar = Q_lr + dt_/(2.0f*dy_) * (F_func(Q_ll, g_) - F_func(Q_lr, g_));
|
||||
|
||||
// Compute flux based on prediction
|
||||
const float3 flux = HLL_flux(Q_l_bar, Q_r_bar, g_);
|
||||
|
||||
//Write to shared memory
|
||||
//Note that we here swap hu and hv back to the original
|
||||
G[0][j][i] = flux.x;
|
||||
G[1][j][i] = flux.z;
|
||||
G[2][j][i] = flux.y;
|
||||
}
|
||||
}
|
||||
}
extern "C" {
|
||||
__global__ void HLL2Kernel(
|
||||
int nx_, int ny_,
|
||||
float dx_, float dy_, float dt_,
|
||||
float g_,
|
||||
|
||||
float theta_,
|
||||
|
||||
int step_,
|
||||
int boundary_conditions_,
|
||||
|
||||
//Input h^n
|
||||
float* h0_ptr_, int h0_pitch_,
|
||||
float* hu0_ptr_, int hu0_pitch_,
|
||||
float* hv0_ptr_, int hv0_pitch_,
|
||||
|
||||
//Output h^{n+1}
|
||||
float* h1_ptr_, int h1_pitch_,
|
||||
float* hu1_ptr_, int hu1_pitch_,
|
||||
float* hv1_ptr_, int hv1_pitch_,
|
||||
|
||||
//Output CFL
|
||||
float* cfl_) {
|
||||
|
||||
const unsigned int w = BLOCK_WIDTH;
|
||||
const unsigned int h = BLOCK_HEIGHT;
|
||||
const unsigned int gc_x = 2;
|
||||
const unsigned int gc_y = 2;
|
||||
const unsigned int vars = 3;
|
||||
|
||||
//Shared memory variables
|
||||
__shared__ float Q[3][h+4][w+4];
|
||||
__shared__ float Qx[3][h+4][w+4];
|
||||
__shared__ float F[3][h+4][w+4];
|
||||
|
||||
//Read into shared memory
|
||||
readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
|
||||
|
||||
//Step 0 => evolve x first, then y
|
||||
if (step_ == 0) {
|
||||
//Compute fluxes along the x axis and evolve
|
||||
minmodSlopeX<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxF(Q, Qx, F, g_, dx_, dt_);
|
||||
__syncthreads();
|
||||
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Compute fluxes along the y axis and evolve
|
||||
minmodSlopeY<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxG(Q, Qx, F, g_, dy_, dt_);
|
||||
__syncthreads();
|
||||
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
|
||||
__syncthreads();
|
||||
}
|
||||
//Step 1 => evolve y first, then x
|
||||
else {
|
||||
//Compute fluxes along the y axis and evolve
|
||||
minmodSlopeY<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxG(Q, Qx, F, g_, dy_, dt_);
|
||||
__syncthreads();
|
||||
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Compute fluxes along the x axis and evolve
|
||||
minmodSlopeX<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxF(Q, Qx, F, g_, dx_, dt_);
|
||||
__syncthreads();
|
||||
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
|
||||
__syncthreads();
|
||||
}
// Write to main memory for all internal cells
|
||||
writeBlock<w, h, gc_x, gc_y>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
|
||||
|
||||
//Compute the CFL for this block
|
||||
if (cfl_ != NULL) {
|
||||
writeCfl<w, h, gc_x, gc_y, vars>(Q, F[0], nx_, ny_, dx_, dy_, g_, cfl_);
|
||||
}
|
||||
}
|
||||
|
||||
} // extern "C"
|
@ -1,233 +0,0 @@
|
||||
/*
|
||||
This GPU kernel implements the Kurganov-Petrova numerical scheme
|
||||
for the shallow water equations, described in
|
||||
A. Kurganov & Guergana Petrova
|
||||
A Second-Order Well-Balanced Positivity Preserving Central-Upwind
|
||||
Scheme for the Saint-Venant System Communications in Mathematical
|
||||
Sciences, 5 (2007), 133-160.
|
||||
|
||||
Copyright (C) 2016 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
#include "common.h"
|
||||
#include "SWECommon.h"
|
||||
#include "limiters.h"
__device__
|
||||
void computeFluxF(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float Qx[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
float F[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1],
|
||||
const float g_) {
|
||||
//Index of thread within block
|
||||
const int tx = threadIdx.x;
|
||||
const int ty = threadIdx.y;
|
||||
|
||||
{
|
||||
int j=ty;
|
||||
const int l = j + 2; //Skip ghost cells
|
||||
for (int i=tx; i<BLOCK_WIDTH+1; i+=BLOCK_WIDTH) {
|
||||
const int k = i + 1;
|
||||
// Q at interface from the right and left
|
||||
const float3 Qp = make_float3(Q[0][l][k+1] - 0.5f*Qx[0][j][i+1],
|
||||
Q[1][l][k+1] - 0.5f*Qx[1][j][i+1],
|
||||
Q[2][l][k+1] - 0.5f*Qx[2][j][i+1]);
|
||||
const float3 Qm = make_float3(Q[0][l][k ] + 0.5f*Qx[0][j][i ],
|
||||
Q[1][l][k ] + 0.5f*Qx[1][j][i ],
|
||||
Q[2][l][k ] + 0.5f*Qx[2][j][i ]);
|
||||
|
||||
// Computed flux
|
||||
const float3 flux = CentralUpwindFlux(Qm, Qp, g_);
|
||||
F[0][j][i] = flux.x;
|
||||
F[1][j][i] = flux.y;
|
||||
F[2][j][i] = flux.z;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
__device__
|
||||
void computeFluxG(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float Qy[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
float G[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1],
|
||||
const float g_) {
|
||||
//Index of thread within block
|
||||
const int tx = threadIdx.x;
|
||||
const int ty = threadIdx.y;
|
||||
|
||||
for (int j=ty; j<BLOCK_HEIGHT+1; j+=BLOCK_HEIGHT) {
|
||||
const int l = j + 1;
|
||||
{
|
||||
int i=tx;
|
||||
const int k = i + 2; //Skip ghost cells
|
||||
// Q at interface from the right and left
|
||||
// Note that we swap hu and hv
|
||||
const float3 Qp = make_float3(Q[0][l+1][k] - 0.5f*Qy[0][j+1][i],
|
||||
Q[2][l+1][k] - 0.5f*Qy[2][j+1][i],
|
||||
Q[1][l+1][k] - 0.5f*Qy[1][j+1][i]);
|
||||
const float3 Qm = make_float3(Q[0][l ][k] + 0.5f*Qy[0][j ][i],
|
||||
Q[2][l ][k] + 0.5f*Qy[2][j ][i],
|
||||
Q[1][l ][k] + 0.5f*Qy[1][j ][i]);
|
||||
|
||||
// Computed flux
|
||||
// Note that we swap back
|
||||
const float3 flux = CentralUpwindFlux(Qm, Qp, g_);
|
||||
G[0][j][i] = flux.x;
|
||||
G[1][j][i] = flux.z;
|
||||
G[2][j][i] = flux.y;
|
||||
}
|
||||
}
|
||||
}
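/**
 * Reconstructs a minmod slope for a whole block along the abscissa
 */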
__device__ void minmodSlopeX(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float Qx[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
const float theta_) {
|
||||
//Reconstruct slopes along x axis
|
||||
for (int p=0; p<3; ++p) {
|
||||
{
|
||||
const int j = threadIdx.y+2;
|
||||
for (int i=threadIdx.x+1; i<BLOCK_WIDTH+3; i+=BLOCK_WIDTH) {
|
||||
Qx[p][j-2][i-1] = minmodSlope(Q[p][j][i-1], Q[p][j][i], Q[p][j][i+1], theta_);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Reconstructs a minmod slope for a whole block along the ordinate
|
||||
*/
|
||||
__device__ void minmodSlopeY(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float Qy[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
const float theta_) {
|
||||
//Reconstruct slopes along y axis
|
||||
for (int p=0; p<3; ++p) {
|
||||
const int i = threadIdx.x + 2;
|
||||
for (int j=threadIdx.y+1; j<BLOCK_HEIGHT+3; j+=BLOCK_HEIGHT) {
|
||||
{
|
||||
Qy[p][j-1][i-2] = minmodSlope(Q[p][j-1][i], Q[p][j][i], Q[p][j+1][i], theta_);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
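minmodSlope itself lives in limiters.h and is not part of this file. For reference, a conventional generalized-minmod slope, which is what a call like minmodSlope(Q_left, Q_center, Q_right, theta) is expected to compute, can be sketched as follows (an illustration, not the project's actual definition):

__device__ float minmodSlopeSketch(float q_left, float q_center, float q_right, float theta)
{
    const float a = theta * (q_center - q_left);   // scaled backward difference
    const float b = 0.5f  * (q_right - q_left);    // central difference
    const float c = theta * (q_right - q_center);  // scaled forward difference

    // minmod: zero when the arguments disagree in sign, otherwise the one
    // smallest in magnitude; theta in [1,2] trades dissipation for oscillations.
    if (a > 0.0f && b > 0.0f && c > 0.0f) return fminf(a, fminf(b, c));
    if (a < 0.0f && b < 0.0f && c < 0.0f) return fmaxf(a, fmaxf(b, c));
    return 0.0f;
}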
|
||||
|
||||
|
||||
/**
|
||||
* This unsplit kernel computes the 2D numerical scheme with a TVD RK2 time integration scheme
|
||||
*/
|
||||
extern "C" {
|
||||
__global__ void KP07Kernel(
|
||||
int nx_, int ny_,
|
||||
float dx_, float dy_, float dt_,
|
||||
float g_,
|
||||
|
||||
float theta_,
|
||||
|
||||
int step_order_,
|
||||
int boundary_conditions_,
|
||||
|
||||
//Input h^n
|
||||
float* h0_ptr_, int h0_pitch_,
|
||||
float* hu0_ptr_, int hu0_pitch_,
|
||||
float* hv0_ptr_, int hv0_pitch_,
|
||||
|
||||
//Output h^{n+1}
|
||||
float* h1_ptr_, int h1_pitch_,
|
||||
float* hu1_ptr_, int hu1_pitch_,
|
||||
float* hv1_ptr_, int hv1_pitch_,
|
||||
|
||||
//Output CFL
|
||||
float* cfl_) {
|
||||
const unsigned int w = BLOCK_WIDTH;
|
||||
const unsigned int h = BLOCK_HEIGHT;
|
||||
const unsigned int gc_x = 2;
|
||||
const unsigned int gc_y = 2;
|
||||
const unsigned int vars = 3;
|
||||
|
||||
//Index of thread within block
|
||||
const int tx = threadIdx.x;
|
||||
const int ty = threadIdx.y;
|
||||
|
||||
//Index of cell within domain
|
||||
const int ti = blockDim.x*blockIdx.x + threadIdx.x + 2; //Skip global ghost cells, i.e., +2
|
||||
const int tj = blockDim.y*blockIdx.y + threadIdx.y + 2;
|
||||
|
||||
//Shared memory variables
|
||||
__shared__ float Q[3][h+4][w+4];
|
||||
__shared__ float Qx[3][h+2][w+2];
|
||||
__shared__ float Qy[3][h+2][w+2];
|
||||
__shared__ float F[3][h+1][w+1];
|
||||
__shared__ float G[3][h+1][w+1];
|
||||
|
||||
|
||||
|
||||
//Read into shared memory
|
||||
readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
|
||||
|
||||
|
||||
//Reconstruct slopes along x and y axis
|
||||
minmodSlopeX(Q, Qx, theta_);
|
||||
minmodSlopeY(Q, Qy, theta_);
|
||||
__syncthreads();
|
||||
|
||||
|
||||
//Compute fluxes along the x and y axis
|
||||
computeFluxF(Q, Qx, F, g_);
|
||||
computeFluxG(Q, Qy, G, g_);
|
||||
__syncthreads();
|
||||
|
||||
|
||||
//Sum fluxes and advance in time for all internal cells
|
||||
if (ti > 1 && ti < nx_+2 && tj > 1 && tj < ny_+2) {
|
||||
const int i = tx + 2; //Skip local ghost cells, i.e., +2
|
||||
const int j = ty + 2;
|
||||
|
||||
Q[0][j][i] += (F[0][ty][tx] - F[0][ty ][tx+1]) * dt_ / dx_
|
||||
+ (G[0][ty][tx] - G[0][ty+1][tx ]) * dt_ / dy_;
|
||||
Q[1][j][i] += (F[1][ty][tx] - F[1][ty ][tx+1]) * dt_ / dx_
|
||||
+ (G[1][ty][tx] - G[1][ty+1][tx ]) * dt_ / dy_;
|
||||
Q[2][j][i] += (F[2][ty][tx] - F[2][ty ][tx+1]) * dt_ / dx_
|
||||
+ (G[2][ty][tx] - G[2][ty+1][tx ]) * dt_ / dy_;
|
||||
|
||||
float* const h_row = (float*) ((char*) h1_ptr_ + h1_pitch_*tj);
|
||||
float* const hu_row = (float*) ((char*) hu1_ptr_ + hu1_pitch_*tj);
|
||||
float* const hv_row = (float*) ((char*) hv1_ptr_ + hv1_pitch_*tj);
|
||||
|
||||
if (getOrder(step_order_) == 2 && getStep(step_order_) == 1) {
|
||||
//Write to main memory
|
||||
h_row[ti] = 0.5f*(h_row[ti] + Q[0][j][i]);
|
||||
hu_row[ti] = 0.5f*(hu_row[ti] + Q[1][j][i]);
|
||||
hv_row[ti] = 0.5f*(hv_row[ti] + Q[2][j][i]);
|
||||
}
|
||||
else {
|
||||
h_row[ti] = Q[0][j][i];
|
||||
hu_row[ti] = Q[1][j][i];
|
||||
hv_row[ti] = Q[2][j][i];
|
||||
}
|
||||
}
|
||||
|
||||
//Compute the CFL for this block
|
||||
if (cfl_ != NULL) {
|
||||
writeCfl<w, h, gc_x, gc_y, vars>(Q, Q[0], nx_, ny_, dx_, dy_, g_, cfl_);
|
||||
}
|
||||
}
|
||||
} //extern "C"
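For context, a kernel with KP07Kernel's signature is launched from the host with one thread per interior cell. The sketch below is illustrative only: the block size, the launcher name and the use of hipModuleLaunchKernel are assumptions rather than the project's actual host code, but it shows how the argument list above maps onto a stream-ordered launch.

#include <hip/hip_runtime.h>

// Illustrative host-side launcher (assumes BLOCK_WIDTH = BLOCK_HEIGHT = 16 and
// that `kernel` was obtained beforehand, e.g. with hipModuleGetFunction).
void launchKP07(hipFunction_t kernel, hipStream_t stream,
                int nx, int ny, float dx, float dy, float dt,
                float g, float theta, int step_order, int bc,
                float* h0, int h0_pitch, float* hu0, int hu0_pitch, float* hv0, int hv0_pitch,
                float* h1, int h1_pitch, float* hu1, int hu1_pitch, float* hv1, int hv1_pitch,
                float* cfl)
{
    const unsigned int bw = 16, bh = 16;          // must match BLOCK_WIDTH/BLOCK_HEIGHT
    const unsigned int gx = (nx + bw - 1) / bw;   // one block per tile of interior cells
    const unsigned int gy = (ny + bh - 1) / bh;

    void* args[] = {
        &nx, &ny, &dx, &dy, &dt, &g, &theta, &step_order, &bc,
        &h0, &h0_pitch, &hu0, &hu0_pitch, &hv0, &hv0_pitch,
        &h1, &h1_pitch, &hu1, &hu1_pitch, &hv1, &hv1_pitch,
        &cfl
    };

    // Shared memory is declared statically inside the kernel, so 0 dynamic bytes;
    // the launch is asynchronous with respect to the host on the given stream.
    (void)hipModuleLaunchKernel(kernel, gx, gy, 1, bw, bh, 1, 0, stream, args, nullptr);
}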
|
@ -1,234 +0,0 @@
|
||||
#include "hip/hip_runtime.h"
|
||||
/*
|
||||
This kernel implements the Kurganov-Petrova numerical scheme
|
||||
for the shallow water equations, described in
|
||||
A. Kurganov & Guergana Petrova
|
||||
A Second-Order Well-Balanced Positivity Preserving Central-Upwind
|
||||
Scheme for the Saint-Venant System. Communications in Mathematical
|
||||
Sciences, 5 (2007), 133-160.
|
||||
|
||||
Copyright (C) 2016 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
|
||||
|
||||
#include "common.h"
|
||||
#include "SWECommon.h"
|
||||
#include "limiters.h"
|
||||
|
||||
|
||||
__device__
|
||||
void computeFluxF(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float Qx[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
float F[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1],
|
||||
const float g_) {
|
||||
//Index of thread within block
|
||||
const int tx = threadIdx.x;
|
||||
const int ty = threadIdx.y;
|
||||
|
||||
{
|
||||
int j=ty;
|
||||
const int l = j + 2; //Skip ghost cells
|
||||
for (int i=tx; i<BLOCK_WIDTH+1; i+=BLOCK_WIDTH) {
|
||||
const int k = i + 1;
|
||||
// Q at interface from the right and left
|
||||
const float3 Qp = make_float3(Q[0][l][k+1] - 0.5f*Qx[0][j][i+1],
|
||||
Q[1][l][k+1] - 0.5f*Qx[1][j][i+1],
|
||||
Q[2][l][k+1] - 0.5f*Qx[2][j][i+1]);
|
||||
const float3 Qm = make_float3(Q[0][l][k ] + 0.5f*Qx[0][j][i ],
|
||||
Q[1][l][k ] + 0.5f*Qx[1][j][i ],
|
||||
Q[2][l][k ] + 0.5f*Qx[2][j][i ]);
|
||||
|
||||
// Computed flux
|
||||
const float3 flux = CentralUpwindFlux(Qm, Qp, g_);
|
||||
F[0][j][i] = flux.x;
|
||||
F[1][j][i] = flux.y;
|
||||
F[2][j][i] = flux.z;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
__device__
|
||||
void computeFluxG(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float Qy[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
float G[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1],
|
||||
const float g_) {
|
||||
//Index of thread within block
|
||||
const int tx = threadIdx.x;
|
||||
const int ty = threadIdx.y;
|
||||
|
||||
for (int j=ty; j<BLOCK_HEIGHT+1; j+=BLOCK_HEIGHT) {
|
||||
const int l = j + 1;
|
||||
{
|
||||
int i=tx;
|
||||
const int k = i + 2; //Skip ghost cells
|
||||
// Q at interface from the right and left
|
||||
// Note that we swap hu and hv
|
||||
const float3 Qp = make_float3(Q[0][l+1][k] - 0.5f*Qy[0][j+1][i],
|
||||
Q[2][l+1][k] - 0.5f*Qy[2][j+1][i],
|
||||
Q[1][l+1][k] - 0.5f*Qy[1][j+1][i]);
|
||||
const float3 Qm = make_float3(Q[0][l ][k] + 0.5f*Qy[0][j ][i],
|
||||
Q[2][l ][k] + 0.5f*Qy[2][j ][i],
|
||||
Q[1][l ][k] + 0.5f*Qy[1][j ][i]);
|
||||
|
||||
// Computed flux
|
||||
// Note that we swap back
|
||||
const float3 flux = CentralUpwindFlux(Qm, Qp, g_);
|
||||
G[0][j][i] = flux.x;
|
||||
G[1][j][i] = flux.z;
|
||||
G[2][j][i] = flux.y;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
__device__ void minmodSlopeX(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float Qx[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
const float theta_) {
|
||||
//Reconstruct slopes along x axis
|
||||
for (int p=0; p<3; ++p) {
|
||||
{
|
||||
const int j = threadIdx.y+2;
|
||||
for (int i=threadIdx.x+1; i<BLOCK_WIDTH+3; i+=BLOCK_WIDTH) {
|
||||
Qx[p][j-2][i-1] = minmodSlope(Q[p][j][i-1], Q[p][j][i], Q[p][j][i+1], theta_);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Reconstructs a minmod slope for a whole block along the ordinate
|
||||
*/
|
||||
__device__ void minmodSlopeY(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float Qy[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
const float theta_) {
|
||||
//Reconstruct slopes along y axis
|
||||
for (int p=0; p<3; ++p) {
|
||||
const int i = threadIdx.x + 2;
|
||||
for (int j=threadIdx.y+1; j<BLOCK_HEIGHT+3; j+=BLOCK_HEIGHT) {
|
||||
{
|
||||
Qy[p][j-1][i-2] = minmodSlope(Q[p][j-1][i], Q[p][j][i], Q[p][j+1][i], theta_);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* This unsplit kernel computes the 2D numerical scheme with a TVD RK2 time integration scheme
|
||||
*/
|
||||
extern "C" {
|
||||
__global__ void KP07Kernel(
|
||||
int nx_, int ny_,
|
||||
float dx_, float dy_, float dt_,
|
||||
float g_,
|
||||
|
||||
float theta_,
|
||||
|
||||
int step_order_,
|
||||
int boundary_conditions_,
|
||||
|
||||
//Input h^n
|
||||
float* h0_ptr_, int h0_pitch_,
|
||||
float* hu0_ptr_, int hu0_pitch_,
|
||||
float* hv0_ptr_, int hv0_pitch_,
|
||||
|
||||
//Output h^{n+1}
|
||||
float* h1_ptr_, int h1_pitch_,
|
||||
float* hu1_ptr_, int hu1_pitch_,
|
||||
float* hv1_ptr_, int hv1_pitch_,
|
||||
|
||||
//Output CFL
|
||||
float* cfl_) {
|
||||
const unsigned int w = BLOCK_WIDTH;
|
||||
const unsigned int h = BLOCK_HEIGHT;
|
||||
const unsigned int gc_x = 2;
|
||||
const unsigned int gc_y = 2;
|
||||
const unsigned int vars = 3;
|
||||
|
||||
//Index of thread within block
|
||||
const int tx = threadIdx.x;
|
||||
const int ty = threadIdx.y;
|
||||
|
||||
//Index of cell within domain
|
||||
const int ti = blockDim.x*blockIdx.x + threadIdx.x + 2; //Skip global ghost cells, i.e., +2
|
||||
const int tj = blockDim.y*blockIdx.y + threadIdx.y + 2;
|
||||
|
||||
//Shared memory variables
|
||||
__shared__ float Q[3][h+4][w+4];
|
||||
__shared__ float Qx[3][h+2][w+2];
|
||||
__shared__ float Qy[3][h+2][w+2];
|
||||
__shared__ float F[3][h+1][w+1];
|
||||
__shared__ float G[3][h+1][w+1];
|
||||
|
||||
|
||||
|
||||
//Read into shared memory
|
||||
readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
|
||||
|
||||
|
||||
//Reconstruct slopes along x and y axis
|
||||
minmodSlopeX(Q, Qx, theta_);
|
||||
minmodSlopeY(Q, Qy, theta_);
|
||||
__syncthreads();
|
||||
|
||||
|
||||
//Compute fluxes along the x and y axis
|
||||
computeFluxF(Q, Qx, F, g_);
|
||||
computeFluxG(Q, Qy, G, g_);
|
||||
__syncthreads();
|
||||
|
||||
|
||||
//Sum fluxes and advance in time for all internal cells
|
||||
if (ti > 1 && ti < nx_+2 && tj > 1 && tj < ny_+2) {
|
||||
const int i = tx + 2; //Skip local ghost cells, i.e., +2
|
||||
const int j = ty + 2;
|
||||
|
||||
Q[0][j][i] += (F[0][ty][tx] - F[0][ty ][tx+1]) * dt_ / dx_
|
||||
+ (G[0][ty][tx] - G[0][ty+1][tx ]) * dt_ / dy_;
|
||||
Q[1][j][i] += (F[1][ty][tx] - F[1][ty ][tx+1]) * dt_ / dx_
|
||||
+ (G[1][ty][tx] - G[1][ty+1][tx ]) * dt_ / dy_;
|
||||
Q[2][j][i] += (F[2][ty][tx] - F[2][ty ][tx+1]) * dt_ / dx_
|
||||
+ (G[2][ty][tx] - G[2][ty+1][tx ]) * dt_ / dy_;
|
||||
|
||||
float* const h_row = (float*) ((char*) h1_ptr_ + h1_pitch_*tj);
|
||||
float* const hu_row = (float*) ((char*) hu1_ptr_ + hu1_pitch_*tj);
|
||||
float* const hv_row = (float*) ((char*) hv1_ptr_ + hv1_pitch_*tj);
|
||||
|
||||
if (getOrder(step_order_) == 2 && getStep(step_order_) == 1) {
|
||||
//Write to main memory
|
||||
h_row[ti] = 0.5f*(h_row[ti] + Q[0][j][i]);
|
||||
hu_row[ti] = 0.5f*(hu_row[ti] + Q[1][j][i]);
|
||||
hv_row[ti] = 0.5f*(hv_row[ti] + Q[2][j][i]);
|
||||
}
|
||||
else {
|
||||
h_row[ti] = Q[0][j][i];
|
||||
hu_row[ti] = Q[1][j][i];
|
||||
hv_row[ti] = Q[2][j][i];
|
||||
}
|
||||
}
|
||||
|
||||
//Compute the CFL for this block
|
||||
if (cfl_ != NULL) {
|
||||
writeCfl<w, h, gc_x, gc_y, vars>(Q, Q[0], nx_, ny_, dx_, dy_, g_, cfl_);
|
||||
}
|
||||
}
|
||||
} //extern "C"
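The step_order_ branch above realises a two-stage TVD-RK2 (Heun) update. As a scalar illustration of the write-back logic (assuming, as the averaging implies, that the output buffer still holds the start-of-step solution during the second stage):

// u_start : solution at the start of the full timestep
// u_euler : forward-Euler update of the kernel's current input (at the second
//           stage the input is the first-stage result, so u_euler is the
//           twice-advanced value)
__device__ float rk2WriteBack(float u_start, float u_euler, int order, int step)
{
    if (order == 2 && step == 1) {
        return 0.5f * (u_start + u_euler);   // second stage: average
    }
    return u_euler;                          // Euler step, or first stage of RK2
}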
|
@ -1,216 +0,0 @@
|
||||
/*
|
||||
This kernel implements the Kurganov-Petrova numerical scheme
|
||||
for the shallow water equations, described in
|
||||
A. Kurganov & Guergana Petrova
|
||||
A Second-Order Well-Balanced Positivity Preserving Central-Upwind
|
||||
Scheme for the Saint-Venant System. Communications in Mathematical
|
||||
Sciences, 5 (2007), 133-160.
|
||||
|
||||
Copyright (C) 2016 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
|
||||
|
||||
#include "common.h"
|
||||
#include "SWECommon.h"
|
||||
#include "limiters.h"
|
||||
|
||||
|
||||
template <int w, int h, int gc_x, int gc_y>
|
||||
__device__
|
||||
void computeFluxF(float Q[3][h+2*gc_y][w+2*gc_x],
|
||||
float Qx[3][h+2*gc_y][w+2*gc_x],
|
||||
float F[3][h+2*gc_y][w+2*gc_x],
|
||||
const float g_, const float dx_, const float dt_) {
|
||||
for (int j=threadIdx.y; j<h+2*gc_y; j+=h) {
|
||||
for (int i=threadIdx.x+1; i<w+2*gc_x-2; i+=w) {
|
||||
// Reconstruct point values of Q at the left and right hand side
|
||||
// of the cell for both the left (i) and right (i+1) cell
|
||||
const float3 Q_rl = make_float3(Q[0][j][i+1] - 0.5f*Qx[0][j][i+1],
|
||||
Q[1][j][i+1] - 0.5f*Qx[1][j][i+1],
|
||||
Q[2][j][i+1] - 0.5f*Qx[2][j][i+1]);
|
||||
const float3 Q_rr = make_float3(Q[0][j][i+1] + 0.5f*Qx[0][j][i+1],
|
||||
Q[1][j][i+1] + 0.5f*Qx[1][j][i+1],
|
||||
Q[2][j][i+1] + 0.5f*Qx[2][j][i+1]);
|
||||
|
||||
const float3 Q_ll = make_float3(Q[0][j][i] - 0.5f*Qx[0][j][i],
|
||||
Q[1][j][i] - 0.5f*Qx[1][j][i],
|
||||
Q[2][j][i] - 0.5f*Qx[2][j][i]);
|
||||
const float3 Q_lr = make_float3(Q[0][j][i] + 0.5f*Qx[0][j][i],
|
||||
Q[1][j][i] + 0.5f*Qx[1][j][i],
|
||||
Q[2][j][i] + 0.5f*Qx[2][j][i]);
|
||||
|
||||
//Evolve half a timestep (predictor step)
|
||||
const float3 Q_r_bar = Q_rl + dt_/(2.0f*dx_) * (F_func(Q_rl, g_) - F_func(Q_rr, g_));
|
||||
const float3 Q_l_bar = Q_lr + dt_/(2.0f*dx_) * (F_func(Q_ll, g_) - F_func(Q_lr, g_));
|
||||
|
||||
// Compute flux based on prediction
|
||||
const float3 flux = CentralUpwindFlux(Q_l_bar, Q_r_bar, g_);
|
||||
|
||||
//Write to shared memory
|
||||
F[0][j][i] = flux.x;
|
||||
F[1][j][i] = flux.y;
|
||||
F[2][j][i] = flux.z;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <int w, int h, int gc_x, int gc_y>
|
||||
__device__
|
||||
void computeFluxG(float Q[3][h+2*gc_y][w+2*gc_x],
|
||||
float Qy[3][h+2*gc_y][w+2*gc_x],
|
||||
float G[3][h+2*gc_y][w+2*gc_x],
|
||||
const float g_, const float dy_, const float dt_) {
|
||||
for (int j=threadIdx.y+1; j<h+2*gc_y-2; j+=h) {
|
||||
for (int i=threadIdx.x; i<w+2*gc_x; i+=w) {
|
||||
// Reconstruct point values of Q at the left and right hand side
|
||||
// of the cell for both the left (i) and right (i+1) cell
|
||||
//Note that hu and hv are swapped ("transposing" the domain)!
|
||||
const float3 Q_rl = make_float3(Q[0][j+1][i] - 0.5f*Qy[0][j+1][i],
|
||||
Q[2][j+1][i] - 0.5f*Qy[2][j+1][i],
|
||||
Q[1][j+1][i] - 0.5f*Qy[1][j+1][i]);
|
||||
const float3 Q_rr = make_float3(Q[0][j+1][i] + 0.5f*Qy[0][j+1][i],
|
||||
Q[2][j+1][i] + 0.5f*Qy[2][j+1][i],
|
||||
Q[1][j+1][i] + 0.5f*Qy[1][j+1][i]);
|
||||
|
||||
const float3 Q_ll = make_float3(Q[0][j][i] - 0.5f*Qy[0][j][i],
|
||||
Q[2][j][i] - 0.5f*Qy[2][j][i],
|
||||
Q[1][j][i] - 0.5f*Qy[1][j][i]);
|
||||
const float3 Q_lr = make_float3(Q[0][j][i] + 0.5f*Qy[0][j][i],
|
||||
Q[2][j][i] + 0.5f*Qy[2][j][i],
|
||||
Q[1][j][i] + 0.5f*Qy[1][j][i]);
|
||||
|
||||
//Evolve half a timestep (predictor step)
|
||||
const float3 Q_r_bar = Q_rl + dt_/(2.0f*dy_) * (F_func(Q_rl, g_) - F_func(Q_rr, g_));
|
||||
const float3 Q_l_bar = Q_lr + dt_/(2.0f*dy_) * (F_func(Q_ll, g_) - F_func(Q_lr, g_));
|
||||
|
||||
// Compute flux based on prediction
|
||||
const float3 flux = CentralUpwindFlux(Q_l_bar, Q_r_bar, g_);
|
||||
|
||||
//Write to shared memory
|
||||
//Note that we here swap hu and hv back to the original
|
||||
G[0][j][i] = flux.x;
|
||||
G[1][j][i] = flux.z;
|
||||
G[2][j][i] = flux.y;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* This dimensionally split kernel computes the 2D numerical scheme with a TVD RK2 time integration scheme
|
||||
*/
|
||||
extern "C" {
|
||||
|
||||
|
||||
|
||||
|
||||
__global__ void KP07DimsplitKernel(
|
||||
int nx_, int ny_,
|
||||
float dx_, float dy_, float dt_,
|
||||
float g_,
|
||||
|
||||
float theta_,
|
||||
|
||||
int step_,
|
||||
int boundary_conditions_,
|
||||
|
||||
//Input h^n
|
||||
float* h0_ptr_, int h0_pitch_,
|
||||
float* hu0_ptr_, int hu0_pitch_,
|
||||
float* hv0_ptr_, int hv0_pitch_,
|
||||
|
||||
//Output h^{n+1}
|
||||
float* h1_ptr_, int h1_pitch_,
|
||||
float* hu1_ptr_, int hu1_pitch_,
|
||||
float* hv1_ptr_, int hv1_pitch_,
|
||||
|
||||
//Output CFL
|
||||
float* cfl_) {
|
||||
const unsigned int w = BLOCK_WIDTH;
|
||||
const unsigned int h = BLOCK_HEIGHT;
|
||||
const unsigned int gc_x = 2;
|
||||
const unsigned int gc_y = 2;
|
||||
const unsigned int vars = 3;
|
||||
|
||||
//Shared memory variables
|
||||
__shared__ float Q[vars][h+2*gc_y][w+2*gc_x];
|
||||
__shared__ float Qx[vars][h+2*gc_y][w+2*gc_x];
|
||||
__shared__ float F[vars][h+2*gc_y][w+2*gc_x];
|
||||
|
||||
//Read into shared memory
|
||||
readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
|
||||
|
||||
if (step_ == 0) {
|
||||
//Along X
|
||||
minmodSlopeX<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxF<w, h, gc_x, gc_y>(Q, Qx, F, g_, dx_, dt_);
|
||||
__syncthreads();
|
||||
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Along Y
|
||||
minmodSlopeY<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxG<w, h, gc_x, gc_y>(Q, Qx, F, g_, dy_, dt_);
|
||||
__syncthreads();
|
||||
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
|
||||
__syncthreads();
|
||||
}
|
||||
else {
|
||||
//Along Y
|
||||
minmodSlopeY<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxG<w, h, gc_x, gc_y>(Q, Qx, F, g_, dy_, dt_);
|
||||
__syncthreads();
|
||||
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Along X
|
||||
minmodSlopeX<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxF<w, h, gc_x, gc_y>(Q, Qx, F, g_, dx_, dt_);
|
||||
__syncthreads();
|
||||
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
|
||||
__syncthreads();
|
||||
}
|
||||
|
||||
// Write to main memory for all internal cells
|
||||
writeBlock<w, h, gc_x, gc_y>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
|
||||
|
||||
//Compute the CFL for this block
|
||||
if (cfl_ != NULL) {
|
||||
writeCfl<w, h, gc_x, gc_y, vars>(Q, F[0], nx_, ny_, dx_, dy_, g_, cfl_);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
} // extern "C"
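Before the central-upwind flux is evaluated, computeFluxF/computeFluxG above advance each reconstructed face value half a timestep with the flux difference across its own cell, a MUSCL-Hancock style predictor. A scalar sketch of that predictor, using Burgers' flux f(q) = 0.5*q*q purely as an illustration:

__device__ __forceinline__ float burgersFlux(float q) { return 0.5f * q * q; }

// Advance the left face value of a cell by dt/2 using the flux difference
// between the cell's own two reconstructed face values (illustrative only).
__device__ float predictLeftFace(float q_left_face, float q_right_face, float dt, float dx)
{
    return q_left_face + dt / (2.0f * dx)
         * (burgersFlux(q_left_face) - burgersFlux(q_right_face));
}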
|
@ -1,217 +0,0 @@
|
||||
#include "hip/hip_runtime.h"
|
||||
/*
|
||||
This kernel implements the Kurganov-Petrova numerical scheme
|
||||
for the shallow water equations, described in
|
||||
A. Kurganov & Guergana Petrova
|
||||
A Second-Order Well-Balanced Positivity Preserving Central-Upwind
|
||||
Scheme for the Saint-Venant System. Communications in Mathematical
|
||||
Sciences, 5 (2007), 133-160.
|
||||
|
||||
Copyright (C) 2016 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
|
||||
|
||||
#include "common.h"
|
||||
#include "SWECommon.h"
|
||||
#include "limiters.h"
|
||||
|
||||
|
||||
template <int w, int h, int gc_x, int gc_y>
|
||||
__device__
|
||||
void computeFluxF(float Q[3][h+2*gc_y][w+2*gc_x],
|
||||
float Qx[3][h+2*gc_y][w+2*gc_x],
|
||||
float F[3][h+2*gc_y][w+2*gc_x],
|
||||
const float g_, const float dx_, const float dt_) {
|
||||
for (int j=threadIdx.y; j<h+2*gc_y; j+=h) {
|
||||
for (int i=threadIdx.x+1; i<w+2*gc_x-2; i+=w) {
|
||||
// Reconstruct point values of Q at the left and right hand side
|
||||
// of the cell for both the left (i) and right (i+1) cell
|
||||
const float3 Q_rl = make_float3(Q[0][j][i+1] - 0.5f*Qx[0][j][i+1],
|
||||
Q[1][j][i+1] - 0.5f*Qx[1][j][i+1],
|
||||
Q[2][j][i+1] - 0.5f*Qx[2][j][i+1]);
|
||||
const float3 Q_rr = make_float3(Q[0][j][i+1] + 0.5f*Qx[0][j][i+1],
|
||||
Q[1][j][i+1] + 0.5f*Qx[1][j][i+1],
|
||||
Q[2][j][i+1] + 0.5f*Qx[2][j][i+1]);
|
||||
|
||||
const float3 Q_ll = make_float3(Q[0][j][i] - 0.5f*Qx[0][j][i],
|
||||
Q[1][j][i] - 0.5f*Qx[1][j][i],
|
||||
Q[2][j][i] - 0.5f*Qx[2][j][i]);
|
||||
const float3 Q_lr = make_float3(Q[0][j][i] + 0.5f*Qx[0][j][i],
|
||||
Q[1][j][i] + 0.5f*Qx[1][j][i],
|
||||
Q[2][j][i] + 0.5f*Qx[2][j][i]);
|
||||
|
||||
//Evolve half a timestep (predictor step)
|
||||
const float3 Q_r_bar = Q_rl + dt_/(2.0f*dx_) * (F_func(Q_rl, g_) - F_func(Q_rr, g_));
|
||||
const float3 Q_l_bar = Q_lr + dt_/(2.0f*dx_) * (F_func(Q_ll, g_) - F_func(Q_lr, g_));
|
||||
|
||||
// Compute flux based on prediction
|
||||
const float3 flux = CentralUpwindFlux(Q_l_bar, Q_r_bar, g_);
|
||||
|
||||
//Write to shared memory
|
||||
F[0][j][i] = flux.x;
|
||||
F[1][j][i] = flux.y;
|
||||
F[2][j][i] = flux.z;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
template <int w, int h, int gc_x, int gc_y>
|
||||
__device__
|
||||
void computeFluxG(float Q[3][h+2*gc_y][w+2*gc_x],
|
||||
float Qy[3][h+2*gc_y][w+2*gc_x],
|
||||
float G[3][h+2*gc_y][w+2*gc_x],
|
||||
const float g_, const float dy_, const float dt_) {
|
||||
for (int j=threadIdx.y+1; j<h+2*gc_y-2; j+=h) {
|
||||
for (int i=threadIdx.x; i<w+2*gc_x; i+=w) {
|
||||
// Reconstruct point values of Q at the left and right hand side
|
||||
// of the cell for both the left (i) and right (i+1) cell
|
||||
//Note that hu and hv are swapped ("transposing" the domain)!
|
||||
const float3 Q_rl = make_float3(Q[0][j+1][i] - 0.5f*Qy[0][j+1][i],
|
||||
Q[2][j+1][i] - 0.5f*Qy[2][j+1][i],
|
||||
Q[1][j+1][i] - 0.5f*Qy[1][j+1][i]);
|
||||
const float3 Q_rr = make_float3(Q[0][j+1][i] + 0.5f*Qy[0][j+1][i],
|
||||
Q[2][j+1][i] + 0.5f*Qy[2][j+1][i],
|
||||
Q[1][j+1][i] + 0.5f*Qy[1][j+1][i]);
|
||||
|
||||
const float3 Q_ll = make_float3(Q[0][j][i] - 0.5f*Qy[0][j][i],
|
||||
Q[2][j][i] - 0.5f*Qy[2][j][i],
|
||||
Q[1][j][i] - 0.5f*Qy[1][j][i]);
|
||||
const float3 Q_lr = make_float3(Q[0][j][i] + 0.5f*Qy[0][j][i],
|
||||
Q[2][j][i] + 0.5f*Qy[2][j][i],
|
||||
Q[1][j][i] + 0.5f*Qy[1][j][i]);
|
||||
|
||||
//Evolve half a timestep (predictor step)
|
||||
const float3 Q_r_bar = Q_rl + dt_/(2.0f*dy_) * (F_func(Q_rl, g_) - F_func(Q_rr, g_));
|
||||
const float3 Q_l_bar = Q_lr + dt_/(2.0f*dy_) * (F_func(Q_ll, g_) - F_func(Q_lr, g_));
|
||||
|
||||
// Compute flux based on prediction
|
||||
const float3 flux = CentralUpwindFlux(Q_l_bar, Q_r_bar, g_);
|
||||
|
||||
//Write to shared memory
|
||||
//Note that we here swap hu and hv back to the original
|
||||
G[0][j][i] = flux.x;
|
||||
G[1][j][i] = flux.z;
|
||||
G[2][j][i] = flux.y;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* This dimensionally split kernel computes the 2D numerical scheme with a TVD RK2 time integration scheme
|
||||
*/
|
||||
extern "C" {
|
||||
|
||||
|
||||
|
||||
|
||||
__global__ void KP07DimsplitKernel(
|
||||
int nx_, int ny_,
|
||||
float dx_, float dy_, float dt_,
|
||||
float g_,
|
||||
|
||||
float theta_,
|
||||
|
||||
int step_,
|
||||
int boundary_conditions_,
|
||||
|
||||
//Input h^n
|
||||
float* h0_ptr_, int h0_pitch_,
|
||||
float* hu0_ptr_, int hu0_pitch_,
|
||||
float* hv0_ptr_, int hv0_pitch_,
|
||||
|
||||
//Output h^{n+1}
|
||||
float* h1_ptr_, int h1_pitch_,
|
||||
float* hu1_ptr_, int hu1_pitch_,
|
||||
float* hv1_ptr_, int hv1_pitch_,
|
||||
|
||||
//Output CFL
|
||||
float* cfl_) {
|
||||
const unsigned int w = BLOCK_WIDTH;
|
||||
const unsigned int h = BLOCK_HEIGHT;
|
||||
const unsigned int gc_x = 2;
|
||||
const unsigned int gc_y = 2;
|
||||
const unsigned int vars = 3;
|
||||
|
||||
//Shared memory variables
|
||||
__shared__ float Q[vars][h+2*gc_y][w+2*gc_x];
|
||||
__shared__ float Qx[vars][h+2*gc_y][w+2*gc_x];
|
||||
__shared__ float F[vars][h+2*gc_y][w+2*gc_x];
|
||||
|
||||
//Read into shared memory
|
||||
readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
|
||||
|
||||
if (step_ == 0) {
|
||||
//Along X
|
||||
minmodSlopeX<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxF<w, h, gc_x, gc_y>(Q, Qx, F, g_, dx_, dt_);
|
||||
__syncthreads();
|
||||
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Along Y
|
||||
minmodSlopeY<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxG<w, h, gc_x, gc_y>(Q, Qx, F, g_, dy_, dt_);
|
||||
__syncthreads();
|
||||
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
|
||||
__syncthreads();
|
||||
}
|
||||
else {
|
||||
//Along Y
|
||||
minmodSlopeY<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxG<w, h, gc_x, gc_y>(Q, Qx, F, g_, dy_, dt_);
|
||||
__syncthreads();
|
||||
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Along X
|
||||
minmodSlopeX<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxF<w, h, gc_x, gc_y>(Q, Qx, F, g_, dx_, dt_);
|
||||
__syncthreads();
|
||||
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
|
||||
__syncthreads();
|
||||
}
|
||||
|
||||
// Write to main memory for all internal cells
|
||||
writeBlock<w, h, gc_x, gc_y>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
|
||||
|
||||
//Compute the CFL for this block
|
||||
if (cfl_ != NULL) {
|
||||
writeCfl<w, h, gc_x, gc_y, vars>(Q, F[0], nx_, ny_, dx_, dy_, g_, cfl_);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
} // extern "C"
|
@ -1,168 +0,0 @@
|
||||
/*
|
||||
This kernel implements the classical Lax-Friedrichs scheme
|
||||
for the shallow water equations, with edge fluxes.
|
||||
|
||||
Copyright (C) 2016 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
|
||||
#include "common.h"
|
||||
#include "SWECommon.h"
|
||||
|
||||
|
||||
/**
|
||||
* Computes the flux along the x axis for all faces
|
||||
*/
|
||||
template <int block_width, int block_height>
|
||||
__device__
|
||||
void computeFluxF(float Q[3][block_height+2][block_width+2],
|
||||
float F[3][block_height][block_width+1],
|
||||
const float g_, const float dx_, const float dt_) {
|
||||
//Index of thread within block
|
||||
const int tx = threadIdx.x;
|
||||
const int ty = threadIdx.y;
|
||||
|
||||
{
|
||||
const int j=ty;
|
||||
const int l = j + 1; //Skip ghost cells
|
||||
for (int i=tx; i<block_width+1; i+=block_width) {
|
||||
const int k = i;
|
||||
|
||||
// Q at interface from the right and left
|
||||
const float3 Qp = make_float3(Q[0][l][k+1],
|
||||
Q[1][l][k+1],
|
||||
Q[2][l][k+1]);
|
||||
const float3 Qm = make_float3(Q[0][l][k],
|
||||
Q[1][l][k],
|
||||
Q[2][l][k]);
|
||||
|
||||
// Computed flux
|
||||
const float3 flux = LxF_2D_flux(Qm, Qp, g_, dx_, dt_);
|
||||
F[0][j][i] = flux.x;
|
||||
F[1][j][i] = flux.y;
|
||||
F[2][j][i] = flux.z;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Computes the flux along the y axis for all faces
|
||||
*/
|
||||
template <int block_width, int block_height>
|
||||
__device__
|
||||
void computeFluxG(float Q[3][block_height+2][block_width+2],
|
||||
float G[3][block_height+1][block_width],
|
||||
const float g_, const float dy_, const float dt_) {
|
||||
//Index of thread within block
|
||||
const int tx = threadIdx.x;
|
||||
const int ty = threadIdx.y;
|
||||
|
||||
for (int j=ty; j<block_height+1; j+=block_height) {
|
||||
const int l = j;
|
||||
{
|
||||
const int i=tx;
|
||||
const int k = i + 1; //Skip ghost cells
|
||||
|
||||
// Q at interface from the right and left
|
||||
// Note that we swap hu and hv
|
||||
const float3 Qp = make_float3(Q[0][l+1][k],
|
||||
Q[2][l+1][k],
|
||||
Q[1][l+1][k]);
|
||||
const float3 Qm = make_float3(Q[0][l][k],
|
||||
Q[2][l][k],
|
||||
Q[1][l][k]);
|
||||
|
||||
// Computed flux
|
||||
// Note that we swap back
|
||||
const float3 flux = LxF_2D_flux(Qm, Qp, g_, dy_, dt_);
|
||||
G[0][j][i] = flux.x;
|
||||
G[1][j][i] = flux.z;
|
||||
G[2][j][i] = flux.y;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
extern "C" {
|
||||
__global__
|
||||
void LxFKernel(
|
||||
int nx_, int ny_,
|
||||
float dx_, float dy_, float dt_,
|
||||
float g_,
|
||||
|
||||
int boundary_conditions_,
|
||||
|
||||
//Input h^n
|
||||
float* h0_ptr_, int h0_pitch_,
|
||||
float* hu0_ptr_, int hu0_pitch_,
|
||||
float* hv0_ptr_, int hv0_pitch_,
|
||||
|
||||
//Output h^{n+1}
|
||||
float* h1_ptr_, int h1_pitch_,
|
||||
float* hu1_ptr_, int hu1_pitch_,
|
||||
float* hv1_ptr_, int hv1_pitch_,
|
||||
|
||||
//Output CFL
|
||||
float* cfl_) {
|
||||
|
||||
const unsigned int w = BLOCK_WIDTH;
|
||||
const unsigned int h = BLOCK_HEIGHT;
|
||||
const unsigned int gc_x = 1;
|
||||
const unsigned int gc_y = 1;
|
||||
const unsigned int vars = 3;
|
||||
|
||||
__shared__ float Q[vars][h+2][w+2];
|
||||
__shared__ float F[vars][h ][w+1];
|
||||
__shared__ float G[vars][h+1][w ];
|
||||
|
||||
//Read from global memory
|
||||
readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
|
||||
|
||||
//Compute fluxes along the x and y axis
|
||||
computeFluxF<w, h>(Q, F, g_, dx_, dt_);
|
||||
computeFluxG<w, h>(Q, G, g_, dy_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Evolve for all cells
|
||||
const int tx = threadIdx.x;
|
||||
const int ty = threadIdx.y;
|
||||
const int i = tx + 1; //Skip local ghost cells, i.e., +1
|
||||
const int j = ty + 1;
|
||||
|
||||
Q[0][j][i] += (F[0][ty][tx] - F[0][ty ][tx+1]) * dt_ / dx_
|
||||
+ (G[0][ty][tx] - G[0][ty+1][tx ]) * dt_ / dy_;
|
||||
Q[1][j][i] += (F[1][ty][tx] - F[1][ty ][tx+1]) * dt_ / dx_
|
||||
+ (G[1][ty][tx] - G[1][ty+1][tx ]) * dt_ / dy_;
|
||||
Q[2][j][i] += (F[2][ty][tx] - F[2][ty ][tx+1]) * dt_ / dx_
|
||||
+ (G[2][ty][tx] - G[2][ty+1][tx ]) * dt_ / dy_;
|
||||
__syncthreads();
|
||||
|
||||
//Write to main memory
|
||||
writeBlock<w, h, gc_x, gc_y>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
|
||||
|
||||
//Compute the CFL for this block
|
||||
if (cfl_ != NULL) {
|
||||
writeCfl<w, h, gc_x, gc_y, vars>(Q, Q[0], nx_, ny_, dx_, dy_, g_, cfl_);
|
||||
}
|
||||
}
|
||||
|
||||
} // extern "C"
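LxF_2D_flux is provided by SWECommon.h and is not shown in this file. As a point of reference, the classical 1D Lax-Friedrichs interface flux for a scalar law q_t + f(q)_x = 0 has the form below; the float3 SWE version used above may carry a different dimensional factor, so treat this as a sketch only.

// Classical Lax-Friedrichs numerical flux at the interface between states q_l
// and q_r, given the physical fluxes f_l = f(q_l) and f_r = f(q_r).
__device__ float laxFriedrichsFlux(float q_l, float q_r, float f_l, float f_r, float dx, float dt)
{
    return 0.5f * (f_l + f_r) - 0.5f * (dx / dt) * (q_r - q_l);
}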
|
||||
|
@ -1,169 +0,0 @@
|
||||
#include "hip/hip_runtime.h"
|
||||
/*
|
||||
This kernel implements the classical Lax-Friedrichs scheme
|
||||
for the shallow water equations, with edge fluxes.
|
||||
|
||||
Copyright (C) 2016 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
|
||||
#include "common.h"
|
||||
#include "SWECommon.h"
|
||||
|
||||
|
||||
/**
|
||||
* Computes the flux along the x axis for all faces
|
||||
*/
|
||||
template <int block_width, int block_height>
|
||||
__device__
|
||||
void computeFluxF(float Q[3][block_height+2][block_width+2],
|
||||
float F[3][block_height][block_width+1],
|
||||
const float g_, const float dx_, const float dt_) {
|
||||
//Index of thread within block
|
||||
const int tx = threadIdx.x;
|
||||
const int ty = threadIdx.y;
|
||||
|
||||
{
|
||||
const int j=ty;
|
||||
const int l = j + 1; //Skip ghost cells
|
||||
for (int i=tx; i<block_width+1; i+=block_width) {
|
||||
const int k = i;
|
||||
|
||||
// Q at interface from the right and left
|
||||
const float3 Qp = make_float3(Q[0][l][k+1],
|
||||
Q[1][l][k+1],
|
||||
Q[2][l][k+1]);
|
||||
const float3 Qm = make_float3(Q[0][l][k],
|
||||
Q[1][l][k],
|
||||
Q[2][l][k]);
|
||||
|
||||
// Computed flux
|
||||
const float3 flux = LxF_2D_flux(Qm, Qp, g_, dx_, dt_);
|
||||
F[0][j][i] = flux.x;
|
||||
F[1][j][i] = flux.y;
|
||||
F[2][j][i] = flux.z;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Computes the flux along the y axis for all faces
|
||||
*/
|
||||
template <int block_width, int block_height>
|
||||
__device__
|
||||
void computeFluxG(float Q[3][block_height+2][block_width+2],
|
||||
float G[3][block_height+1][block_width],
|
||||
const float g_, const float dy_, const float dt_) {
|
||||
//Index of thread within block
|
||||
const int tx = threadIdx.x;
|
||||
const int ty = threadIdx.y;
|
||||
|
||||
for (int j=ty; j<block_height+1; j+=block_height) {
|
||||
const int l = j;
|
||||
{
|
||||
const int i=tx;
|
||||
const int k = i + 1; //Skip ghost cells
|
||||
|
||||
// Q at interface from the right and left
|
||||
// Note that we swap hu and hv
|
||||
const float3 Qp = make_float3(Q[0][l+1][k],
|
||||
Q[2][l+1][k],
|
||||
Q[1][l+1][k]);
|
||||
const float3 Qm = make_float3(Q[0][l][k],
|
||||
Q[2][l][k],
|
||||
Q[1][l][k]);
|
||||
|
||||
// Computed flux
|
||||
// Note that we swap back
|
||||
const float3 flux = LxF_2D_flux(Qm, Qp, g_, dy_, dt_);
|
||||
G[0][j][i] = flux.x;
|
||||
G[1][j][i] = flux.z;
|
||||
G[2][j][i] = flux.y;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
extern "C" {
|
||||
__global__
|
||||
void LxFKernel(
|
||||
int nx_, int ny_,
|
||||
float dx_, float dy_, float dt_,
|
||||
float g_,
|
||||
|
||||
int boundary_conditions_,
|
||||
|
||||
//Input h^n
|
||||
float* h0_ptr_, int h0_pitch_,
|
||||
float* hu0_ptr_, int hu0_pitch_,
|
||||
float* hv0_ptr_, int hv0_pitch_,
|
||||
|
||||
//Output h^{n+1}
|
||||
float* h1_ptr_, int h1_pitch_,
|
||||
float* hu1_ptr_, int hu1_pitch_,
|
||||
float* hv1_ptr_, int hv1_pitch_,
|
||||
|
||||
//Output CFL
|
||||
float* cfl_) {
|
||||
|
||||
const unsigned int w = BLOCK_WIDTH;
|
||||
const unsigned int h = BLOCK_HEIGHT;
|
||||
const unsigned int gc_x = 1;
|
||||
const unsigned int gc_y = 1;
|
||||
const unsigned int vars = 3;
|
||||
|
||||
__shared__ float Q[vars][h+2][w+2];
|
||||
__shared__ float F[vars][h ][w+1];
|
||||
__shared__ float G[vars][h+1][w ];
|
||||
|
||||
//Read from global memory
|
||||
readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
|
||||
|
||||
//Compute fluxes along the x and y axis
|
||||
computeFluxF<w, h>(Q, F, g_, dx_, dt_);
|
||||
computeFluxG<w, h>(Q, G, g_, dy_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Evolve for all cells
|
||||
const int tx = threadIdx.x;
|
||||
const int ty = threadIdx.y;
|
||||
const int i = tx + 1; //Skip local ghost cells, i.e., +1
|
||||
const int j = ty + 1;
|
||||
|
||||
Q[0][j][i] += (F[0][ty][tx] - F[0][ty ][tx+1]) * dt_ / dx_
|
||||
+ (G[0][ty][tx] - G[0][ty+1][tx ]) * dt_ / dy_;
|
||||
Q[1][j][i] += (F[1][ty][tx] - F[1][ty ][tx+1]) * dt_ / dx_
|
||||
+ (G[1][ty][tx] - G[1][ty+1][tx ]) * dt_ / dy_;
|
||||
Q[2][j][i] += (F[2][ty][tx] - F[2][ty ][tx+1]) * dt_ / dx_
|
||||
+ (G[2][ty][tx] - G[2][ty+1][tx ]) * dt_ / dy_;
|
||||
__syncthreads();
|
||||
|
||||
//Write to main memory
|
||||
writeBlock<w, h, gc_x, gc_y>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
|
||||
|
||||
//Compute the CFL for this block
|
||||
if (cfl_ != NULL) {
|
||||
writeCfl<w, h, gc_x, gc_y, vars>(Q, Q[0], nx_, ny_, dx_, dy_, g_, cfl_);
|
||||
}
|
||||
}
|
||||
|
||||
} // extern "C"
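The writeCfl helper used when cfl_ is non-NULL reduces a per-cell timestep bound over the block. For the shallow-water system that bound is conventionally the cell size divided by the fastest gravity-wave speed; a per-cell sketch (not the project's actual implementation) is:

// Largest admissible dt for one cell so that the fastest wave, |u| + sqrt(g*h),
// crosses at most one cell in either direction (illustrative only).
__device__ float cellCflTimestep(float h, float hu, float hv, float dx, float dy, float g)
{
    const float u = hu / h;
    const float v = hv / h;
    const float c = sqrtf(g * h);
    return fminf(dx / (fabsf(u) + c), dy / (fabsf(v) + c));
}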
|
||||
|
@ -1,178 +0,0 @@
|
||||
/*
|
||||
This kernel implements the Kurganov-Petrova numerical scheme
|
||||
for the shallow water equations, described in
|
||||
A. Kurganov & Guergana Petrova
|
||||
A Second-Order Well-Balanced Positivity Preserving Central-Upwind
|
||||
Scheme for the Saint-Venant System. Communications in Mathematical
|
||||
Sciences, 5 (2007), 133-160.
|
||||
|
||||
Copyright (C) 2016 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
|
||||
|
||||
#include "common.h"
|
||||
#include "SWECommon.h"
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Computes the flux along the x axis for all faces
|
||||
*/
|
||||
__device__
|
||||
void computeFluxF(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float F[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
const float g_, const float dx_, const float dt_) {
|
||||
for (int j=threadIdx.y; j<BLOCK_HEIGHT+4; j+=BLOCK_HEIGHT) {
|
||||
for (int i=threadIdx.x+1; i<BLOCK_WIDTH+2; i+=BLOCK_WIDTH) {
|
||||
// Q at interface from the right and left
|
||||
const float3 Ql2 = make_float3(Q[0][j][i-1], Q[1][j][i-1], Q[2][j][i-1]);
|
||||
const float3 Ql1 = make_float3(Q[0][j][i ], Q[1][j][i ], Q[2][j][i ]);
|
||||
const float3 Qr1 = make_float3(Q[0][j][i+1], Q[1][j][i+1], Q[2][j][i+1]);
|
||||
const float3 Qr2 = make_float3(Q[0][j][i+2], Q[1][j][i+2], Q[2][j][i+2]);
|
||||
|
||||
// Computed flux
|
||||
const float3 flux = WAF_1D_flux(Ql2, Ql1, Qr1, Qr2, g_, dx_, dt_);
|
||||
F[0][j][i] = flux.x;
|
||||
F[1][j][i] = flux.y;
|
||||
F[2][j][i] = flux.z;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Computes the flux along the y axis for all faces
|
||||
*/
|
||||
__device__
|
||||
void computeFluxG(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float G[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
const float g_, const float dy_, const float dt_) {
|
||||
for (int j=threadIdx.y+1; j<BLOCK_HEIGHT+2; j+=BLOCK_HEIGHT) {
|
||||
for (int i=threadIdx.x; i<BLOCK_WIDTH+4; i+=BLOCK_WIDTH) {
|
||||
// Q at interface from the right and left
|
||||
// Note that we swap hu and hv
|
||||
const float3 Ql2 = make_float3(Q[0][j-1][i], Q[2][j-1][i], Q[1][j-1][i]);
|
||||
const float3 Ql1 = make_float3(Q[0][j ][i], Q[2][j ][i], Q[1][j ][i]);
|
||||
const float3 Qr1 = make_float3(Q[0][j+1][i], Q[2][j+1][i], Q[1][j+1][i]);
|
||||
const float3 Qr2 = make_float3(Q[0][j+2][i], Q[2][j+2][i], Q[1][j+2][i]);
|
||||
|
||||
// Computed flux
|
||||
// Note that we swap back
|
||||
const float3 flux = WAF_1D_flux(Ql2, Ql1, Qr1, Qr2, g_, dy_, dt_);
|
||||
G[0][j][i] = flux.x;
|
||||
G[1][j][i] = flux.z;
|
||||
G[2][j][i] = flux.y;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
extern "C" {
|
||||
__global__ void WAFKernel(
|
||||
int nx_, int ny_,
|
||||
float dx_, float dy_, float dt_,
|
||||
float g_,
|
||||
|
||||
int step_,
|
||||
int boundary_conditions_,
|
||||
|
||||
//Input h^n
|
||||
float* h0_ptr_, int h0_pitch_,
|
||||
float* hu0_ptr_, int hu0_pitch_,
|
||||
float* hv0_ptr_, int hv0_pitch_,
|
||||
|
||||
//Output h^{n+1}
|
||||
float* h1_ptr_, int h1_pitch_,
|
||||
float* hu1_ptr_, int hu1_pitch_,
|
||||
float* hv1_ptr_, int hv1_pitch_) {
|
||||
|
||||
const unsigned int w = BLOCK_WIDTH;
|
||||
const unsigned int h = BLOCK_HEIGHT;
|
||||
const unsigned int gc_x = 2;
|
||||
const unsigned int gc_y = 2;
|
||||
const unsigned int vars = 3;
|
||||
|
||||
//Shared memory variables
|
||||
__shared__ float Q[3][h+4][w+4];
|
||||
__shared__ float F[3][h+4][w+4];
|
||||
|
||||
|
||||
|
||||
//Read into shared memory Q from global memory
|
||||
readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
|
||||
__syncthreads();
|
||||
|
||||
|
||||
|
||||
//Step 0 => evolve x first, then y
|
||||
if (step_ == 0) {
|
||||
//Compute fluxes along the x axis and evolve
|
||||
computeFluxF(Q, F, g_, dx_, dt_);
|
||||
__syncthreads();
|
||||
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Compute fluxes along the y axis and evolve
|
||||
computeFluxG(Q, F, g_, dy_, dt_);
|
||||
__syncthreads();
|
||||
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
|
||||
__syncthreads();
|
||||
}
|
||||
//Step 1 => evolve y first, then x
|
||||
else {
|
||||
//Compute fluxes along the y axis and evolve
|
||||
computeFluxG(Q, F, g_, dy_, dt_);
|
||||
__syncthreads();
|
||||
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Compute fluxes along the x axis and evolve
|
||||
computeFluxF(Q, F, g_, dx_, dt_);
|
||||
__syncthreads();
|
||||
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
|
||||
__syncthreads();
|
||||
}
|
||||
|
||||
|
||||
|
||||
// Write to main memory for all internal cells
|
||||
writeBlock<w, h, gc_x, gc_y>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
|
||||
}
|
||||
|
||||
} // extern "C"
|
@ -1,179 +0,0 @@
|
||||
#include "hip/hip_runtime.h"
|
||||
/*
|
||||
This kernel implements the Kurganov-Petrova numerical scheme
|
||||
for the shallow water equations, described in
|
||||
A. Kurganov & Guergana Petrova
|
||||
A Second-Order Well-Balanced Positivity Preserving Central-Upwind
|
||||
Scheme for the Saint-Venant System. Communications in Mathematical
|
||||
Sciences, 5 (2007), 133-160.
|
||||
|
||||
Copyright (C) 2016 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
|
||||
|
||||
#include "common.h"
|
||||
#include "SWECommon.h"
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Computes the flux along the x axis for all faces
|
||||
*/
|
||||
__device__
|
||||
void computeFluxF(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float F[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
const float g_, const float dx_, const float dt_) {
|
||||
for (int j=threadIdx.y; j<BLOCK_HEIGHT+4; j+=BLOCK_HEIGHT) {
|
||||
for (int i=threadIdx.x+1; i<BLOCK_WIDTH+2; i+=BLOCK_WIDTH) {
|
||||
// Q at interface from the right and left
|
||||
const float3 Ql2 = make_float3(Q[0][j][i-1], Q[1][j][i-1], Q[2][j][i-1]);
|
||||
const float3 Ql1 = make_float3(Q[0][j][i ], Q[1][j][i ], Q[2][j][i ]);
|
||||
const float3 Qr1 = make_float3(Q[0][j][i+1], Q[1][j][i+1], Q[2][j][i+1]);
|
||||
const float3 Qr2 = make_float3(Q[0][j][i+2], Q[1][j][i+2], Q[2][j][i+2]);
|
||||
|
||||
// Computed flux
|
||||
const float3 flux = WAF_1D_flux(Ql2, Ql1, Qr1, Qr2, g_, dx_, dt_);
|
||||
F[0][j][i] = flux.x;
|
||||
F[1][j][i] = flux.y;
|
||||
F[2][j][i] = flux.z;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Computes the flux along the y axis for all faces
|
||||
*/
|
||||
__device__
|
||||
void computeFluxG(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float G[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
const float g_, const float dy_, const float dt_) {
|
||||
for (int j=threadIdx.y+1; j<BLOCK_HEIGHT+2; j+=BLOCK_HEIGHT) {
|
||||
for (int i=threadIdx.x; i<BLOCK_WIDTH+4; i+=BLOCK_WIDTH) {
|
||||
// Q at interface from the right and left
|
||||
// Note that we swap hu and hv
|
||||
const float3 Ql2 = make_float3(Q[0][j-1][i], Q[2][j-1][i], Q[1][j-1][i]);
|
||||
const float3 Ql1 = make_float3(Q[0][j ][i], Q[2][j ][i], Q[1][j ][i]);
|
||||
const float3 Qr1 = make_float3(Q[0][j+1][i], Q[2][j+1][i], Q[1][j+1][i]);
|
||||
const float3 Qr2 = make_float3(Q[0][j+2][i], Q[2][j+2][i], Q[1][j+2][i]);
|
||||
|
||||
// Computed flux
|
||||
// Note that we swap back
|
||||
const float3 flux = WAF_1D_flux(Ql2, Ql1, Qr1, Qr2, g_, dy_, dt_);
|
||||
G[0][j][i] = flux.x;
|
||||
G[1][j][i] = flux.z;
|
||||
G[2][j][i] = flux.y;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
extern "C" {
|
||||
__global__ void WAFKernel(
|
||||
int nx_, int ny_,
|
||||
float dx_, float dy_, float dt_,
|
||||
float g_,
|
||||
|
||||
int step_,
|
||||
int boundary_conditions_,
|
||||
|
||||
//Input h^n
|
||||
float* h0_ptr_, int h0_pitch_,
|
||||
float* hu0_ptr_, int hu0_pitch_,
|
||||
float* hv0_ptr_, int hv0_pitch_,
|
||||
|
||||
//Output h^{n+1}
|
||||
float* h1_ptr_, int h1_pitch_,
|
||||
float* hu1_ptr_, int hu1_pitch_,
|
||||
float* hv1_ptr_, int hv1_pitch_) {
|
||||
|
||||
const unsigned int w = BLOCK_WIDTH;
|
||||
const unsigned int h = BLOCK_HEIGHT;
|
||||
const unsigned int gc_x = 2;
|
||||
const unsigned int gc_y = 2;
|
||||
const unsigned int vars = 3;
|
||||
|
||||
//Shared memory variables
|
||||
__shared__ float Q[3][h+4][w+4];
|
||||
__shared__ float F[3][h+4][w+4];
|
||||
|
||||
|
||||
|
||||
//Read into shared memory Q from global memory
|
||||
readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
|
||||
__syncthreads();
|
||||
|
||||
|
||||
|
||||
//Step 0 => evolve x first, then y
|
||||
if (step_ == 0) {
|
||||
//Compute fluxes along the x axis and evolve
|
||||
computeFluxF(Q, F, g_, dx_, dt_);
|
||||
__syncthreads();
|
||||
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Compute fluxes along the y axis and evolve
|
||||
computeFluxG(Q, F, g_, dy_, dt_);
|
||||
__syncthreads();
|
||||
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
|
||||
__syncthreads();
|
||||
}
|
||||
//Step 1 => evolve y first, then x
|
||||
else {
|
||||
//Compute fluxes along the y axis and evolve
|
||||
computeFluxG(Q, F, g_, dy_, dt_);
|
||||
__syncthreads();
|
||||
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Compute fluxes along the x axis and evolve
|
||||
computeFluxF(Q, F, g_, dx_, dt_);
|
||||
__syncthreads();
|
||||
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
|
||||
__syncthreads();
|
||||
}
|
||||
|
||||
|
||||
|
||||
// Write to main memory for all internal cells
|
||||
writeBlock<w, h, gc_x, gc_y>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
|
||||
}
|
||||
|
||||
} // extern "C"
|
@ -1,533 +0,0 @@
|
||||
/*
|
||||
These CUDA functions implement different types of numerical flux
|
||||
functions for the shallow water equations
|
||||
|
||||
Copyright (C) 2016, 2017, 2018 SINTEF Digital
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
#include "limiters.h"
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
__device__ float3 F_func(const float3 Q, const float g) {
|
||||
float3 F;
|
||||
|
||||
F.x = Q.y; //hu
|
||||
F.y = Q.y*Q.y / Q.x + 0.5f*g*Q.x*Q.x; //hu*hu/h + 0.5f*g*h*h;
|
||||
F.z = Q.y*Q.z / Q.x; //hu*hv/h;
|
||||
|
||||
return F;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Superbee flux limiter for WAF.
|
||||
* Related to superbee limiter so that WAF_superbee(r, c) = 1 - (1-|c|)*superbee(r)
|
||||
* @param r_ the ratio of upwind change (see Toro 2001, p. 203/204)
|
||||
* @param c_ the courant number for wave k, dt*S_k/dx
|
||||
*/
|
||||
__device__ float WAF_superbee(float r_, float c_) {
|
||||
// r <= 0.0
|
||||
if (r_ <= 0.0f) {
|
||||
return 1.0f;
|
||||
}
|
||||
// 0.0 <= r <= 1/2
|
||||
else if (r_ <= 0.5f) {
|
||||
return 1.0f - 2.0f*(1.0f - fabsf(c_))*r_;
|
||||
}
|
||||
// 1/2 <= r <= 1
|
||||
else if (r_ <= 1.0f) {
|
||||
return fabsf(c_);
|
||||
}
|
||||
// 1 <= r <= 2
|
||||
else if (r_ <= 2.0f) {
|
||||
return 1.0f - (1.0f - fabsf(c_))*r_;
|
||||
}
|
||||
// r >= 2
|
||||
else {
|
||||
return 2.0f*fabsf(c_) - 1.0f;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
__device__ float WAF_albada(float r_, float c_) {
|
||||
if (r_ <= 0.0f) {
|
||||
return 1.0f;
|
||||
}
|
||||
else {
|
||||
return 1.0f - (1.0f - fabsf(c_)) * r_ * (1.0f + r_) / (1.0f + r_*r_);
|
||||
}
|
||||
}
|
||||
|
||||
__device__ float WAF_minbee(float r_, float c_) {
|
||||
r_ = fmaxf(-1.0f, fminf(2.0f, r_));
|
||||
if (r_ <= 0.0f) {
|
||||
return 1.0f;
|
||||
}
|
||||
else if (r_ <= 1.0f) {
|
||||
return 1.0f - (1.0f - fabsf(c_)) * r_;
|
||||
}
|
||||
else {
|
||||
return fabsf(c_);
|
||||
}
|
||||
}
|
||||
|
||||
__device__ float WAF_minmod(float r_, float c_) {
|
||||
return 1.0f - (1.0f - fabsf(c_)) * fmaxf(0.0f, fminf(1.0f, r_));
|
||||
}
|
||||
|
||||
__device__ float limiterToWAFLimiter(float r_, float c_) {
|
||||
return 1.0f - (1.0f - fabsf(c_))*r_;
|
||||
}
|
||||
|
||||
// Compute h in the "star region", h^dagger
|
||||
__device__ __inline__ float computeHStar(float h_l, float h_r, float u_l, float u_r, float c_l, float c_r, float g_) {
|
||||
|
||||
//This estimate for the h* gives rise to spurious oscillations.
|
||||
//return 0.5f * (h_l+h_r) - 0.25f * (u_r-u_l)*(h_l+h_r)/(c_l+c_r);
|
||||
|
||||
const float h_tmp = 0.5f * (c_l + c_r) + 0.25f * (u_l - u_r);
|
||||
return h_tmp*h_tmp / g_;
|
||||
}
|
||||
|
||||
/**
|
||||
* Weighted average flux (Toro 2001, p 200) for interface {i+1/2}
|
||||
* @param r_ The flux limiter parameter (see Toro 2001, p. 203)
|
||||
* @param Q_l2 Q_{i-1}
|
||||
* @param Q_l1 Q_{i}
|
||||
* @param Q_r1 Q_{i+1}
|
||||
* @param Q_r2 Q_{i+2}
|
||||
*/
|
||||
__device__ float3 WAF_1D_flux(const float3 Q_l2, const float3 Q_l1, const float3 Q_r1, const float3 Q_r2, const float g_, const float dx_, const float dt_) {
|
||||
const float h_l = Q_l1.x;
|
||||
const float h_r = Q_r1.x;
|
||||
|
||||
const float h_l2 = Q_l2.x;
|
||||
const float h_r2 = Q_r2.x;
|
||||
|
||||
// Calculate velocities
|
||||
const float u_l = Q_l1.y / h_l;
|
||||
const float u_r = Q_r1.y / h_r;
|
||||
|
||||
const float u_l2 = Q_l2.y / h_l2;
|
||||
const float u_r2 = Q_r2.y / h_r2;
|
||||
|
||||
const float v_l = Q_l1.z / h_l;
|
||||
const float v_r = Q_r1.z / h_r;
|
||||
|
||||
const float v_l2 = Q_l2.z / h_l2;
|
||||
const float v_r2 = Q_r2.z / h_r2;
|
||||
|
||||
// Estimate the potential wave speeds
|
||||
const float c_l = sqrt(g_*h_l);
|
||||
const float c_r = sqrt(g_*h_r);
|
||||
|
||||
const float c_l2 = sqrt(g_*h_l2);
|
||||
const float c_r2 = sqrt(g_*h_r2);
|
||||
|
||||
// Compute h in the "star region", h^dagger
|
||||
const float h_dag_l = computeHStar(h_l2, h_l, u_l2, u_l, c_l2, c_l, g_);
|
||||
const float h_dag = computeHStar( h_l, h_r, u_l, u_r, c_l, c_r, g_);
|
||||
const float h_dag_r = computeHStar( h_r, h_r2, u_r, u_r2, c_r, c_r2, g_);
|
||||
|
||||
const float q_l_tmp = sqrt(0.5f * ( (h_dag+h_l)*h_dag ) ) / h_l;
|
||||
const float q_r_tmp = sqrt(0.5f * ( (h_dag+h_r)*h_dag ) ) / h_r;
|
||||
|
||||
const float q_l = (h_dag > h_l) ? q_l_tmp : 1.0f;
|
||||
const float q_r = (h_dag > h_r) ? q_r_tmp : 1.0f;
|
||||
|
||||
// Compute wave speed estimates
|
||||
const float S_l = u_l - c_l*q_l;
|
||||
const float S_r = u_r + c_r*q_r;
|
||||
const float S_star = ( S_l*h_r*(u_r - S_r) - S_r*h_l*(u_l - S_l) ) / ( h_r*(u_r - S_r) - h_l*(u_l - S_l) );
|
||||
|
||||
const float3 Q_star_l = h_l * (S_l - u_l) / (S_l - S_star) * make_float3(1.0, S_star, v_l);
|
||||
const float3 Q_star_r = h_r * (S_r - u_r) / (S_r - S_star) * make_float3(1.0, S_star, v_r);
|
||||
|
||||
// Estimate the fluxes in the four regions
|
||||
const float3 F_1 = F_func(Q_l1, g_);
|
||||
const float3 F_4 = F_func(Q_r1, g_);
|
||||
|
||||
const float3 F_2 = F_1 + S_l*(Q_star_l - Q_l1);
|
||||
const float3 F_3 = F_4 + S_r*(Q_star_r - Q_r1);
|
||||
//const float3 F_2 = F_func(Q_star_l, g_);
|
||||
//const float3 F_3 = F_func(Q_star_r, g_);
|
||||
|
||||
// Compute the courant numbers for the waves
|
||||
const float c_1 = S_l * dt_ / dx_;
|
||||
const float c_2 = S_star * dt_ / dx_;
|
||||
const float c_3 = S_r * dt_ / dx_;
|
||||
|
||||
// Compute the "upwind change" vectors for the i-3/2 and i+3/2 interfaces
|
||||
const float eps = 1.0e-6f;
|
||||
const float r_1 = desingularize( (c_1 > 0.0f) ? (h_dag_l - h_l2) : (h_dag_r - h_r), eps) / desingularize((h_dag - h_l), eps);
|
||||
const float r_2 = desingularize( (c_2 > 0.0f) ? (v_l - v_l2) : (v_r2 - v_r), eps ) / desingularize((v_r - v_l), eps);
|
||||
const float r_3 = desingularize( (c_3 > 0.0f) ? (h_l - h_dag_l) : (h_r2 - h_dag_r), eps ) / desingularize((h_r - h_dag), eps);
|
||||
|
||||
// Compute the limiter
|
||||
// We use h for the nonlinear waves, and v for the middle shear wave
|
||||
const float A_1 = copysign(1.0f, c_1) * limiterToWAFLimiter(generalized_minmod(r_1, 1.9f), c_1);
|
||||
const float A_2 = copysign(1.0f, c_2) * limiterToWAFLimiter(generalized_minmod(r_2, 1.9f), c_2);
|
||||
const float A_3 = copysign(1.0f, c_3) * limiterToWAFLimiter(generalized_minmod(r_3, 1.9f), c_3);
|
||||
|
||||
//Average the fluxes
|
||||
const float3 flux = 0.5f*( F_1 + F_4 )
|
||||
- 0.5f*( A_1 * (F_2 - F_1)
|
||||
+ A_2 * (F_3 - F_2)
|
||||
+ A_3 * (F_4 - F_3) );
|
||||
|
||||
return flux;
|
||||
}
/**
|
||||
* Central upwind flux function
|
||||
*/
|
||||
__device__ float3 CentralUpwindFlux(const float3 Qm, float3 Qp, const float g) {
|
||||
const float3 Fp = F_func(Qp, g);
|
||||
const float up = Qp.y / Qp.x; // hu / h
|
||||
const float cp = sqrt(g*Qp.x); // sqrt(g*h)
|
||||
|
||||
const float3 Fm = F_func(Qm, g);
|
||||
const float um = Qm.y / Qm.x; // hu / h
|
||||
const float cm = sqrt(g*Qm.x); // sqrt(g*h)
|
||||
|
||||
const float am = min(min(um-cm, up-cp), 0.0f); // largest negative wave speed
|
||||
const float ap = max(max(um+cm, up+cp), 0.0f); // largest positive wave speed
|
||||
|
||||
return ((ap*Fm - am*Fp) + ap*am*(Qp-Qm))/(ap-am);
|
||||
}
/**
|
||||
* Godunov's centered scheme (Toro 2001, p 165)
|
||||
*/
|
||||
__device__ float3 GodC_1D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_, const float dt_) {
|
||||
const float3 F_l = F_func(Q_l, g_);
|
||||
const float3 F_r = F_func(Q_r, g_);
|
||||
|
||||
const float3 Q_godc = 0.5f*(Q_l + Q_r) + (dt_/dx_)*(F_l - F_r);
|
||||
|
||||
return F_func(Q_godc, g_);
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/**
* Harten-Lax-van Leer (HLL) flux (Toro 2001, p 180)
*/
__device__ float3 HLL_flux(const float3 Q_l, const float3 Q_r, const float g_) {
|
||||
const float h_l = Q_l.x;
|
||||
const float h_r = Q_r.x;
|
||||
|
||||
// Calculate velocities
|
||||
const float u_l = Q_l.y / h_l;
|
||||
const float u_r = Q_r.y / h_r;
|
||||
|
||||
// Estimate the potential wave speeds
|
||||
const float c_l = sqrt(g_*h_l);
|
||||
const float c_r = sqrt(g_*h_r);
|
||||
|
||||
// Compute h in the "star region", h^dagger
|
||||
const float h_dag = 0.5f * (h_l+h_r) - 0.25f * (u_r-u_l)*(h_l+h_r)/(c_l+c_r);
|
||||
|
||||
const float q_l_tmp = sqrt(0.5f * ( (h_dag+h_l)*h_dag / (h_l*h_l) ) );
|
||||
const float q_r_tmp = sqrt(0.5f * ( (h_dag+h_r)*h_dag / (h_r*h_r) ) );
|
||||
|
||||
const float q_l = (h_dag > h_l) ? q_l_tmp : 1.0f;
|
||||
const float q_r = (h_dag > h_r) ? q_r_tmp : 1.0f;
|
||||
|
||||
// Compute wave speed estimates
|
||||
const float S_l = u_l - c_l*q_l;
|
||||
const float S_r = u_r + c_r*q_r;
|
||||
|
||||
//Upwind selection
|
||||
if (S_l >= 0.0f) {
|
||||
return F_func(Q_l, g_);
|
||||
}
|
||||
else if (S_r <= 0.0f) {
|
||||
return F_func(Q_r, g_);
|
||||
}
|
||||
//Or estimate flux in the star region
|
||||
else {
|
||||
const float3 F_l = F_func(Q_l, g_);
|
||||
const float3 F_r = F_func(Q_r, g_);
|
||||
const float3 flux = (S_r*F_l - S_l*F_r + S_r*S_l*(Q_r - Q_l)) / (S_r-S_l);
|
||||
return flux;
|
||||
}
|
||||
}
/**
|
||||
* Harten-Lax-van Leer with contact discontinuity (Toro 2001, p 181)
|
||||
*/
|
||||
__device__ float3 HLLC_flux(const float3 Q_l, const float3 Q_r, const float g_) {
|
||||
const float h_l = Q_l.x;
|
||||
const float h_r = Q_r.x;
|
||||
|
||||
// Calculate velocities
|
||||
const float u_l = Q_l.y / h_l;
|
||||
const float u_r = Q_r.y / h_r;
|
||||
|
||||
// Estimate the potential wave speeds
|
||||
const float c_l = sqrt(g_*h_l);
|
||||
const float c_r = sqrt(g_*h_r);
|
||||
|
||||
// Compute h in the "star region", h^dagger
|
||||
const float h_dag = 0.5f * (h_l+h_r) - 0.25f * (u_r-u_l)*(h_l+h_r)/(c_l+c_r);
|
||||
|
||||
const float q_l_tmp = sqrt(0.5f * ( (h_dag+h_l)*h_dag / (h_l*h_l) ) );
|
||||
const float q_r_tmp = sqrt(0.5f * ( (h_dag+h_r)*h_dag / (h_r*h_r) ) );
|
||||
|
||||
const float q_l = (h_dag > h_l) ? q_l_tmp : 1.0f;
|
||||
const float q_r = (h_dag > h_r) ? q_r_tmp : 1.0f;
|
||||
|
||||
// Compute wave speed estimates
|
||||
const float S_l = u_l - c_l*q_l;
|
||||
const float S_r = u_r + c_r*q_r;
|
||||
const float S_star = ( S_l*h_r*(u_r - S_r) - S_r*h_l*(u_l - S_l) ) / ( h_r*(u_r - S_r) - h_l*(u_l - S_l) );
|
||||
|
||||
const float3 F_l = F_func(Q_l, g_);
|
||||
const float3 F_r = F_func(Q_r, g_);
|
||||
|
||||
//Upwind selection
|
||||
if (S_l >= 0.0f) {
|
||||
return F_l;
|
||||
}
|
||||
else if (S_r <= 0.0f) {
|
||||
return F_r;
|
||||
}
|
||||
//Or estimate flux in the "left star" region
|
||||
else if (S_l <= 0.0f && 0.0f <=S_star) {
|
||||
const float v_l = Q_l.z / h_l;
|
||||
const float3 Q_star_l = h_l * (S_l - u_l) / (S_l - S_star) * make_float3(1, S_star, v_l);
|
||||
const float3 flux = F_l + S_l*(Q_star_l - Q_l);
|
||||
return flux;
|
||||
}
|
||||
//Or estimate flux in the "righ star" region
|
||||
else if (S_star <= 0.0f && 0.0f <=S_r) {
|
||||
const float v_r = Q_r.z / h_r;
|
||||
const float3 Q_star_r = h_r * (S_r - u_r) / (S_r - S_star) * make_float3(1, S_star, v_r);
|
||||
const float3 flux = F_r + S_r*(Q_star_r - Q_r);
|
||||
return flux;
|
||||
}
|
||||
else {
|
||||
return make_float3(-99999.9f, -99999.9f, -99999.9f); //Something wrong here
|
||||
}
|
||||
}
/**
|
||||
* Lax-Friedrichs flux (Toro 2001, p 163)
|
||||
*/
|
||||
__device__ float3 LxF_1D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_, const float dt_) {
|
||||
const float3 F_l = F_func(Q_l, g_);
|
||||
const float3 F_r = F_func(Q_r, g_);
|
||||
|
||||
return 0.5f*(F_l + F_r) + (dx_/(2.0f*dt_))*(Q_l - Q_r);
|
||||
}
/**
|
||||
* Lax-Friedrichs extended to 2D
|
||||
*/
|
||||
__device__ float3 LxF_2D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_, const float dt_) {
|
||||
const float3 F_l = F_func(Q_l, g_);
|
||||
const float3 F_r = F_func(Q_r, g_);
|
||||
|
||||
//Note numerical diffusion for 2D here (0.25)
|
||||
return 0.5f*(F_l + F_r) + (dx_/(4.0f*dt_))*(Q_l - Q_r);
|
||||
}
/**
|
||||
* Richtmyer / Two-step Lax-Wendroff flux (Toro 2001, p 164)
|
||||
*/
|
||||
__device__ float3 LxW2_1D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_, const float dt_) {
|
||||
const float3 F_l = F_func(Q_l, g_);
|
||||
const float3 F_r = F_func(Q_r, g_);
|
||||
|
||||
const float3 Q_lw2 = 0.5f*(Q_l + Q_r) + (dt_/(2.0f*dx_))*(F_l - F_r);
|
||||
|
||||
return F_func(Q_lw2, g_);
|
||||
}
/**
|
||||
* First-Order Centered (FORCE) flux (Toro 2001, p 163)
|
||||
*/
|
||||
__device__ float3 FORCE_1D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_, const float dt_) {
|
||||
const float3 F_lf = LxF_1D_flux(Q_l, Q_r, g_, dx_, dt_);
|
||||
const float3 F_lw2 = LxW2_1D_flux(Q_l, Q_r, g_, dx_, dt_);
|
||||
return 0.5f*(F_lf + F_lw2);
|
||||
}
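
/**
* Computes the maximum timestep allowed by the CFL condition for each cell,
* and reduces it to one value per block, written to output_ (one entry per block)
*/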
template<int w, int h, int gc_x, int gc_y, int vars>
|
||||
__device__ void writeCfl(float Q[vars][h+2*gc_y][w+2*gc_x],
|
||||
float shmem[h+2*gc_y][w+2*gc_x],
|
||||
const int nx_, const int ny_,
|
||||
const float dx_, const float dy_, const float g_,
|
||||
float* output_) {
|
||||
//Index of thread within block
|
||||
const int tx = threadIdx.x + gc_x;
|
||||
const int ty = threadIdx.y + gc_y;
|
||||
|
||||
//Index of cell within domain
|
||||
const int ti = blockDim.x*blockIdx.x + tx;
|
||||
const int tj = blockDim.y*blockIdx.y + ty;
|
||||
|
||||
//Only internal cells
|
||||
if (ti < nx_+gc_x && tj < ny_+gc_y) {
|
||||
const float h = Q[0][ty][tx];
|
||||
const float u = Q[1][ty][tx] / h;
|
||||
const float v = Q[2][ty][tx] / h;
|
||||
|
||||
const float max_u = dx_ / (fabsf(u) + sqrtf(g_*h));
|
||||
const float max_v = dy_ / (fabsf(v) + sqrtf(g_*h));
|
||||
|
||||
shmem[ty][tx] = fminf(max_u, max_v);
|
||||
}
|
||||
__syncthreads();
|
||||
|
||||
//One row of threads loops over all rows
|
||||
if (ti < nx_+gc_x && tj < ny_+gc_y) {
|
||||
if (ty == gc_y) {
|
||||
float min_val = shmem[ty][tx];
|
||||
const int max_y = min(h, ny_+gc_y - tj);
|
||||
for (int j=gc_y; j<max_y+gc_y; j++) {
|
||||
min_val = fminf(min_val, shmem[j][tx]);
|
||||
}
|
||||
shmem[ty][tx] = min_val;
|
||||
}
|
||||
}
|
||||
__syncthreads();
|
||||
|
||||
//One thread loops over the first row to find the minimum for the block
|
||||
if (tx == gc_x && ty == gc_y) {
|
||||
float min_val = shmem[ty][tx];
|
||||
const int max_x = min(w, nx_+gc_x - ti);
|
||||
for (int i=gc_x; i<max_x+gc_x; ++i) {
|
||||
min_val = fminf(min_val, shmem[ty][i]);
|
||||
}
|
||||
|
||||
const int idx = gridDim.x*blockIdx.y + blockIdx.x;
|
||||
output_[idx] = min_val;
|
||||
}
|
||||
}
|
||||
|
@ -1,557 +0,0 @@
|
||||
/*
|
||||
This OpenCL kernel implements the Kurganov-Petrova numerical scheme
|
||||
for the shallow water equations, described in
|
||||
A. Kurganov & Guergana Petrova
|
||||
A Second-Order Well-Balanced Positivity Preserving Central-Upwind
|
||||
Scheme for the Saint-Venant System Communications in Mathematical
|
||||
Sciences, 5 (2007), 133-160.
|
||||
|
||||
Copyright (C) 2016 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
|
||||
/**
|
||||
* Float3 operators
|
||||
*/
|
||||
inline __device__ float3 operator*(const float a, const float3 b) {
|
||||
return make_float3(a*b.x, a*b.y, a*b.z);
|
||||
}
|
||||
|
||||
inline __device__ float3 operator/(const float3 a, const float b) {
|
||||
return make_float3(a.x/b, a.y/b, a.z/b);
|
||||
}
|
||||
|
||||
inline __device__ float3 operator-(const float3 a, const float3 b) {
|
||||
return make_float3(a.x-b.x, a.y-b.y, a.z-b.z);
|
||||
}
|
||||
|
||||
inline __device__ float3 operator+(const float3 a, const float3 b) {
|
||||
return make_float3(a.x+b.x, a.y+b.y, a.z+b.z);
|
||||
}
|
||||
|
||||
/**
|
||||
* Float4 operators
|
||||
*/
|
||||
inline __device__ float4 operator*(const float a, const float4 b) {
|
||||
return make_float4(a*b.x, a*b.y, a*b.z, a*b.w);
|
||||
}
|
||||
|
||||
inline __device__ float4 operator/(const float4 a, const float b) {
|
||||
return make_float4(a.x/b, a.y/b, a.z/b, a.w/b);
|
||||
}
|
||||
|
||||
inline __device__ float4 operator-(const float4 a, const float4 b) {
|
||||
return make_float4(a.x-b.x, a.y-b.y, a.z-b.z, a.w-b.w);
|
||||
}
|
||||
|
||||
inline __device__ float4 operator+(const float4 a, const float4 b) {
|
||||
return make_float4(a.x+b.x, a.y+b.y, a.z+b.z, a.w+b.w);
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
inline __device__ __host__ float clamp(const float f, const float a, const float b) {
|
||||
return fmaxf(a, fminf(f, b));
|
||||
}
|
||||
|
||||
inline __device__ __host__ int clamp(const int f, const int a, const int b) {
|
||||
return (f < b) ? ( (f > a) ? f : a) : b;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
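/**
* Desingularizes x_: returns a value with the same sign as x_ whose magnitude
* is kept away from zero (at least 0.5*eps_), used to avoid division by
* near-zero quantities
*/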
__device__ float desingularize(float x_, float eps_) {
|
||||
return copysign(1.0f, x_)*fmaxf(fabsf(x_), fminf(x_*x_/(2.0f*eps_)+0.5f*eps_, eps_));
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Returns the step stored in the leftmost 16 bits
|
||||
* of the 32 bit step-order integer
|
||||
*/
|
||||
inline __device__ int getStep(int step_order_) {
|
||||
return step_order_ >> 16;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the order stored in the rightmost 16 bits
|
||||
* of the 32 bit step-order integer
|
||||
*/
|
||||
inline __device__ int getOrder(int step_order_) {
|
||||
return step_order_ & 0x0000FFFF;
|
||||
}
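
//Example of the packing these two getters assume:
//  int step_order = (step << 16) | (order & 0x0000FFFF);
//  getStep(step_order)  -> step
//  getOrder(step_order) -> order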
enum BoundaryCondition {
|
||||
Dirichlet = 0,
|
||||
Neumann = 1,
|
||||
Periodic = 2,
|
||||
Reflective = 3
|
||||
};
|
||||
|
||||
inline __device__ BoundaryCondition getBCNorth(int bc_) {
|
||||
return static_cast<BoundaryCondition>((bc_ >> 24) & 0x0000000F);
|
||||
}
|
||||
|
||||
inline __device__ BoundaryCondition getBCSouth(int bc_) {
|
||||
return static_cast<BoundaryCondition>((bc_ >> 16) & 0x0000000F);
|
||||
}
|
||||
|
||||
inline __device__ BoundaryCondition getBCEast(int bc_) {
|
||||
return static_cast<BoundaryCondition>((bc_ >> 8) & 0x0000000F);
|
||||
}
|
||||
|
||||
inline __device__ BoundaryCondition getBCWest(int bc_) {
|
||||
return static_cast<BoundaryCondition>((bc_ >> 0) & 0x0000000F);
|
||||
}
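
//Example of the bit layout the four getters above assume (4 bits per boundary):
//  int bc = (north << 24) | (south << 16) | (east << 8) | (west << 0);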
// West boundary
|
||||
template<int w, int h, int gc_x, int gc_y, int sign>
|
||||
__device__ void bcWestReflective(float Q[h+2*gc_y][w+2*gc_x],
|
||||
const int nx_, const int ny_) {
|
||||
for (int j=threadIdx.y; j<h+2*gc_y; j+=h) {
|
||||
const int i = threadIdx.x + gc_x;
|
||||
const int ti = blockDim.x*blockIdx.x + i;
|
||||
|
||||
if (gc_x >= 1 && ti == gc_x) {
|
||||
Q[j][i-1] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_x >= 2 && ti == gc_x + 1) {
|
||||
Q[j][i-3] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_x >= 3 && ti == gc_x + 2) {
|
||||
Q[j][i-5] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_x >= 4 && ti == gc_x + 3) {
|
||||
Q[j][i-7] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_x >= 5 && ti == gc_x + 4) {
|
||||
Q[j][i-9] = sign*Q[j][i];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// East boundary
|
||||
template<int w, int h, int gc_x, int gc_y, int sign>
|
||||
__device__ void bcEastReflective(float Q[h+2*gc_y][w+2*gc_x],
|
||||
const int nx_, const int ny_) {
|
||||
for (int j=threadIdx.y; j<h+2*gc_y; j+=h) {
|
||||
const int i = threadIdx.x + gc_x;
|
||||
const int ti = blockDim.x*blockIdx.x + i;
|
||||
|
||||
if (gc_x >= 1 && ti == nx_ + gc_x - 1) {
|
||||
Q[j][i+1] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_x >= 2 && ti == nx_ + gc_x - 2) {
|
||||
Q[j][i+3] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_x >= 3 && ti == nx_ + gc_x - 3) {
|
||||
Q[j][i+5] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_x >= 4 && ti == nx_ + gc_x - 4) {
|
||||
Q[j][i+7] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_x >= 5 && ti == nx_ + gc_x - 5) {
|
||||
Q[j][i+9] = sign*Q[j][i];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// South boundary
|
||||
template<int w, int h, int gc_x, int gc_y, int sign>
|
||||
__device__ void bcSouthReflective(float Q[h+2*gc_y][w+2*gc_x],
|
||||
const int nx_, const int ny_) {
|
||||
for (int i=threadIdx.x; i<w+2*gc_x; i+=w) {
|
||||
const int j = threadIdx.y + gc_y;
|
||||
const int tj = blockDim.y*blockIdx.y + j;
|
||||
|
||||
if (gc_y >= 1 && tj == gc_y) {
|
||||
Q[j-1][i] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_y >= 2 && tj == gc_y + 1) {
|
||||
Q[j-3][i] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_y >= 3 && tj == gc_y + 2) {
|
||||
Q[j-5][i] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_y >= 4 && tj == gc_y + 3) {
|
||||
Q[j-7][i] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_y >= 5 && tj == gc_y + 4) {
|
||||
Q[j-9][i] = sign*Q[j][i];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
// North boundary
|
||||
template<int w, int h, int gc_x, int gc_y, int sign>
|
||||
__device__ void bcNorthReflective(float Q[h+2*gc_y][w+2*gc_x], const int nx_, const int ny_) {
|
||||
for (int i=threadIdx.x; i<w+2*gc_x; i+=w) {
|
||||
const int j = threadIdx.y + gc_y;
|
||||
const int tj = blockDim.y*blockIdx.y + j;
|
||||
|
||||
if (gc_y >= 1 && tj == ny_ + gc_y - 1) {
|
||||
Q[j+1][i] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_y >= 2 && tj == ny_ + gc_y - 2) {
|
||||
Q[j+3][i] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_y >= 3 && tj == ny_ + gc_y - 3) {
|
||||
Q[j+5][i] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_y >= 4 && tj == ny_ + gc_y - 4) {
|
||||
Q[j+7][i] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_y >= 5 && tj == ny_ + gc_y - 5) {
|
||||
Q[j+9][i] = sign*Q[j][i];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Alter the index k so that it gives periodic boundary conditions when reading
|
||||
*/
|
||||
template<int gc_x>
|
||||
inline __device__ int handlePeriodicBoundaryX(int k, int nx_, int boundary_conditions_) {
|
||||
const int gc_pad = gc_x;
|
||||
|
||||
//West boundary: add an offset to read from east of domain
|
||||
if (gc_x > 0) {
|
||||
if ((k < gc_pad)
|
||||
&& getBCWest(boundary_conditions_) == Periodic) {
|
||||
k += (nx_+2*gc_x - 2*gc_pad);
|
||||
}
|
||||
//East boundary: subtract an offset to read from west of domain
|
||||
else if ((k >= nx_+2*gc_x-gc_pad)
|
||||
&& getBCEast(boundary_conditions_) == Periodic) {
|
||||
k -= (nx_+2*gc_x - 2*gc_pad);
|
||||
}
|
||||
}
|
||||
|
||||
return k;
|
||||
}
|
||||
|
||||
/**
|
||||
* Alter the index l so that it gives periodic boundary conditions when reading
|
||||
*/
|
||||
template<int gc_y>
|
||||
inline __device__ int handlePeriodicBoundaryY(int l, int ny_, int boundary_conditions_) {
|
||||
const int gc_pad = gc_y;
|
||||
|
||||
//South boundary: add an offset to read from north of domain
|
||||
if (gc_y > 0) {
|
||||
if ((l < gc_pad)
|
||||
&& getBCSouth(boundary_conditions_) == Periodic) {
|
||||
l += (ny_+2*gc_y - 2*gc_pad);
|
||||
}
|
||||
//North boundary: subtract an offset to read from south of domain
|
||||
else if ((l >= ny_+2*gc_y-gc_pad)
|
||||
&& getBCNorth(boundary_conditions_) == Periodic) {
|
||||
l -= (ny_+2*gc_y - 2*gc_pad);
|
||||
}
|
||||
}
|
||||
|
||||
return l;
|
||||
}
|
||||
|
||||
|
||||
template<int w, int h, int gc_x, int gc_y, int sign_x, int sign_y>
|
||||
inline __device__
|
||||
void handleReflectiveBoundary(
|
||||
float Q[h+2*gc_y][w+2*gc_x],
|
||||
const int nx_, const int ny_,
|
||||
const int boundary_conditions_) {
|
||||
|
||||
//Handle reflective boundary conditions
|
||||
if (getBCNorth(boundary_conditions_) == Reflective) {
|
||||
bcNorthReflective<w, h, gc_x, gc_y, sign_y>(Q, nx_, ny_);
|
||||
__syncthreads();
|
||||
}
|
||||
if (getBCSouth(boundary_conditions_) == Reflective) {
|
||||
bcSouthReflective<w, h, gc_x, gc_y, sign_y>(Q, nx_, ny_);
|
||||
__syncthreads();
|
||||
}
|
||||
if (getBCEast(boundary_conditions_) == Reflective) {
|
||||
bcEastReflective<w, h, gc_x, gc_y, sign_x>(Q, nx_, ny_);
|
||||
__syncthreads();
|
||||
}
|
||||
if (getBCWest(boundary_conditions_) == Reflective) {
|
||||
bcWestReflective<w, h, gc_x, gc_y, sign_x>(Q, nx_, ny_);
|
||||
__syncthreads();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Reads a block of data with ghost cells
|
||||
*/
|
||||
template<int w, int h, int gc_x, int gc_y, int sign_x, int sign_y>
|
||||
inline __device__ void readBlock(float* ptr_, int pitch_,
|
||||
float Q[h+2*gc_y][w+2*gc_x],
|
||||
const int nx_, const int ny_,
|
||||
const int boundary_conditions_,
|
||||
int x0, int y0,
|
||||
int x1, int y1) {
|
||||
//Index of block within domain
|
||||
const int bx = blockDim.x * blockIdx.x;
|
||||
const int by = blockDim.y * blockIdx.y;
|
||||
|
||||
//Read into shared memory
|
||||
//Loop over all variables
|
||||
for (int j=threadIdx.y; j<h+2*gc_y; j+=h) {
|
||||
//Handle periodic boundary conditions here
|
||||
int l = handlePeriodicBoundaryY<gc_y>(by + j + y0, ny_, boundary_conditions_);
|
||||
l = min(l, min(ny_+2*gc_y-1, y1+2*gc_y-1));
|
||||
float* row = (float*) ((char*) ptr_ + pitch_*l);
|
||||
|
||||
for (int i=threadIdx.x; i<w+2*gc_x; i+=w) {
|
||||
//Handle periodic boundary conditions here
|
||||
int k = handlePeriodicBoundaryX<gc_x>(bx + i + x0, nx_, boundary_conditions_);
|
||||
k = min(k, min(nx_+2*gc_x-1, x1+2*gc_x-1));
|
||||
|
||||
//Read from global memory
|
||||
Q[j][i] = row[k];
|
||||
}
|
||||
}
|
||||
__syncthreads();
|
||||
|
||||
handleReflectiveBoundary<w, h, gc_x, gc_y, sign_x, sign_y>(Q, nx_, ny_, boundary_conditions_);
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Writes a block of data to global memory for the shallow water equations.
|
||||
*/
|
||||
template<int w, int h, int gc_x, int gc_y>
|
||||
inline __device__ void writeBlock(float* ptr_, int pitch_,
|
||||
float shmem[h+2*gc_y][w+2*gc_x],
|
||||
const int nx_, const int ny_,
|
||||
int rk_step_, int rk_order_,
|
||||
int x0, int y0,
|
||||
int x1, int y1) {
|
||||
|
||||
//Index of cell within domain
|
||||
const int ti = blockDim.x*blockIdx.x + threadIdx.x + gc_x + x0;
|
||||
const int tj = blockDim.y*blockIdx.y + threadIdx.y + gc_y + y0;
|
||||
|
||||
//In case we are writing only to a subarea given by (x0, y0) x (x1, y1)
|
||||
const int max_ti = min(nx_+gc_x, x1+gc_x);
|
||||
const int max_tj = min(ny_+gc_y, y1+gc_y);
|
||||
|
||||
//Only write internal cells
|
||||
if ((x0+gc_x <= ti) && (ti < max_ti) && (y0+gc_y <= tj) && (tj < max_tj)) {
|
||||
//Index of thread within block
|
||||
const int tx = threadIdx.x + gc_x;
|
||||
const int ty = threadIdx.y + gc_y;
|
||||
|
||||
float* const row = (float*) ((char*) ptr_ + pitch_*tj);
|
||||
|
||||
//Handle runge-kutta timestepping here
|
||||
row[ti] = shmem[ty][tx];
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* SSPRK1 (forward Euler)
|
||||
* u^1 = u^n + dt*f(u^n)
|
||||
*/
|
||||
if (rk_order_ == 1) {
|
||||
row[ti] = shmem[ty][tx];
|
||||
}
|
||||
/**
|
||||
* SSPRK2
|
||||
* u^1 = u^n + dt*f(u^n)
|
||||
* u^n+1 = 1/2*u^n + 1/2*(u^1 + dt*f(u^1))
|
||||
*/
|
||||
else if (rk_order_ == 2) {
|
||||
if (rk_step_ == 0) {
|
||||
row[ti] = shmem[ty][tx];
|
||||
}
|
||||
else if (rk_step_ == 1) {
|
||||
row[ti] = 0.5f*row[ti] + 0.5f*shmem[ty][tx];
|
||||
}
|
||||
}
|
||||
/**
|
||||
* SSPRK3
|
||||
* u^1 = u^n + dt*f(u^n)
|
||||
* u^2 = 3/4 * u^n + 1/4 * (u^1 + dt*f(u^1))
|
||||
* u^n+1 = 1/3 * u^n + 2/3 * (u^2 + dt*f(u^2))
|
||||
* FIXME: This is not correct now, need a temporary to hold intermediate step u^2
|
||||
*/
|
||||
else if (rk_order_ == 3) {
|
||||
if (rk_step_ == 0) {
|
||||
row[ti] = shmem[ty][tx];
|
||||
}
|
||||
else if (rk_step_ == 1) {
|
||||
row[ti] = 0.75f*row[ti] + 0.25f*shmem[ty][tx];
|
||||
}
|
||||
else if (rk_step_ == 2) {
|
||||
const float t = 1.0f / 3.0f; //Not representable in base 2
|
||||
row[ti] = t*row[ti] + (1.0f-t)*shmem[ty][tx];
|
||||
}
|
||||
}
|
||||
|
||||
// DEBUG
|
||||
//row[ti] = 99.0;
|
||||
}
|
||||
}
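
/**
* Evolves the solution in time along the x axis (dimensional splitting)
*/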
template<int w, int h, int gc_x, int gc_y, int vars>
|
||||
__device__ void evolveF(float Q[vars][h+2*gc_y][w+2*gc_x],
|
||||
float F[vars][h+2*gc_y][w+2*gc_x],
|
||||
const float dx_, const float dt_) {
|
||||
for (int var=0; var < vars; ++var) {
|
||||
for (int j=threadIdx.y; j<h+2*gc_y; j+=h) {
|
||||
for (int i=threadIdx.x+gc_x; i<w+gc_x; i+=w) {
|
||||
Q[var][j][i] = Q[var][j][i] + (F[var][j][i-1] - F[var][j][i]) * dt_ / dx_;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
/**
|
||||
* Evolves the solution in time along the y axis (dimensional splitting)
|
||||
*/
|
||||
template<int w, int h, int gc_x, int gc_y, int vars>
|
||||
__device__ void evolveG(float Q[vars][h+2*gc_y][w+2*gc_x],
|
||||
float G[vars][h+2*gc_y][w+2*gc_x],
|
||||
const float dy_, const float dt_) {
|
||||
for (int var=0; var < vars; ++var) {
|
||||
for (int j=threadIdx.y+gc_y; j<h+gc_y; j+=h) {
|
||||
for (int i=threadIdx.x; i<w+2*gc_x; i+=w) {
|
||||
Q[var][j][i] = Q[var][j][i] + (G[var][j-1][i] - G[var][j][i]) * dt_ / dy_;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
/**
|
||||
* Helper function for debugging etc.
|
||||
*/
|
||||
template<int shmem_width, int shmem_height, int vars>
|
||||
__device__ void memset(float Q[vars][shmem_height][shmem_width], float value) {
|
||||
for (int k=0; k<vars; ++k) {
|
||||
for (int j=threadIdx.y; j<shmem_height; ++j) {
|
||||
for (int i=threadIdx.x; i<shmem_width; ++i) {
|
||||
Q[k][j][i] = value;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
template <unsigned int threads>
|
||||
__device__ void reduce_max(float* data, unsigned int n) {
|
||||
__shared__ float sdata[threads];
|
||||
unsigned int tid = threadIdx.x;
|
||||
|
||||
//Reduce to "threads" elements
|
||||
sdata[tid] = FLT_MIN;
|
||||
for (unsigned int i=tid; i<n; i += threads) {
|
||||
sdata[tid] = max(sdata[tid], dt_ctx.L[i]);
|
||||
}
|
||||
__syncthreads();
|
||||
|
||||
//Now, reduce all elements into a single element
|
||||
if (threads >= 512) {
|
||||
if (tid < 256) {
|
||||
sdata[tid] = max(sdata[tid], sdata[tid + 256]);
|
||||
}
|
||||
__syncthreads();
|
||||
}
|
||||
if (threads >= 256) {
|
||||
if (tid < 128) {
|
||||
sdata[tid] = max(sdata[tid], sdata[tid + 128]);
|
||||
}
|
||||
__syncthreads();
|
||||
}
|
||||
if (threads >= 128) {
|
||||
if (tid < 64) {
|
||||
sdata[tid] = max(sdata[tid], sdata[tid + 64]);
|
||||
}
|
||||
__syncthreads();
|
||||
}
|
||||
if (tid < 32) {
|
||||
volatile float* sdata_volatile = sdata;
|
||||
if (threads >= 64) {
|
||||
sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 32]);
|
||||
}
|
||||
if (tid < 16) {
|
||||
if (threads >= 32) sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 16]);
|
||||
if (threads >= 16) sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 8]);
|
||||
if (threads >= 8) sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 4]);
|
||||
if (threads >= 4) sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 2]);
|
||||
if (threads >= 2) sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 1]);
|
||||
}
|
||||
|
||||
if (tid == 0) {
|
||||
return sdata_volatile[0];
|
||||
}
|
||||
}
|
||||
}
@ -1,118 +0,0 @@
|
||||
/*
|
||||
This file implements different flux and slope limiters
|
||||
|
||||
Copyright (C) 2016, 2017, 2018 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Reconstructs a slope using the generalized minmod limiter based on three
|
||||
* consecutive values
|
||||
*/
|
||||
__device__ __inline__ float minmodSlope(float left, float center, float right, float theta) {
|
||||
const float backward = (center - left) * theta;
|
||||
const float central = (right - left) * 0.5f;
|
||||
const float forward = (right - center) * theta;
|
||||
|
||||
return 0.25f
|
||||
*copysign(1.0f, backward)
|
||||
*(copysign(1.0f, backward) + copysign(1.0f, central))
|
||||
*(copysign(1.0f, central) + copysign(1.0f, forward))
|
||||
*min( min(fabs(backward), fabs(central)), fabs(forward) );
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Reconstructs a minmod slope for a whole block along the abscissa
|
||||
*/
|
||||
template<int w, int h, int gc_x, int gc_y, int vars>
|
||||
__device__ void minmodSlopeX(float Q[vars][h+2*gc_y][w+2*gc_x],
|
||||
float Qx[vars][h+2*gc_y][w+2*gc_x],
|
||||
const float theta_) {
|
||||
//Reconstruct slopes along x axis
|
||||
for (int p=0; p<vars; ++p) {
|
||||
for (int j=threadIdx.y; j<h+2*gc_y; j+=h) {
|
||||
for (int i=threadIdx.x+1; i<w+2*gc_x-1; i+=w) {
|
||||
Qx[p][j][i] = minmodSlope(Q[p][j][i-1], Q[p][j][i], Q[p][j][i+1], theta_);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Reconstructs a minmod slope for a whole block along the ordinate
|
||||
*/
|
||||
template<int w, int h, int gc_x, int gc_y, int vars>
|
||||
__device__ void minmodSlopeY(float Q[vars][h+2*gc_y][w+2*gc_x],
|
||||
float Qy[vars][h+2*gc_y][w+2*gc_x],
|
||||
const float theta_) {
|
||||
//Reconstruct slopes along y axis
|
||||
for (int p=0; p<vars; ++p) {
|
||||
for (int j=threadIdx.y+1; j<h+2*gc_y-1; j+=h) {
|
||||
for (int i=threadIdx.x; i<w+2*gc_x; i+=w) {
|
||||
Qy[p][j][i] = minmodSlope(Q[p][j-1][i], Q[p][j][i], Q[p][j+1][i], theta_);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
__device__ float monotonized_central(float r_) {
|
||||
return fmaxf(0.0f, fminf(2.0f, fminf(2.0f*r_, 0.5f*(1.0f+r_))));
|
||||
}
|
||||
|
||||
__device__ float osher(float r_, float beta_) {
|
||||
return fmaxf(0.0f, fminf(beta_, r_));
|
||||
}
|
||||
|
||||
__device__ float sweby(float r_, float beta_) {
|
||||
return fmaxf(0.0f, fmaxf(fminf(r_, beta_), fminf(beta_*r_, 1.0f)));
|
||||
}
|
||||
|
||||
__device__ float minmod(float r_) {
|
||||
return fmaxf(0.0f, fminf(1.0f, r_));
|
||||
}
|
||||
|
||||
__device__ float generalized_minmod(float r_, float theta_) {
|
||||
return fmaxf(0.0f, fminf(theta_*r_, fminf( (1.0f + r_) / 2.0f, theta_)));
|
||||
}
|
||||
|
||||
__device__ float superbee(float r_) {
|
||||
return fmaxf(0.0f, fmaxf(fminf(2.0f*r_, 1.0f), fminf(r_, 2.0f)));
|
||||
}
|
||||
|
||||
__device__ float vanAlbada1(float r_) {
|
||||
return (r_*r_ + r_) / (r_*r_ + 1.0f);
|
||||
}
|
||||
|
||||
__device__ float vanAlbada2(float r_) {
    return 2.0f*r_ / (r_*r_ + 1.0f);
}
|
||||
|
||||
__device__ float vanLeer(float r_) {
|
||||
return (r_ + fabsf(r_)) / (1.0f + fabsf(r_));
|
||||
}
|
@ -1,355 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
This python module implements Cuda context handling
|
||||
|
||||
Copyright (C) 2018 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
|
||||
from GPUSimulators.Simulator import BoundaryCondition
|
||||
import numpy as np
|
||||
import gc
|
||||
|
||||
|
||||
def getExtent(width, height, nx, ny, grid, index=None):
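    """
    Computes the cell-centered extent [x0, x1, y0, y1, dx, dy] of this
    (sub)domain, splitting the width x height domain across the grid of
    subdomains if one is given
    """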
|
||||
if grid is not None:
|
||||
gx = grid.grid[0]
|
||||
gy = grid.grid[1]
|
||||
if index is not None:
|
||||
i, j = grid.getCoordinate(index)
|
||||
else:
|
||||
i, j = grid.getCoordinate()
|
||||
|
||||
dx = (width / gx) / nx
|
||||
dy = (height / gy) / ny
|
||||
|
||||
x0 = width*i/gx + 0.5*dx
|
||||
y0 = height*j/gy + 0.5*dy
|
||||
x1 = width*(i+1)/gx - 0.5*dx
|
||||
y1 = height*(j+1)/gy - 0.5*dy
|
||||
|
||||
else:
|
||||
dx = width / nx
|
||||
dy = height / ny
|
||||
|
||||
x0 = 0.5*dx
|
||||
y0 = 0.5*dy
|
||||
x1 = width-0.5*dx
|
||||
y1 = height-0.5*dy
|
||||
|
||||
return [x0, x1, y0, y1, dx, dy]
|
||||
|
||||
|
||||
def downsample(highres_solution, x_factor, y_factor=None):
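    """
    Downsamples highres_solution by averaging non-overlapping blocks of
    x_factor by y_factor cells (the factors must divide the resolution evenly)
    """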
|
||||
if (y_factor == None):
|
||||
y_factor = x_factor
|
||||
|
||||
assert(highres_solution.shape[1] % x_factor == 0)
|
||||
assert(highres_solution.shape[0] % y_factor == 0)
|
||||
|
||||
if (x_factor*y_factor == 1):
|
||||
return highres_solution
|
||||
|
||||
if (len(highres_solution.shape) == 1):
|
||||
highres_solution = highres_solution.reshape((1, highres_solution.size))
|
||||
|
||||
nx = highres_solution.shape[1] / x_factor
|
||||
ny = highres_solution.shape[0] / y_factor
|
||||
|
||||
return highres_solution.reshape([int(ny), int(y_factor), int(nx), int(x_factor)]).mean(3).mean(1)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def bump(nx, ny, width, height,
|
||||
bump_size=None,
|
||||
ref_nx=None, ref_ny=None,
|
||||
x_center=0.5, y_center=0.5,
|
||||
h_ref=0.5, h_amp=0.1, u_ref=0.0, u_amp=0.1, v_ref=0.0, v_amp=0.1):
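    """
    Generates a cosine-shaped bump in water depth and velocities, centered at
    (x_center, y_center), created at ref_nx x ref_ny resolution and then
    downsampled to nx x ny
    """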
|
||||
|
||||
if (ref_nx == None):
|
||||
ref_nx = nx
|
||||
assert(ref_nx >= nx)
|
||||
|
||||
if (ref_ny == None):
|
||||
ref_ny = ny
|
||||
assert(ref_ny >= ny)
|
||||
|
||||
if (bump_size == None):
|
||||
bump_size = width/5.0
|
||||
|
||||
ref_dx = width / float(ref_nx)
|
||||
ref_dy = height / float(ref_ny)
|
||||
|
||||
x_center = ref_dx*ref_nx*x_center
|
||||
y_center = ref_dy*ref_ny*y_center
|
||||
|
||||
x = ref_dx*(np.arange(0, ref_nx, dtype=np.float32)+0.5) - x_center
|
||||
y = ref_dy*(np.arange(0, ref_ny, dtype=np.float32)+0.5) - y_center
|
||||
xv, yv = np.meshgrid(x, y, sparse=False, indexing='xy')
|
||||
r = np.sqrt(xv**2 + yv**2)
|
||||
xv = None
|
||||
yv = None
|
||||
gc.collect()
|
||||
|
||||
#Generate highres then downsample
|
||||
#h_highres = 0.5 + 0.1*np.exp(-(xv**2/size + yv**2/size))
|
||||
h_highres = h_ref + h_amp*0.5*(1.0 + np.cos(np.pi*r/bump_size)) * (r < bump_size)
|
||||
h = downsample(h_highres, ref_nx/nx, ref_ny/ny)
|
||||
h_highres = None
|
||||
gc.collect()
|
||||
|
||||
#hu_highres = 0.1*np.exp(-(xv**2/size + yv**2/size))
|
||||
u_highres = u_ref + u_amp*0.5*(1.0 + np.cos(np.pi*r/bump_size)) * (r < bump_size)
|
||||
hu = downsample(u_highres, ref_nx/nx, ref_ny/ny)*h
|
||||
u_highres = None
|
||||
gc.collect()
|
||||
|
||||
#hu_highres = 0.1*np.exp(-(xv**2/size + yv**2/size))
|
||||
v_highres = v_ref + v_amp*0.5*(1.0 + np.cos(np.pi*r/bump_size)) * (r < bump_size)
|
||||
hv = downsample(v_highres, ref_nx/nx, ref_ny/ny)*h
|
||||
v_highres = None
|
||||
gc.collect()
|
||||
|
||||
dx = width/nx
|
||||
dy = height/ny
|
||||
|
||||
return h, hu, hv, dx, dy
|
||||
|
||||
|
||||
def genShockBubble(nx, ny, gamma, grid=None):
|
||||
"""
|
||||
Generate Shock-bubble interaction case for the Euler equations
|
||||
"""
|
||||
|
||||
width = 4.0
|
||||
height = 1.0
|
||||
g = 0.0
|
||||
|
||||
|
||||
rho = np.ones((ny, nx), dtype=np.float32)
|
||||
u = np.zeros((ny, nx), dtype=np.float32)
|
||||
v = np.zeros((ny, nx), dtype=np.float32)
|
||||
E = np.zeros((ny, nx), dtype=np.float32)
|
||||
p = np.ones((ny, nx), dtype=np.float32)
|
||||
|
||||
|
||||
x0, x1, y0, y1, dx, dy = getExtent(width, height, nx, ny, grid)
|
||||
x = np.linspace(x0, x1, nx, dtype=np.float32)
|
||||
y = np.linspace(y0, y1, ny, dtype=np.float32)
|
||||
xv, yv = np.meshgrid(x, y, sparse=False, indexing='xy')
|
||||
|
||||
#Bubble
|
||||
radius = 0.25
|
||||
x_center = 0.5
|
||||
y_center = 0.5
|
||||
bubble = np.sqrt((xv-x_center)**2+(yv-y_center)**2) <= radius
|
||||
rho = np.where(bubble, 0.1, rho)
|
||||
|
||||
#Left boundary
|
||||
left = (xv < 0.1)
|
||||
rho = np.where(left, 3.81250, rho)
|
||||
u = np.where(left, 2.57669, u)
|
||||
|
||||
#Energy
|
||||
p = np.where(left, 10.0, p)
|
||||
E = 0.5*rho*(u**2+v**2) + p/(gamma-1.0)
|
||||
|
||||
|
||||
bc = BoundaryCondition({
|
||||
'north': BoundaryCondition.Type.Reflective,
|
||||
'south': BoundaryCondition.Type.Reflective,
|
||||
'east': BoundaryCondition.Type.Periodic,
|
||||
'west': BoundaryCondition.Type.Periodic
|
||||
})
|
||||
|
||||
#Construct simulator
|
||||
arguments = {
|
||||
'rho': rho, 'rho_u': rho*u, 'rho_v': rho*v, 'E': E,
|
||||
'nx': nx, 'ny': ny,
|
||||
'dx': dx, 'dy': dy,
|
||||
'g': g,
|
||||
'gamma': gamma,
|
||||
'boundary_conditions': bc
|
||||
}
|
||||
return arguments
def genKelvinHelmholtz(nx, ny, gamma, roughness=0.125, grid=None, index=None):
|
||||
"""
|
||||
Roughness parameter in (0, 1.0] determines how "squiggly"
|
||||
the interface between the zones is
|
||||
"""
|
||||
|
||||
def genZones(nx, ny, n):
|
||||
"""
|
||||
Generates the zones of the two fluids of K-H
|
||||
"""
|
||||
zone = np.zeros((ny, nx), dtype=np.int32)
|
||||
|
||||
|
||||
def genSmoothRandom(nx, n):
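    """
    Generates nx smooth random perturbations in [-0.5, 0.5) by interpolating
    n random control points
    """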
|
||||
n = max(1, min(n, nx))
|
||||
|
||||
if n == nx:
|
||||
return np.random.random(nx)-0.5
|
||||
else:
|
||||
from scipy.interpolate import interp1d
|
||||
|
||||
#Control points and interpolator
|
||||
xp = np.linspace(0.0, 1.0, n)
|
||||
yp = np.random.random(n) - 0.5
|
||||
|
||||
if (n == 1):
|
||||
kind = 'nearest'
|
||||
elif (n == 2):
|
||||
kind = 'linear'
|
||||
elif (n == 3):
|
||||
kind = 'quadratic'
|
||||
else:
|
||||
kind = 'cubic'
|
||||
|
||||
f = interp1d(xp, yp, kind=kind)
|
||||
|
||||
#Interpolation points
|
||||
x = np.linspace(0.0, 1.0, nx)
|
||||
return f(x)
|
||||
|
||||
|
||||
|
||||
x0, x1, y0, y1, _, dy = getExtent(1.0, 1.0, nx, ny, grid, index)
|
||||
x = np.linspace(x0, x1, nx)
|
||||
y = np.linspace(y0, y1, ny)
|
||||
_, y = np.meshgrid(x, y)
|
||||
|
||||
#print(y+a[0])
|
||||
|
||||
a = genSmoothRandom(nx, n)*dy
|
||||
zone = np.where(y > 0.25+a, zone, 1)
|
||||
|
||||
a = genSmoothRandom(nx, n)*dy
|
||||
zone = np.where(y < 0.75+a, zone, 1)
|
||||
|
||||
return zone
|
||||
|
||||
width = 2.0
|
||||
height = 1.0
|
||||
g = 0.0
|
||||
gamma = 1.4
|
||||
|
||||
rho = np.empty((ny, nx), dtype=np.float32)
|
||||
u = np.empty((ny, nx), dtype=np.float32)
|
||||
v = np.zeros((ny, nx), dtype=np.float32)
|
||||
p = 2.5*np.ones((ny, nx), dtype=np.float32)
|
||||
|
||||
#Generate the different zones
|
||||
zones = genZones(nx, ny, max(1, min(nx, int(nx*roughness))))
|
||||
|
||||
#Zone 0
|
||||
zone0 = zones == 0
|
||||
rho = np.where(zone0, 1.0, rho)
|
||||
u = np.where(zone0, 0.5, u)
|
||||
|
||||
#Zone 1
|
||||
zone1 = zones == 1
|
||||
rho = np.where(zone1, 2.0, rho)
|
||||
u = np.where(zone1, -0.5, u)
|
||||
|
||||
E = 0.5*rho*(u**2+v**2) + p/(gamma-1.0)
|
||||
|
||||
_, _, _, _, dx, dy = getExtent(width, height, nx, ny, grid, index)
|
||||
|
||||
|
||||
bc = BoundaryCondition({
|
||||
'north': BoundaryCondition.Type.Periodic,
|
||||
'south': BoundaryCondition.Type.Periodic,
|
||||
'east': BoundaryCondition.Type.Periodic,
|
||||
'west': BoundaryCondition.Type.Periodic
|
||||
})
|
||||
|
||||
#Construct simulator
|
||||
arguments = {
|
||||
'rho': rho, 'rho_u': rho*u, 'rho_v': rho*v, 'E': E,
|
||||
'nx': nx, 'ny': ny,
|
||||
'dx': dx, 'dy': dy,
|
||||
'g': g,
|
||||
'gamma': gamma,
|
||||
'boundary_conditions': bc
|
||||
}
|
||||
|
||||
return arguments
|
||||
|
||||
|
||||
|
||||
def genRayleighTaylor(nx, ny, gamma, version=0, grid=None):
|
||||
"""
|
||||
Generates Rayleigh-Taylor instability case
|
||||
"""
|
||||
width = 0.5
|
||||
height = 1.5
|
||||
g = 0.1
|
||||
|
||||
rho = np.zeros((ny, nx), dtype=np.float32)
|
||||
u = np.zeros((ny, nx), dtype=np.float32)
|
||||
v = np.zeros((ny, nx), dtype=np.float32)
|
||||
p = np.zeros((ny, nx), dtype=np.float32)
|
||||
|
||||
|
||||
x0, x1, y0, y1, dx, dy = getExtent(width, height, nx, ny, grid)
|
||||
x = np.linspace(x0, x1, nx, dtype=np.float32)-width*0.5
|
||||
y = np.linspace(y0, y1, ny, dtype=np.float32)-height*0.5
|
||||
xv, yv = np.meshgrid(x, y, sparse=False, indexing='xy')
|
||||
|
||||
#This gives a squiggly interface
|
||||
if (version == 0):
|
||||
y_threshold = 0.01*np.cos(2*np.pi*np.abs(x)/0.5)
|
||||
rho = np.where(yv <= y_threshold, 1.0, rho)
|
||||
rho = np.where(yv > y_threshold, 2.0, rho)
|
||||
elif (version == 1):
|
||||
rho = np.where(yv <= 0.0, 1.0, rho)
|
||||
rho = np.where(yv > 0.0, 2.0, rho)
|
||||
v = 0.01*(1.0 + np.cos(2*np.pi*xv/0.5))/4
|
||||
else:
|
||||
assert False, "Invalid version"
|
||||
|
||||
p = 2.5 - rho*g*yv
|
||||
E = 0.5*rho*(u**2+v**2) + p/(gamma-1.0)
|
||||
|
||||
bc = BoundaryCondition({
|
||||
'north': BoundaryCondition.Type.Reflective,
|
||||
'south': BoundaryCondition.Type.Reflective,
|
||||
'east': BoundaryCondition.Type.Reflective,
|
||||
'west': BoundaryCondition.Type.Reflective
|
||||
})
|
||||
|
||||
#Construct simulator
|
||||
arguments = {
|
||||
'rho': rho, 'rho_u': rho*u, 'rho_v': rho*v, 'E': E,
|
||||
'nx': nx, 'ny': ny,
|
||||
'dx': dx, 'dy': dy,
|
||||
'g': g,
|
||||
'gamma': gamma,
|
||||
'boundary_conditions': bc
|
||||
}
|
||||
|
||||
return arguments
|
@ -1,61 +0,0 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
This python module implements visualization techniques/modes
|
||||
|
||||
Copyright (C) 2018 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
|
||||
|
||||
import numpy as np
|
||||
|
||||
from matplotlib.colors import Normalize
|
||||
|
||||
|
||||
|
||||
def genSchlieren(rho):
|
||||
#Compute length of z-component of normalized gradient vector
|
||||
normal = np.gradient(rho) #[x, y, 1]
|
||||
length = 1.0 / np.sqrt(normal[0]**2 + normal[1]**2 + 1.0)
|
||||
schlieren = np.power(length, 128)
|
||||
return schlieren
|
||||
|
||||
|
||||
def genVorticity(rho, rho_u, rho_v):
    u = rho_u / rho
    v = rho_v / rho

    du_dy, _ = np.gradient(u)
    _, dv_dx = np.gradient(v)

    #Vorticity: z-component of the curl of the velocity field
    curl = dv_dx - du_dy
    return curl
|
||||
|
||||
|
||||
def genColors(rho, rho_u, rho_v, cmap, vmax, vmin):
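    """
    Colors the vorticity field through cmap and modulates it with schlieren
    shading of the density field to produce RGBA colors
    """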
|
||||
schlieren = genSchlieren(rho)
|
||||
curl = genVorticity(rho, rho_u, rho_v)
|
||||
|
||||
colors = Normalize(vmin, vmax, clip=True)(curl)
|
||||
colors = cmap(colors)
|
||||
for k in range(3):
|
||||
colors[:,:,k] = colors[:,:,k]*schlieren
|
||||
|
||||
return colors
|
@ -1,34 +0,0 @@
|
||||
#!/bin/bash -e
|
||||
#SBATCH --job-name=lumi
|
||||
#SBATCH --account=project_4650000xx
|
||||
#SBATCH --time=00:10:00
|
||||
#SBATCH --partition=dev-g
|
||||
#SBATCH --nodes=1
|
||||
#SBATCH --ntasks-per-node=2
|
||||
#SBATCH --gpus=2
|
||||
#SBATCH --gpus-per-node=2
|
||||
#SBATCH -o %x-%j.out
|
||||
#
|
||||
|
||||
N=$SLURM_JOB_NUM_NODES
|
||||
echo "--nbr of nodes:", $N
|
||||
echo "--total nbr of gpus:", $SLURM_NTASKS
|
||||
|
||||
Mydir=/project/project_4650000xx
|
||||
Myapplication=${Mydir}/FiniteVolumeGPU_hip/mpiTesting.py
|
||||
|
||||
#modules
|
||||
ml LUMI/23.03 partition/G
|
||||
ml lumi-container-wrapper
|
||||
ml cray-python/3.9.13.1
|
||||
ml rocm/5.2.3
|
||||
|
||||
ml craype-accel-amd-gfx90a
|
||||
ml cray-mpich/8.1.27
|
||||
|
||||
#Enable GPU-aware MPI
|
||||
export MPICH_GPU_SUPPORT_ENABLED=1
|
||||
|
||||
export PATH="/project/project_4650000xx/FiniteVolumeGPU_hip/MyCondaEnv/bin:$PATH"
|
||||
|
||||
srun python ${Myapplication} -nx 1024 -ny 1024 --profile
|
LICENSE
@ -1,674 +0,0 @@
|
||||
GNU GENERAL PUBLIC LICENSE
|
||||
Version 3, 29 June 2007
|
||||
|
||||
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
|
||||
Everyone is permitted to copy and distribute verbatim copies
|
||||
of this license document, but changing it is not allowed.
|
||||
|
||||
Preamble
|
||||
|
||||
The GNU General Public License is a free, copyleft license for
|
||||
software and other kinds of works.
|
||||
|
||||
The licenses for most software and other practical works are designed
|
||||
to take away your freedom to share and change the works. By contrast,
|
||||
the GNU General Public License is intended to guarantee your freedom to
|
||||
share and change all versions of a program--to make sure it remains free
|
||||
software for all its users. We, the Free Software Foundation, use the
|
||||
GNU General Public License for most of our software; it applies also to
|
||||
any other work released this way by its authors. You can apply it to
|
||||
your programs, too.
|
||||
|
||||
When we speak of free software, we are referring to freedom, not
|
||||
price. Our General Public Licenses are designed to make sure that you
|
||||
have the freedom to distribute copies of free software (and charge for
|
||||
them if you wish), that you receive source code or can get it if you
|
||||
want it, that you can change the software or use pieces of it in new
|
||||
free programs, and that you know you can do these things.
|
||||
|
||||
To protect your rights, we need to prevent others from denying you
|
||||
these rights or asking you to surrender the rights. Therefore, you have
|
||||
certain responsibilities if you distribute copies of the software, or if
|
||||
you modify it: responsibilities to respect the freedom of others.
|
||||
|
||||
For example, if you distribute copies of such a program, whether
|
||||
gratis or for a fee, you must pass on to the recipients the same
|
||||
freedoms that you received. You must make sure that they, too, receive
|
||||
or can get the source code. And you must show them these terms so they
|
||||
know their rights.
|
||||
|
||||
Developers that use the GNU GPL protect your rights with two steps:
|
||||
(1) assert copyright on the software, and (2) offer you this License
|
||||
giving you legal permission to copy, distribute and/or modify it.
|
||||
|
||||
For the developers' and authors' protection, the GPL clearly explains
|
||||
that there is no warranty for this free software. For both users' and
|
||||
authors' sake, the GPL requires that modified versions be marked as
|
||||
changed, so that their problems will not be attributed erroneously to
|
||||
authors of previous versions.
|
||||
|
||||
Some devices are designed to deny users access to install or run
|
||||
modified versions of the software inside them, although the manufacturer
|
||||
can do so. This is fundamentally incompatible with the aim of
|
||||
protecting users' freedom to change the software. The systematic
|
||||
pattern of such abuse occurs in the area of products for individuals to
|
||||
use, which is precisely where it is most unacceptable. Therefore, we
|
||||
have designed this version of the GPL to prohibit the practice for those
|
||||
products. If such problems arise substantially in other domains, we
|
||||
stand ready to extend this provision to those domains in future versions
|
||||
of the GPL, as needed to protect the freedom of users.
|
||||
|
||||
Finally, every program is threatened constantly by software patents.
|
||||
States should not allow patents to restrict development and use of
|
||||
software on general-purpose computers, but in those that do, we wish to
|
||||
avoid the special danger that patents applied to a free program could
|
||||
make it effectively proprietary. To prevent this, the GPL assures that
|
||||
patents cannot be used to render the program non-free.
|
||||
|
||||
The precise terms and conditions for copying, distribution and
|
||||
modification follow.
|
||||
|
||||
TERMS AND CONDITIONS
|
||||
|
||||
0. Definitions.
|
||||
|
||||
"This License" refers to version 3 of the GNU General Public License.
|
||||
|
||||
"Copyright" also means copyright-like laws that apply to other kinds of
|
||||
works, such as semiconductor masks.
|
||||
|
||||
"The Program" refers to any copyrightable work licensed under this
|
||||
License. Each licensee is addressed as "you". "Licensees" and
|
||||
"recipients" may be individuals or organizations.
|
||||
|
||||
To "modify" a work means to copy from or adapt all or part of the work
|
||||
in a fashion requiring copyright permission, other than the making of an
|
||||
exact copy. The resulting work is called a "modified version" of the
|
||||
earlier work or a work "based on" the earlier work.
|
||||
|
||||
A "covered work" means either the unmodified Program or a work based
|
||||
on the Program.
|
||||
|
||||
To "propagate" a work means to do anything with it that, without
|
||||
permission, would make you directly or secondarily liable for
|
||||
infringement under applicable copyright law, except executing it on a
|
||||
computer or modifying a private copy. Propagation includes copying,
|
||||
distribution (with or without modification), making available to the
|
||||
public, and in some countries other activities as well.
|
||||
|
||||
To "convey" a work means any kind of propagation that enables other
|
||||
parties to make or receive copies. Mere interaction with a user through
|
||||
a computer network, with no transfer of a copy, is not conveying.
|
||||
|
||||
An interactive user interface displays "Appropriate Legal Notices"
|
||||
to the extent that it includes a convenient and prominently visible
|
||||
feature that (1) displays an appropriate copyright notice, and (2)
|
||||
tells the user that there is no warranty for the work (except to the
|
||||
extent that warranties are provided), that licensees may convey the
|
||||
work under this License, and how to view a copy of this License. If
|
||||
the interface presents a list of user commands or options, such as a
|
||||
menu, a prominent item in the list meets this criterion.
|
||||
|
||||
1. Source Code.
|
||||
|
||||
The "source code" for a work means the preferred form of the work
|
||||
for making modifications to it. "Object code" means any non-source
|
||||
form of a work.
|
||||
|
||||
A "Standard Interface" means an interface that either is an official
|
||||
standard defined by a recognized standards body, or, in the case of
|
||||
interfaces specified for a particular programming language, one that
|
||||
is widely used among developers working in that language.
|
||||
|
||||
The "System Libraries" of an executable work include anything, other
|
||||
than the work as a whole, that (a) is included in the normal form of
|
||||
packaging a Major Component, but which is not part of that Major
|
||||
Component, and (b) serves only to enable use of the work with that
|
||||
Major Component, or to implement a Standard Interface for which an
|
||||
implementation is available to the public in source code form. A
|
||||
"Major Component", in this context, means a major essential component
|
||||
(kernel, window system, and so on) of the specific operating system
|
||||
(if any) on which the executable work runs, or a compiler used to
|
||||
produce the work, or an object code interpreter used to run it.
|
||||
|
||||
The "Corresponding Source" for a work in object code form means all
|
||||
the source code needed to generate, install, and (for an executable
|
||||
work) run the object code and to modify the work, including scripts to
|
||||
control those activities. However, it does not include the work's
|
||||
System Libraries, or general-purpose tools or generally available free
|
||||
programs which are used unmodified in performing those activities but
|
||||
which are not part of the work. For example, Corresponding Source
|
||||
includes interface definition files associated with source files for
|
||||
the work, and the source code for shared libraries and dynamically
|
||||
linked subprograms that the work is specifically designed to require,
|
||||
such as by intimate data communication or control flow between those
|
||||
subprograms and other parts of the work.
|
||||
|
||||
The Corresponding Source need not include anything that users
|
||||
can regenerate automatically from other parts of the Corresponding
|
||||
Source.
|
||||
|
||||
The Corresponding Source for a work in source code form is that
|
||||
same work.
|
||||
|
||||
2. Basic Permissions.
|
||||
|
||||
All rights granted under this License are granted for the term of
|
||||
copyright on the Program, and are irrevocable provided the stated
|
||||
conditions are met. This License explicitly affirms your unlimited
|
||||
permission to run the unmodified Program. The output from running a
|
||||
covered work is covered by this License only if the output, given its
|
||||
content, constitutes a covered work. This License acknowledges your
|
||||
rights of fair use or other equivalent, as provided by copyright law.
|
||||
|
||||
You may make, run and propagate covered works that you do not
|
||||
convey, without conditions so long as your license otherwise remains
|
||||
in force. You may convey covered works to others for the sole purpose
|
||||
of having them make modifications exclusively for you, or provide you
|
||||
with facilities for running those works, provided that you comply with
|
||||
the terms of this License in conveying all material for which you do
|
||||
not control copyright. Those thus making or running the covered works
|
||||
for you must do so exclusively on your behalf, under your direction
|
||||
and control, on terms that prohibit them from making any copies of
|
||||
your copyrighted material outside their relationship with you.
|
||||
|
||||
Conveying under any other circumstances is permitted solely under
|
||||
the conditions stated below. Sublicensing is not allowed; section 10
|
||||
makes it unnecessary.
|
||||
|
||||
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
|
||||
|
||||
No covered work shall be deemed part of an effective technological
|
||||
measure under any applicable law fulfilling obligations under article
|
||||
11 of the WIPO copyright treaty adopted on 20 December 1996, or
|
||||
similar laws prohibiting or restricting circumvention of such
|
||||
measures.
|
||||
|
||||
When you convey a covered work, you waive any legal power to forbid
|
||||
circumvention of technological measures to the extent such circumvention
|
||||
is effected by exercising rights under this License with respect to
|
||||
the covered work, and you disclaim any intention to limit operation or
|
||||
modification of the work as a means of enforcing, against the work's
|
||||
users, your or third parties' legal rights to forbid circumvention of
|
||||
technological measures.
|
||||
|
||||
4. Conveying Verbatim Copies.
|
||||
|
||||
You may convey verbatim copies of the Program's source code as you
|
||||
receive it, in any medium, provided that you conspicuously and
|
||||
appropriately publish on each copy an appropriate copyright notice;
|
||||
keep intact all notices stating that this License and any
|
||||
non-permissive terms added in accord with section 7 apply to the code;
|
||||
keep intact all notices of the absence of any warranty; and give all
|
||||
recipients a copy of this License along with the Program.
|
||||
|
||||
You may charge any price or no price for each copy that you convey,
|
||||
and you may offer support or warranty protection for a fee.
|
||||
|
||||
5. Conveying Modified Source Versions.
|
||||
|
||||
You may convey a work based on the Program, or the modifications to
|
||||
produce it from the Program, in the form of source code under the
|
||||
terms of section 4, provided that you also meet all of these conditions:
|
||||
|
||||
a) The work must carry prominent notices stating that you modified
|
||||
it, and giving a relevant date.
|
||||
|
||||
b) The work must carry prominent notices stating that it is
|
||||
released under this License and any conditions added under section
|
||||
7. This requirement modifies the requirement in section 4 to
|
||||
"keep intact all notices".
|
||||
|
||||
c) You must license the entire work, as a whole, under this
|
||||
License to anyone who comes into possession of a copy. This
|
||||
License will therefore apply, along with any applicable section 7
|
||||
additional terms, to the whole of the work, and all its parts,
|
||||
regardless of how they are packaged. This License gives no
|
||||
permission to license the work in any other way, but it does not
|
||||
invalidate such permission if you have separately received it.
|
||||
|
||||
d) If the work has interactive user interfaces, each must display
|
||||
Appropriate Legal Notices; however, if the Program has interactive
|
||||
interfaces that do not display Appropriate Legal Notices, your
|
||||
work need not make them do so.
|
||||
|
||||
A compilation of a covered work with other separate and independent
|
||||
works, which are not by their nature extensions of the covered work,
|
||||
and which are not combined with it such as to form a larger program,
|
||||
in or on a volume of a storage or distribution medium, is called an
|
||||
"aggregate" if the compilation and its resulting copyright are not
|
||||
used to limit the access or legal rights of the compilation's users
|
||||
beyond what the individual works permit. Inclusion of a covered work
|
||||
in an aggregate does not cause this License to apply to the other
|
||||
parts of the aggregate.
|
||||
|
||||
6. Conveying Non-Source Forms.
|
||||
|
||||
You may convey a covered work in object code form under the terms
|
||||
of sections 4 and 5, provided that you also convey the
|
||||
machine-readable Corresponding Source under the terms of this License,
|
||||
in one of these ways:
|
||||
|
||||
a) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by the
|
||||
Corresponding Source fixed on a durable physical medium
|
||||
customarily used for software interchange.
|
||||
|
||||
b) Convey the object code in, or embodied in, a physical product
|
||||
(including a physical distribution medium), accompanied by a
|
||||
written offer, valid for at least three years and valid for as
|
||||
long as you offer spare parts or customer support for that product
|
||||
model, to give anyone who possesses the object code either (1) a
|
||||
copy of the Corresponding Source for all the software in the
|
||||
product that is covered by this License, on a durable physical
|
||||
medium customarily used for software interchange, for a price no
|
||||
more than your reasonable cost of physically performing this
|
||||
conveying of source, or (2) access to copy the
|
||||
Corresponding Source from a network server at no charge.
|
||||
|
||||
c) Convey individual copies of the object code with a copy of the
|
||||
written offer to provide the Corresponding Source. This
|
||||
alternative is allowed only occasionally and noncommercially, and
|
||||
only if you received the object code with such an offer, in accord
|
||||
with subsection 6b.
|
||||
|
||||
d) Convey the object code by offering access from a designated
|
||||
place (gratis or for a charge), and offer equivalent access to the
|
||||
Corresponding Source in the same way through the same place at no
|
||||
further charge. You need not require recipients to copy the
|
||||
Corresponding Source along with the object code. If the place to
|
||||
copy the object code is a network server, the Corresponding Source
|
||||
may be on a different server (operated by you or a third party)
|
||||
that supports equivalent copying facilities, provided you maintain
|
||||
clear directions next to the object code saying where to find the
|
||||
Corresponding Source. Regardless of what server hosts the
|
||||
Corresponding Source, you remain obligated to ensure that it is
|
||||
available for as long as needed to satisfy these requirements.
|
||||
|
||||
e) Convey the object code using peer-to-peer transmission, provided
|
||||
you inform other peers where the object code and Corresponding
|
||||
Source of the work are being offered to the general public at no
|
||||
charge under subsection 6d.
|
||||
|
||||
A separable portion of the object code, whose source code is excluded
|
||||
from the Corresponding Source as a System Library, need not be
|
||||
included in conveying the object code work.
|
||||
|
||||
A "User Product" is either (1) a "consumer product", which means any
|
||||
tangible personal property which is normally used for personal, family,
|
||||
or household purposes, or (2) anything designed or sold for incorporation
|
||||
into a dwelling. In determining whether a product is a consumer product,
|
||||
doubtful cases shall be resolved in favor of coverage. For a particular
|
||||
product received by a particular user, "normally used" refers to a
|
||||
typical or common use of that class of product, regardless of the status
|
||||
of the particular user or of the way in which the particular user
|
||||
actually uses, or expects or is expected to use, the product. A product
|
||||
is a consumer product regardless of whether the product has substantial
|
||||
commercial, industrial or non-consumer uses, unless such uses represent
|
||||
the only significant mode of use of the product.
|
||||
|
||||
"Installation Information" for a User Product means any methods,
|
||||
procedures, authorization keys, or other information required to install
|
||||
and execute modified versions of a covered work in that User Product from
|
||||
a modified version of its Corresponding Source. The information must
|
||||
suffice to ensure that the continued functioning of the modified object
|
||||
code is in no case prevented or interfered with solely because
|
||||
modification has been made.
|
||||
|
||||
If you convey an object code work under this section in, or with, or
|
||||
specifically for use in, a User Product, and the conveying occurs as
|
||||
part of a transaction in which the right of possession and use of the
|
||||
User Product is transferred to the recipient in perpetuity or for a
|
||||
fixed term (regardless of how the transaction is characterized), the
|
||||
Corresponding Source conveyed under this section must be accompanied
|
||||
by the Installation Information. But this requirement does not apply
|
||||
if neither you nor any third party retains the ability to install
|
||||
modified object code on the User Product (for example, the work has
|
||||
been installed in ROM).
|
||||
|
||||
The requirement to provide Installation Information does not include a
|
||||
requirement to continue to provide support service, warranty, or updates
|
||||
for a work that has been modified or installed by the recipient, or for
|
||||
the User Product in which it has been modified or installed. Access to a
|
||||
network may be denied when the modification itself materially and
|
||||
adversely affects the operation of the network or violates the rules and
|
||||
protocols for communication across the network.
|
||||
|
||||
Corresponding Source conveyed, and Installation Information provided,
|
||||
in accord with this section must be in a format that is publicly
|
||||
documented (and with an implementation available to the public in
|
||||
source code form), and must require no special password or key for
|
||||
unpacking, reading or copying.
|
||||
|
||||
7. Additional Terms.
|
||||
|
||||
"Additional permissions" are terms that supplement the terms of this
|
||||
License by making exceptions from one or more of its conditions.
|
||||
Additional permissions that are applicable to the entire Program shall
|
||||
be treated as though they were included in this License, to the extent
|
||||
that they are valid under applicable law. If additional permissions
|
||||
apply only to part of the Program, that part may be used separately
|
||||
under those permissions, but the entire Program remains governed by
|
||||
this License without regard to the additional permissions.
|
||||
|
||||
When you convey a copy of a covered work, you may at your option
|
||||
remove any additional permissions from that copy, or from any part of
|
||||
it. (Additional permissions may be written to require their own
|
||||
removal in certain cases when you modify the work.) You may place
|
||||
additional permissions on material, added by you to a covered work,
|
||||
for which you have or can give appropriate copyright permission.
|
||||
|
||||
Notwithstanding any other provision of this License, for material you
|
||||
add to a covered work, you may (if authorized by the copyright holders of
|
||||
that material) supplement the terms of this License with terms:
|
||||
|
||||
a) Disclaiming warranty or limiting liability differently from the
|
||||
terms of sections 15 and 16 of this License; or
|
||||
|
||||
b) Requiring preservation of specified reasonable legal notices or
|
||||
author attributions in that material or in the Appropriate Legal
|
||||
Notices displayed by works containing it; or
|
||||
|
||||
c) Prohibiting misrepresentation of the origin of that material, or
|
||||
requiring that modified versions of such material be marked in
|
||||
reasonable ways as different from the original version; or
|
||||
|
||||
d) Limiting the use for publicity purposes of names of licensors or
|
||||
authors of the material; or
|
||||
|
||||
e) Declining to grant rights under trademark law for use of some
|
||||
trade names, trademarks, or service marks; or
|
||||
|
||||
f) Requiring indemnification of licensors and authors of that
|
||||
material by anyone who conveys the material (or modified versions of
|
||||
it) with contractual assumptions of liability to the recipient, for
|
||||
any liability that these contractual assumptions directly impose on
|
||||
those licensors and authors.
|
||||
|
||||
All other non-permissive additional terms are considered "further
|
||||
restrictions" within the meaning of section 10. If the Program as you
|
||||
received it, or any part of it, contains a notice stating that it is
|
||||
governed by this License along with a term that is a further
|
||||
restriction, you may remove that term. If a license document contains
|
||||
a further restriction but permits relicensing or conveying under this
|
||||
License, you may add to a covered work material governed by the terms
|
||||
of that license document, provided that the further restriction does
|
||||
not survive such relicensing or conveying.
|
||||
|
||||
If you add terms to a covered work in accord with this section, you
|
||||
must place, in the relevant source files, a statement of the
|
||||
additional terms that apply to those files, or a notice indicating
|
||||
where to find the applicable terms.
|
||||
|
||||
Additional terms, permissive or non-permissive, may be stated in the
|
||||
form of a separately written license, or stated as exceptions;
|
||||
the above requirements apply either way.
|
||||
|
||||
8. Termination.
|
||||
|
||||
You may not propagate or modify a covered work except as expressly
|
||||
provided under this License. Any attempt otherwise to propagate or
|
||||
modify it is void, and will automatically terminate your rights under
|
||||
this License (including any patent licenses granted under the third
|
||||
paragraph of section 11).
|
||||
|
||||
However, if you cease all violation of this License, then your
|
||||
license from a particular copyright holder is reinstated (a)
|
||||
provisionally, unless and until the copyright holder explicitly and
|
||||
finally terminates your license, and (b) permanently, if the copyright
|
||||
holder fails to notify you of the violation by some reasonable means
|
||||
prior to 60 days after the cessation.
|
||||
|
||||
Moreover, your license from a particular copyright holder is
|
||||
reinstated permanently if the copyright holder notifies you of the
|
||||
violation by some reasonable means, this is the first time you have
|
||||
received notice of violation of this License (for any work) from that
|
||||
copyright holder, and you cure the violation prior to 30 days after
|
||||
your receipt of the notice.
|
||||
|
||||
Termination of your rights under this section does not terminate the
|
||||
licenses of parties who have received copies or rights from you under
|
||||
this License. If your rights have been terminated and not permanently
|
||||
reinstated, you do not qualify to receive new licenses for the same
|
||||
material under section 10.
|
||||
|
||||
9. Acceptance Not Required for Having Copies.
|
||||
|
||||
You are not required to accept this License in order to receive or
|
||||
run a copy of the Program. Ancillary propagation of a covered work
|
||||
occurring solely as a consequence of using peer-to-peer transmission
|
||||
to receive a copy likewise does not require acceptance. However,
|
||||
nothing other than this License grants you permission to propagate or
|
||||
modify any covered work. These actions infringe copyright if you do
|
||||
not accept this License. Therefore, by modifying or propagating a
|
||||
covered work, you indicate your acceptance of this License to do so.
|
||||
|
||||
10. Automatic Licensing of Downstream Recipients.
|
||||
|
||||
Each time you convey a covered work, the recipient automatically
|
||||
receives a license from the original licensors, to run, modify and
|
||||
propagate that work, subject to this License. You are not responsible
|
||||
for enforcing compliance by third parties with this License.
|
||||
|
||||
An "entity transaction" is a transaction transferring control of an
|
||||
organization, or substantially all assets of one, or subdividing an
|
||||
organization, or merging organizations. If propagation of a covered
|
||||
work results from an entity transaction, each party to that
|
||||
transaction who receives a copy of the work also receives whatever
|
||||
licenses to the work the party's predecessor in interest had or could
|
||||
give under the previous paragraph, plus a right to possession of the
|
||||
Corresponding Source of the work from the predecessor in interest, if
|
||||
the predecessor has it or can get it with reasonable efforts.
|
||||
|
||||
You may not impose any further restrictions on the exercise of the
|
||||
rights granted or affirmed under this License. For example, you may
|
||||
not impose a license fee, royalty, or other charge for exercise of
|
||||
rights granted under this License, and you may not initiate litigation
|
||||
(including a cross-claim or counterclaim in a lawsuit) alleging that
|
||||
any patent claim is infringed by making, using, selling, offering for
|
||||
sale, or importing the Program or any portion of it.
|
||||
|
||||
11. Patents.
|
||||
|
||||
A "contributor" is a copyright holder who authorizes use under this
|
||||
License of the Program or a work on which the Program is based. The
|
||||
work thus licensed is called the contributor's "contributor version".
|
||||
|
||||
A contributor's "essential patent claims" are all patent claims
|
||||
owned or controlled by the contributor, whether already acquired or
|
||||
hereafter acquired, that would be infringed by some manner, permitted
|
||||
by this License, of making, using, or selling its contributor version,
|
||||
but do not include claims that would be infringed only as a
|
||||
consequence of further modification of the contributor version. For
|
||||
purposes of this definition, "control" includes the right to grant
|
||||
patent sublicenses in a manner consistent with the requirements of
|
||||
this License.
|
||||
|
||||
Each contributor grants you a non-exclusive, worldwide, royalty-free
|
||||
patent license under the contributor's essential patent claims, to
|
||||
make, use, sell, offer for sale, import and otherwise run, modify and
|
||||
propagate the contents of its contributor version.
|
||||
|
||||
In the following three paragraphs, a "patent license" is any express
|
||||
agreement or commitment, however denominated, not to enforce a patent
|
||||
(such as an express permission to practice a patent or covenant not to
|
||||
sue for patent infringement). To "grant" such a patent license to a
|
||||
party means to make such an agreement or commitment not to enforce a
|
||||
patent against the party.
|
||||
|
||||
If you convey a covered work, knowingly relying on a patent license,
|
||||
and the Corresponding Source of the work is not available for anyone
|
||||
to copy, free of charge and under the terms of this License, through a
|
||||
publicly available network server or other readily accessible means,
|
||||
then you must either (1) cause the Corresponding Source to be so
|
||||
available, or (2) arrange to deprive yourself of the benefit of the
|
||||
patent license for this particular work, or (3) arrange, in a manner
|
||||
consistent with the requirements of this License, to extend the patent
|
||||
license to downstream recipients. "Knowingly relying" means you have
|
||||
actual knowledge that, but for the patent license, your conveying the
|
||||
covered work in a country, or your recipient's use of the covered work
|
||||
in a country, would infringe one or more identifiable patents in that
|
||||
country that you have reason to believe are valid.
|
||||
|
||||
If, pursuant to or in connection with a single transaction or
|
||||
arrangement, you convey, or propagate by procuring conveyance of, a
|
||||
covered work, and grant a patent license to some of the parties
|
||||
receiving the covered work authorizing them to use, propagate, modify
|
||||
or convey a specific copy of the covered work, then the patent license
|
||||
you grant is automatically extended to all recipients of the covered
|
||||
work and works based on it.
|
||||
|
||||
A patent license is "discriminatory" if it does not include within
|
||||
the scope of its coverage, prohibits the exercise of, or is
|
||||
conditioned on the non-exercise of one or more of the rights that are
|
||||
specifically granted under this License. You may not convey a covered
|
||||
work if you are a party to an arrangement with a third party that is
|
||||
in the business of distributing software, under which you make payment
|
||||
to the third party based on the extent of your activity of conveying
|
||||
the work, and under which the third party grants, to any of the
|
||||
parties who would receive the covered work from you, a discriminatory
|
||||
patent license (a) in connection with copies of the covered work
|
||||
conveyed by you (or copies made from those copies), or (b) primarily
|
||||
for and in connection with specific products or compilations that
|
||||
contain the covered work, unless you entered into that arrangement,
|
||||
or that patent license was granted, prior to 28 March 2007.
|
||||
|
||||
Nothing in this License shall be construed as excluding or limiting
|
||||
any implied license or other defenses to infringement that may
|
||||
otherwise be available to you under applicable patent law.
|
||||
|
||||
12. No Surrender of Others' Freedom.
|
||||
|
||||
If conditions are imposed on you (whether by court order, agreement or
|
||||
otherwise) that contradict the conditions of this License, they do not
|
||||
excuse you from the conditions of this License. If you cannot convey a
|
||||
covered work so as to satisfy simultaneously your obligations under this
|
||||
License and any other pertinent obligations, then as a consequence you may
|
||||
not convey it at all. For example, if you agree to terms that obligate you
|
||||
to collect a royalty for further conveying from those to whom you convey
|
||||
the Program, the only way you could satisfy both those terms and this
|
||||
License would be to refrain entirely from conveying the Program.
|
||||
|
||||
13. Use with the GNU Affero General Public License.
|
||||
|
||||
Notwithstanding any other provision of this License, you have
|
||||
permission to link or combine any covered work with a work licensed
|
||||
under version 3 of the GNU Affero General Public License into a single
|
||||
combined work, and to convey the resulting work. The terms of this
|
||||
License will continue to apply to the part which is the covered work,
|
||||
but the special requirements of the GNU Affero General Public License,
|
||||
section 13, concerning interaction through a network will apply to the
|
||||
combination as such.
|
||||
|
||||
14. Revised Versions of this License.
|
||||
|
||||
The Free Software Foundation may publish revised and/or new versions of
|
||||
the GNU General Public License from time to time. Such new versions will
|
||||
be similar in spirit to the present version, but may differ in detail to
|
||||
address new problems or concerns.
|
||||
|
||||
Each version is given a distinguishing version number. If the
|
||||
Program specifies that a certain numbered version of the GNU General
|
||||
Public License "or any later version" applies to it, you have the
|
||||
option of following the terms and conditions either of that numbered
|
||||
version or of any later version published by the Free Software
|
||||
Foundation. If the Program does not specify a version number of the
|
||||
GNU General Public License, you may choose any version ever published
|
||||
by the Free Software Foundation.
|
||||
|
||||
If the Program specifies that a proxy can decide which future
|
||||
versions of the GNU General Public License can be used, that proxy's
|
||||
public statement of acceptance of a version permanently authorizes you
|
||||
to choose that version for the Program.
|
||||
|
||||
Later license versions may give you additional or different
|
||||
permissions. However, no additional obligations are imposed on any
|
||||
author or copyright holder as a result of your choosing to follow a
|
||||
later version.
|
||||
|
||||
15. Disclaimer of Warranty.
|
||||
|
||||
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
|
||||
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
|
||||
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
|
||||
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
|
||||
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
|
||||
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
|
||||
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
|
||||
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
|
||||
|
||||
16. Limitation of Liability.
|
||||
|
||||
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
|
||||
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
|
||||
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
|
||||
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
|
||||
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
|
||||
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
|
||||
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
|
||||
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
|
||||
SUCH DAMAGES.
|
||||
|
||||
17. Interpretation of Sections 15 and 16.
|
||||
|
||||
If the disclaimer of warranty and limitation of liability provided
|
||||
above cannot be given local legal effect according to their terms,
|
||||
reviewing courts shall apply local law that most closely approximates
|
||||
an absolute waiver of all civil liability in connection with the
|
||||
Program, unless a warranty or assumption of liability accompanies a
|
||||
copy of the Program in return for a fee.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
How to Apply These Terms to Your New Programs
|
||||
|
||||
If you develop a new program, and you want it to be of the greatest
|
||||
possible use to the public, the best way to achieve this is to make it
|
||||
free software which everyone can redistribute and change under these terms.
|
||||
|
||||
To do so, attach the following notices to the program. It is safest
|
||||
to attach them to the start of each source file to most effectively
|
||||
state the exclusion of warranty; and each file should have at least
|
||||
the "copyright" line and a pointer to where the full notice is found.
|
||||
|
||||
<one line to give the program's name and a brief idea of what it does.>
|
||||
Copyright (C) <year> <name of author>
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <https://www.gnu.org/licenses/>.
|
||||
|
||||
Also add information on how to contact you by electronic and paper mail.
|
||||
|
||||
If the program does terminal interaction, make it output a short
|
||||
notice like this when it starts in an interactive mode:
|
||||
|
||||
<program> Copyright (C) <year> <name of author>
|
||||
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
|
||||
This is free software, and you are welcome to redistribute it
|
||||
under certain conditions; type `show c' for details.
|
||||
|
||||
The hypothetical commands `show w' and `show c' should show the appropriate
|
||||
parts of the General Public License. Of course, your program's commands
|
||||
might be different; for a GUI interface, you would use an "about box".
|
||||
|
||||
You should also get your employer (if you work as a programmer) or school,
|
||||
if any, to sign a "copyright disclaimer" for the program, if necessary.
|
||||
For more information on this, and how to apply and follow the GNU GPL, see
|
||||
<https://www.gnu.org/licenses/>.
|
||||
|
||||
The GNU General Public License does not permit incorporating your program
|
||||
into proprietary programs. If your program is a subroutine library, you
|
||||
may consider it more useful to permit linking proprietary applications with
|
||||
the library. If this is what you want to do, use the GNU Lesser General
|
||||
Public License instead of this License. But first, please read
|
||||
<https://www.gnu.org/licenses/why-not-lgpl.html>.
|
README.md
@ -1,45 +0,0 @@
# FiniteVolumeGPU

This is a HIP version of the [FiniteVolume code](https://github.com/babrodtk/FiniteVolumeGPU) (work in progress). It is a Python software package that implements several finite volume discretizations on Cartesian grids for the shallow water equations and the Euler equations.

## Setup
A good place to start exploring this codebase is the notebooks. Complete the following steps to run them:

1. Install conda (see e.g. Miniconda or Anaconda)
2. Change directory to the repository root and run the following commands
3. `conda env create -f conda_environment.yml`
4. `conda activate ShallowWaterGPU`
5. `jupyter notebook`

Make sure you are running the correct kernel ("conda:ShallowWaterGPU"). If not, change kernel using the "Kernel" menu in the notebook.

If you do not need to run the notebooks, you may use the conda environment found in `conda_environment_hpc.yml` instead.

## Troubleshooting
Have a look at the conda documentation and https://towardsdatascience.com/how-to-set-up-anaconda-and-jupyter-notebook-the-right-way-de3b7623ea4a

## Setup on LUMI-G
Here is a step-by-step guide to installing the required packages on LUMI-G.

### Step 0: Load modules
```
ml LUMI/23.03
ml lumi-container-wrapper
ml cray-python/3.9.13.1
```

### Step 1: Run conda-containerize
Installation via conda can be done as:
```
conda-containerize new --prefix MyCondaEnv conda_environment_lumi.yml
```
where the file `conda_environment_lumi.yml` lists the packages to be installed.

### Step 2: Set the environment variable so the wrapped binaries are found
```
export PATH="$PWD/MyCondaEnv/bin:$PATH"
```

### An alternative: Convert to a Singularity container with cotainr
```
cotainr build my_container.sif --system=lumi-g --conda-env=conda_environment_lumi.yml
```
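
Whichever route you choose (conda environment, container wrapper, or cotainr image), a quick sanity check is to ask HIP how many GPUs are visible. The following is a minimal sketch that only assumes the `hip-python` package listed in the environment files above:
```
# Minimal check that hip-python can see the GPUs (sketch; assumes hip-python is installed)
from hip import hip

def hip_check(call_result):
    # hip-python calls return (error_code, value, ...) tuples; unpack and raise on failure
    err, *result = call_result
    if err != hip.hipError_t.hipSuccess:
        raise RuntimeError(str(err))
    return result[0] if len(result) == 1 else result

print("Found", hip_check(hip.hipGetDeviceCount()), "HIP device(s)")
```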
SYSTEMS.md
@ -1,62 +0,0 @@
|
||||
$ANSIBLE_VAULT;1.1;AES256
|
||||
61316265663939333638336466323036663861343233316466646432313138653633623662353937
|
||||
3232313165656633346432376237383566363537366534310a303231343936663438653835373161
|
||||
35616161323432653062323164623861353065333761663136313137333732313230626665386336
|
||||
6166656538396463370a356166316363326133313864386536323236346634323537393639653038
|
||||
66336337336132613061353964613638326233356336323962366531333932366539366339623563
|
||||
36333365326463616634323939333062363263636663373635653064626138363464666233316561
|
||||
63393735393233616437386537393739393433663631313864646535636262616336333631396166
|
||||
38643636323530386565396338623366393232313838356536303537393338393634666632656234
|
||||
65353930303762333639376638336364303439306132626531326132376264623063376464636430
|
||||
32333536386134333136313139313861306364333037323363393463333664633764653937623866
|
||||
34313064346261313330373132353563343761323435393930303136353865303163373937623831
|
||||
64343038373162333039653161643233353764633337366434396638376530636261323362373434
|
||||
38393630613065356632663533333331633039663935663732353234643131306665343339373265
|
||||
64356563653838613337663132663234356462343333623139626662363764656239326637653832
|
||||
35396636643937336431623531306133643137623831333936313839333738333730373136666336
|
||||
35623965643664343164373630313362656663386638376237616134343631386366313336626138
|
||||
62376436383837376539663438346431633138383363633862356366376537393932626262383637
|
||||
31323365333139653736623233636233323162343039663035346135326638633430303134396337
|
||||
36353264313835346130643736663665386364343835643166383361316631373338663731373335
|
||||
30313530326662663937666330643565363565616566633333363535656539656531666266613638
|
||||
30306264613438363265646332386535383238373433396337633636616532626161343236336533
|
||||
36366362653137333739353737386563613136653164383437316237643533633133313735633363
|
||||
64326433356266363133343339626333633063326533383632353639613163663966376465396231
|
||||
36663034363534396430316463386564663465323036613636343136643262666566303533346439
|
||||
63396466656639623836613130363835346435633437666463363333356231343038356434343861
|
||||
66643636633739336666316566653136363862346336353862653130346335363334616430366435
|
||||
30376365383262326438306266366265363030353764633630333034663037643037343132303631
|
||||
39316364366234363339363130333765616432306331373566393530653963356539636437383062
|
||||
34633938643563656363633864656361643539663833356638356365373061663964363530393535
|
||||
37646533386235613763396638393539303062326239633238373763326561313634313265613135
|
||||
64646138313562313732393732303133343234323438616165326530333234626363393735636530
|
||||
62353735313231353662353533636134306530623339383730306332613636663366653566313935
|
||||
32343935353566656130393533323639353863666436333839386463396337336635356663373136
|
||||
61323734613239396236393266363631313465363630306565636663396235626132336339623938
|
||||
62383435643661623938393662363262376566613365613465323432343534356433323330666133
|
||||
30303963656635303734316539333038663962626331313366666337663165323230646564623935
|
||||
61316630353739386365323339626166323562616630383538393733353864396565353039656333
|
||||
30343038636231363531383061613836653038373937616163643963393231356235626531366239
|
||||
62343333326434636665363931376235313535343135626261336439636663323233383565633964
|
||||
65333830613131396630336337646230393038386536336365313738316335386261393838383961
|
||||
64656331363738616539346663613261386639353437316231636533353031336464383432623939
|
||||
65386164396231393735643563663337643563633233373338643630313739373861356166616463
|
||||
35306263333963663434376263396464323135346663376334356134393066653439376263376231
|
||||
30333730383163366636323533393334336331633234306536376634313735613263366537346536
|
||||
62366564383861656662353738366665396639313833323038356661306135393338333466333563
|
||||
32653861346166663163383036386432343833333137663462343030363762663139366534326466
|
||||
66313864623438336164333430613766373430656536323964633863333931643036656563353639
|
||||
30313835666366383035343031643265386263316165323537613636656533376239633964393866
|
||||
61646163343032313036303738643763383364663134356634373262633361383035306231636364
|
||||
39333232636538643033313438396332383962656131363365666566633239366532326336363133
|
||||
38393064643030333538333562643435663434343863383834663266373337336433313663646164
|
||||
36343334343965623830613736393231666361643239663062393239613233376335383362666161
|
||||
66383035653330373736613234303631386163656561383138613363613539396332376162316131
|
||||
61313532653531653836343731636535623066383231613635316432323331623761383833623333
|
||||
39343632623961613561373261653939636363366531303839336237383166363733303538363237
|
||||
36373362636263666334316163633766303334373033636539353464393536356466636664333665
|
||||
32643135626366666137626464393961366165383334343063356334373534633764326162363837
|
||||
38643662326266313464343464646166643235663663303761313639376537306337353863336264
|
||||
66376335333738366265343636376363366365306137336665623466626261653937656461303332
|
||||
32613561616662383032393562613831626666373134303032626134313262363830326530643632
|
||||
61366133663564313933366430396430353762386133396436633839303766653765
|
@ -1,36 +0,0 @@
# Assumes that conda, pip, build-essentials and cuda are installed
---
name: ShallowWaterGPU_HPC
channels:
- conda-forge

dependencies:
- python=3.9
- numpy
- mpi4py
- six
- pytools
- netcdf4
- scipy
- pip:
  - hip-python
  - hip-python-as-cuda
  - -i https://test.pypi.org/simple/


#On LUMI-G
#module load LUMI/23.03
#module load lumi-container-wrapper
#ml cray-python/3.9.13.1
#conda-containerize new --prefix MyCondaEnv conda_environment_lumi.yml
# export the bin path: export PATH="$PWD/MyCondaEnv/bin:$PATH"
#
#
#
# Install conda environment (one-time operation):
# $ conda env create -f conda_environment_hpc.yml
# Activate environment and install the following packages using pip:
# $ conda activate ShallowWaterGPU_HPC
# - pycuda: $ pip3 install --no-deps -U pycuda
#   on Windows: make sure your visual studio c++ compiler is available in PATH
#   PATH should have something like C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC\bin\
mpiTesting.py
@ -1,230 +0,0 @@
# -*- coding: utf-8 -*-

"""
This python module implements MPI simulations for benchmarking

Copyright (C) 2018 SINTEF ICT

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""


import numpy as np
import gc
import time
import json
import logging
import os

# GPU-aware MPI
from os import environ
if environ.get("MPICH_GPU_SUPPORT_ENABLED", False):
    from ctypes import CDLL, RTLD_GLOBAL
    CDLL(f"{environ.get('CRAY_MPICH_ROOTDIR')}/gtl/lib/libmpi_gtl_hsa.so", mode=RTLD_GLOBAL)

# MPI
from mpi4py import MPI

# CUDA
#import pycuda.driver as cuda
from hip import hip, hiprtc  # hiprtc is needed by hip_check() below

# Simulator engine etc
from GPUSimulators import MPISimulator, Common, CudaContext
from GPUSimulators import EE2D_KP07_dimsplit
from GPUSimulators.helpers import InitialConditions as IC
from GPUSimulators.Simulator import BoundaryCondition as BC

import argparse
parser = argparse.ArgumentParser(description='Strong and weak scaling experiments.')
parser.add_argument('-nx', type=int, default=128)
parser.add_argument('-ny', type=int, default=128)
parser.add_argument('--profile', action='store_true')  # default: False
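# Example invocation (sketch, not part of the original script): the benchmark is launched
# through MPI, typically with one rank per GPU, e.g.
#
#     mpirun -n 4 python3 mpiTesting.py -nx 1024 -ny 1024 --profile
#
# or the scheduler's equivalent (such as srun on LUMI-G).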

def hip_check(call_result):
    err = call_result[0]
    result = call_result[1:]
    if len(result) == 1:
        result = result[0]
    if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
        raise RuntimeError(str(err))
    elif (
        isinstance(err, hiprtc.hiprtcResult)
        and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
    ):
        raise RuntimeError(str(err))
    return result
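# Usage note (sketch, not part of the original script): hip_check() is meant to wrap every
# hip-python and hiprtc call, since the bindings return (error_code, value, ...) tuples
# instead of raising exceptions. For example, a HIP stream would be created and torn down as:
#
#     stream = hip_check(hip.hipStreamCreate())
#     hip_check(hip.hipStreamSynchronize(stream))
#     hip_check(hip.hipStreamDestroy(stream))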

args = parser.parse_args()

if(args.profile):
    profiling_data = {}
    # profiling: total run time
    t_total_start = time.time()
    t_init_start = time.time()


# Get MPI COMM to use
comm = MPI.COMM_WORLD


####
# Initialize logging
####
log_level_console = 20
log_level_file = 10
log_filename = 'mpi_' + str(comm.rank) + '.log'
logger = logging.getLogger('GPUSimulators')
logger.setLevel(min(log_level_console, log_level_file))

ch = logging.StreamHandler()
ch.setLevel(log_level_console)
logger.addHandler(ch)
logger.info("Console logger using level %s",
            logging.getLevelName(log_level_console))

fh = logging.FileHandler(log_filename)
formatter = logging.Formatter(
    '%(asctime)s:%(name)s:%(levelname)s: %(message)s')
fh.setFormatter(formatter)
fh.setLevel(log_level_file)
logger.addHandler(fh)
logger.info("File logger using level %s to %s",
            logging.getLevelName(log_level_file), log_filename)


####
# Initialize MPI grid etc
####
logger.info("Creating MPI grid")
grid = MPISimulator.MPIGrid(MPI.COMM_WORLD)


####
# Initialize CUDA
####
#cuda.init(flags=0)
#logger.info("Initializing CUDA")
local_rank = grid.getLocalRank()
#num_cuda_devices = cuda.Device.count()
num_cuda_devices = hip_check(hip.hipGetDeviceCount())
cuda_device = local_rank % num_cuda_devices
logger.info("Process %s using CUDA device %s", str(local_rank), str(cuda_device))
cuda_context = CudaContext.CudaContext(device=cuda_device, autotuning=False)
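# Note (sketch): ranks are mapped to devices round-robin within the node, so with e.g.
# 8 GPUs per node and 8 ranks per node each rank gets its own GPU. CudaContext is
# expected to select the device; selecting it directly through hip-python would look like:
#
#     hip_check(hip.hipSetDevice(cuda_device))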


####
# Set initial conditions
####

# DEBUGGING - setting random seed
np.random.seed(42)

logger.info("Generating initial conditions")
nx = args.nx
ny = args.ny

dt = 0.000001

gamma = 1.4
#save_times = np.linspace(0, 0.000009, 2)
#save_times = np.linspace(0, 0.000099, 11)
#save_times = np.linspace(0, 0.000099, 2)
save_times = np.linspace(0, 0.0000999, 2)
outfile = "mpi_out_" + str(MPI.COMM_WORLD.rank) + ".nc"
save_var_names = ['rho', 'rho_u', 'rho_v', 'E']

arguments = IC.genKelvinHelmholtz(nx, ny, gamma, grid=grid)
arguments['context'] = cuda_context
arguments['theta'] = 1.2
arguments['grid'] = grid

if(args.profile):
    t_init_end = time.time()
    t_init = t_init_end - t_init_start
    profiling_data["t_init"] = t_init

####
# Run simulation
####
logger.info("Running simulation")
# Helper function to create MPI simulator


def genSim(grid, **kwargs):
    local_sim = EE2D_KP07_dimsplit.EE2D_KP07_dimsplit(**kwargs)
    sim = MPISimulator.MPISimulator(local_sim, grid)
    return sim


outfile, sim_runner_profiling_data, sim_profiling_data = Common.runSimulation(
    genSim, arguments, outfile, save_times, save_var_names, dt)

if(args.profile):
    t_total_end = time.time()
    t_total = t_total_end - t_total_start
    profiling_data["t_total"] = t_total
    print("Total run time on rank " + str(MPI.COMM_WORLD.rank) + " is " + str(t_total) + " s")

# write profiling to json file
if(args.profile and MPI.COMM_WORLD.rank == 0):
    job_id = ""
    if "SLURM_JOB_ID" in os.environ:
        job_id = int(os.environ["SLURM_JOB_ID"])
        allocated_nodes = int(os.environ["SLURM_JOB_NUM_NODES"])
        allocated_gpus = int(os.environ["HIP_VISIBLE_DEVICES"].count(",") + 1)
        # allocated_gpus = int(os.environ["CUDA_VISIBLE_DEVICES"].count(",") + 1)
        profiling_file = "MPI_jobid_" + \
            str(job_id) + "_" + str(allocated_nodes) + "_nodes_and_" + str(allocated_gpus) + "_GPUs_profiling.json"
        profiling_data["outfile"] = outfile
    else:
        profiling_file = "MPI_" + str(MPI.COMM_WORLD.size) + "_procs_and_" + str(num_cuda_devices) + "_GPUs_profiling.json"

    for stage in sim_runner_profiling_data["start"].keys():
        profiling_data[stage] = sim_runner_profiling_data["end"][stage] - sim_runner_profiling_data["start"][stage]

    for stage in sim_profiling_data["start"].keys():
        profiling_data[stage] = sim_profiling_data["end"][stage] - sim_profiling_data["start"][stage]

    profiling_data["nx"] = nx
    profiling_data["ny"] = ny
    profiling_data["dt"] = dt
    profiling_data["n_time_steps"] = sim_profiling_data["n_time_steps"]

    profiling_data["slurm_job_id"] = job_id
    profiling_data["n_cuda_devices"] = str(num_cuda_devices)
    profiling_data["n_processes"] = str(MPI.COMM_WORLD.size)
    profiling_data["git_hash"] = Common.getGitHash()
    profiling_data["git_status"] = Common.getGitStatus()

    with open(profiling_file, "w") as write_file:
        json.dump(profiling_data, write_file)
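# Post-processing sketch (not part of the original script): the JSON written above can be
# read back to compare runs in a scaling experiment. The filename below is hypothetical and
# follows the naming scheme used when SLURM_JOB_ID is not set:
#
#     with open("MPI_4_procs_and_4_GPUs_profiling.json") as f:
#         data = json.load(f)
#     print(data["n_processes"], data["t_total"], data["n_time_steps"])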

####
# Clean shutdown
####
sim = None
local_sim = None
cuda_context = None
arguments = None
logging.shutdown()
gc.collect()


####
# Print completion and exit
####
print("Completed!")
exit(0)
File diff suppressed because it is too large
@ -1,146 +0,0 @@
|
||||
##############################################################################
|
||||
# Generated by SWASHES version 1.03.00, 2016-01-29
|
||||
##############################################################################
|
||||
# Dimension: 1
|
||||
# Type: 3 (=Dam break)
|
||||
# Domain: 1
|
||||
# Choice: 1 (=on a wet domain without friction (Stoker's solution))
|
||||
##############################################################################
|
||||
# PARAMETERS OF THE SOLUTION
|
||||
#
|
||||
# Length of the domain: 10 meters
|
||||
# Space step: 0.078125 meters
|
||||
# Number of cells: 128
|
||||
# Position of the dam: x=5 meters
|
||||
# Time value: 6 seconds
|
||||
##############################################################################
|
||||
#
|
||||
#(i-0.5)*dx h[i] u[i] topo[i] q[i] topo[i]+h[i] Fr[i]=Froude topo[i]+hc[i]
|
||||
0.0390625 0.005 0 0 0 0.005 0 0
|
||||
0.117188 0.005 0 0 0 0.005 0 0
|
||||
0.195312 0.005 0 0 0 0.005 0 0
|
||||
0.273438 0.005 0 0 0 0.005 0 0
|
||||
0.351562 0.005 0 0 0 0.005 0 0
|
||||
0.429688 0.005 0 0 0 0.005 0 0
|
||||
0.507812 0.005 0 0 0 0.005 0 0
|
||||
0.585938 0.005 0 0 0 0.005 0 0
|
||||
0.664062 0.005 0 0 0 0.005 0 0
|
||||
0.742188 0.005 0 0 0 0.005 0 0
|
||||
0.820312 0.005 0 0 0 0.005 0 0
|
||||
0.898438 0.005 0 0 0 0.005 0 0
|
||||
0.976562 0.005 0 0 0 0.005 0 0
|
||||
1.05469 0.005 0 0 0 0.005 0 0
|
||||
1.13281 0.005 0 0 0 0.005 0 0
|
||||
1.21094 0.005 0 0 0 0.005 0 0
|
||||
1.28906 0.005 0 0 0 0.005 0 0
|
||||
1.36719 0.005 0 0 0 0.005 0 0
|
||||
1.44531 0.005 0 0 0 0.005 0 0
|
||||
1.52344 0.005 0 0 0 0.005 0 0
|
||||
1.60156 0.005 0 0 0 0.005 0 0
|
||||
1.67969 0.005 0 0 0 0.005 0 0
|
||||
1.75781 0.005 0 0 0 0.005 0 0
|
||||
1.83594 0.005 0 0 0 0.005 0 0
|
||||
1.91406 0.005 0 0 0 0.005 0 0
|
||||
1.99219 0.005 0 0 0 0.005 0 0
|
||||
2.07031 0.005 0 0 0 0.005 0 0
|
||||
2.14844 0.005 0 0 0 0.005 0 0
|
||||
2.22656 0.005 0 0 0 0.005 0 0
|
||||
2.30469 0.005 0 0 0 0.005 0 0
|
||||
2.38281 0.005 0 0 0 0.005 0 0
|
||||
2.46094 0.005 0 0 0 0.005 0 0
|
||||
2.53906 0.005 0 0 0 0.005 0 0
|
||||
2.61719 0.005 0 0 0 0.005 0 0
|
||||
2.69531 0.005 0 0 0 0.005 0 0
|
||||
2.77344 0.005 0 0 0 0.005 0 0
|
||||
2.85156 0.005 0 0 0 0.005 0 0
|
||||
2.92969 0.005 0 0 0 0.005 0 0
|
||||
3.00781 0.005 0 0 0 0.005 0 0
|
||||
3.08594 0.005 0 0 0 0.005 0 0
|
||||
3.16406 0.005 0 0 0 0.005 0 0
|
||||
3.24219 0.005 0 0 0 0.005 0 0
|
||||
3.32031 0.005 0 0 0 0.005 0 0
|
||||
3.39844 0.005 0 0 0 0.005 0 0
|
||||
3.47656 0.005 0 0 0 0.005 0 0
|
||||
3.55469 0.005 0 0 0 0.005 0 0
|
||||
3.63281 0.005 0 0 0 0.005 0 0
|
||||
3.71094 0.00490073 0.00441906 0 2.16566e-005 0.00490073 0.0201542 0.000362943
|
||||
3.78906 0.00470863 0.0130996 0 6.16813e-005 0.00470863 0.0609504 0.000729255
|
||||
3.86719 0.00452038 0.0217802 0 9.84546e-005 0.00452038 0.103428 0.000996019
|
||||
3.94531 0.00433596 0.0304607 0 0.000132076 0.00433596 0.147694 0.00121151
|
||||
4.02344 0.00415538 0.0391413 0 0.000162647 0.00415538 0.193863 0.0013919
|
||||
4.10156 0.00397865 0.0478218 0 0.000190266 0.00397865 0.242061 0.00154532
|
||||
4.17969 0.00380575 0.0565024 0 0.000215034 0.00380575 0.292423 0.00167667
|
||||
4.25781 0.0036367 0.065183 0 0.000237051 0.0036367 0.345101 0.00178925
|
||||
4.33594 0.00347148 0.0738635 0 0.000256416 0.00347148 0.400256 0.00188541
|
||||
4.41406 0.00331011 0.0825441 0 0.00027323 0.00331011 0.458068 0.00196696
|
||||
4.49219 0.00315257 0.0912246 0 0.000287592 0.00315257 0.518734 0.0020353
|
||||
4.57031 0.00299888 0.0999052 0 0.000299604 0.00299888 0.58247 0.00209158
|
||||
4.64844 0.00284903 0.108586 0 0.000309364 0.00284903 0.649516 0.00213677
|
||||
4.72656 0.00270302 0.117266 0 0.000316973 0.00270302 0.720135 0.00217166
|
||||
4.80469 0.00256085 0.125947 0 0.000322531 0.00256085 0.794623 0.00219697
|
||||
4.88281 0.00253936 0.127279 0 0.000323208 0.00253936 0.806419 0.00220005
|
||||
4.96094 0.00253936 0.127279 0 0.000323208 0.00253936 0.806419 0.00220005
|
||||
5.03906 0.00253936 0.127279 0 0.000323208 0.00253936 0.806419 0.00220005
|
||||
5.11719 0.00253936 0.127279 0 0.000323208 0.00253936 0.806419 0.00220005
|
||||
5.19531 0.00253936 0.127279 0 0.000323208 0.00253936 0.806419 0.00220005
|
||||
5.27344 0.00253936 0.127279 0 0.000323208 0.00253936 0.806419 0.00220005
|
||||
5.35156 0.00253936 0.127279 0 0.000323208 0.00253936 0.806419 0.00220005
|
||||
5.42969 0.00253936 0.127279 0 0.000323208 0.00253936 0.806419 0.00220005
|
||||
5.50781 0.00253936 0.127279 0 0.000323208 0.00253936 0.806419 0.00220005
|
||||
5.58594 0.00253936 0.127279 0 0.000323208 0.00253936 0.806419 0.00220005
|
||||
5.66406 0.00253936 0.127279 0 0.000323208 0.00253936 0.806419 0.00220005
|
||||
5.74219 0.00253936 0.127279 0 0.000323208 0.00253936 0.806419 0.00220005
|
||||
5.82031 0.00253936 0.127279 0 0.000323208 0.00253936 0.806419 0.00220005
|
||||
5.89844 0.00253936 0.127279 0 0.000323208 0.00253936 0.806419 0.00220005
|
||||
5.97656 0.00253936 0.127279 0 0.000323208 0.00253936 0.806419 0.00220005
|
||||
6.05469 0.00253936 0.127279 0 0.000323208 0.00253936 0.806419 0.00220005
|
||||
6.13281 0.00253936 0.127279 0 0.000323208 0.00253936 0.806419 0.00220005
|
||||
6.21094 0.00253936 0.127279 0 0.000323208 0.00253936 0.806419 0.00220005
|
||||
6.28906 0.001 0 0 0 0.001 0 0
|
||||
6.36719 0.001 0 0 0 0.001 0 0
|
||||
6.44531 0.001 0 0 0 0.001 0 0
|
||||
6.52344 0.001 0 0 0 0.001 0 0
|
||||
6.60156 0.001 0 0 0 0.001 0 0
|
||||
6.67969 0.001 0 0 0 0.001 0 0
|
||||
6.75781 0.001 0 0 0 0.001 0 0
|
||||
6.83594 0.001 0 0 0 0.001 0 0
|
||||
6.91406 0.001 0 0 0 0.001 0 0
|
||||
6.99219 0.001 0 0 0 0.001 0 0
|
||||
7.07031 0.001 0 0 0 0.001 0 0
|
||||
7.14844 0.001 0 0 0 0.001 0 0
|
||||
7.22656 0.001 0 0 0 0.001 0 0
|
||||
7.30469 0.001 0 0 0 0.001 0 0
|
||||
7.38281 0.001 0 0 0 0.001 0 0
|
||||
7.46094 0.001 0 0 0 0.001 0 0
|
||||
7.53906 0.001 0 0 0 0.001 0 0
|
||||
7.61719 0.001 0 0 0 0.001 0 0
|
||||
7.69531 0.001 0 0 0 0.001 0 0
|
||||
7.77344 0.001 0 0 0 0.001 0 0
|
||||
7.85156 0.001 0 0 0 0.001 0 0
|
||||
7.92969 0.001 0 0 0 0.001 0 0
|
||||
8.00781 0.001 0 0 0 0.001 0 0
|
||||
8.08594 0.001 0 0 0 0.001 0 0
|
||||
8.16406 0.001 0 0 0 0.001 0 0
|
||||
8.24219 0.001 0 0 0 0.001 0 0
|
||||
8.32031 0.001 0 0 0 0.001 0 0
|
||||
8.39844 0.001 0 0 0 0.001 0 0
|
||||
8.47656 0.001 0 0 0 0.001 0 0
|
||||
8.55469 0.001 0 0 0 0.001 0 0
|
||||
8.63281 0.001 0 0 0 0.001 0 0
|
||||
8.71094 0.001 0 0 0 0.001 0 0
|
||||
8.78906 0.001 0 0 0 0.001 0 0
|
||||
8.86719 0.001 0 0 0 0.001 0 0
|
||||
8.94531 0.001 0 0 0 0.001 0 0
|
||||
9.02344 0.001 0 0 0 0.001 0 0
|
||||
9.10156 0.001 0 0 0 0.001 0 0
|
||||
9.17969 0.001 0 0 0 0.001 0 0
|
||||
9.25781 0.001 0 0 0 0.001 0 0
|
||||
9.33594 0.001 0 0 0 0.001 0 0
|
||||
9.41406 0.001 0 0 0 0.001 0 0
|
||||
9.49219 0.001 0 0 0 0.001 0 0
|
||||
9.57031 0.001 0 0 0 0.001 0 0
|
||||
9.64844 0.001 0 0 0 0.001 0 0
|
||||
9.72656 0.001 0 0 0 0.001 0 0
|
||||
9.80469 0.001 0 0 0 0.001 0 0
|
||||
9.88281 0.001 0 0 0 0.001 0 0
|
||||
9.96094 0.001 0 0 0 0.001 0 0
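
The SWASHES tables above (Stoker's wet dam-break solution) are plain whitespace-separated columns behind `#` comment headers, so they can be loaded directly when validating the shallow-water schemes against the reference. A minimal sketch, with a hypothetical local filename:
```
import numpy as np

# Columns (see the header above): x, h, u, topo, q, topo+h, Froude, topo+hc
# "swashes_dambreak_128.txt" is a placeholder name for the 128-cell file above.
ref = np.loadtxt("swashes_dambreak_128.txt", comments="#")
x, h, u = ref[:, 0], ref[:, 1], ref[:, 2]
print("cells:", x.size, "max depth:", h.max(), "m")
```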
File diff suppressed because it is too large
@ -1,274 +0,0 @@
|
||||
##############################################################################
|
||||
# Generated by SWASHES version 1.03.00, 2016-01-29
|
||||
##############################################################################
|
||||
# Dimension: 1
|
||||
# Type: 3 (=Dam break)
|
||||
# Domain: 1
|
||||
# Choice: 1 (=on a wet domain without friction (Stoker's solution))
|
||||
##############################################################################
|
||||
# PARAMETERS OF THE SOLUTION
|
||||
#
|
||||
# Length of the domain: 10 meters
|
||||
# Space step: 0.0390625 meters
|
||||
# Number of cells: 256
|
||||
# Position of the dam: x=5 meters
|
||||
# Time value: 6 seconds
|
||||
##############################################################################
|
||||
#
|
||||
#(i-0.5)*dx h[i] u[i] topo[i] q[i] topo[i]+h[i] Fr[i]=Froude topo[i]+hc[i]
|
||||
0.0195312 0.005 0 0 0 0.005 0 0
|
||||
0.0585938 0.005 0 0 0 0.005 0 0
|
||||
0.0976562 0.005 0 0 0 0.005 0 0
|
||||
0.136719 0.005 0 0 0 0.005 0 0
|
||||
0.175781 0.005 0 0 0 0.005 0 0
|
||||
0.214844 0.005 0 0 0 0.005 0 0
|
||||
0.253906 0.005 0 0 0 0.005 0 0
|
||||
0.292969 0.005 0 0 0 0.005 0 0
|
||||
0.332031 0.005 0 0 0 0.005 0 0
|
||||
0.371094 0.005 0 0 0 0.005 0 0
|
||||
0.410156 0.005 0 0 0 0.005 0 0
|
||||
0.449219 0.005 0 0 0 0.005 0 0
|
||||
0.488281 0.005 0 0 0 0.005 0 0
|
||||
0.527344 0.005 0 0 0 0.005 0 0
|
||||
0.566406 0.005 0 0 0 0.005 0 0
|
||||
0.605469 0.005 0 0 0 0.005 0 0
|
||||
0.644531 0.005 0 0 0 0.005 0 0
|
||||
0.683594 0.005 0 0 0 0.005 0 0
|
||||
0.722656 0.005 0 0 0 0.005 0 0
|
||||
0.761719 0.005 0 0 0 0.005 0 0
|
||||
0.800781 0.005 0 0 0 0.005 0 0
|
||||
0.839844 0.005 0 0 0 0.005 0 0
|
||||
0.878906 0.005 0 0 0 0.005 0 0
|
||||
0.917969 0.005 0 0 0 0.005 0 0
|
||||
0.957031 0.005 0 0 0 0.005 0 0
|
||||
0.996094 0.005 0 0 0 0.005 0 0
|
||||
1.03516 0.005 0 0 0 0.005 0 0
|
||||
1.07422 0.005 0 0 0 0.005 0 0
|
||||
1.11328 0.005 0 0 0 0.005 0 0
|
||||
1.15234 0.005 0 0 0 0.005 0 0
|
||||
1.19141 0.005 0 0 0 0.005 0 0
|
||||
1.23047 0.005 0 0 0 0.005 0 0
|
||||
1.26953 0.005 0 0 0 0.005 0 0
|
||||
1.30859 0.005 0 0 0 0.005 0 0
|
||||
1.34766 0.005 0 0 0 0.005 0 0
|
||||
1.38672 0.005 0 0 0 0.005 0 0
|
||||
1.42578 0.005 0 0 0 0.005 0 0
|
||||
1.46484 0.005 0 0 0 0.005 0 0
|
||||
1.50391 0.005 0 0 0 0.005 0 0
|
||||
1.54297 0.005 0 0 0 0.005 0 0
|
||||
1.58203 0.005 0 0 0 0.005 0 0
|
||||
1.62109 0.005 0 0 0 0.005 0 0
|
||||
1.66016 0.005 0 0 0 0.005 0 0
|
||||
1.69922 0.005 0 0 0 0.005 0 0
|
||||
1.73828 0.005 0 0 0 0.005 0 0
|
||||
1.77734 0.005 0 0 0 0.005 0 0
|
||||
1.81641 0.005 0 0 0 0.005 0 0
|
||||
1.85547 0.005 0 0 0 0.005 0 0
|
||||
1.89453 0.005 0 0 0 0.005 0 0
|
||||
1.93359 0.005 0 0 0 0.005 0 0
|
||||
[Remainder of the deleted SWASHES reference-solution table, x = 1.97266 m to 9.98047 m (cell spacing 0.0390625 m, 256 cells over a 10 m domain). Upstream constant state h = 0.005, u = 0 up to x ≈ 3.65; rarefaction fan with h falling from 0.005 to 0.00254 and u rising from 0 to 0.127 over x ≈ 3.69–4.82; constant intermediate state h = 0.00253936, u = 0.127279, q = 0.000323208, Fr = 0.806419 up to the shock near x ≈ 6.25; downstream constant state h = 0.001, u = 0 from x ≈ 6.27 to the right boundary.]
@ -1,530 +0,0 @@
##############################################################################
# Generated by SWASHES version 1.03.00, 2016-01-29
##############################################################################
# Dimension: 1
# Type: 3 (=Dam break)
# Domain: 1
# Choice: 1 (=on a wet domain without friction (Stoker's solution))
##############################################################################
# PARAMETERS OF THE SOLUTION
#
# Length of the domain: 10 meters
# Space step: 0.0195312 meters
# Number of cells: 512
# Position of the dam: x=5 meters
# Time value: 6 seconds
##############################################################################
#
#(i-0.5)*dx h[i] u[i] topo[i] q[i] topo[i]+h[i] Fr[i]=Froude topo[i]+hc[i]
[Deleted reference-solution table, 512 rows from x = 0.00976562 m to x = 9.99023 m (cell spacing 0.0195312 m). Upstream constant state h = 0.005, u = 0 up to x ≈ 3.66; rarefaction fan with h falling from 0.00497 to 0.00254 and u rising from 0.00116 to 0.127 over x ≈ 3.68–4.81; constant intermediate state h = 0.00253936, u = 0.127279, q = 0.000323208, Fr = 0.806419 up to the shock near x ≈ 6.27; downstream constant state h = 0.001, u = 0 from x ≈ 6.28 to the right boundary.]
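The deleted reference files above follow the plain-text SWASHES layout described in the header: '#'-prefixed metadata lines followed by eight whitespace-separated columns per cell. As a minimal sketch (not part of this commit), assuming a hypothetical file name dam_break_wet.txt and a 512-cell simulated depth array h_sim, such a file could be read back and compared against simulator output roughly as follows:

# Hypothetical helper, not part of the repository: load a SWASHES 1D
# reference solution and compute a discrete L1 error against a simulation.
# Column layout follows the header:
#   (i-0.5)*dx  h[i]  u[i]  topo[i]  q[i]  topo[i]+h[i]  Fr[i]  topo[i]+hc[i]
import numpy as np

def load_swashes_reference(filename):
    """Load a SWASHES 1D reference solution, skipping '#' header lines."""
    data = np.loadtxt(filename, comments="#")
    x = data[:, 0]   # cell-centre positions (i-0.5)*dx
    h = data[:, 1]   # water depth
    u = data[:, 2]   # velocity
    q = data[:, 4]   # discharge q = h*u
    return x, h, u, q

def l1_error(h_sim, h_ref, dx):
    """Discrete L1 error between simulated and reference depth."""
    return np.sum(np.abs(h_sim - h_ref)) * dx

# Example usage (assumes h_sim holds 512 simulated depths on the same grid):
# x, h_ref, u_ref, q_ref = load_swashes_reference("dam_break_wet.txt")
# err = l1_error(h_sim, h_ref, dx=10.0 / 512)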