Refactoring - broke 2nd order

This commit is contained in:
André R. Brodtkorb 2018-11-09 11:46:34 +01:00
parent 812ebcc9ba
commit e98ae0a979
21 changed files with 499023 additions and 611 deletions

File diff suppressed because one or more lines are too long

View File

@@ -217,8 +217,12 @@ class CudaArray2D:
copy.set_dst_device(self.data.gpudata)
#Set offsets of upload in destination
copy.dst_x_in_bytes = x_halo*self.data.strides[1]
copy.dst_y = y_halo
# This handles the cases where cpu_data contains ghost cell values
# and also when it does not
x_offset = (nx_halo - cpu_data.shape[1]) // 2
y_offset = (ny_halo - cpu_data.shape[0]) // 2
copy.dst_x_in_bytes = x_offset*self.data.strides[1]
copy.dst_y = y_offset
#Set destination pitch
copy.dst_pitch = self.data.strides[0]

View File

@@ -98,20 +98,25 @@ class EE2D_KP07_dimsplit (BaseSimulator):
2, 2, \
[None, None, None, None])
def simulate(self, t_end):
return super().simulateDimsplit(t_end)
def step(self, dt):
if (self.order == 1):
self.substepDimsplit(dt, substep=(self.nt % 2))
elif (self.order == 2):
self.substepDimsplit(dt, substep=0)
self.substepDimsplit(dt, substep=1)
else:
raise(NotImplementedError("Order {:d} is not implemented".format(self.order)))
self.t += dt
self.nt += 1
def stepEuler(self, dt):
return self.stepDimsplitXY(dt)
def stepDimsplitXY(self, dt):
def substepDimsplit(self, dt, substep):
self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream, \
self.nx, self.ny, \
self.dx, self.dy, dt, \
self.g, \
self.gamma, \
self.theta, \
Simulator.stepOrderToCodedInt(step=0, order=self.order), \
Simulator.stepOrderToCodedInt(step=substep, order=self.order), \
self.boundary_conditions, \
self.u0[0].data.gpudata, self.u0[0].data.strides[0], \
self.u0[1].data.gpudata, self.u0[1].data.strides[0], \
@@ -122,29 +127,6 @@ class EE2D_KP07_dimsplit (BaseSimulator):
self.u1[2].data.gpudata, self.u1[2].data.strides[0], \
self.u1[3].data.gpudata, self.u1[3].data.strides[0])
self.u0, self.u1 = self.u1, self.u0
self.t += dt
self.nt += 1
def stepDimsplitYX(self, dt):
self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream, \
self.nx, self.ny, \
self.dx, self.dy, dt, \
self.g, \
self.gamma, \
self.theta, \
Simulator.stepOrderToCodedInt(step=1, order=self.order), \
self.boundary_conditions, \
self.u0[0].data.gpudata, self.u0[0].data.strides[0], \
self.u0[1].data.gpudata, self.u0[1].data.strides[0], \
self.u0[2].data.gpudata, self.u0[2].data.strides[0], \
self.u0[3].data.gpudata, self.u0[3].data.strides[0], \
self.u1[0].data.gpudata, self.u1[0].data.strides[0], \
self.u1[1].data.gpudata, self.u1[1].data.strides[0], \
self.u1[2].data.gpudata, self.u1[2].data.strides[0], \
self.u1[3].data.gpudata, self.u1[3].data.strides[0])
self.u0, self.u1 = self.u1, self.u0
self.t += dt
self.nt += 1
def download(self):
return self.u0.download(self.stream)

View File

@@ -22,6 +22,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
#Import packages we need
from GPUSimulators import Simulator, Common
from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
import numpy as np
@@ -57,6 +58,7 @@ class FORCE (Simulator.BaseSimulator):
nx, ny, \
dx, dy, dt, \
g, \
boundary_conditions=BoundaryCondition(), \
block_width=16, block_height=16):
# Call super constructor
@@ -65,10 +67,11 @@ class FORCE (Simulator.BaseSimulator):
dx, dy, dt, \
block_width, block_height);
self.g = np.float32(g)
self.boundary_conditions = boundary_conditions.asCodedInt()
#Get kernels
self.kernel = context.get_prepared_kernel("cuda/SWE2D_FORCE.cu", "FORCEKernel", \
"iiffffPiPiPiPiPiPi", \
"iiffffiPiPiPiPiPiPi", \
defines={
'BLOCK_WIDTH': self.block_size[0],
'BLOCK_HEIGHT': self.block_size[1]
@@ -89,14 +92,12 @@ class FORCE (Simulator.BaseSimulator):
1, 1, \
[None, None, None])
def simulate(self, t_end):
return super().simulateEuler(t_end)
def stepEuler(self, dt):
def step(self, dt):
self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream, \
self.nx, self.ny, \
self.dx, self.dy, dt, \
self.g, \
self.boundary_conditions, \
self.u0[0].data.gpudata, self.u0[0].data.strides[0], \
self.u0[1].data.gpudata, self.u0[1].data.strides[0], \
self.u0[2].data.gpudata, self.u0[2].data.strides[0], \

View File

@@ -21,6 +21,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
#Import packages we need
from GPUSimulators import Simulator, Common
from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
import numpy as np
@@ -52,6 +53,7 @@ class HLL (Simulator.BaseSimulator):
nx, ny, \
dx, dy, dt, \
g, \
boundary_conditions=BoundaryCondition(), \
block_width=16, block_height=16):
# Call super constructor
@@ -60,10 +62,11 @@ class HLL (Simulator.BaseSimulator):
dx, dy, dt, \
block_width, block_height);
self.g = np.float32(g)
self.boundary_conditions = boundary_conditions.asCodedInt()
#Get kernels
self.kernel = context.get_prepared_kernel("cuda/SWE2D_HLL.cu", "HLLKernel", \
"iiffffPiPiPiPiPiPi", \
"iiffffiPiPiPiPiPiPi", \
defines={
'BLOCK_WIDTH': self.block_size[0],
'BLOCK_HEIGHT': self.block_size[1]
@@ -84,14 +87,12 @@ class HLL (Simulator.BaseSimulator):
1, 1, \
[None, None, None])
def simulate(self, t_end):
return super().simulateEuler(t_end)
def stepEuler(self, dt):
def step(self, dt):
self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream, \
self.nx, self.ny, \
self.dx, self.dy, dt, \
self.g, \
self.boundary_conditions, \
self.u0[0].data.gpudata, self.u0[0].data.strides[0], \
self.u0[1].data.gpudata, self.u0[1].data.strides[0], \
self.u0[2].data.gpudata, self.u0[2].data.strides[0], \

View File

@@ -21,6 +21,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
#Import packages we need
from GPUSimulators import Simulator, Common
from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
import numpy as np
@@ -55,6 +56,8 @@ class HLL2 (Simulator.BaseSimulator):
dx, dy, dt, \
g, \
theta=1.8, \
order=2, \
boundary_conditions=BoundaryCondition(), \
block_width=16, block_height=16):
# Call super constructor
@@ -63,12 +66,13 @@ class HLL2 (Simulator.BaseSimulator):
dx, dy, dt, \
block_width, block_height);
self.g = np.float32(g)
self.theta = np.float32(theta)
self.order = np.int32(order)
self.boundary_conditions = boundary_conditions.asCodedInt()
#Get kernels
self.kernel = context.get_prepared_kernel("cuda/SWE2D_HLL2.cu", "HLL2Kernel", \
"iifffffiPiPiPiPiPiPi", \
"iifffffiiPiPiPiPiPiPi", \
defines={
'BLOCK_WIDTH': self.block_size[0],
'BLOCK_HEIGHT': self.block_size[1]
@@ -89,19 +93,25 @@ class HLL2 (Simulator.BaseSimulator):
2, 2, \
[None, None, None])
def simulate(self, t_end):
return super().simulateDimsplit(t_end)
def step(self, dt):
if (self.order == 1):
self.substepDimsplit(dt, substep=(self.nt % 2))
elif (self.order == 2):
self.substepDimsplit(dt, substep=0)
self.substepDimsplit(dt, substep=1)
else:
raise(NotImplementedError("Order {:d} is not implemented".format(self.order)))
self.t += dt
self.nt += 1
def stepEuler(self, dt):
return self.stepDimsplitXY(dt)
def stepDimsplitXY(self, dt):
def substepDimsplit(self, dt, substep):
self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream, \
self.nx, self.ny, \
self.dx, self.dy, dt, \
self.g, \
self.theta, \
np.int32(0), \
Simulator.stepOrderToCodedInt(step=substep, order=self.order), \
self.boundary_conditions, \
self.u0[0].data.gpudata, self.u0[0].data.strides[0], \
self.u0[1].data.gpudata, self.u0[1].data.strides[0], \
self.u0[2].data.gpudata, self.u0[2].data.strides[0], \
@@ -109,25 +119,6 @@ class HLL2 (Simulator.BaseSimulator):
self.u1[1].data.gpudata, self.u1[1].data.strides[0], \
self.u1[2].data.gpudata, self.u1[2].data.strides[0])
self.u0, self.u1 = self.u1, self.u0
self.t += dt
self.nt += 1
def stepDimsplitYX(self, dt):
self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream, \
self.nx, self.ny, \
self.dx, self.dy, dt, \
self.g, \
self.theta, \
np.int32(1), \
self.u0[0].data.gpudata, self.u0[0].data.strides[0], \
self.u0[1].data.gpudata, self.u0[1].data.strides[0], \
self.u0[2].data.gpudata, self.u0[2].data.strides[0], \
self.u1[0].data.gpudata, self.u1[0].data.strides[0], \
self.u1[1].data.gpudata, self.u1[1].data.strides[0], \
self.u1[2].data.gpudata, self.u1[2].data.strides[0])
self.u0, self.u1 = self.u1, self.u0
self.t += dt
self.nt += 1
def download(self):
return self.u0.download(self.stream)

View File

@@ -26,6 +26,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
#Import packages we need
from GPUSimulators import Simulator, Common
from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
import numpy as np
@@ -56,6 +57,8 @@ class KP07 (Simulator.BaseSimulator):
dx, dy, dt, \
g, \
theta=1.3, \
order=2, \
boundary_conditions=BoundaryCondition(), \
block_width=16, block_height=16):
# Call super constructor
@@ -65,10 +68,12 @@ class KP07 (Simulator.BaseSimulator):
block_width, block_height);
self.g = np.float32(g)
self.theta = np.float32(theta)
self.order = np.int32(order)
self.boundary_conditions = boundary_conditions.asCodedInt()
#Get kernels
self.kernel = context.get_prepared_kernel("cuda/SWE2D_KP07.cu", "KP07Kernel", \
"iifffffiPiPiPiPiPiPi", \
"iifffffiiPiPiPiPiPiPi", \
defines={
'BLOCK_WIDTH': self.block_size[0],
'BLOCK_HEIGHT': self.block_size[1]
@@ -89,8 +94,18 @@ class KP07 (Simulator.BaseSimulator):
2, 2, \
[None, None, None])
def simulate(self, t_end):
return super().simulateRK(t_end, 2)
def step(self, dt):
if (self.order == 1):
self.substepRK(dt, substep=0)
elif (self.order == 2):
self.substepRK(dt, substep=0)
self.substepRK(dt, substep=1)
else:
raise(NotImplementedError("Order {:d} is not implemented".format(self.order)))
self.t += dt
self.nt += 1
def substepRK(self, dt, substep):
self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream, \
@@ -98,7 +113,8 @@ class KP07 (Simulator.BaseSimulator):
self.dx, self.dy, dt, \
self.g, \
self.theta, \
np.int32(substep), \
Simulator.stepOrderToCodedInt(step=substep, order=self.order), \
self.boundary_conditions, \
self.u0[0].data.gpudata, self.u0[0].data.strides[0], \
self.u0[1].data.gpudata, self.u0[1].data.strides[0], \
self.u0[2].data.gpudata, self.u0[2].data.strides[0], \
@@ -107,18 +123,5 @@ class KP07 (Simulator.BaseSimulator):
self.u1[2].data.gpudata, self.u1[2].data.strides[0])
self.u0, self.u1 = self.u1, self.u0
def stepEuler(self, dt):
self.substepRK(dt, 0)
self.t += dt
self.nt += 1
def stepRK(self, dt, order):
if (order != 2):
raise NotImplementedError("Only second order implemented")
self.substepRK(dt, 0)
self.substepRK(dt, 1)
self.t += dt
self.nt += 1
def download(self):
return self.u0.download(self.stream)

View File

@@ -26,6 +26,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
#Import packages we need
from GPUSimulators import Simulator, Common
from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
import numpy as np
@@ -56,6 +57,8 @@ class KP07_dimsplit (Simulator.BaseSimulator):
dx, dy, dt, \
g, \
theta=1.3, \
order=2, \
boundary_conditions=BoundaryCondition(), \
block_width=16, block_height=16):
# Call super constructor
@@ -65,10 +68,12 @@ class KP07_dimsplit (Simulator.BaseSimulator):
block_width, block_height)
self.g = np.float32(g)
self.theta = np.float32(theta)
self.order = np.int32(order)
self.boundary_conditions = boundary_conditions.asCodedInt()
#Get kernels
self.kernel = context.get_prepared_kernel("cuda/SWE2D_KP07_dimsplit.cu", "KP07DimsplitKernel", \
"iifffffiPiPiPiPiPiPi", \
"iifffffiiPiPiPiPiPiPi", \
defines={
'BLOCK_WIDTH': self.block_size[0],
'BLOCK_HEIGHT': self.block_size[1]
@@ -89,19 +94,25 @@ class KP07_dimsplit (Simulator.BaseSimulator):
2, 2, \
[None, None, None])
def simulate(self, t_end):
return super().simulateDimsplit(t_end)
def step(self, dt):
if (self.order == 1):
self.substepDimsplit(dt, substep=(self.nt % 2))
elif (self.order == 2):
self.substepDimsplit(dt, substep=0)
self.substepDimsplit(dt, substep=1)
else:
raise(NotImplementedError("Order {:d} is not implemented".format(self.order)))
self.t += dt
self.nt += 1
def stepEuler(self, dt):
return self.stepDimsplitXY(dt)
def stepDimsplitXY(self, dt):
def substepDimsplit(self, dt, substep):
self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream, \
self.nx, self.ny, \
self.dx, self.dy, dt, \
self.g, \
self.theta, \
np.int32(0), \
Simulator.stepOrderToCodedInt(step=substep, order=self.order), \
self.boundary_conditions, \
self.u0[0].data.gpudata, self.u0[0].data.strides[0], \
self.u0[1].data.gpudata, self.u0[1].data.strides[0], \
self.u0[2].data.gpudata, self.u0[2].data.strides[0], \
@@ -109,25 +120,6 @@ class KP07_dimsplit (Simulator.BaseSimulator):
self.u1[1].data.gpudata, self.u1[1].data.strides[0], \
self.u1[2].data.gpudata, self.u1[2].data.strides[0])
self.u0, self.u1 = self.u1, self.u0
self.t += dt
self.nt += 1
def stepDimsplitYX(self, dt):
self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream, \
self.nx, self.ny, \
self.dx, self.dy, dt, \
self.g, \
self.theta, \
np.int32(1), \
self.u0[0].data.gpudata, self.u0[0].data.strides[0], \
self.u0[1].data.gpudata, self.u0[1].data.strides[0], \
self.u0[2].data.gpudata, self.u0[2].data.strides[0], \
self.u1[0].data.gpudata, self.u1[0].data.strides[0], \
self.u1[1].data.gpudata, self.u1[1].data.strides[0], \
self.u1[2].data.gpudata, self.u1[2].data.strides[0])
self.u0, self.u1 = self.u1, self.u0
self.t += dt
self.nt += 1
def download(self):
return self.u0.download(self.stream)

View File

@@ -21,7 +21,8 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
#Import packages we need
from GPUSimulators import Simulator, Common, CudaContext
from GPUSimulators import Simulator, Common
from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
import numpy as np
@@ -53,6 +54,7 @@ class LxF (Simulator.BaseSimulator):
nx, ny, \
dx, dy, dt, \
g, \
boundary_conditions=BoundaryCondition(),
block_width=16, block_height=16):
# Call super constructor
@@ -61,10 +63,11 @@ class LxF (Simulator.BaseSimulator):
dx, dy, dt, \
block_width, block_height);
self.g = np.float32(g)
self.boundary_conditions = boundary_conditions.asCodedInt()
# Get kernels
self.kernel = context.get_prepared_kernel("cuda/SWE2D_LxF.cu", "LxFKernel", \
"iiffffPiPiPiPiPiPi", \
"iiffffiPiPiPiPiPiPi", \
defines={
'BLOCK_WIDTH': self.block_size[0],
'BLOCK_HEIGHT': self.block_size[1]
@@ -85,14 +88,12 @@ class LxF (Simulator.BaseSimulator):
1, 1, \
[None, None, None])
def simulate(self, t_end):
return super().simulateEuler(t_end)
def stepEuler(self, dt):
def step(self, dt):
self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream, \
self.nx, self.ny, \
self.dx, self.dy, dt, \
self.g, \
self.boundary_conditions, \
self.u0[0].data.gpudata, self.u0[0].data.strides[0], \
self.u0[1].data.gpudata, self.u0[1].data.strides[0], \
self.u0[2].data.gpudata, self.u0[2].data.strides[0], \

View File

@@ -160,15 +160,8 @@ class BaseSimulator(object):
def simulate(self, t_end):
"""
Function which simulates forward in time using the default simulation type
"""
raise(exceptions.NotImplementedError("Needs to be implemented in subclass"))
def simulateEuler(self, t_end):
"""
Function which simulates t_end seconds using forward Euler
Requires that the stepEuler functionality is implemented in the subclasses
Function which simulates t_end seconds using the step function
Requires that the step() function is implemented in the subclasses
"""
# Compute number of timesteps to perform
n = int(t_end / self.dt + 1)
@@ -176,15 +169,16 @@ class BaseSimulator(object):
printer = Common.ProgressPrinter(n)
for i in range(0, n):
# Compute timestep for "this" iteration
# Compute timestep for "this" iteration (i.e., shorten last timestep)
local_dt = np.float32(min(self.dt, t_end-i*self.dt))
# Stop if end reached (should not happen)
if (local_dt <= 0.0):
self.logger.warning("Timestep size {:d} is less than or equal to zero!".format(self.nt + i))
break
# Step with forward Euler
self.stepEuler(local_dt)
# Step forward in time
self.step(local_dt)
#Print info
print_string = printer.getPrintString(i)
@@ -200,96 +194,10 @@ class BaseSimulator(object):
#self.logger.info("%s simulated %f seconds to %f with %d steps (Euler)", self, t_end, self.t, n)
return self.t, n
def simulateRK(self, t_end, order):
def step(self, dt):
"""
Function which simulates t_end seconds using Runge-Kutta 2
Requires that the stepRK functionality is implemented in the subclasses
"""
# Compute number of timesteps to perform
n = int(t_end / self.dt + 1)
printer = Common.ProgressPrinter(n)
for i in range(0, n):
# Compute timestep for "this" iteration
local_dt = np.float32(min(self.dt, t_end-i*self.dt))
# Stop if end reached (should not happen)
if (local_dt <= 0.0):
break
# Perform all the Runge-Kutta substeps
self.stepRK(local_dt, order)
#Print info
print_string = printer.getPrintString(i)
if (print_string):
self.logger.info("%s (RK2): %s", self, print_string)
try:
self.check()
except AssertionError as e:
e.args += ("Step={:d}, time={:f}".format(self.simSteps(), self.simTime()))
raise
return self.t, n
def simulateDimsplit(self, t_end):
"""
Function which simulates t_end seconds using second order dimensional splitting (XYYX)
Requires that the stepDimsplitX and stepDimsplitY functionality is implemented in the subclasses
"""
# Compute number of timesteps to perform
n = int(t_end / (2.0*self.dt) + 1)
printer = Common.ProgressPrinter(n)
for i in range(0, n):
# Compute timestep for "this" iteration
local_dt = np.float32(0.5*min(2*self.dt, t_end-2*i*self.dt))
# Stop if end reached (should not happen)
if (local_dt <= 0.0):
break
# Perform the dimensional split substeps
self.stepDimsplitXY(local_dt)
self.stepDimsplitYX(local_dt)
#Print info
print_string = printer.getPrintString(i)
if (print_string):
self.logger.info("%s (Dimsplit): %s", self, print_string)
try:
self.check()
except AssertionError as e:
e.args += ("Step={:d}, time={:f}".format(self.simSteps(), self.simTime()))
raise
return self.t, 2*n
def stepEuler(self, dt):
"""
Function which performs one single timestep of size dt using forward euler
"""
raise(NotImplementedError("Needs to be implemented in subclass"))
def stepRK(self, dt, substep):
"""
Function which performs one single timestep of size dt using Runge-Kutta
"""
raise(NotImplementedError("Needs to be implemented in subclass"))
def stepDimsplitXY(self, dt):
"""
Function which performs one single timestep of size dt using dimensional splitting
"""
raise(NotImplementedError("Needs to be implemented in subclass"))
def stepDimsplitYX(self, dt):
"""
Function which performs one single timestep of size dt using dimensional splitting
Function which performs one single timestep of size dt
"""
raise(NotImplementedError("Needs to be implemented in subclass"))

View File

@@ -22,6 +22,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
#Import packages we need
from GPUSimulators import Simulator, Common
from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
import numpy as np
@@ -51,6 +52,8 @@ class WAF (Simulator.BaseSimulator):
nx, ny, \
dx, dy, dt, \
g, \
order=2, \
boundary_conditions=BoundaryCondition(), \
block_width=16, block_height=16):
# Call super constructor
@@ -59,10 +62,12 @@ class WAF (Simulator.BaseSimulator):
dx, dy, dt, \
block_width, block_height);
self.g = np.float32(g)
self.order = np.int32(order)
self.boundary_conditions = boundary_conditions.asCodedInt()
#Get kernels
self.kernel = context.get_prepared_kernel("cuda/SWE2D_WAF.cu", "WAFKernel", \
"iiffffiPiPiPiPiPiPi", \
"iiffffiiPiPiPiPiPiPi", \
defines={
'BLOCK_WIDTH': self.block_size[0],
'BLOCK_HEIGHT': self.block_size[1]
@@ -83,18 +88,24 @@ class WAF (Simulator.BaseSimulator):
2, 2, \
[None, None, None])
def simulate(self, t_end):
return super().simulateDimsplit(t_end)
def step(self, dt):
if (self.order == 1):
self.substepDimsplit(dt, substep=(self.nt % 2))
elif (self.order == 2):
self.substepDimsplit(dt, substep=0)
self.substepDimsplit(dt, substep=1)
else:
raise(NotImplementedError("Order {:d} is not implemented".format(self.order)))
self.t += dt
self.nt += 1
def stepEuler(self, dt):
return self.stepDimsplitXY(dt)
def stepDimsplitXY(self, dt):
def substepDimsplit(self, dt, substep):
self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream, \
self.nx, self.ny, \
self.dx, self.dy, dt, \
self.g, \
np.int32(0), \
Simulator.stepOrderToCodedInt(step=substep, order=self.order), \
self.boundary_conditions, \
self.u0[0].data.gpudata, self.u0[0].data.strides[0], \
self.u0[1].data.gpudata, self.u0[1].data.strides[0], \
self.u0[2].data.gpudata, self.u0[2].data.strides[0], \
@@ -102,24 +113,6 @@ class WAF (Simulator.BaseSimulator):
self.u1[1].data.gpudata, self.u1[1].data.strides[0], \
self.u1[2].data.gpudata, self.u1[2].data.strides[0])
self.u0, self.u1 = self.u1, self.u0
self.t += dt
self.nt += 1
def stepDimsplitYX(self, dt):
self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream, \
self.nx, self.ny, \
self.dx, self.dy, dt, \
self.g, \
np.int32(1), \
self.u0[0].data.gpudata, self.u0[0].data.strides[0], \
self.u0[1].data.gpudata, self.u0[1].data.strides[0], \
self.u0[2].data.gpudata, self.u0[2].data.strides[0], \
self.u1[0].data.gpudata, self.u1[0].data.strides[0], \
self.u1[1].data.gpudata, self.u1[1].data.strides[0], \
self.u1[2].data.gpudata, self.u1[2].data.strides[0])
self.u0, self.u1 = self.u1, self.u0
self.t += dt
self.nt += 1
def download(self):
return self.u0.download(self.stream)

View File

@@ -157,8 +157,8 @@ __global__ void KP07DimsplitKernel(
//Read into shared memory
readBlock<w, h, gc, 1, 1>( rho0_ptr_, rho0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
readBlock<w, h, gc, 1, -1>(rho_u0_ptr_, rho_u0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
readBlock<w, h, gc, -1, 1>(rho_v0_ptr_, rho_v0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
readBlock<w, h, gc, -1, 1>(rho_u0_ptr_, rho_u0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
readBlock<w, h, gc, 1, -1>(rho_v0_ptr_, rho_v0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
readBlock<w, h, gc, 1, 1>( E0_ptr_, E0_pitch_, Q[3], nx_, ny_, boundary_conditions_);
__syncthreads();
@@ -226,33 +226,16 @@ __global__ void KP07DimsplitKernel(
Q[3][j][i] -= g_*rho_v*dt_;
__syncthreads();
}
//This is the RK2-part
if (getOrder(step_order_) == 2) {
const int i = threadIdx.x + gc;
const int j = threadIdx.y + gc;
const int tx = blockDim.x*blockIdx.x + i;
const int ty = blockDim.y*blockIdx.y + j;
const float q1 = ((float*) ((char*) rho1_ptr_ + rho1_pitch_*ty))[tx];
const float q2 = ((float*) ((char*) rho_u1_ptr_ + rho_u1_pitch_*ty))[tx];
const float q3 = ((float*) ((char*) rho_v1_ptr_ + rho_v1_pitch_*ty))[tx];
const float q4 = ((float*) ((char*) E1_ptr_ + E1_pitch_*ty))[tx];
Q[0][j][i] = 0.5f*( Q[0][j][i] + q1 );
Q[1][j][i] = 0.5f*( Q[1][j][i] + q2 );
Q[2][j][i] = 0.5f*( Q[2][j][i] + q3 );
Q[3][j][i] = 0.5f*( Q[3][j][i] + q4 );
__syncthreads();
}
}
// Write to main memory for all internal cells
writeBlock<w, h, gc>( rho1_ptr_, rho1_pitch_, Q[0], nx_, ny_);
writeBlock<w, h, gc>(rho_u1_ptr_, rho_u1_pitch_, Q[1], nx_, ny_);
writeBlock<w, h, gc>(rho_v1_ptr_, rho_v1_pitch_, Q[2], nx_, ny_);
writeBlock<w, h, gc>( E1_ptr_, E1_pitch_, Q[3], nx_, ny_);
const int step = getStep(step_order_);
const int order = getOrder(step_order_);
writeBlock<w, h, gc>( rho1_ptr_, rho1_pitch_, Q[0], nx_, ny_, step, order);
writeBlock<w, h, gc>(rho_u1_ptr_, rho_u1_pitch_, Q[1], nx_, ny_, step, order);
writeBlock<w, h, gc>(rho_v1_ptr_, rho_v1_pitch_, Q[2], nx_, ny_, step, order);
writeBlock<w, h, gc>( E1_ptr_, E1_pitch_, Q[3], nx_, ny_, step, order);
}
} // extern "C"

View File

@@ -87,6 +87,8 @@ __global__ void FORCEKernel(
float dx_, float dy_, float dt_,
float g_,
int boundary_conditions_,
//Input h^n
float* h0_ptr_, int h0_pitch_,
float* hu0_ptr_, int hu0_pitch_,
@@ -106,15 +108,9 @@ __global__ void FORCEKernel(
__shared__ float F[3][h+2][w+2];
//Read into shared memory
readBlock<w, h, gc>( h0_ptr_, h0_pitch_, Q[0], nx_+2, ny_+2);
readBlock<w, h, gc>(hu0_ptr_, hu0_pitch_, Q[1], nx_+2, ny_+2);
readBlock<w, h, gc>(hv0_ptr_, hv0_pitch_, Q[2], nx_+2, ny_+2);
__syncthreads();
//Set boundary conditions
noFlowBoundary<w, h, gc, 1, 1>(Q[0], nx_, ny_);
noFlowBoundary<w, h, gc, -1, 1>(Q[1], nx_, ny_);
noFlowBoundary<w, h, gc, 1, -1>(Q[2], nx_, ny_);
readBlock<w, h, gc, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
readBlock<w, h, gc, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
readBlock<w, h, gc, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
__syncthreads();
//Compute flux along x, and evolve
@@ -123,12 +119,6 @@ __global__ void FORCEKernel(
evolveF<w, h, gc, vars>(Q, F, dx_, dt_);
__syncthreads();
//Set boundary conditions
noFlowBoundary<w, h, gc, 1, 1>(Q[0], nx_, ny_);
noFlowBoundary<w, h, gc, -1, 1>(Q[1], nx_, ny_);
noFlowBoundary<w, h, gc, 1, -1>(Q[2], nx_, ny_);
__syncthreads();
//Compute flux along y, and evolve
computeFluxG(Q, F, g_, dy_, dt_);
__syncthreads();
@@ -136,9 +126,9 @@ __global__ void FORCEKernel(
__syncthreads();
//Write to main memory
writeBlock<w, h, gc>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_);
writeBlock<w, h, gc>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_);
writeBlock<w, h, gc>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_);
writeBlock<w, h, gc>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1);
writeBlock<w, h, gc>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
writeBlock<w, h, gc>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
}
} // extern "C"

View File

@@ -103,6 +103,8 @@ __global__ void HLLKernel(
float dx_, float dy_, float dt_,
float g_,
int boundary_conditions_,
//Input h^n
float* h0_ptr_, int h0_pitch_,
float* hu0_ptr_, int hu0_pitch_,
@@ -123,16 +125,9 @@ __global__ void HLLKernel(
__shared__ float F[3][h+2][w+2];
//Read into shared memory
readBlock<w, h, gc>( h0_ptr_, h0_pitch_, Q[0], nx_+2, ny_+2);
readBlock<w, h, gc>(hu0_ptr_, hu0_pitch_, Q[1], nx_+2, ny_+2);
readBlock<w, h, gc>(hv0_ptr_, hv0_pitch_, Q[2], nx_+2, ny_+2);
__syncthreads();
//Set boundary conditions
noFlowBoundary<w, h, gc, 1, 1>(Q[0], nx_, ny_);
noFlowBoundary<w, h, gc, -1, 1>(Q[1], nx_, ny_);
noFlowBoundary<w, h, gc, 1, -1>(Q[2], nx_, ny_);
__syncthreads();
readBlock<w, h, gc, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
readBlock<w, h, gc, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
readBlock<w, h, gc, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
//Compute F flux
computeFluxF(Q, F, g_);
@@ -141,12 +136,6 @@ __global__ void HLLKernel(
evolveF<w, h, gc, vars>(Q, F, dx_, dt_);
__syncthreads();
//Set boundary conditions
noFlowBoundary<w, h, gc, 1, 1>(Q[0], nx_, ny_);
noFlowBoundary<w, h, gc, -1, 1>(Q[1], nx_, ny_);
noFlowBoundary<w, h, gc, 1, -1>(Q[2], nx_, ny_);
__syncthreads();
//Compute G flux
computeFluxG(Q, F, g_);
__syncthreads();
@@ -155,9 +144,9 @@ __global__ void HLLKernel(
__syncthreads();
// Write to main memory for all internal cells
writeBlock<w, h, gc>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_);
writeBlock<w, h, gc>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_);
writeBlock<w, h, gc>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_);
writeBlock<w, h, gc>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1);
writeBlock<w, h, gc>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
writeBlock<w, h, gc>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
}
} // extern "C"

View File

@@ -130,7 +130,8 @@ __global__ void HLL2Kernel(
float theta_,
int step_,
int step_order_,
int boundary_conditions_,
//Input h^n
float* h0_ptr_, int h0_pitch_,
@@ -153,19 +154,12 @@ __global__ void HLL2Kernel(
__shared__ float F[3][h+4][w+4];
//Read into shared memory
readBlock<w, h, gc>( h0_ptr_, h0_pitch_, Q[0], nx_+2, ny_+2);
readBlock<w, h, gc>(hu0_ptr_, hu0_pitch_, Q[1], nx_+2, ny_+2);
readBlock<w, h, gc>(hv0_ptr_, hv0_pitch_, Q[2], nx_+2, ny_+2);
__syncthreads();
//Set boundary conditions
noFlowBoundary<w, h, gc, 1, 1>(Q[0], nx_, ny_);
noFlowBoundary<w, h, gc, -1, 1>(Q[1], nx_, ny_);
noFlowBoundary<w, h, gc, 1, -1>(Q[2], nx_, ny_);
__syncthreads();
readBlock<w, h, gc, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
readBlock<w, h, gc, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
readBlock<w, h, gc, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
//Step 0 => evolve x first, then y
if (step_ == 0) {
if (getStep(step_order_) == 0) {
//Compute fluxes along the x axis and evolve
minmodSlopeX<w, h, gc, vars>(Q, Qx, theta_);
__syncthreads();
@@ -174,12 +168,6 @@ __global__ void HLL2Kernel(
evolveF<w, h, gc, vars>(Q, F, dx_, dt_);
__syncthreads();
//Set boundary conditions
noFlowBoundary<w, h, gc, 1, 1>(Q[0], nx_, ny_);
noFlowBoundary<w, h, gc, -1, 1>(Q[1], nx_, ny_);
noFlowBoundary<w, h, gc, 1, -1>(Q[2], nx_, ny_);
__syncthreads();
//Compute fluxes along the y axis and evolve
minmodSlopeY<w, h, gc, vars>(Q, Qx, theta_);
__syncthreads();
@@ -198,12 +186,6 @@ __global__ void HLL2Kernel(
evolveG<w, h, gc, vars>(Q, F, dy_, dt_);
__syncthreads();
//Set boundary conditions
noFlowBoundary<w, h, gc, 1, 1>(Q[0], nx_, ny_);
noFlowBoundary<w, h, gc, -1, 1>(Q[1], nx_, ny_);
noFlowBoundary<w, h, gc, 1, -1>(Q[2], nx_, ny_);
__syncthreads();
//Compute fluxes along the x axis and evolve
minmodSlopeX<w, h, gc, vars>(Q, Qx, theta_);
__syncthreads();
@@ -217,9 +199,11 @@ __global__ void HLL2Kernel(
// Write to main memory for all internal cells
writeBlock<w, h, 2>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_);
writeBlock<w, h, 2>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_);
writeBlock<w, h, 2>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_);
const int step = getStep(step_order_);
const int order = getOrder(step_order_);
writeBlock<w, h, 2>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, step, order);
writeBlock<w, h, 2>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, step, order);
writeBlock<w, h, 2>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, step, order);
}
} // extern "C"

View File

@@ -140,7 +140,8 @@ __global__ void KP07Kernel(
float theta_,
int step_,
int step_order_,
int boundary_conditions_,
//Input h^n
float* h0_ptr_, int h0_pitch_,
@@ -167,9 +168,6 @@ __global__ void KP07Kernel(
//Shared memory variables
__shared__ float Q[3][h+4][w+4];
//The following slightly wastes memory, but enables us to reuse the
//funcitons in common.opencl
__shared__ float Qx[3][h+2][w+2];
__shared__ float Qy[3][h+2][w+2];
__shared__ float F[3][h+1][w+1];
@@ -178,17 +176,9 @@ __global__ void KP07Kernel(
//Read into shared memory
readBlock<w, h, gc>( h0_ptr_, h0_pitch_, Q[0], nx_+2, ny_+2);
readBlock<w, h, gc>(hu0_ptr_, hu0_pitch_, Q[1], nx_+2, ny_+2);
readBlock<w, h, gc>(hv0_ptr_, hv0_pitch_, Q[2], nx_+2, ny_+2);
__syncthreads();
//Fix boundary conditions
noFlowBoundary<w, h, gc, 1, 1>(Q[0], nx_, ny_);
noFlowBoundary<w, h, gc, -1, 1>(Q[1], nx_, ny_);
noFlowBoundary<w, h, gc, 1, -1>(Q[2], nx_, ny_);
__syncthreads();
readBlock<w, h, gc, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
readBlock<w, h, gc, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
readBlock<w, h, gc, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
//Reconstruct slopes along x and axis
@@ -219,31 +209,17 @@ __global__ void KP07Kernel(
float* const hu_row = (float*) ((char*) hu1_ptr_ + hu1_pitch_*tj);
float* const hv_row = (float*) ((char*) hv1_ptr_ + hv1_pitch_*tj);
if (step_ == 0) {
//First step of RK2 ODE integrator
if (getOrder(step_order_) == 2 && getStep(step_order_) == 1) {
//Write to main memory
h_row[ti] = 0.5f*(h_row[ti] + h1);
hu_row[ti] = 0.5f*(hu_row[ti] + hu1);
hv_row[ti] = 0.5f*(hv_row[ti] + hv1);
}
else {
h_row[ti] = h1;
hu_row[ti] = hu1;
hv_row[ti] = hv1;
}
else if (step_ == 1) {
//Second step of RK2 ODE integrator
//First read Q^n
const float h_a = h_row[ti];
const float hu_a = hu_row[ti];
const float hv_a = hv_row[ti];
//Compute Q^n+1
const float h_b = 0.5f*(h_a + h1);
const float hu_b = 0.5f*(hu_a + hu1);
const float hv_b = 0.5f*(hv_a + hv1);
//Write to main memory
h_row[ti] = h_b;
hu_row[ti] = hu_b;
hv_row[ti] = hv_b;
}
}
}
} //extern "C"

View File

@ -121,7 +121,8 @@ __global__ void KP07DimsplitKernel(
float theta_,
int step_,
int step_order_,
int boundary_conditions_,
//Input h^n
float* h0_ptr_, int h0_pitch_,
@ -147,22 +148,14 @@ __global__ void KP07DimsplitKernel(
//Read into shared memory
readBlock<w, h, gc>( h0_ptr_, h0_pitch_, Q[0], nx_+2, ny_+2);
readBlock<w, h, gc>(hu0_ptr_, hu0_pitch_, Q[1], nx_+2, ny_+2);
readBlock<w, h, gc>(hv0_ptr_, hv0_pitch_, Q[2], nx_+2, ny_+2);
__syncthreads();
//Fix boundary conditions
noFlowBoundary<w, h, gc, 1, 1>(Q[0], nx_, ny_);
noFlowBoundary<w, h, gc, -1, 1>(Q[1], nx_, ny_);
noFlowBoundary<w, h, gc, 1, -1>(Q[2], nx_, ny_);
__syncthreads();
readBlock<w, h, gc, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
readBlock<w, h, gc, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
readBlock<w, h, gc, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
//Step 0 => evolve x first, then y
if (step_ == 0) {
if (getStep(step_order_) == 0) {
//Compute fluxes along the x axis and evolve
minmodSlopeX<w, h, gc, vars>(Q, Qx, theta_);
__syncthreads();
@ -171,18 +164,9 @@ __global__ void KP07DimsplitKernel(
evolveF<w, h, gc, vars>(Q, F, dx_, dt_);
__syncthreads();
//Set boundary conditions
noFlowBoundary<w, h, gc, 1, 1>(Q[0], nx_, ny_);
noFlowBoundary<w, h, gc, -1, 1>(Q[1], nx_, ny_);
noFlowBoundary<w, h, gc, 1, -1>(Q[2], nx_, ny_);
__syncthreads();
//Compute fluxes along the y axis and evolve
minmodSlopeY<w, h, gc, vars>(Q, Qx, theta_);
__syncthreads();
computeFluxG(Q, Qx, F, g_, dy_, dt_);
__syncthreads();
evolveG<w, h, gc, vars>(Q, F, dy_, dt_);
@ -198,12 +182,6 @@ __global__ void KP07DimsplitKernel(
evolveG<w, h, gc, vars>(Q, F, dy_, dt_);
__syncthreads();
//Set boundary conditions
noFlowBoundary<w, h, gc, 1, 1>(Q[0], nx_, ny_);
noFlowBoundary<w, h, gc, -1, 1>(Q[1], nx_, ny_);
noFlowBoundary<w, h, gc, 1, -1>(Q[2], nx_, ny_);
__syncthreads();
//Compute fluxes along the x axis and evolve
minmodSlopeX<w, h, gc, vars>(Q, Qx, theta_);
__syncthreads();
@ -215,9 +193,11 @@ __global__ void KP07DimsplitKernel(
// Write to main memory for all internal cells
writeBlock<w, h, gc>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_);
writeBlock<w, h, gc>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_);
writeBlock<w, h, gc>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_);
const int step = getStep(step_order_);
const int order = getOrder(step_order_);
writeBlock<w, h, gc>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, step, order);
writeBlock<w, h, gc>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, step, order);
writeBlock<w, h, gc>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, step, order);
}
} // extern "C"

View File

@ -104,6 +104,8 @@ void LxFKernel(
float dx_, float dy_, float dt_,
float g_,
int boundary_conditions_,
//Input h^n
float* h0_ptr_, int h0_pitch_,
float* hu0_ptr_, int hu0_pitch_,
@ -123,16 +125,9 @@ void LxFKernel(
__shared__ float G[3][h+1][w ];
//Read from global memory
readBlock<w, h, gc>( h0_ptr_, h0_pitch_, Q[0], nx_+2, ny_+2);
readBlock<w, h, gc>(hu0_ptr_, hu0_pitch_, Q[1], nx_+2, ny_+2);
readBlock<w, h, gc>(hv0_ptr_, hv0_pitch_, Q[2], nx_+2, ny_+2);
__syncthreads();
//Set boundary conditions
noFlowBoundary<w, h, gc, 1, 1>(Q[0], nx_, ny_);
noFlowBoundary<w, h, gc, -1, 1>(Q[1], nx_, ny_);
noFlowBoundary<w, h, gc, 1, -1>(Q[2], nx_, ny_);
__syncthreads();
readBlock<w, h, gc, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
readBlock<w, h, gc, 1, -1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
readBlock<w, h, gc, -1, 1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
//Compute fluxes along the x and y axis
computeFluxF<w, h>(Q, F, g_, dx_, dt_);
@ -154,9 +149,9 @@ void LxFKernel(
__syncthreads();
//Write to main memory
writeBlock<w, h, gc>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_);
writeBlock<w, h, gc>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_);
writeBlock<w, h, gc>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_);
writeBlock<w, h, gc>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1);
writeBlock<w, h, gc>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
writeBlock<w, h, gc>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
}
} // extern "C"

View File

@ -103,7 +103,10 @@ extern "C" {
__global__ void WAFKernel(
int nx_, int ny_,
float dx_, float dy_, float dt_,
float g_, int step_,
float g_,
int step_order_,
int boundary_conditions_,
//Input h^n
float* h0_ptr_, int h0_pitch_,
@ -127,34 +130,21 @@ __global__ void WAFKernel(
//Read into shared memory Q from global memory
readBlock<w, h, gc>( h0_ptr_, h0_pitch_, Q[0], nx_+2, ny_+2);
readBlock<w, h, gc>(hu0_ptr_, hu0_pitch_, Q[1], nx_+2, ny_+2);
readBlock<w, h, gc>(hv0_ptr_, hv0_pitch_, Q[2], nx_+2, ny_+2);
__syncthreads();
//Set boundary conditions
noFlowBoundary<w, h, gc, 1, 1>(Q[0], nx_, ny_);
noFlowBoundary<w, h, gc, -1, 1>(Q[1], nx_, ny_);
noFlowBoundary<w, h, gc, 1, -1>(Q[2], nx_, ny_);
readBlock<w, h, gc, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
readBlock<w, h, gc, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
readBlock<w, h, gc, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
__syncthreads();
//Step 0 => evolve x first, then y
if (step_ == 0) {
if (getStep(step_order_) == 0) {
//Compute fluxes along the x axis and evolve
computeFluxF(Q, F, g_, dx_, dt_);
__syncthreads();
evolveF<w, h, gc, vars>(Q, F, dx_, dt_);
__syncthreads();
//Fix boundary conditions
noFlowBoundary<w, h, gc, 1, 1>(Q[0], nx_, ny_);
noFlowBoundary<w, h, gc, -1, 1>(Q[1], nx_, ny_);
noFlowBoundary<w, h, gc, 1, -1>(Q[2], nx_, ny_);
__syncthreads();
//Compute fluxes along the y axis and evolve
computeFluxG(Q, F, g_, dy_, dt_);
__syncthreads();
@ -169,12 +159,6 @@ __global__ void WAFKernel(
evolveG<w, h, gc, vars>(Q, F, dy_, dt_);
__syncthreads();
//Fix boundary conditions
noFlowBoundary<w, h, gc, 1, 1>(Q[0], nx_, ny_);
noFlowBoundary<w, h, gc, -1, 1>(Q[1], nx_, ny_);
noFlowBoundary<w, h, gc, 1, -1>(Q[2], nx_, ny_);
__syncthreads();
//Compute fluxes along the x axis and evolve
computeFluxF(Q, F, g_, dx_, dt_);
__syncthreads();
@ -185,9 +169,11 @@ __global__ void WAFKernel(
// Write to main memory for all internal cells
writeBlock<w, h, 2>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_);
writeBlock<w, h, 2>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_);
writeBlock<w, h, 2>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_);
const int step = getStep(step_order_);
const int order = getOrder(step_order_);
writeBlock<w, h, gc>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, step, order);
writeBlock<w, h, gc>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, step, order);
writeBlock<w, h, gc>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, step, order);
}
} // extern "C"

View File

@ -134,14 +134,19 @@ inline __device__ BoundaryCondition getBCWest(int bc_) {
template<int block_width, int block_height, int ghost_cells>
/**
* Alter the index l so that it gives periodic boundary conditions when reading
*/
template<int ghost_cells>
inline __device__ int handlePeriodicBoundaryX(int k, int nx_, int boundary_conditions_) {
const int gc_pad = 2*ghost_cells;
//West boundary: add an offset to read from east of domain
if ((k < gc_pad)
&& getBCWest(boundary_conditions_) == Periodic) {
k += (nx_+2*ghost_cells - 2*gc_pad);
}
//East boundary: subtract an offset to read from west of domain
else if ((k >= nx_+2*ghost_cells-gc_pad)
&& getBCEast(boundary_conditions_) == Periodic) {
k -= (nx_+2*ghost_cells - 2*gc_pad);
@ -150,14 +155,19 @@ inline __device__ int handlePeriodicBoundaryX(int k, int nx_, int boundary_condi
return k;
}
template<int block_width, int block_height, int ghost_cells>
/**
* Alter the index l so that it gives periodic boundary conditions when reading
*/
template<int ghost_cells>
inline __device__ int handlePeriodicBoundaryY(int l, int ny_, int boundary_conditions_) {
const int gc_pad = 2*ghost_cells;
//South boundary: add an offset to read from north of domain
if ((l < gc_pad)
&& getBCSouth(boundary_conditions_) == Periodic) {
l += (ny_+2*ghost_cells - 2*gc_pad);
}
//North boundary: subtract an offset to read from south of domain
else if ((l >= ny_+2*ghost_cells-gc_pad)
&& getBCNorth(boundary_conditions_) == Periodic) {
l -= (ny_+2*ghost_cells - 2*gc_pad);
@ -167,10 +177,34 @@ inline __device__ int handlePeriodicBoundaryY(int l, int ny_, int boundary_condi
}
/**
 * Applies reflective (wall) boundary conditions to the shared-memory block Q
 * for one conserved variable: each domain edge whose boundary condition is
 * Reflective gets its ghost cells filled by the corresponding bc*Reflective
 * helper. Edges with other boundary condition types are left untouched here.
 *
 * Template parameters:
 *   block_width, block_height, ghost_cells - shared-memory tile geometry.
 *   sign_x, sign_y - sign applied when mirroring across a vertical (east/west)
 *                    or horizontal (north/south) wall respectively; pass -1
 *                    for the momentum component normal to the wall so that it
 *                    is negated, +1 otherwise.
 *
 * Q      - shared-memory tile, (block_height+2*ghost_cells) x
 *          (block_width+2*ghost_cells) floats, updated in place.
 * nx_, ny_ - domain size in cells (excluding ghost cells).
 * boundary_conditions_ - packed per-edge boundary condition codes, decoded
 *          via getBCNorth/South/East/West.
 *
 * NOTE(fix): this function only mutates Q and never produced a value, yet was
 * declared to return int - falling off the end of a non-void function is
 * undefined behavior in C++/CUDA, so the return type is now void. Callers
 * invoke it as a plain statement, so this is backward-compatible.
 */
template<int block_width, int block_height, int ghost_cells, int sign_x, int sign_y>
inline __device__ void handleReflectiveBoundary(
                float Q[block_height+2*ghost_cells][block_width+2*ghost_cells],
                const int nx_, const int ny_,
                const int boundary_conditions_) {
    //Each helper writes ghost cells that other threads may read afterwards,
    //so synchronize after every edge that is actually processed.
    //(boundary_conditions_ is uniform across the block, so all threads take
    //the same branches and every __syncthreads() is reached by all of them.)
    if (getBCNorth(boundary_conditions_) == Reflective) {
        bcNorthReflective<block_width, block_height, ghost_cells, sign_y>(Q, nx_, ny_);
        __syncthreads();
    }
    if (getBCSouth(boundary_conditions_) == Reflective) {
        bcSouthReflective<block_width, block_height, ghost_cells, sign_y>(Q, nx_, ny_);
        __syncthreads();
    }
    if (getBCEast(boundary_conditions_) == Reflective) {
        bcEastReflective<block_width, block_height, ghost_cells, sign_x>(Q, nx_, ny_);
        __syncthreads();
    }
    if (getBCWest(boundary_conditions_) == Reflective) {
        bcWestReflective<block_width, block_height, ghost_cells, sign_x>(Q, nx_, ny_);
        __syncthreads();
    }
}
/**
* Reads a block of data with ghost cells
*/
template<int block_width, int block_height, int ghost_cells, int sign_north_south, int sign_east_west>
template<int block_width, int block_height, int ghost_cells, int sign_x, int sign_y>
inline __device__ void readBlock(float* ptr_, int pitch_,
float Q[block_height+2*ghost_cells][block_width+2*ghost_cells],
const int nx_, const int ny_,
@ -183,13 +217,13 @@ inline __device__ void readBlock(float* ptr_, int pitch_,
//Loop over all variables
for (int j=threadIdx.y; j<block_height+2*ghost_cells; j+=block_height) {
//Handle periodic boundary conditions here
int l = handlePeriodicBoundaryY<block_width, block_height, ghost_cells>(by + j, ny_, boundary_conditions_);
int l = handlePeriodicBoundaryY<ghost_cells>(by + j, ny_, boundary_conditions_);
l = min(l, ny_+2*ghost_cells-1);
float* row = (float*) ((char*) ptr_ + pitch_*l);
for (int i=threadIdx.x; i<block_width+2*ghost_cells; i+=block_width) {
//Handle periodic boundary conditions here
int k = handlePeriodicBoundaryX<block_width, block_height, ghost_cells>(bx + i, nx_, boundary_conditions_);
int k = handlePeriodicBoundaryX<ghost_cells>(bx + i, nx_, boundary_conditions_);
k = min(k, nx_+2*ghost_cells-1);
//Read from global memory
@ -198,23 +232,7 @@ inline __device__ void readBlock(float* ptr_, int pitch_,
}
__syncthreads();
//Handle reflective boundary conditions
if (getBCNorth(boundary_conditions_) == Reflective) {
bcNorthReflective<block_width, block_height, ghost_cells, sign_north_south>(Q, nx_, ny_);
__syncthreads();
}
if (getBCSouth(boundary_conditions_) == Reflective) {
bcSouthReflective<block_width, block_height, ghost_cells, sign_north_south>(Q, nx_, ny_);
__syncthreads();
}
if (getBCEast(boundary_conditions_) == Reflective) {
bcEastReflective<block_width, block_height, ghost_cells, sign_east_west>(Q, nx_, ny_);
__syncthreads();
}
if (getBCWest(boundary_conditions_) == Reflective) {
bcWestReflective<block_width, block_height, ghost_cells, sign_east_west>(Q, nx_, ny_);
__syncthreads();
}
handleReflectiveBoundary<block_width, block_height, ghost_cells, sign_x, sign_y>(Q, nx_, ny_, boundary_conditions_);
}
@ -226,7 +244,8 @@ inline __device__ void readBlock(float* ptr_, int pitch_,
template<int block_width, int block_height, int ghost_cells>
inline __device__ void writeBlock(float* ptr_, int pitch_,
float shmem[block_height+2*ghost_cells][block_width+2*ghost_cells],
const int width, const int height) {
const int width, const int height,
int rk_step_, int rk_order_) {
//Index of cell within domain
const int ti = blockDim.x*blockIdx.x + threadIdx.x + ghost_cells;
@ -239,7 +258,14 @@ inline __device__ void writeBlock(float* ptr_, int pitch_,
const int ty = threadIdx.y + ghost_cells;
float* const row = (float*) ((char*) ptr_ + pitch_*tj);
row[ti] = shmem[ty][tx];
//Handle runge-kutta timestepping here
if (rk_order_ == 2 && rk_step_ == 1) {
row[ti] = 0.5f*(row[ti] + shmem[ty][tx]);
}
else {
row[ti] = shmem[ty][tx];
}
}
}
@ -255,6 +281,7 @@ inline __device__ void writeBlock(float* ptr_, int pitch_,
// West boundary
template<int block_width, int block_height, int ghost_cells, int sign>
__device__ void bcWestReflective(float Q[block_height+2*ghost_cells][block_width+2*ghost_cells], const int nx_, const int ny_) {

File diff suppressed because one or more lines are too long