Updates, HLL doesn't work yet

André R. Brodtkorb 2018-07-25 09:39:17 +02:00
parent fcc1d0db1c
commit dd88d44162
6 changed files with 81 additions and 142 deletions

View File

@@ -45,22 +45,22 @@ class CUDAArray2D:
     """
     Uploads initial data to the CL device
     """
-    def __init__(self, nx, ny, halo_x, halo_y, data, stream=None):
-        host_data = self.convert_to_float32(data)
+    def __init__(self, stream, nx, ny, halo_x, halo_y, data):
         self.nx = nx
         self.ny = ny
         self.nx_halo = nx + 2*halo_x
         self.ny_halo = ny + 2*halo_y
-        assert(host_data.shape[1] == self.nx_halo)
-        assert(host_data.shape[0] == self.ny_halo)
+        #Make sure data is in proper format
+        assert(np.issubdtype(data.dtype, np.float32))
+        assert(not np.isfortran(data))
         assert(data.shape == (self.ny_halo, self.nx_halo))
 
         #Upload data to the device
-        self.data = pycuda.gpuarray.to_gpu_async(host_data, stream=stream)
+        self.data = pycuda.gpuarray.to_gpu_async(data, stream=stream)
 
-        self.bytes_per_float = host_data.itemsize
+        self.bytes_per_float = data.itemsize
         assert(self.bytes_per_float == 4)
         self.pitch = np.int32((self.nx_halo)*self.bytes_per_float)
@@ -68,9 +68,7 @@ class CUDAArray2D:
     """
     Enables downloading data from CL device to Python
     """
-    def download(self, stream=None, async=False):
-        #Allocate data on the host for result
-
+    def download(self, stream, async=False):
         #Copy data from device to host
         if (async):
             host_data = self.data.get_async(stream=stream)
@@ -79,17 +77,6 @@ class CUDAArray2D:
             host_data = self.data.get(stream=stream)#, pagelocked=True) # pagelocked causes crash on windows at least
 
         return host_data
-
-    """
-    Converts to C-style float 32 array suitable for the GPU/OpenCL
-    """
-    @staticmethod
-    def convert_to_float32(data):
-        if (not np.issubdtype(data.dtype, np.float32) or np.isfortran(data)):
-            print("WARNING: Converting DATA IN COMMON.PY")
-            return data.astype(np.float32, order='C')
-        else:
-            return data
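
Note: with convert_to_float32() removed, dtype conversion becomes the caller's responsibility, and the new asserts reject anything that is not a C-ordered float32 array. A minimal sketch of the preparation a caller would now do (illustrative names, not code from this commit):

    import numpy as np

    # No-op if the array is already float32 and C-ordered; otherwise copies.
    h0 = np.ascontiguousarray(h0, dtype=np.float32)
    data = CUDAArray2D(stream, nx, ny, halo_x, halo_y, h0)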
@@ -105,14 +92,14 @@ class SWEDataArakawaA:
     """
     Uploads initial data to the CL device
     """
-    def __init__(self, nx, ny, halo_x, halo_y, h0, hu0, hv0, stream=None):
-        self.h0 = CUDAArray2D(nx, ny, halo_x, halo_y, h0, stream=stream)
-        self.hu0 = CUDAArray2D(nx, ny, halo_x, halo_y, hu0, stream=stream)
-        self.hv0 = CUDAArray2D(nx, ny, halo_x, halo_y, hv0, stream=stream)
-        self.h1 = CUDAArray2D(nx, ny, halo_x, halo_y, h0, stream=stream)
-        self.hu1 = CUDAArray2D(nx, ny, halo_x, halo_y, hu0, stream=stream)
-        self.hv1 = CUDAArray2D(nx, ny, halo_x, halo_y, hv0, stream=stream)
+    def __init__(self, stream, nx, ny, halo_x, halo_y, h0, hu0, hv0):
+        self.h0 = CUDAArray2D(stream, nx, ny, halo_x, halo_y, h0)
+        self.hu0 = CUDAArray2D(stream, nx, ny, halo_x, halo_y, hu0)
+        self.hv0 = CUDAArray2D(stream, nx, ny, halo_x, halo_y, hv0)
+        self.h1 = CUDAArray2D(stream, nx, ny, halo_x, halo_y, h0)
+        self.hu1 = CUDAArray2D(stream, nx, ny, halo_x, halo_y, hu0)
+        self.hv1 = CUDAArray2D(stream, nx, ny, halo_x, halo_y, hv0)
 
     """
     Swaps the variables after a timestep has been completed
@@ -125,61 +112,11 @@ class SWEDataArakawaA:
     """
     Enables downloading data from CL device to Python
     """
-    def download(self, stream=None):
-        h_cpu = self.h0.download(stream=stream, async=True)
-        hu_cpu = self.hu0.download(stream=stream, async=True)
-        hv_cpu = self.hv0.download(stream=stream, async=False)
+    def download(self, stream):
+        h_cpu = self.h0.download(stream, async=True)
+        hu_cpu = self.hu0.download(stream, async=True)
+        hv_cpu = self.hv0.download(stream, async=False)
 
         return h_cpu, hu_cpu, hv_cpu
-
-"""
-A class representing an Akrawa C type (staggered, u fluxes on east/west faces, v fluxes on north/south faces) grid
-We use h as cell centers
-"""
-class SWEDataArakawaC:
-    """
-    Uploads initial data to the CL device
-    """
-    def __init__(self, nx, ny, halo_x, halo_y, h0, hu0, hv0):
-        #FIXME: This at least works for 0 and 1 ghost cells, but not convinced it generalizes
-        assert(halo_x <= 1 and halo_y <= 1)
-
-        self.h0 = CUDAArray2D(nx, ny, halo_x, halo_y, h0)
-        self.hu0 = CUDAArray2D(nx+1, ny, 0, halo_y, hu0)
-        self.hv0 = CUDAArray2D(nx, ny+1, halo_x, 0, hv0)
-
-        self.h1 = CUDAArray2D(nx, ny, halo_x, halo_y, h0)
-        self.hu1 = CUDAArray2D(nx+1, ny, 0, halo_y, hu0)
-        self.hv1 = CUDAArray2D(nx, ny+1, halo_x, 0, hv0)
-
-    """
-    Swaps the variables after a timestep has been completed
-    """
-    def swap(self):
-        #h is assumed to be constant (bottom topography really)
-        self.h1, self.h0 = self.h0, self.h1
-        self.hu1, self.hu0 = self.hu0, self.hu1
-        self.hv1, self.hv0 = self.hv0, self.hv1
-
-    """
-    Enables downloading data from CL device to Python
-    """
-    def download(self, stream=None):
-        h_cpu = self.h0.download(stream=stream, async=True)
-        hu_cpu = self.hu0.download(stream=stream, async=True)
-        hv_cpu = self.hv0.download(stream=stream, async=False)
-
-        return h_cpu, hu_cpu, hv_cpu
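
The download above queues the first two copies asynchronously and only blocks on the last one. A sketch of the assumed rationale, using pycuda directly (toy names; assumes the usual semantics where transfers on one stream execute in order and a synchronous copy waits for prior device work):

    h_cpu = h0_gpu.get_async(stream=stream)    # queued, returns immediately
    hu_cpu = hu0_gpu.get_async(stream=stream)  # queued, returns immediately
    hv_cpu = hv0_gpu.get()                     # blocks; all three are done after this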

View File

@@ -73,10 +73,10 @@ class FORCE:
         #Create data by uploading to device
         ghost_cells_x = 1
         ghost_cells_y = 1
-        self.data = Common.SWEDataArakawaA(nx, ny, \
-                        ghost_cells_x, ghost_cells_y, \
-                        h0, hu0, hv0, \
-                        stream=self.stream)
+        self.data = Common.SWEDataArakawaA(self.stream, \
+                        nx, ny, \
+                        ghost_cells_x, ghost_cells_y, \
+                        h0, hu0, hv0)
 
         #Save input parameters
         #Notice that we need to specify them in the correct dataformat for the
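
For reference, the stream-first convention now expected by SWEDataArakawaA, condensed into a standalone sketch (names assumed from the surrounding class):

    import pycuda.driver as cuda
    from SWESimulators import Common

    stream = cuda.Stream()  # one stream per simulator, created before any upload
    data = Common.SWEDataArakawaA(stream, nx, ny,
                                  ghost_cells_x, ghost_cells_y,
                                  h0, hu0, hv0)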

View File

@@ -55,7 +55,6 @@ void computeFluxF(float Q[3][block_height+2][block_width+2],
             F[2][j][i] = flux.z;
         }
     }
-    __syncthreads();
 }
@@ -93,7 +92,6 @@ void computeFluxG(float Q[3][block_height+2][block_width+2],
             G[2][j][i] = flux.y;
         }
     }
-    __syncthreads();
 }
@@ -134,13 +132,7 @@ __global__ void FORCEKernel(
                    hv0_ptr_, hv0_pitch_,
                    Q, nx_, ny_);
     __syncthreads();
-
-    //Save our input variables
-    const float h0 = Q[0][ty+1][tx+1];
-    const float hu0 = Q[1][ty+1][tx+1];
-    const float hv0 = Q[2][ty+1][tx+1];
-
     //Set boundary conditions
     noFlowBoundary1(Q, nx_, ny_);
@@ -148,6 +140,7 @@ __global__ void FORCEKernel(
     //Compute flux along x, and evolve
     computeFluxF(Q, F, g_, dx_, dt_);
+    __syncthreads();
     evolveF1(Q, F, nx_, ny_, dx_, dt_);
     __syncthreads();
@@ -157,6 +150,7 @@ __global__ void FORCEKernel(
     //Compute flux along y, and evolve
     computeFluxG(Q, F, g_, dy_, dt_);
+    __syncthreads();
     evolveG1(Q, F, nx_, ny_, dy_, dt_);
     __syncthreads();
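
The net effect of these hunks is a convention change: the flux functions no longer synchronize internally, and the kernel inserts __syncthreads() between phases instead. A minimal pycuda sketch of that convention, using a hypothetical toy kernel rather than the FORCE scheme:

    import numpy as np
    import pycuda.autoinit
    import pycuda.driver as cuda
    from pycuda.compiler import SourceModule

    mod = SourceModule("""
    __device__ void double_in_place(float* buf, int i) {
        buf[i] *= 2.0f;        // no __syncthreads() here: the caller's job
    }

    __global__ void two_phase(float* data) {
        __shared__ float buf[64];
        const int i = threadIdx.x;
        buf[i] = data[i];
        __syncthreads();       // barrier between load and compute phases
        double_in_place(buf, i);
        __syncthreads();       // barrier before the cross-thread read below
        data[i] = buf[63 - i]; // reads a value written by another thread
    }
    """)

    two_phase = mod.get_function("two_phase")
    a = np.arange(64, dtype=np.float32)
    two_phase(cuda.InOut(a), block=(64, 1, 1), grid=(1, 1))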

View File

@@ -21,7 +21,11 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
 
 #Import packages we need
 import numpy as np
-import pyopencl as cl #OpenCL in Python
+import pycuda.compiler as cuda_compiler
+import pycuda.gpuarray
+import pycuda.driver as cuda
 
 from SWESimulators import Common
@@ -49,24 +53,26 @@ class HLL:
     g: Gravitational accelleration (9.81 m/s^2)
     """
     def __init__(self, \
-                 cl_ctx, \
-                 h0, u0, v0, \
+                 h0, hu0, hv0, \
                  nx, ny, \
                  dx, dy, dt, \
                  g, \
                  block_width=16, block_height=16):
-        self.cl_ctx = cl_ctx
-
-        #Create an OpenCL command queue
-        self.cl_queue = cl.CommandQueue(self.cl_ctx)
+        #Create a CUDA stream
+        self.stream = cuda.Stream()
 
         #Get kernels
-        self.lxf_kernel = Common.get_kernel(self.cl_ctx, "HLL_kernel.opencl", block_width, block_height)
+        self.hll_module = Common.get_kernel("HLL_kernel.cu", block_width, block_height)
+        self.hll_kernel = self.hll_module.get_function("HLLKernel")
+        self.hll_kernel.prepare("iiffffPiPiPiPiPiPi")
 
         #Create data by uploading to device
         ghost_cells_x = 1
         ghost_cells_y = 1
-        self.cl_data = Common.SWEDataArkawaA(self.cl_ctx, nx, ny, ghost_cells_x, ghost_cells_y, h0, u0, v0)
+        self.data = Common.SWEDataArakawaA(self.stream, \
+                        nx, ny, \
+                        ghost_cells_x, ghost_cells_y, \
+                        h0, hu0, hv0)
 
         #Save input parameters
         #Notice that we need to specify them in the correct dataformat for the
@@ -82,7 +88,7 @@ class HLL:
         self.t = np.float32(0.0)
 
         #Compute kernel launch parameters
-        self.local_size = (block_width, block_height)
+        self.local_size = (block_width, block_height, 1)
         self.global_size = ( \
             int(np.ceil(self.nx / float(self.local_size[0])) * self.local_size[0]), \
             int(np.ceil(self.ny / float(self.local_size[1])) * self.local_size[1]) \
@@ -105,20 +111,20 @@ class HLL:
             if (local_dt <= 0.0):
                 break
 
-            self.lxf_kernel.swe_2D(self.cl_queue, self.global_size, self.local_size, \
+            self.hll_kernel.prepared_async_call(self.global_size, self.local_size, self.stream, \
                     self.nx, self.ny, \
                     self.dx, self.dy, local_dt, \
                     self.g, \
-                    self.cl_data.h0.data, self.cl_data.h0.pitch, \
-                    self.cl_data.hu0.data, self.cl_data.hu0.pitch, \
-                    self.cl_data.hv0.data, self.cl_data.hv0.pitch, \
-                    self.cl_data.h1.data, self.cl_data.h1.pitch, \
-                    self.cl_data.hu1.data, self.cl_data.hu1.pitch, \
-                    self.cl_data.hv1.data, self.cl_data.hv1.pitch)
+                    self.data.h0.data.gpudata, self.data.h0.pitch, \
+                    self.data.hu0.data.gpudata, self.data.hu0.pitch, \
+                    self.data.hv0.data.gpudata, self.data.hv0.pitch, \
+                    self.data.h1.data.gpudata, self.data.h1.pitch, \
+                    self.data.hu1.data.gpudata, self.data.hu1.pitch, \
+                    self.data.hv1.data.gpudata, self.data.hv1.pitch)
 
             self.t += local_dt
-            self.cl_data.swap()
+            self.data.swap()
 
         return self.t
@@ -127,5 +133,5 @@ class HLL:
 
     def download(self):
-        return self.cl_data.download(self.cl_queue)
+        return self.data.download(self.stream)
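
prepare()/prepared_async_call() is pycuda's prepared-invocation API: prepare() fixes the argument layout once ('i' = 32-bit int, 'f' = float, 'P' = pointer, so "iiffffPiPiPiPiPiPi" matches the kernel's 2 ints, 4 floats, and 6 pointer+pitch pairs), and each launch then skips argument re-marshalling. Note also that local_size gains a third component because CUDA blocks are three-dimensional. A self-contained sketch of the pattern with a hypothetical kernel:

    import numpy as np
    import pycuda.autoinit
    import pycuda.driver as cuda
    import pycuda.gpuarray
    from pycuda.compiler import SourceModule

    mod = SourceModule("""
    __global__ void scale(int n, float a, float* x) {
        const int i = blockIdx.x*blockDim.x + threadIdx.x;
        if (i < n) x[i] *= a;
    }
    """)

    stream = cuda.Stream()
    scale = mod.get_function("scale")
    scale.prepare("ifP")  # int, float, pointer -- done once at setup

    x = pycuda.gpuarray.to_gpu_async(np.ones(256, dtype=np.float32), stream=stream)
    # Non-blocking launch on the stream; returns immediately.
    scale.prepared_async_call((1, 1), (256, 1, 1), stream,
                              np.int32(256), np.float32(2.0), x.gpudata)
    print(x.get())  # blocking download; prints an array of 2.0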

View File

@@ -19,7 +19,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
 
-#include "common.opencl"
+#include "common.cu"
@@ -28,8 +28,9 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
 /**
  * Computes the flux along the x axis for all faces
  */
-void computeFluxF(__local float Q[3][block_height+2][block_width+2],
-                  __local float F[3][block_height+1][block_width+1],
+__device__
+void computeFluxF(float Q[3][block_height+2][block_width+2],
+                  float F[3][block_height+1][block_width+1],
                   const float g_) {
     //Index of thread within block
     const int tx = get_local_id(0);
@@ -40,8 +41,8 @@ void computeFluxF(__local float Q[3][block_height+2][block_width+2],
     for (int i=tx; i<block_width+1; i+=get_local_size(0)) {
         const int k = i;
-        const float3 Q_l = (float3)(Q[0][l][k ], Q[1][l][k ], Q[2][l][k ]);
-        const float3 Q_r = (float3)(Q[0][l][k+1], Q[1][l][k+1], Q[2][l][k+1]);
+        const float3 Q_l = make_float3(Q[0][l][k ], Q[1][l][k ], Q[2][l][k ]);
+        const float3 Q_r = make_float3(Q[0][l][k+1], Q[1][l][k+1], Q[2][l][k+1]);
 
         const float3 flux = HLL_flux(Q_l, Q_r, g_);
@@ -60,8 +61,9 @@ void computeFluxF(__local float Q[3][block_height+2][block_width+2],
 /**
  * Computes the flux along the x axis for all faces
  */
-void computeFluxG(__local float Q[3][block_height+2][block_width+2],
-                  __local float G[3][block_height+1][block_width+1],
+__device__
+void computeFluxG(float Q[3][block_height+2][block_width+2],
+                  float G[3][block_height+1][block_width+1],
                   const float g_) {
     //Index of thread within block
     const int tx = get_local_id(0);
@@ -73,8 +75,8 @@ void computeFluxG(__local float Q[3][block_height+2][block_width+2],
         const int k = i + 1; //Skip ghost cells
         //NOte that hu and hv are swapped ("transposing" the domain)!
-        const float3 Q_l = (float3)(Q[0][l ][k], Q[2][l ][k], Q[1][l ][k]);
-        const float3 Q_r = (float3)(Q[0][l+1][k], Q[2][l+1][k], Q[1][l+1][k]);
+        const float3 Q_l = make_float3(Q[0][l ][k], Q[2][l ][k], Q[1][l ][k]);
+        const float3 Q_r = make_float3(Q[0][l+1][k], Q[2][l+1][k], Q[1][l+1][k]);
 
         // Computed flux
         const float3 flux = HLL_flux(Q_l, Q_r, g_);
@@ -100,23 +102,23 @@ void computeFluxG(__local float Q[3][block_height+2][block_width+2],
-__kernel void swe_2D(
+__global__ void HLLKernel(
         int nx_, int ny_,
         float dx_, float dy_, float dt_,
         float g_,
 
         //Input h^n
-        __global float* h0_ptr_, int h0_pitch_,
-        __global float* hu0_ptr_, int hu0_pitch_,
-        __global float* hv0_ptr_, int hv0_pitch_,
+        float* h0_ptr_, int h0_pitch_,
+        float* hu0_ptr_, int hu0_pitch_,
+        float* hv0_ptr_, int hv0_pitch_,
 
         //Output h^{n+1}
-        __global float* h1_ptr_, int h1_pitch_,
-        __global float* hu1_ptr_, int hu1_pitch_,
-        __global float* hv1_ptr_, int hv1_pitch_) {
+        float* h1_ptr_, int h1_pitch_,
+        float* hu1_ptr_, int hu1_pitch_,
+        float* hv1_ptr_, int hv1_pitch_) {
 
     //Shared memory variables
-    __local float Q[3][block_height+2][block_width+2];
-    __local float F[3][block_height+1][block_width+1];
+    __shared__ float Q[3][block_height+2][block_width+2];
+    __shared__ float F[3][block_height+1][block_width+1];
 
     //Read into shared memory
@@ -124,26 +126,26 @@ __kernel void swe_2D(
                    hu0_ptr_, hu0_pitch_,
                    hv0_ptr_, hv0_pitch_,
                    Q, nx_, ny_);
-    barrier(CLK_LOCAL_MEM_FENCE);
+    __syncthreads();
 
     noFlowBoundary1(Q, nx_, ny_);
-    barrier(CLK_LOCAL_MEM_FENCE);
+    __syncthreads();
 
     //Compute F flux
     computeFluxF(Q, F, g_);
-    barrier(CLK_LOCAL_MEM_FENCE);
+    __syncthreads();
 
     evolveF1(Q, F, nx_, ny_, dx_, dt_);
-    barrier(CLK_LOCAL_MEM_FENCE);
+    __syncthreads();
 
     //Set boundary conditions
     noFlowBoundary1(Q, nx_, ny_);
-    barrier(CLK_LOCAL_MEM_FENCE);
+    __syncthreads();
 
     //Compute G flux
     computeFluxG(Q, F, g_);
-    barrier(CLK_LOCAL_MEM_FENCE);
+    __syncthreads();
 
     evolveG1(Q, F, nx_, ny_, dy_, dt_);
-    barrier(CLK_LOCAL_MEM_FENCE);
+    __syncthreads();
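
For reference, the OpenCL-to-CUDA substitutions applied in this file, plus one that appears to be missing: the flux functions above still call get_local_id()/get_local_size(), which CUDA does not define (the equivalents are threadIdx and blockDim), consistent with the commit message that HLL does not work yet. A toy sketch of the translated constructs (hypothetical kernel, not the HLL scheme):

    import numpy as np
    import pycuda.autoinit
    import pycuda.driver as cuda
    from pycuda.compiler import SourceModule

    mod = SourceModule("""
    // __kernel                     -> __global__
    // __local (in kernel body)     -> __shared__
    // __local (array parameter)    -> plain array parameter
    // barrier(CLK_LOCAL_MEM_FENCE) -> __syncthreads()
    // (float3)(x, y, z)            -> make_float3(x, y, z)
    // get_local_id(0)              -> threadIdx.x
    // get_local_size(0)            -> blockDim.x
    __global__ void sum3(const float* in, float* out) {
        const int tx = threadIdx.x;  // was get_local_id(0)
        const float3 q = make_float3(in[3*tx], in[3*tx+1], in[3*tx+2]);
        out[tx] = q.x + q.y + q.z;
    }
    """)

    sum3 = mod.get_function("sum3")
    a = np.arange(12, dtype=np.float32)  # four packed float3 values
    b = np.zeros(4, dtype=np.float32)
    sum3(cuda.In(a), cuda.Out(b), block=(4, 1, 1), grid=(1, 1))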

View File

@@ -73,10 +73,10 @@ class LxF:
         #Create data by uploading to device
         ghost_cells_x = 1
         ghost_cells_y = 1
-        self.data = Common.SWEDataArakawaA(nx, ny, \
-                        ghost_cells_x, ghost_cells_y, \
-                        h0, hu0, hv0, \
-                        stream=self.stream)
+        self.data = Common.SWEDataArakawaA(self.stream, \
+                        nx, ny, \
+                        ghost_cells_x, ghost_cells_y, \
+                        h0, hu0, hv0)
 
         #Save input parameters
         #Notice that we need to specify them in the correct dataformat for the