/*
This CUDA kernel implements the weighted average flux (WAF) numerical
scheme for the shallow water equations, within the same finite-volume
framework as the central-upwind scheme of

A. Kurganov & Guergana Petrova
A Second-Order Well-Balanced Positivity Preserving Central-Upwind
Scheme for the Saint-Venant System,
Communications in Mathematical Sciences, 5 (2007), 133-160.

Copyright (C) 2016 SINTEF ICT

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#include "common.h"
#include "SWECommon.h"

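/*
Both flux functions below operate on a shared-memory tile of
BLOCK_WIDTH x BLOCK_HEIGHT cells padded with two ghost cells on every side
(hence the "+ 4" in each array dimension). The thread block cooperatively
strides over the padded tile, one face per thread per loop iteration.
*/
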
/**
 * Computes the flux along the x-axis for all faces
 */
__device__
void computeFluxF(float Q[3][BLOCK_HEIGHT + 4][BLOCK_WIDTH + 4],
                  float F[3][BLOCK_HEIGHT + 4][BLOCK_WIDTH + 4],
                  const float g_, const float dx_, const float dt_) {
    for (unsigned int j = threadIdx.y; j < BLOCK_HEIGHT + 4; j += BLOCK_HEIGHT) {
        for (unsigned int i = threadIdx.x + 1; i < BLOCK_WIDTH + 2; i += BLOCK_WIDTH) {
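            // With two ghost cells on each side of the tile, F[.][j][i] holds the
            // flux across the face between cells i and i+1, so i runs over
            // [1, BLOCK_WIDTH + 1] to cover every x-face of the interior cells.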
            // Q at interface from the right and left
            const float3 Ql2 = make_float3(Q[0][j][i - 1], Q[1][j][i - 1], Q[2][j][i - 1]);
            const float3 Ql1 = make_float3(Q[0][j][i], Q[1][j][i], Q[2][j][i]);
            const float3 Qr1 = make_float3(Q[0][j][i + 1], Q[1][j][i + 1], Q[2][j][i + 1]);
            const float3 Qr2 = make_float3(Q[0][j][i + 2], Q[1][j][i + 2], Q[2][j][i + 2]);

            // Computed flux
            const auto [x, y, z] = WAF_1D_flux(Ql2, Ql1, Qr1, Qr2, g_, dx_, dt_);
            F[0][j][i] = x;
            F[1][j][i] = y;
            F[2][j][i] = z;
        }
    }
}


/**
 * Computes the flux along the y-axis for all faces
 */
__device__
void computeFluxG(float Q[3][BLOCK_HEIGHT + 4][BLOCK_WIDTH + 4],
                  float G[3][BLOCK_HEIGHT + 4][BLOCK_WIDTH + 4],
                  const float g_, const float dy_, const float dt_) {
    for (unsigned int j = threadIdx.y + 1; j < BLOCK_HEIGHT + 2; j += BLOCK_HEIGHT) {
        for (unsigned int i = threadIdx.x; i < BLOCK_WIDTH + 4; i += BLOCK_WIDTH) {
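            // The 1D flux routine treats the second vector component as the momentum
            // normal to the face, so for y-faces hv is passed in hu's place (and the
            // components are swapped back when the result is stored below).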
            // Q at interface from the right and left
            // Note that we swap hu and hv
            const float3 Ql2 = make_float3(Q[0][j - 1][i], Q[2][j - 1][i], Q[1][j - 1][i]);
            const float3 Ql1 = make_float3(Q[0][j][i], Q[2][j][i], Q[1][j][i]);
            const float3 Qr1 = make_float3(Q[0][j + 1][i], Q[2][j + 1][i], Q[1][j + 1][i]);
            const float3 Qr2 = make_float3(Q[0][j + 2][i], Q[2][j + 2][i], Q[1][j + 2][i]);

            // Computed flux
            // Note that we swap back
            const auto [x, y, z] = WAF_1D_flux(Ql2, Ql1, Qr1, Qr2, g_, dy_, dt_);
            G[0][j][i] = x;
            G[1][j][i] = z;
            G[2][j][i] = y;
        }
    }
}


extern "C" {
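/**
 * Evolves the shallow water variables (h, hu, hv) one timestep with the
 * WAF scheme, using dimensional splitting.
 *
 * step_ selects the sweep order (0: x first then y, 1: y first then x);
 * alternating it between consecutive timesteps symmetrises the splitting.
 * Only the internal cells of the subarea given by x0/x1 and y0/y1 are
 * written (zero meaning the full domain).
 */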
__global__ void WAFKernel(
        const int nx_, const int ny_,
        const float dx_, const float dy_, const float dt_,
        const float g_,

        const int step_,
        const int boundary_conditions_,

        // Input h^n
        float *h0_ptr_, const int h0_pitch_,
        float *hu0_ptr_, const int hu0_pitch_,
        float *hv0_ptr_, const int hv0_pitch_,

        // Output h^{n+1}
        float *h1_ptr_, const int h1_pitch_,
        float *hu1_ptr_, const int hu1_pitch_,
        float *hv1_ptr_, const int hv1_pitch_,

        // Subarea of internal domain to compute
        const int x0 = 0, const int y0 = 0,
        int x1 = 0, int y1 = 0) {
    if (x1 == 0)
        x1 = nx_;

    if (y1 == 0)
        y1 = ny_;

    constexpr unsigned int w = BLOCK_WIDTH;
    constexpr unsigned int h = BLOCK_HEIGHT;
    constexpr unsigned int gc_x = 2;
    constexpr unsigned int gc_y = 2;
    constexpr unsigned int vars = 3;

    // Shared memory variables
    __shared__ float Q[3][h + 4][w + 4];
    __shared__ float F[3][h + 4][w + 4];
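    // F doubles as scratch storage for both the x-fluxes (computeFluxF) and,
    // in the second sweep, the y-fluxes (computeFluxG).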

    // Read Q (h, hu, hv) from global memory into shared memory
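    // The +1/-1 template arguments give the sign applied to each variable when
    // ghost cells are filled at the outer boundary (see the readBlock definition);
    // flipping the sign of the momentum normal to a boundary (hu in x, hv in y)
    // is what a reflective wall requires.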
    readBlock<w, h, gc_x, gc_y, 1, 1>(h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
    readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
    readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
    __syncthreads();

    // Step 0 => evolve x first, then y
    if (step_ == 0) {
        // Compute fluxes along the x-axis and evolve
        computeFluxF(Q, F, g_, dx_, dt_);
        __syncthreads();
        evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
        __syncthreads();

        // Compute fluxes along the y-axis and evolve
        computeFluxG(Q, F, g_, dy_, dt_);
        __syncthreads();
        evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
        __syncthreads();
    }
    // Step 1 => evolve y first, then x
    else {
        // Compute fluxes along the y-axis and evolve
        computeFluxG(Q, F, g_, dy_, dt_);
        __syncthreads();
        evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
        __syncthreads();

        // Compute fluxes along the x-axis and evolve
        computeFluxF(Q, F, g_, dx_, dt_);
        __syncthreads();
        evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
        __syncthreads();
    }

    // Write the results back to global memory for all internal cells
    writeBlock<w, h, gc_x, gc_y>(h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1, x0, y0, x1, y1);
    writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1, x0, y0, x1, y1);
    writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1, x0, y0, x1, y1);
}

} // extern "C"
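
/*
Illustrative host-side launch (a minimal sketch; the plain CUDA C++ driver,
the variable names and the buffer ping-ponging shown here are assumptions,
not part of this file nor necessarily how the surrounding project calls it):

    // One thread block of BLOCK_WIDTH x BLOCK_HEIGHT threads per tile.
    const dim3 block(BLOCK_WIDTH, BLOCK_HEIGHT);
    const dim3 grid((nx + BLOCK_WIDTH - 1) / BLOCK_WIDTH,
                    (ny + BLOCK_HEIGHT - 1) / BLOCK_HEIGHT);

    // Each call advances one timestep (both sweeps). Alternating step_
    // between timesteps alternates the sweep order, and the input/output
    // buffers are swapped between calls.
    WAFKernel<<<grid, block>>>(nx, ny, dx, dy, dt, g, 0, bc,
                               h0, h_pitch, hu0, hu_pitch, hv0, hv_pitch,
                               h1, h_pitch, hu1, hu_pitch, hv1, hv_pitch);
    WAFKernel<<<grid, block>>>(nx, ny, dx, dy, dt, g, 1, bc,
                               h1, h_pitch, hu1, hu_pitch, hv1, hv_pitch,
                               h0, h_pitch, hu0, hu_pitch, hv0, hv_pitch);
*/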