refactor(gpu): fix up formatting and warnings

Anthony Berg 2025-06-16 17:38:23 +02:00
parent 66e0f8024a
commit bb0f75afea
12 changed files with 1355 additions and 1619 deletions

View File

@ -22,166 +22,153 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
#include "limiters.h"
template<int w, int h, int gc_x, int gc_y, int vars>
__device__ void writeCfl(float Q[vars][h+2*gc_y][w+2*gc_x],
float shmem[h+2*gc_y][w+2*gc_x],
const int nx_, const int ny_,
const float dx_, const float dy_, const float gamma_,
float* output_) {
//Index of thread within block
__device__ void writeCfl(float Q[vars][h + 2 * gc_y][w + 2 * gc_x],
float shmem[h + 2 * gc_y][w + 2 * gc_x],
const int nx_, const int ny_,
const float dx_, const float dy_, const float gamma_,
float *output_) {
// Index of thread within block
const int tx = threadIdx.x + gc_x;
const int ty = threadIdx.y + gc_y;
//Index of cell within domain
const int ti = blockDim.x*blockIdx.x + tx;
const int tj = blockDim.y*blockIdx.y + ty;
//Only internal cells
if (ti < nx_+gc_x && tj < ny_+gc_y) {
// Index of cell within domain
const int ti = blockDim.x * blockIdx.x + tx;
const int tj = blockDim.y * blockIdx.y + ty;
// Only internal cells
if (ti < nx_ + gc_x && tj < ny_ + gc_y) {
const float rho = Q[0][ty][tx];
const float u = Q[1][ty][tx] / rho;
const float v = Q[2][ty][tx] / rho;
const float max_u = dx_ / (fabsf(u) + sqrtf(gamma_*rho));
const float max_v = dy_ / (fabsf(v) + sqrtf(gamma_*rho));
const float u = Q[1][ty][tx] / rho;
const float v = Q[2][ty][tx] / rho;
const float max_u = dx_ / (fabsf(u) + sqrtf(gamma_ * rho));
const float max_v = dy_ / (fabsf(v) + sqrtf(gamma_ * rho));
shmem[ty][tx] = fminf(max_u, max_v);
}
__syncthreads();
//One row of threads loop over all rows
if (ti < nx_+gc_x && tj < ny_+gc_y) {
// One row of threads loops over all rows
if (ti < nx_ + gc_x && tj < ny_ + gc_y) {
if (ty == gc_y) {
float min_val = shmem[ty][tx];
const int max_y = min(h, ny_+gc_y - tj);
for (int j=gc_y; j<max_y+gc_y; j++) {
const int max_y = min(h, ny_ + gc_y - tj);
for (int j = gc_y; j < max_y + gc_y; ++j) {
min_val = fminf(min_val, shmem[j][tx]);
}
shmem[ty][tx] = min_val;
}
}
__syncthreads();
//One thread loops over first row to find global max
// One thread loops over the first row to find the minimum for this block
if (tx == gc_x && ty == gc_y) {
float min_val = shmem[ty][tx];
const int max_x = min(w, nx_+gc_x - ti);
for (int i=gc_x; i<max_x+gc_x; ++i) {
const int max_x = min(w, nx_ + gc_x - ti);
for (int i = gc_x; i < max_x + gc_x; ++i) {
min_val = fminf(min_val, shmem[ty][i]);
}
const int idx = gridDim.x*blockIdx.y + blockIdx.x;
const unsigned int idx = gridDim.x * blockIdx.y + blockIdx.x;
output_[idx] = min_val;
}
}
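For each interior cell the kernel above stores the local timestep bound min(dx/(|u|+c), dy/(|v|+c)), using the wave-speed estimate c = sqrtf(gamma_ * rho) from the code, and the two reductions collapse this to a single minimum per block in output_. The host is then assumed to take the minimum over all blocks and scale it by a CFL number C, giving the usual restriction

\[ \Delta t \le C \,\min_{\text{cells}} \min\!\left( \frac{\Delta x}{|u| + c},\ \frac{\Delta y}{|v| + c} \right), \qquad 0 < C \le 1. \]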
inline __device__ float pressure(float4 Q, float gamma) {
const float rho = Q.x;
const float rho = Q.x;
const float rho_u = Q.y;
const float rho_v = Q.z;
const float E = Q.w;
const float E = Q.w;
return (gamma-1.0f)*(E-0.5f*(rho_u*rho_u + rho_v*rho_v)/rho);
return (gamma - 1.0f) * (E - 0.5f * (rho_u * rho_u + rho_v * rho_v) / rho);
}
__device__ float4 F_func(const float4 Q, float P) {
const float rho = Q.x;
__device__ inline float4 F_func(const float4 Q, const float P) {
const float rho = Q.x;
const float rho_u = Q.y;
const float rho_v = Q.z;
const float E = Q.w;
const float E = Q.w;
const float u = rho_u/rho;
const float u = rho_u / rho;
float4 F;
F.x = rho_u;
F.y = rho_u*u + P;
F.z = rho_v*u;
F.w = u*(E+P);
F.y = rho_u * u + P;
F.z = rho_v * u;
F.w = u * (E + P);
return F;
}
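For reference, the two helpers above implement the ideal-gas pressure and the x-directional Euler flux; with conserved variables Q = (rho, rho u, rho v, E) they read

\[ p = (\gamma - 1)\left( E - \frac{(\rho u)^2 + (\rho v)^2}{2\rho} \right), \qquad F(Q) = \begin{pmatrix} \rho u \\ \rho u^2 + p \\ \rho u v \\ u\,(E + p) \end{pmatrix}. \]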
/**
* Harten-Lax-van Leer (HLL) approximate Riemann solver (Toro 2001, p 180)
*/
__device__ float4 HLL_flux(const float4 Q_l, const float4 Q_r, const float gamma) {
__device__ inline float4 HLL_flux(const float4 Q_l, const float4 Q_r, const float gamma) {
const float h_l = Q_l.x;
const float h_r = Q_r.x;
// Calculate velocities
const float u_l = Q_l.y / h_l;
const float u_r = Q_r.y / h_r;
// Calculate pressures
const float P_l = pressure(Q_l, gamma);
const float P_r = pressure(Q_r, gamma);
// Estimate the potential wave speeds
const float c_l = sqrt(gamma*P_l/Q_l.x);
const float c_r = sqrt(gamma*P_r/Q_r.x);
const float c_l = sqrt(gamma * P_l / Q_l.x);
const float c_r = sqrt(gamma * P_r / Q_r.x);
// Compute h in the "star region", h^dagger
const float h_dag = 0.5f * (h_l+h_r) - 0.25f * (u_r-u_l)*(h_l+h_r)/(c_l+c_r);
const float q_l_tmp = sqrt(0.5f * ( (h_dag+h_l)*h_dag / (h_l*h_l) ) );
const float q_r_tmp = sqrt(0.5f * ( (h_dag+h_r)*h_dag / (h_r*h_r) ) );
const float h_dag = 0.5f * (h_l + h_r) - 0.25f * (u_r - u_l) * (h_l + h_r) / (c_l + c_r);
const float q_l_tmp = sqrt(0.5f * ((h_dag + h_l) * h_dag / (h_l * h_l)));
const float q_r_tmp = sqrt(0.5f * ((h_dag + h_r) * h_dag / (h_r * h_r)));
const float q_l = (h_dag > h_l) ? q_l_tmp : 1.0f;
const float q_r = (h_dag > h_r) ? q_r_tmp : 1.0f;
// Compute wave speed estimates
const float S_l = u_l - c_l*q_l;
const float S_r = u_r + c_r*q_r;
//Upwind selection
const float S_l = u_l - c_l * q_l;
const float S_r = u_r + c_r * q_r;
// Upwind selection
if (S_l >= 0.0f) {
return F_func(Q_l, P_l);
}
else if (S_r <= 0.0f) {
} else if (S_r <= 0.0f) {
return F_func(Q_r, P_r);
}
//Or estimate flux in the star region
// Or estimate flux in the star region
else {
const float4 F_l = F_func(Q_l, P_l);
const float4 F_r = F_func(Q_r, P_r);
const float4 flux = (S_r*F_l - S_l*F_r + S_r*S_l*(Q_r - Q_l)) / (S_r-S_l);
const float4 flux = (S_r * F_l - S_l * F_r + S_r * S_l * (Q_r - Q_l)) / (S_r - S_l);
return flux;
}
}
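In the star region the function above returns the standard HLL average of the two one-sided fluxes,

\[ F^{\mathrm{HLL}} = \frac{S_r F(Q_l) - S_l F(Q_r) + S_r S_l\,(Q_r - Q_l)}{S_r - S_l}, \qquad S_l \le 0 \le S_r, \]

with F evaluated at the left and right pressures computed earlier in the function.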
/**
* Central upwind flux function
*/
__device__ float4 CentralUpwindFlux(const float4 Qm, const float4 Qp, const float gamma) {
__device__ inline float4 CentralUpwindFlux(const float4 Qm, const float4 Qp, const float gamma) {
const float Pp = pressure(Qp, gamma);
const float4 Fp = F_func(Qp, Pp);
const float up = Qp.y / Qp.x; // rho*u / rho
const float cp = sqrt(gamma*Pp/Qp.x); // sqrt(gamma*P/rho)
const float up = Qp.y / Qp.x; // rho*u / rho
const float cp = sqrt(gamma * Pp / Qp.x); // sqrt(gamma*P/rho)
const float Pm = pressure(Qm, gamma);
const float4 Fm = F_func(Qm, Pm);
const float um = Qm.y / Qm.x; // rho*u / rho
const float cm = sqrt(gamma*Pm/Qm.x); // sqrt(gamma*P/rho)
const float am = min(min(um-cm, up-cp), 0.0f); // largest negative wave speed
const float ap = max(max(um+cm, up+cp), 0.0f); // largest positive wave speed
return ((ap*Fm - am*Fp) + ap*am*(Qp-Qm))/(ap-am);
}
const float um = Qm.y / Qm.x; // rho*u / rho
const float cm = sqrt(gamma * Pm / Qm.x); // sqrt(gamma*P/rho)
const float am = min(min(um - cm, up - cp), 0.0f); // largest negative wave speed
const float ap = max(max(um + cm, up + cp), 0.0f); // largest positive wave speed
return ((ap * Fm - am * Fp) + ap * am * (Qp - Qm)) / (ap - am);
}
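The central-upwind flux above uses the one-sided wave-speed bounds a^- = min(u_m - c_m, u_p - c_p, 0) and a^+ = max(u_m + c_m, u_p + c_p, 0) and evaluates

\[ F^{\mathrm{CU}} = \frac{a^{+} F(Q^{-}) - a^{-} F(Q^{+}) + a^{+} a^{-}\,(Q^{+} - Q^{-})}{a^{+} - a^{-}}. \]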

View File

@ -22,102 +22,83 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
#include "limiters.h"
__device__ float3 F_func(const float3 Q, const float g) {
__device__ inline float3 F_func(const float3 Q, const float g) {
float3 F;
F.x = Q.y; //hu
F.y = Q.y*Q.y / Q.x + 0.5f*g*Q.x*Q.x; //hu*hu/h + 0.5f*g*h*h;
F.z = Q.y*Q.z / Q.x; //hu*hv/h;
F.x = Q.y; //hu
F.y = Q.y * Q.y / Q.x + 0.5f * g * Q.x * Q.x; //hu*hu/h + 0.5f*g*h*h;
F.z = Q.y * Q.z / Q.x; //hu*hv/h;
return F;
}
/**
* Superbee flux limiter for WAF.
* Related to superbee limiter so that WAF_superbee(r, c) = 1 - (1-|c|)*superbee(r)
* @param r_ the ratio of upwind change (see Toro 2001, p. 203/204)
* @param c_ the courant number for wave k, dt*S_k/dx
*/
__device__ float WAF_superbee(float r_, float c_) {
__device__ inline float WAF_superbee(const float r_, const float c_) {
// r <= 0.0
if (r_ <= 0.0f) {
if (r_ <= 0.0f) {
return 1.0f;
}
// 0.0 <= r <= 1/2
else if (r_ <= 0.5f) {
return 1.0f - 2.0f*(1.0f - fabsf(c_))*r_;
else if (r_ <= 0.5f) {
return 1.0f - 2.0f * (1.0f - fabsf(c_)) * r_;
}
// 1/2 <= r <= 1
else if (r_ <= 1.0f) {
return fabs(c_);
}
// 1 <= r <= 2
else if (r_ <= 2.0f) {
return 1.0f - (1.0f - fabsf(c_))*r_;
else if (r_ <= 2.0f) {
return 1.0f - (1.0f - fabsf(c_)) * r_;
}
// r >= 2
else {
return 2.0f*fabsf(c_) - 1.0f;
return 2.0f * fabsf(c_) - 1.0f;
}
}
__device__ float WAF_albada(float r_, float c_) {
__device__ inline float WAF_albada(const float r_, const float c_) {
if (r_ <= 0.0f) {
return 1.0f;
}
else {
return 1.0f - (1.0f - fabsf(c_)) * r_ * (1.0f + r_) / (1.0f + r_*r_);
} else {
return 1.0f - (1.0f - fabsf(c_)) * r_ * (1.0f + r_) / (1.0f + r_ * r_);
}
}
__device__ float WAF_minbee(float r_, float c_) {
__device__ inline float WAF_minbee(float r_, const float c_) {
r_ = fmaxf(-1.0f, fminf(2.0f, r_));
if (r_ <= 0.0f) {
return 1.0f;
}
if (r_ >= 0.0f && r_ <= 1.0f) {
return 1.0f - (1.0f - fabsf(c_)) * r_;
}
else {
} else {
return fabsf(c_);
}
}
__device__ float WAF_minmod(float r_, float c_) {
__device__ inline float WAF_minmod(const float r_, const float c_) {
return 1.0f - (1.0f - fabsf(c_)) * fmaxf(0.0f, fminf(1.0f, r_));
}
__device__ float limiterToWAFLimiter(float r_, float c_) {
return 1.0f - (1.0f - fabsf(c_))*r_;
__device__ inline float limiterToWAFLimiter(const float r_, const float c_) {
return 1.0f - (1.0f - fabsf(c_)) * r_;
}
// Compute h in the "star region", h^dagger
__device__ __inline__ float computeHStar(float h_l, float h_r, float u_l, float u_r, float c_l, float c_r, float g_) {
//This estimate for the h* gives rise to spurious oscillations.
//return 0.5f * (h_l+h_r) - 0.25f * (u_r-u_l)*(h_l+h_r)/(c_l+c_r);
__device__ __inline__ float computeHStar(float h_l, float h_r, const float u_l, const float u_r, const float c_l,
const float c_r, const float g_) {
// This estimate for the h* gives rise to spurious oscillations.
// return 0.5f * (h_l+h_r) - 0.25f * (u_r-u_l)*(h_l+h_r)/(c_l+c_r);
const float h_tmp = 0.5f * (c_l + c_r) + 0.25f * (u_l - u_r);
return h_tmp*h_tmp / g_;
return h_tmp * h_tmp / g_;
}
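The estimate that is kept above (the arithmetic-mean variant is left commented out because it oscillates) is the two-rarefaction approximation of the star-region depth,

\[ h^{*} = \frac{1}{g}\left( \tfrac{1}{2}(c_l + c_r) + \tfrac{1}{4}(u_l - u_r) \right)^{2}, \qquad c = \sqrt{g h}. \]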
/**
@ -127,408 +108,330 @@ __device__ __inline__ float computeHStar(float h_l, float h_r, float u_l, float
* @param Q_l1 Q_{i}
* @param Q_r1 Q_{i+1}
* @param Q_r2 Q_{i+2}
* @param g_
* @param dx_
* @param dt_
*/
__device__ float3 WAF_1D_flux(const float3 Q_l2, const float3 Q_l1, const float3 Q_r1, const float3 Q_r2, const float g_, const float dx_, const float dt_) {
__device__ inline float3 WAF_1D_flux(const float3 Q_l2, const float3 Q_l1, const float3 Q_r1, const float3 Q_r2,
const float g_, const float dx_, const float dt_) {
const float h_l = Q_l1.x;
const float h_r = Q_r1.x;
const float h_l2 = Q_l2.x;
const float h_r2 = Q_r2.x;
// Calculate velocities
const float u_l = Q_l1.y / h_l;
const float u_r = Q_r1.y / h_r;
const float u_l2 = Q_l2.y / h_l2;
const float u_r2 = Q_r2.y / h_r2;
const float v_l = Q_l1.z / h_l;
const float v_r = Q_r1.z / h_r;
const float v_l2 = Q_l2.z / h_l2;
const float v_r2 = Q_r2.z / h_r2;
// Estimate the potential wave speeds
const float c_l = sqrt(g_*h_l);
const float c_r = sqrt(g_*h_r);
const float c_l2 = sqrt(g_*h_l2);
const float c_r2 = sqrt(g_*h_r2);
const float c_l = sqrt(g_ * h_l);
const float c_r = sqrt(g_ * h_r);
const float c_l2 = sqrt(g_ * h_l2);
const float c_r2 = sqrt(g_ * h_r2);
// Compute h in the "star region", h^dagger
const float h_dag_l = computeHStar(h_l2, h_l, u_l2, u_l, c_l2, c_l, g_);
const float h_dag = computeHStar( h_l, h_r, u_l, u_r, c_l, c_r, g_);
const float h_dag_r = computeHStar( h_r, h_r2, u_r, u_r2, c_r, c_r2, g_);
const float q_l_tmp = sqrt(0.5f * ( (h_dag+h_l)*h_dag ) ) / h_l;
const float q_r_tmp = sqrt(0.5f * ( (h_dag+h_r)*h_dag ) ) / h_r;
const float h_dag_l = computeHStar(h_l2, h_l, u_l2, u_l, c_l2, c_l, g_);
const float h_dag = computeHStar(h_l, h_r, u_l, u_r, c_l, c_r, g_);
const float h_dag_r = computeHStar(h_r, h_r2, u_r, u_r2, c_r, c_r2, g_);
const float q_l_tmp = sqrt(0.5f * ((h_dag + h_l) * h_dag)) / h_l;
const float q_r_tmp = sqrt(0.5f * ((h_dag + h_r) * h_dag)) / h_r;
const float q_l = (h_dag > h_l) ? q_l_tmp : 1.0f;
const float q_r = (h_dag > h_r) ? q_r_tmp : 1.0f;
// Compute wave speed estimates
const float S_l = u_l - c_l*q_l;
const float S_r = u_r + c_r*q_r;
const float S_star = ( S_l*h_r*(u_r - S_r) - S_r*h_l*(u_l - S_l) ) / ( h_r*(u_r - S_r) - h_l*(u_l - S_l) );
const float S_l = u_l - c_l * q_l;
const float S_r = u_r + c_r * q_r;
const float S_star = (S_l * h_r * (u_r - S_r) - S_r * h_l * (u_l - S_l)) / (h_r * (u_r - S_r) - h_l * (u_l - S_l));
const float3 Q_star_l = h_l * (S_l - u_l) / (S_l - S_star) * make_float3(1.0, S_star, v_l);
const float3 Q_star_r = h_r * (S_r - u_r) / (S_r - S_star) * make_float3(1.0, S_star, v_r);
// Estimate the fluxes in the four regions
const float3 F_1 = F_func(Q_l1, g_);
const float3 F_4 = F_func(Q_r1, g_);
const float3 F_2 = F_1 + S_l*(Q_star_l - Q_l1);
const float3 F_3 = F_4 + S_r*(Q_star_r - Q_r1);
//const float3 F_2 = F_func(Q_star_l, g_);
//const float3 F_3 = F_func(Q_star_r, g_);
const float3 F_2 = F_1 + S_l * (Q_star_l - Q_l1);
const float3 F_3 = F_4 + S_r * (Q_star_r - Q_r1);
// const float3 F_2 = F_func(Q_star_l, g_);
// const float3 F_3 = F_func(Q_star_r, g_);
// Compute the courant numbers for the waves
const float c_1 = S_l * dt_ / dx_;
const float c_2 = S_star * dt_ / dx_;
const float c_3 = S_r * dt_ / dx_;
// Compute the "upwind change" vectors for the i-3/2 and i+3/2 interfaces
const float eps = 1.0e-6f;
const float r_1 = desingularize( (c_1 > 0.0f) ? (h_dag_l - h_l2) : (h_dag_r - h_r), eps) / desingularize((h_dag - h_l), eps);
const float r_2 = desingularize( (c_2 > 0.0f) ? (v_l - v_l2) : (v_r2 - v_r), eps ) / desingularize((v_r - v_l), eps);
const float r_3 = desingularize( (c_3 > 0.0f) ? (h_l - h_dag_l) : (h_r2 - h_dag_r), eps ) / desingularize((h_r - h_dag), eps);
constexpr float eps = 1.0e-6f;
const float r_1 = desingularize((c_1 > 0.0f) ? (h_dag_l - h_l2) : (h_dag_r - h_r), eps) / desingularize(
(h_dag - h_l), eps);
const float r_2 = desingularize((c_2 > 0.0f) ? (v_l - v_l2) : (v_r2 - v_r), eps) / desingularize((v_r - v_l), eps);
const float r_3 = desingularize((c_3 > 0.0f) ? (h_l - h_dag_l) : (h_r2 - h_dag_r), eps) / desingularize(
(h_r - h_dag), eps);
// Compute the limiter
// We use h for the nonlinear waves, and v for the middle shear wave
// We use h for the nonlinear waves, and v for the middle shear wave
const float A_1 = copysign(1.0f, c_1) * limiterToWAFLimiter(generalized_minmod(r_1, 1.9f), c_1);
const float A_2 = copysign(1.0f, c_2) * limiterToWAFLimiter(generalized_minmod(r_2, 1.9f), c_2);
const float A_2 = copysign(1.0f, c_2) * limiterToWAFLimiter(generalized_minmod(r_2, 1.9f), c_2);
const float A_3 = copysign(1.0f, c_3) * limiterToWAFLimiter(generalized_minmod(r_3, 1.9f), c_3);
//Average the fluxes
const float3 flux = 0.5f*( F_1 + F_4 )
- 0.5f*( A_1 * (F_2 - F_1)
+ A_2 * (F_3 - F_2)
+ A_3 * (F_4 - F_3) );
// Average the fluxes
const float3 flux = 0.5f * (F_1 + F_4)
- 0.5f * (A_1 * (F_2 - F_1)
+ A_2 * (F_3 - F_2)
+ A_3 * (F_4 - F_3));
return flux;
}
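With the Courant numbers c_k and the limiter weights A_k = sign(c_k) * limiterToWAFLimiter(...) computed above, the returned flux is the weighted average of the four constant states of the HLLC wave fan,

\[ F^{\mathrm{WAF}} = \tfrac{1}{2}\,(F_1 + F_4) \;-\; \tfrac{1}{2}\sum_{k=1}^{3} A_k\,(F_{k+1} - F_k). \]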
/**
* Central upwind flux function
*/
__device__ float3 CentralUpwindFlux(const float3 Qm, float3 Qp, const float g) {
__device__ inline float3 CentralUpwindFlux(const float3 Qm, const float3 Qp, const float g) {
const float3 Fp = F_func(Qp, g);
const float up = Qp.y / Qp.x; // hu / h
const float cp = sqrt(g*Qp.x); // sqrt(g*h)
const float up = Qp.y / Qp.x; // hu / h
const float cp = sqrt(g * Qp.x); // sqrt(g*h)
const float3 Fm = F_func(Qm, g);
const float um = Qm.y / Qm.x; // hu / h
const float cm = sqrt(g*Qm.x); // sqrt(g*h)
const float am = min(min(um-cm, up-cp), 0.0f); // largest negative wave speed
const float ap = max(max(um+cm, up+cp), 0.0f); // largest positive wave speed
return ((ap*Fm - am*Fp) + ap*am*(Qp-Qm))/(ap-am);
const float um = Qm.y / Qm.x; // hu / h
const float cm = sqrt(g * Qm.x); // sqrt(g*h)
const float am = min(min(um - cm, up - cp), 0.0f); // largest negative wave speed
const float ap = max(max(um + cm, up + cp), 0.0f); // largest positive wave speed
return ((ap * Fm - am * Fp) + ap * am * (Qp - Qm)) / (ap - am);
}
/**
* Godunov's centered scheme (Toro 2001, p 165)
*/
__device__ float3 GodC_1D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_, const float dt_) {
__device__ inline float3 GodC_1D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_,
const float dt_) {
const float3 F_l = F_func(Q_l, g_);
const float3 F_r = F_func(Q_r, g_);
const float3 Q_godc = 0.5f*(Q_l + Q_r) + (dt_/dx_)*(F_l - F_r);
const float3 Q_godc = 0.5f * (Q_l + Q_r) + (dt_ / dx_) * (F_l - F_r);
return F_func(Q_godc, g_);
}
/**
* Harten-Lax-van Leer (HLL) approximate Riemann solver (Toro 2001, p 180)
*/
__device__ float3 HLL_flux(const float3 Q_l, const float3 Q_r, const float g_) {
__device__ inline float3 HLL_flux(const float3 Q_l, const float3 Q_r, const float g_) {
const float h_l = Q_l.x;
const float h_r = Q_r.x;
// Calculate velocities
const float u_l = Q_l.y / h_l;
const float u_r = Q_r.y / h_r;
// Estimate the potential wave speeds
const float c_l = sqrt(g_*h_l);
const float c_r = sqrt(g_*h_r);
const float c_l = sqrt(g_ * h_l);
const float c_r = sqrt(g_ * h_r);
// Compute h in the "star region", h^dagger
const float h_dag = 0.5f * (h_l+h_r) - 0.25f * (u_r-u_l)*(h_l+h_r)/(c_l+c_r);
const float q_l_tmp = sqrt(0.5f * ( (h_dag+h_l)*h_dag / (h_l*h_l) ) );
const float q_r_tmp = sqrt(0.5f * ( (h_dag+h_r)*h_dag / (h_r*h_r) ) );
const float h_dag = 0.5f * (h_l + h_r) - 0.25f * (u_r - u_l) * (h_l + h_r) / (c_l + c_r);
const float q_l_tmp = sqrt(0.5f * ((h_dag + h_l) * h_dag / (h_l * h_l)));
const float q_r_tmp = sqrt(0.5f * ((h_dag + h_r) * h_dag / (h_r * h_r)));
const float q_l = (h_dag > h_l) ? q_l_tmp : 1.0f;
const float q_r = (h_dag > h_r) ? q_r_tmp : 1.0f;
// Compute wave speed estimates
const float S_l = u_l - c_l*q_l;
const float S_r = u_r + c_r*q_r;
//Upwind selection
const float S_l = u_l - c_l * q_l;
const float S_r = u_r + c_r * q_r;
// Upwind selection
if (S_l >= 0.0f) {
return F_func(Q_l, g_);
}
else if (S_r <= 0.0f) {
} else if (S_r <= 0.0f) {
return F_func(Q_r, g_);
}
//Or estimate flux in the star region
// Or estimate flux in the star region
else {
const float3 F_l = F_func(Q_l, g_);
const float3 F_r = F_func(Q_r, g_);
const float3 flux = (S_r*F_l - S_l*F_r + S_r*S_l*(Q_r - Q_l)) / (S_r-S_l);
const float3 flux = (S_r * F_l - S_l * F_r + S_r * S_l * (Q_r - Q_l)) / (S_r - S_l);
return flux;
}
}
/**
* Harten-Lax-van Leer with contact discontinuity (Toro 2001, p 181)
*/
__device__ float3 HLLC_flux(const float3 Q_l, const float3 Q_r, const float g_) {
__device__ inline float3 HLLC_flux(const float3 Q_l, const float3 Q_r, const float g_) {
const float h_l = Q_l.x;
const float h_r = Q_r.x;
// Calculate velocities
const float u_l = Q_l.y / h_l;
const float u_r = Q_r.y / h_r;
// Estimate the potential wave speeds
const float c_l = sqrt(g_*h_l);
const float c_r = sqrt(g_*h_r);
const float c_l = sqrt(g_ * h_l);
const float c_r = sqrt(g_ * h_r);
// Compute h in the "star region", h^dagger
const float h_dag = 0.5f * (h_l+h_r) - 0.25f * (u_r-u_l)*(h_l+h_r)/(c_l+c_r);
const float q_l_tmp = sqrt(0.5f * ( (h_dag+h_l)*h_dag / (h_l*h_l) ) );
const float q_r_tmp = sqrt(0.5f * ( (h_dag+h_r)*h_dag / (h_r*h_r) ) );
const float h_dag = 0.5f * (h_l + h_r) - 0.25f * (u_r - u_l) * (h_l + h_r) / (c_l + c_r);
const float q_l_tmp = sqrt(0.5f * ((h_dag + h_l) * h_dag / (h_l * h_l)));
const float q_r_tmp = sqrt(0.5f * ((h_dag + h_r) * h_dag / (h_r * h_r)));
const float q_l = (h_dag > h_l) ? q_l_tmp : 1.0f;
const float q_r = (h_dag > h_r) ? q_r_tmp : 1.0f;
// Compute wave speed estimates
const float S_l = u_l - c_l*q_l;
const float S_r = u_r + c_r*q_r;
const float S_star = ( S_l*h_r*(u_r - S_r) - S_r*h_l*(u_l - S_l) ) / ( h_r*(u_r - S_r) - h_l*(u_l - S_l) );
const float S_l = u_l - c_l * q_l;
const float S_r = u_r + c_r * q_r;
const float S_star = (S_l * h_r * (u_r - S_r) - S_r * h_l * (u_l - S_l)) / (h_r * (u_r - S_r) - h_l * (u_l - S_l));
const float3 F_l = F_func(Q_l, g_);
const float3 F_r = F_func(Q_r, g_);
//Upwind selection
// Upwind selection
if (S_l >= 0.0f) {
return F_l;
}
else if (S_r <= 0.0f) {
} else if (S_r <= 0.0f) {
return F_r;
}
//Or estimate flux in the "left star" region
else if (S_l <= 0.0f && 0.0f <=S_star) {
// Or estimate flux in the "left star" region
else if (S_l <= 0.0f && 0.0f <= S_star) {
const float v_l = Q_l.z / h_l;
const float3 Q_star_l = h_l * (S_l - u_l) / (S_l - S_star) * make_float3(1, S_star, v_l);
const float3 flux = F_l + S_l*(Q_star_l - Q_l);
const float3 flux = F_l + S_l * (Q_star_l - Q_l);
return flux;
}
//Or estimate flux in the "righ star" region
else if (S_star <= 0.0f && 0.0f <=S_r) {
// Or estimate flux in the "righ star" region
else if (S_star <= 0.0f && 0.0f <= S_r) {
const float v_r = Q_r.z / h_r;
const float3 Q_star_r = h_r * (S_r - u_r) / (S_r - S_star) * make_float3(1, S_star, v_r);
const float3 flux = F_r + S_r*(Q_star_r - Q_r);
const float3 flux = F_r + S_r * (Q_star_r - Q_r);
return flux;
}
else {
} else {
return make_float3(-99999.9f, -99999.9f, -99999.9f); //Something wrong here
}
}
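The star-region quantities used above are the HLLC wave speed and states for the shallow-water system,

\[ S^{*} = \frac{S_l h_r (u_r - S_r) - S_r h_l (u_l - S_l)}{h_r (u_r - S_r) - h_l (u_l - S_l)}, \qquad Q^{*}_K = h_K\,\frac{S_K - u_K}{S_K - S^{*}} \begin{pmatrix} 1 \\ S^{*} \\ v_K \end{pmatrix}, \quad K \in \{l, r\}, \]

and the flux in each star region is F_K + S_K (Q^{*}_K - Q_K), exactly as returned in the two middle branches.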
/**
* Lax-Friedrichs flux (Toro 2001, p 163)
*/
__device__ float3 LxF_1D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_, const float dt_) {
__device__ inline float3 LxF_1D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_,
const float dt_) {
const float3 F_l = F_func(Q_l, g_);
const float3 F_r = F_func(Q_r, g_);
return 0.5f*(F_l + F_r) + (dx_/(2.0f*dt_))*(Q_l - Q_r);
return 0.5f * (F_l + F_r) + (dx_ / (2.0f * dt_)) * (Q_l - Q_r);
}
/**
* Lax-Friedrichs extended to 2D
*/
__device__ float3 LxF_2D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_, const float dt_) {
__device__ inline float3 LxF_2D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_,
const float dt_) {
const float3 F_l = F_func(Q_l, g_);
const float3 F_r = F_func(Q_r, g_);
//Note numerical diffusion for 2D here (0.25)
return 0.5f*(F_l + F_r) + (dx_/(4.0f*dt_))*(Q_l - Q_r);
// Note numerical diffusion for 2D here (0.25)
return 0.5f * (F_l + F_r) + (dx_ / (4.0f * dt_)) * (Q_l - Q_r);
}
/**
* Richtmyer / Two-step Lax-Wendroff flux (Toro 2001, p 164)
*/
__device__ float3 LxW2_1D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_, const float dt_) {
__device__ inline float3 LxW2_1D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_,
const float dt_) {
const float3 F_l = F_func(Q_l, g_);
const float3 F_r = F_func(Q_r, g_);
const float3 Q_lw2 = 0.5f*(Q_l + Q_r) + (dt_/(2.0f*dx_))*(F_l - F_r);
const float3 Q_lw2 = 0.5f * (Q_l + Q_r) + (dt_ / (2.0f * dx_)) * (F_l - F_r);
return F_func(Q_lw2, g_);
}
/**
* First-Order Centered (FORCE) flux (Toro 2001, p. 163)
*/
__device__ float3 FORCE_1D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_, const float dt_) {
__device__ inline float3 FORCE_1D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_,
const float dt_) {
const float3 F_lf = LxF_1D_flux(Q_l, Q_r, g_, dx_, dt_);
const float3 F_lw2 = LxW2_1D_flux(Q_l, Q_r, g_, dx_, dt_);
return 0.5f*(F_lf + F_lw2);
return 0.5f * (F_lf + F_lw2);
}
// TODO give better names for `Q_w` and `Q_h` in the template
// as it probably does not reflect well on the name
// as the current names do not clearly describe what they represent
template<int Q_w, int Q_h, int gc_x, int gc_y, int vars>
__device__ void writeCfl(float Q[vars][Q_h+2*gc_y][Q_w+2*gc_x],
float shmem[Q_h+2*gc_y][Q_w+2*gc_x],
const int nx_, const int ny_,
const float dx_, const float dy_, const float g_,
float* output_) {
//Index of thread within block
__device__ void writeCfl(float Q[vars][Q_h + 2 * gc_y][Q_w + 2 * gc_x],
float shmem[Q_h + 2 * gc_y][Q_w + 2 * gc_x],
const int nx_, const int ny_,
const float dx_, const float dy_, const float g_,
float *output_) {
// Index of thread within block
const int tx = threadIdx.x + gc_x;
const int ty = threadIdx.y + gc_y;
//Index of cell within domain
const int ti = blockDim.x*blockIdx.x + tx;
const int tj = blockDim.y*blockIdx.y + ty;
//Only internal cells
if (ti < nx_+gc_x && tj < ny_+gc_y) {
// Index of cell within domain
const int ti = blockDim.x * blockIdx.x + tx;
const int tj = blockDim.y * blockIdx.y + ty;
// Only internal cells
if (ti < nx_ + gc_x && tj < ny_ + gc_y) {
const float h = Q[0][ty][tx];
const float u = Q[1][ty][tx] / h;
const float v = Q[2][ty][tx] / h;
const float max_u = dx_ / (fabsf(u) + sqrtf(g_*h));
const float max_v = dy_ / (fabsf(v) + sqrtf(g_*h));
const float u = Q[1][ty][tx] / h;
const float v = Q[2][ty][tx] / h;
const float max_u = dx_ / (fabsf(u) + sqrtf(g_ * h));
const float max_v = dy_ / (fabsf(v) + sqrtf(g_ * h));
shmem[ty][tx] = fminf(max_u, max_v);
}
__syncthreads();
//One row of threads loop over all rows
if (ti < nx_+gc_x && tj < ny_+gc_y) {
// One row of threads loops over all rows
if (ti < nx_ + gc_x && tj < ny_ + gc_y) {
if (ty == gc_y) {
float min_val = shmem[ty][tx];
const int max_y = min(Q_h, ny_+gc_y - tj);
for (int j=gc_y; j<max_y+gc_y; j++) {
const int max_y = min(Q_h, ny_ + gc_y - tj);
for (int j = gc_y; j < max_y + gc_y; ++j) {
min_val = fminf(min_val, shmem[j][tx]);
}
shmem[ty][tx] = min_val;
}
}
__syncthreads();
//One thread loops over first row to find global max
// One thread loops over the first row to find the minimum for this block
if (tx == gc_x && ty == gc_y) {
float min_val = shmem[ty][tx];
const int max_x = min(Q_w, nx_+gc_x - ti);
for (int i=gc_x; i<max_x+gc_x; ++i) {
const int max_x = min(Q_w, nx_ + gc_x - ti);
for (int i = gc_x; i < max_x + gc_x; ++i) {
min_val = fminf(min_val, shmem[ty][i]);
}
const int idx = gridDim.x*blockIdx.y + blockIdx.x;
const unsigned int idx = gridDim.x * blockIdx.y + blockIdx.x;
output_[idx] = min_val;
}
}

View File

@ -23,85 +23,75 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#pragma once
#include <cfloat>
/**
* Float3 operators
*/
inline __device__ float3 operator*(const float a, const float3 b) {
return make_float3(a*b.x, a*b.y, a*b.z);
return make_float3(a * b.x, a * b.y, a * b.z);
}
inline __device__ float3 operator/(const float3 a, const float b) {
return make_float3(a.x/b, a.y/b, a.z/b);
return make_float3(a.x / b, a.y / b, a.z / b);
}
inline __device__ float3 operator-(const float3 a, const float3 b) {
return make_float3(a.x-b.x, a.y-b.y, a.z-b.z);
return make_float3(a.x - b.x, a.y - b.y, a.z - b.z);
}
inline __device__ float3 operator+(const float3 a, const float3 b) {
return make_float3(a.x+b.x, a.y+b.y, a.z+b.z);
return make_float3(a.x + b.x, a.y + b.y, a.z + b.z);
}
/**
* Float4 operators
* Float4 operators
*/
inline __device__ float4 operator*(const float a, const float4 b) {
return make_float4(a*b.x, a*b.y, a*b.z, a*b.w);
return make_float4(a * b.x, a * b.y, a * b.z, a * b.w);
}
inline __device__ float4 operator/(const float4 a, const float b) {
return make_float4(a.x/b, a.y/b, a.z/b, a.w/b);
return make_float4(a.x / b, a.y / b, a.z / b, a.w / b);
}
inline __device__ float4 operator-(const float4 a, const float4 b) {
return make_float4(a.x-b.x, a.y-b.y, a.z-b.z, a.w-b.w);
return make_float4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w);
}
inline __device__ float4 operator+(const float4 a, const float4 b) {
return make_float4(a.x+b.x, a.y+b.y, a.z+b.z, a.w+b.w);
return make_float4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);
}
inline __device__ __host__ float clamp(const float f, const float a, const float b) {
return fmaxf(a, fminf(f, b));
}
inline __device__ __host__ int clamp(const int f, const int a, const int b) {
return (f < b) ? ( (f > a) ? f : a) : b;
return (f < b) ? ((f > a) ? f : a) : b;
}
__device__ float desingularize(float x_, float eps_) {
return copysign(1.0f, x_)*fmaxf(fabsf(x_), fminf(x_*x_/(2.0f*eps_)+0.5f*eps_, eps_));
__device__ inline float desingularize(const float x_, const float eps_) {
return copysign(1.0f, x_) * fmaxf(fabsf(x_), fminf(x_ * x_ / (2.0f * eps_) + 0.5f * eps_, eps_));
}
/**
* Returns the step stored in the leftmost 16 bits
* Returns the step stored in the leftmost 16 bits
* of the 32 bit step-order integer
*/
inline __device__ int getStep(int step_order_) {
inline __device__ int getStep(const int step_order_) {
return step_order_ >> 16;
}
/**
* Returns the order stored in the rightmost 16 bits
* Returns the order stored in the rightmost 16 bits
* of the 32 bit step-order integer
*/
inline __device__ int getOrder(int step_order_) {
inline __device__ int getOrder(const int step_order_) {
return step_order_ & 0x0000FFFF;
}
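The two getters above imply that the step and the order are packed into one 32-bit integer with the step in the upper 16 bits and the order in the lower 16 bits. A minimal host-side sketch of that packing could look as follows; the name packStepOrder is an assumption and not part of this codebase.

inline int packStepOrder(const int step_, const int order_) {
    // Step in the leftmost 16 bits, order in the rightmost 16 bits,
    // matching getStep() and getOrder() above.
    return (step_ << 16) | (order_ & 0x0000FFFF);
}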
@ -113,45 +103,45 @@ enum BoundaryCondition {
Reflective = 3
};
inline __device__ BoundaryCondition getBCNorth(int bc_) {
inline __device__ BoundaryCondition getBCNorth(const int bc_) {
return static_cast<BoundaryCondition>((bc_ >> 24) & 0x0000000F);
}
inline __device__ BoundaryCondition getBCSouth(int bc_) {
inline __device__ BoundaryCondition getBCSouth(const int bc_) {
return static_cast<BoundaryCondition>((bc_ >> 16) & 0x0000000F);
}
inline __device__ BoundaryCondition getBCEast(int bc_) {
inline __device__ BoundaryCondition getBCEast(const int bc_) {
return static_cast<BoundaryCondition>((bc_ >> 8) & 0x0000000F);
}
inline __device__ BoundaryCondition getBCWest(int bc_) {
inline __device__ BoundaryCondition getBCWest(const int bc_) {
return static_cast<BoundaryCondition>((bc_ >> 0) & 0x0000000F);
}
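Read together, the four getters above assume the boundary conditions are packed four bits per side into a single integer (north in bits 24-27, south in 16-19, east in 8-11, west in 0-3). A hypothetical host-side helper mirroring that layout might be:

inline int packBoundaryConditions(const int north_, const int south_,
                                  const int east_, const int west_) {
    // Four bits per side, placed where getBCNorth(), getBCSouth(),
    // getBCEast() and getBCWest() read them back out.
    return ((north_ & 0xF) << 24) | ((south_ & 0xF) << 16)
         | ((east_ & 0xF) << 8) | (west_ & 0xF);
}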
// West boundary
template<int w, int h, int gc_x, int gc_y, int sign>
__device__ void bcWestReflective(float Q[h+2*gc_y][w+2*gc_x],
const int nx_, const int ny_) {
for (int j=threadIdx.y; j<h+2*gc_y; j+=h) {
__device__ void bcWestReflective(float Q[h + 2 * gc_y][w + 2 * gc_x],
const int nx_, const int ny_) {
for (unsigned int j = threadIdx.y; j < h + 2 * gc_y; j += h) {
const int i = threadIdx.x + gc_x;
const int ti = blockDim.x*blockIdx.x + i;
const int ti = blockDim.x * blockIdx.x + i;
if (gc_x >= 1 && ti == gc_x) {
Q[j][i-1] = sign*Q[j][i];
Q[j][i - 1] = sign * Q[j][i];
}
if (gc_x >= 2 && ti == gc_x + 1) {
Q[j][i-3] = sign*Q[j][i];
Q[j][i - 3] = sign * Q[j][i];
}
if (gc_x >= 3 && ti == gc_x + 2) {
Q[j][i-5] = sign*Q[j][i];
Q[j][i - 5] = sign * Q[j][i];
}
if (gc_x >= 4 && ti == gc_x + 3) {
Q[j][i-7] = sign*Q[j][i];
Q[j][i - 7] = sign * Q[j][i];
}
if (gc_x >= 5 && ti == gc_x + 4) {
Q[j][i-9] = sign*Q[j][i];
Q[j][i - 9] = sign * Q[j][i];
}
}
}
@ -159,108 +149,104 @@ __device__ void bcWestReflective(float Q[h+2*gc_y][w+2*gc_x],
// East boundary
template<int w, int h, int gc_x, int gc_y, int sign>
__device__ void bcEastReflective(float Q[h+2*gc_y][w+2*gc_x],
const int nx_, const int ny_) {
for (int j=threadIdx.y; j<h+2*gc_y; j+=h) {
__device__ void bcEastReflective(float Q[h + 2 * gc_y][w + 2 * gc_x],
const int nx_, const int ny_) {
for (unsigned int j = threadIdx.y; j < h + 2 * gc_y; j += h) {
const int i = threadIdx.x + gc_x;
const int ti = blockDim.x*blockIdx.x + i;
const int ti = blockDim.x * blockIdx.x + i;
if (gc_x >= 1 && ti == nx_ + gc_x - 1) {
Q[j][i+1] = sign*Q[j][i];
Q[j][i + 1] = sign * Q[j][i];
}
if (gc_x >= 2 && ti == nx_ + gc_x - 2) {
Q[j][i+3] = sign*Q[j][i];
Q[j][i + 3] = sign * Q[j][i];
}
if (gc_x >= 3 && ti == nx_ + gc_x - 3) {
Q[j][i+5] = sign*Q[j][i];
Q[j][i + 5] = sign * Q[j][i];
}
if (gc_x >= 4 && ti == nx_ + gc_x - 4) {
Q[j][i+7] = sign*Q[j][i];
Q[j][i + 7] = sign * Q[j][i];
}
if (gc_x >= 5 && ti == nx_ + gc_x - 5) {
Q[j][i+9] = sign*Q[j][i];
Q[j][i + 9] = sign * Q[j][i];
}
}
}
// South boundary
template<int w, int h, int gc_x, int gc_y, int sign>
__device__ void bcSouthReflective(float Q[h+2*gc_y][w+2*gc_x],
const int nx_, const int ny_) {
for (int i=threadIdx.x; i<w+2*gc_x; i+=w) {
__device__ void bcSouthReflective(float Q[h + 2 * gc_y][w + 2 * gc_x],
const int nx_, const int ny_) {
for (unsigned int i = threadIdx.x; i < w + 2 * gc_x; i += w) {
const int j = threadIdx.y + gc_y;
const int tj = blockDim.y*blockIdx.y + j;
const int tj = blockDim.y * blockIdx.y + j;
if (gc_y >= 1 && tj == gc_y) {
Q[j-1][i] = sign*Q[j][i];
Q[j - 1][i] = sign * Q[j][i];
}
if (gc_y >= 2 && tj == gc_y + 1) {
Q[j-3][i] = sign*Q[j][i];
Q[j - 3][i] = sign * Q[j][i];
}
if (gc_y >= 3 && tj == gc_y + 2) {
Q[j-5][i] = sign*Q[j][i];
Q[j - 5][i] = sign * Q[j][i];
}
if (gc_y >= 4 && tj == gc_y + 3) {
Q[j-7][i] = sign*Q[j][i];
Q[j - 7][i] = sign * Q[j][i];
}
if (gc_y >= 5 && tj == gc_y + 4) {
Q[j-9][i] = sign*Q[j][i];
Q[j - 9][i] = sign * Q[j][i];
}
}
}
// North boundary
template<int w, int h, int gc_x, int gc_y, int sign>
__device__ void bcNorthReflective(float Q[h+2*gc_y][w+2*gc_x], const int nx_, const int ny_) {
for (int i=threadIdx.x; i<w+2*gc_x; i+=w) {
__device__ void bcNorthReflective(float Q[h + 2 * gc_y][w + 2 * gc_x], const int nx_, const int ny_) {
for (unsigned int i = threadIdx.x; i < w + 2 * gc_x; i += w) {
const int j = threadIdx.y + gc_y;
const int tj = blockDim.y*blockIdx.y + j;
const int tj = blockDim.y * blockIdx.y + j;
if (gc_y >= 1 && tj == ny_ + gc_y - 1) {
Q[j+1][i] = sign*Q[j][i];
Q[j + 1][i] = sign * Q[j][i];
}
if (gc_y >= 2 && tj == ny_ + gc_y - 2) {
Q[j+3][i] = sign*Q[j][i];
Q[j + 3][i] = sign * Q[j][i];
}
if (gc_y >= 3 && tj == ny_ + gc_y - 3) {
Q[j+5][i] = sign*Q[j][i];
Q[j + 5][i] = sign * Q[j][i];
}
if (gc_y >= 4 && tj == ny_ + gc_y - 4) {
Q[j+7][i] = sign*Q[j][i];
Q[j + 7][i] = sign * Q[j][i];
}
if (gc_y >= 5 && tj == ny_ + gc_y - 5) {
Q[j+9][i] = sign*Q[j][i];
Q[j + 9][i] = sign * Q[j][i];
}
}
}
/**
* Alter the index l so that it gives periodic boundary conditions when reading
*/
template<int gc_x>
inline __device__ int handlePeriodicBoundaryX(int k, int nx_, int boundary_conditions_) {
const int gc_pad = gc_x;
//West boundary: add an offset to read from east of domain
// West boundary: add an offset to read from east of domain
if (gc_x > 0) {
if ((k < gc_pad)
&& getBCWest(boundary_conditions_) == Periodic) {
k += (nx_+2*gc_x - 2*gc_pad);
if ((k < gc_pad)
&& getBCWest(boundary_conditions_) == Periodic) {
k += (nx_ + 2 * gc_x - 2 * gc_pad);
}
//East boundary: subtract an offset to read from west of domain
else if ((k >= nx_+2*gc_x-gc_pad)
&& getBCEast(boundary_conditions_) == Periodic) {
k -= (nx_+2*gc_x - 2*gc_pad);
// East boundary: subtract an offset to read from west of domain
else if ((k >= nx_ + 2 * gc_x - gc_pad)
&& getBCEast(boundary_conditions_) == Periodic) {
k -= (nx_ + 2 * gc_x - 2 * gc_pad);
}
}
return k;
}
@ -270,32 +256,31 @@ inline __device__ int handlePeriodicBoundaryX(int k, int nx_, int boundary_condi
template<int gc_y>
inline __device__ int handlePeriodicBoundaryY(int l, int ny_, int boundary_conditions_) {
const int gc_pad = gc_y;
//South boundary: add an offset to read from north of domain
// South boundary: add an offset to read from north of domain
if (gc_y > 0) {
if ((l < gc_pad)
&& getBCSouth(boundary_conditions_) == Periodic) {
l += (ny_+2*gc_y - 2*gc_pad);
if ((l < gc_pad)
&& getBCSouth(boundary_conditions_) == Periodic) {
l += (ny_ + 2 * gc_y - 2 * gc_pad);
}
//North boundary: subtract an offset to read from south of domain
else if ((l >= ny_+2*gc_y-gc_pad)
&& getBCNorth(boundary_conditions_) == Periodic) {
l -= (ny_+2*gc_y - 2*gc_pad);
// North boundary: subtract an offset to read from south of domain
else if ((l >= ny_ + 2 * gc_y - gc_pad)
&& getBCNorth(boundary_conditions_) == Periodic) {
l -= (ny_ + 2 * gc_y - 2 * gc_pad);
}
}
return l;
}
template<int w, int h, int gc_x, int gc_y, int sign_x, int sign_y>
inline __device__
inline __device__
void handleReflectiveBoundary(
float Q[h+2*gc_y][w+2*gc_x],
const int nx_, const int ny_,
const int boundary_conditions_) {
//Handle reflective boundary conditions
float Q[h + 2 * gc_y][w + 2 * gc_x],
const int nx_, const int ny_,
const int boundary_conditions_) {
// Handle reflective boundary conditions
if (getBCNorth(boundary_conditions_) == Reflective) {
bcNorthReflective<w, h, gc_x, gc_y, sign_y>(Q, nx_, ny_);
__syncthreads();
@ -318,73 +303,69 @@ void handleReflectiveBoundary(
* Reads a block of data with ghost cells
*/
template<int w, int h, int gc_x, int gc_y, int sign_x, int sign_y>
inline __device__ void readBlock(float* ptr_, int pitch_,
float Q[h+2*gc_y][w+2*gc_x],
const int nx_, const int ny_,
const int boundary_conditions_,
int x0, int y0,
int x1, int y1) {
//Index of block within domain
const int bx = blockDim.x * blockIdx.x;
const int by = blockDim.y * blockIdx.y;
inline __device__ void readBlock(float *ptr_, int pitch_,
float Q[h + 2 * gc_y][w + 2 * gc_x],
const int nx_, const int ny_,
const int boundary_conditions_,
int x0, int y0,
int x1, int y1) {
// Index of block within domain
const unsigned int bx = blockDim.x * blockIdx.x;
const unsigned int by = blockDim.y * blockIdx.y;
//Read into shared memory
//Loop over all variables
for (int j=threadIdx.y; j<h+2*gc_y; j+=h) {
//Handle periodic boundary conditions here
// Read into shared memory
// Loop over all variables
for (unsigned int j = threadIdx.y; j < h + 2 * gc_y; j += h) {
// Handle periodic boundary conditions here
int l = handlePeriodicBoundaryY<gc_y>(by + j + y0, ny_, boundary_conditions_);
l = min(l, min(ny_+2*gc_y-1, y1+2*gc_y-1));
float* row = (float*) ((char*) ptr_ + pitch_*l);
for (int i=threadIdx.x; i<w+2*gc_x; i+=w) {
//Handle periodic boundary conditions here
l = min(l, min(ny_ + 2 * gc_y - 1, y1 + 2 * gc_y - 1));
auto row = reinterpret_cast<float *>(reinterpret_cast<char *>(ptr_) + pitch_ * l);
for (unsigned int i = threadIdx.x; i < w + 2 * gc_x; i += w) {
// Handle periodic boundary conditions here
int k = handlePeriodicBoundaryX<gc_x>(bx + i + x0, nx_, boundary_conditions_);
k = min(k, min(nx_+2*gc_x-1, x1+2*gc_x-1));
//Read from global memory
k = min(k, min(nx_ + 2 * gc_x - 1, x1 + 2 * gc_x - 1));
// Read from global memory
Q[j][i] = row[k];
}
}
__syncthreads();
handleReflectiveBoundary<w, h, gc_x, gc_y, sign_x, sign_y>(Q, nx_, ny_, boundary_conditions_);
}
/**
* Writes a block of data to global memory for the shallow water equations.
*/
template<int w, int h, int gc_x, int gc_y>
inline __device__ void writeBlock(float* ptr_, int pitch_,
float shmem[h+2*gc_y][w+2*gc_x],
const int nx_, const int ny_,
int rk_step_, int rk_order_,
int x0, int y0,
int x1, int y1) {
//Index of cell within domain
const int ti = blockDim.x*blockIdx.x + threadIdx.x + gc_x + x0;
const int tj = blockDim.y*blockIdx.y + threadIdx.y + gc_y + y0;
inline __device__ void writeBlock(float *ptr_, const int pitch_,
float shmem[h + 2 * gc_y][w + 2 * gc_x],
const int nx_, const int ny_,
const int rk_step_, const int rk_order_,
const int x0, const int y0,
const int x1, const int y1) {
// Index of cell within domain
const int ti = blockDim.x * blockIdx.x + threadIdx.x + gc_x + x0;
const int tj = blockDim.y * blockIdx.y + threadIdx.y + gc_y + y0;
//In case we are writing only to a subarea given by (x0, y0) x (x1, y1)
const int max_ti = min(nx_+gc_x, x1+gc_x);
const int max_tj = min(ny_+gc_y, y1+gc_y);
//Only write internal cells
if ((x0+gc_x <= ti) && (ti < max_ti) && (y0+gc_y <= tj) && (tj < max_tj)) {
//Index of thread within block
// In case we are writing only to a subarea given by (x0, y0) x (x1, y1)
const int max_ti = min(nx_ + gc_x, x1 + gc_x);
const int max_tj = min(ny_ + gc_y, y1 + gc_y);
// Only write internal cells
if ((x0 + gc_x <= ti) && (ti < max_ti) && (y0 + gc_y <= tj) && (tj < max_tj)) {
// Index of thread within block
const int tx = threadIdx.x + gc_x;
const int ty = threadIdx.y + gc_y;
float* const row = (float*) ((char*) ptr_ + pitch_*tj);
//Handle runge-kutta timestepping here
const auto row = reinterpret_cast<float *>(reinterpret_cast<char *>(ptr_) + pitch_ * tj);
// Handle runge-kutta timestepping here
row[ti] = shmem[ty][tx];
/**
* SSPRK1 (forward Euler)
* u^1 = u^n + dt*f(u^n)
@ -400,9 +381,8 @@ inline __device__ void writeBlock(float* ptr_, int pitch_,
else if (rk_order_ == 2) {
if (rk_step_ == 0) {
row[ti] = shmem[ty][tx];
}
else if (rk_step_ == 1) {
row[ti] = 0.5f*row[ti] + 0.5f*shmem[ty][tx];
} else if (rk_step_ == 1) {
row[ti] = 0.5f * row[ti] + 0.5f * shmem[ty][tx];
}
}
/**
@ -415,143 +395,112 @@ inline __device__ void writeBlock(float* ptr_, int pitch_,
else if (rk_order_ == 3) {
if (rk_step_ == 0) {
row[ti] = shmem[ty][tx];
}
else if (rk_step_ == 1) {
row[ti] = 0.75f*row[ti] + 0.25f*shmem[ty][tx];
}
else if (rk_step_ == 2) {
const float t = 1.0f / 3.0f; //Not representable in base 2
row[ti] = t*row[ti] + (1.0f-t)*shmem[ty][tx];
} else if (rk_step_ == 1) {
row[ti] = 0.75f * row[ti] + 0.25f * shmem[ty][tx];
} else if (rk_step_ == 2) {
constexpr float t = 1.0f / 3.0f; // Not representable in base 2
row[ti] = t * row[ti] + (1.0f - t) * shmem[ty][tx];
}
}
// DEBUG
//row[ti] = 99.0;
// row[ti] = 99.0;
}
}
template<int w, int h, int gc_x, int gc_y, int vars>
__device__ void evolveF(float Q[vars][h+2*gc_y][w+2*gc_x],
float F[vars][h+2*gc_y][w+2*gc_x],
const float dx_, const float dt_) {
for (int var=0; var < vars; ++var) {
for (int j=threadIdx.y; j<h+2*gc_y; j+=h) {
for (int i=threadIdx.x+gc_x; i<w+gc_x; i+=w) {
Q[var][j][i] = Q[var][j][i] + (F[var][j][i-1] - F[var][j][i]) * dt_ / dx_;
__device__ void evolveF(float Q[vars][h + 2 * gc_y][w + 2 * gc_x],
float F[vars][h + 2 * gc_y][w + 2 * gc_x],
const float dx_, const float dt_) {
for (int var = 0; var < vars; ++var) {
for (unsigned int j = threadIdx.y; j < h + 2 * gc_y; j += h) {
for (int i = threadIdx.x + gc_x; i < w + gc_x; i += w) {
Q[var][j][i] = Q[var][j][i] + (F[var][j][i - 1] - F[var][j][i]) * dt_ / dx_;
}
}
}
}
/**
* Evolves the solution in time along the y axis (dimensional splitting)
*/
template<int w, int h, int gc_x, int gc_y, int vars>
__device__ void evolveG(float Q[vars][h+2*gc_y][w+2*gc_x],
float G[vars][h+2*gc_y][w+2*gc_x],
const float dy_, const float dt_) {
for (int var=0; var < vars; ++var) {
for (int j=threadIdx.y+gc_y; j<h+gc_y; j+=h) {
for (int i=threadIdx.x; i<w+2*gc_x; i+=w) {
Q[var][j][i] = Q[var][j][i] + (G[var][j-1][i] - G[var][j][i]) * dt_ / dy_;
__device__ void evolveG(float Q[vars][h + 2 * gc_y][w + 2 * gc_x],
float G[vars][h + 2 * gc_y][w + 2 * gc_x],
const float dy_, const float dt_) {
for (int var = 0; var < vars; ++var) {
for (int j = threadIdx.y + gc_y; j < h + gc_y; j += h) {
for (unsigned int i = threadIdx.x; i < w + 2 * gc_x; i += w) {
Q[var][j][i] = Q[var][j][i] + (G[var][j - 1][i] - G[var][j][i]) * dt_ / dy_;
}
}
}
}
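Both evolve steps above apply the same flux-difference update, once per direction of the dimensional splitting,

\[ Q_{i,j} \leftarrow Q_{i,j} + \frac{\Delta t}{\Delta x}\left( F_{i-1/2,\,j} - F_{i+1/2,\,j} \right), \qquad Q_{i,j} \leftarrow Q_{i,j} + \frac{\Delta t}{\Delta y}\left( G_{i,\,j-1/2} - G_{i,\,j+1/2} \right), \]

where the flux through face i+1/2 is stored at index i of F (and likewise for G in the y direction), which is why the update reads F[var][j][i-1] - F[var][j][i].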
/**
* Helper function for debugging etc.
*/
template<int shmem_width, int shmem_height, int vars>
__device__ void memset(float Q[vars][shmem_height][shmem_width], float value) {
for (int k=0; k<vars; ++k) {
for (int j=threadIdx.y; j<shmem_height; ++j) {
for (int i=threadIdx.x; i<shmem_width; ++i) {
for (int k = 0; k < vars; ++k) {
for (unsigned int j = threadIdx.y; j < shmem_height; ++j) {
for (unsigned int i = threadIdx.x; i < shmem_width; ++i) {
Q[k][j][i] = value;
}
}
}
}
template <unsigned int threads>
__device__ void reduce_max(float* data, unsigned int n) {
__shared__ float sdata[threads];
unsigned int tid = threadIdx.x;
//Reduce to "threads" elements
sdata[tid] = FLT_MIN;
for (unsigned int i=tid; i<n; i += threads) {
sdata[tid] = max(sdata[tid], dt_ctx.L[i]);
}
__syncthreads();
//Now, reduce all elements into a single element
if (threads >= 512) {
if (tid < 256) {
sdata[tid] = max(sdata[tid], sdata[tid + 256]);
}
__syncthreads();
}
if (threads >= 256) {
if (tid < 128) {
sdata[tid] = max(sdata[tid], sdata[tid + 128]);
}
__syncthreads();
}
if (threads >= 128) {
if (tid < 64) {
sdata[tid] = max(sdata[tid], sdata[tid + 64]);
}
__syncthreads();
}
if (tid < 32) {
volatile float* sdata_volatile = sdata;
if (threads >= 64) {
sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 32]);
}
if (tid < 16) {
if (threads >= 32) sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 16]);
if (threads >= 16) sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 8]);
if (threads >= 8) sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 4]);
if (threads >= 4) sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 2]);
if (threads >= 2) sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 1]);
}
if (tid == 0) {
return sdata_volatile[0];
}
}
}
template<unsigned int threads>
__device__ float reduce_max(float *data, unsigned int n) {
__shared__ float sdata[threads];
unsigned int tid = threadIdx.x;
// Reduce to "threads" elements
sdata[tid] = FLT_MIN;
for (unsigned int i = tid; i < n; i += threads) {
sdata[tid] = max(sdata[tid], dt_ctx.L[i]);
}
__syncthreads();
// Now, reduce all elements into a single element
if (threads >= 512) {
if (tid < 256) {
sdata[tid] = max(sdata[tid], sdata[tid + 256]);
}
__syncthreads();
}
if (threads >= 256) {
if (tid < 128) {
sdata[tid] = max(sdata[tid], sdata[tid + 128]);
}
__syncthreads();
}
if (threads >= 128) {
if (tid < 64) {
sdata[tid] = max(sdata[tid], sdata[tid + 64]);
}
__syncthreads();
}
if (tid < 32) {
volatile float *sdata_volatile = sdata;
if (threads >= 64) {
sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 32]);
}
if (tid < 16) {
if (threads >= 32) sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 16]);
if (threads >= 16) sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 8]);
if (threads >= 8) sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 4]);
if (threads >= 4) sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 2]);
if (threads >= 2) sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 1]);
}
if (tid == 0) {
return sdata_volatile[0];
}
}
}

View File

@ -20,41 +20,35 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
#pragma once
/**
* Reconstructs a slope using the generalized minmod limiter based on three
* Reconstructs a slope using the generalized minmod limiter based on three
* consecutive values
*/
__device__ __inline__ float minmodSlope(float left, float center, float right, float theta) {
__device__ __inline__ float minmodSlope(const float left, const float center, const float right, const float theta) {
const float backward = (center - left) * theta;
const float central = (right - left) * 0.5f;
const float forward = (right - center) * theta;
return 0.25f
*copysign(1.0f, backward)
*(copysign(1.0f, backward) + copysign(1.0f, central))
*(copysign(1.0f, central) + copysign(1.0f, forward))
*min( min(fabs(backward), fabs(central)), fabs(forward) );
return 0.25f
* copysign(1.0f, backward)
* (copysign(1.0f, backward) + copysign(1.0f, central))
* (copysign(1.0f, central) + copysign(1.0f, forward))
* min(min(fabs(backward), fabs(central)), fabs(forward));
}
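Written out, the reconstruction above is the generalized minmod slope

\[ \sigma_i = \operatorname{minmod}\!\big( \theta\,(Q_i - Q_{i-1}),\ \tfrac{1}{2}(Q_{i+1} - Q_{i-1}),\ \theta\,(Q_{i+1} - Q_i) \big), \]

which picks the argument of smallest magnitude when all three slopes share the same sign and returns zero otherwise; the copysign products in the return statement implement exactly that selection.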
/**
* Reconstructs a minmod slope for a whole block along the abscissa
*/
template<int w, int h, int gc_x, int gc_y, int vars>
__device__ void minmodSlopeX(float Q[vars][h+2*gc_y][w+2*gc_x],
float Qx[vars][h+2*gc_y][w+2*gc_x],
const float theta_) {
//Reconstruct slopes along x axis
for (int p=0; p<vars; ++p) {
for (int j=threadIdx.y; j<h+2*gc_y; j+=h) {
for (int i=threadIdx.x+1; i<w+2*gc_x-1; i+=w) {
Qx[p][j][i] = minmodSlope(Q[p][j][i-1], Q[p][j][i], Q[p][j][i+1], theta_);
__device__ void minmodSlopeX(float Q[vars][h + 2 * gc_y][w + 2 * gc_x],
float Qx[vars][h + 2 * gc_y][w + 2 * gc_x],
const float theta_) {
// Reconstruct slopes along x-axis
for (int p = 0; p < vars; ++p) {
for (unsigned int j = threadIdx.y; j < h + 2 * gc_y; j += h) {
for (unsigned int i = threadIdx.x + 1; i < w + 2 * gc_x - 1; i += w) {
Qx[p][j][i] = minmodSlope(Q[p][j][i - 1], Q[p][j][i], Q[p][j][i + 1], theta_);
}
}
}
@ -65,54 +59,52 @@ __device__ void minmodSlopeX(float Q[vars][h+2*gc_y][w+2*gc_x],
* Reconstructs a minmod slope for a whole block along the ordinate
*/
template<int w, int h, int gc_x, int gc_y, int vars>
__device__ void minmodSlopeY(float Q[vars][h+2*gc_y][w+2*gc_x],
float Qy[vars][h+2*gc_y][w+2*gc_x],
const float theta_) {
//Reconstruct slopes along y axis
for (int p=0; p<vars; ++p) {
for (int j=threadIdx.y+1; j<h+2*gc_y-1; j+=h) {
for (int i=threadIdx.x; i<w+2*gc_x; i+=w) {
Qy[p][j][i] = minmodSlope(Q[p][j-1][i], Q[p][j][i], Q[p][j+1][i], theta_);
__device__ void minmodSlopeY(float Q[vars][h + 2 * gc_y][w + 2 * gc_x],
float Qy[vars][h + 2 * gc_y][w + 2 * gc_x],
const float theta_) {
// Reconstruct slopes along y-axis
for (int p = 0; p < vars; ++p) {
for (unsigned int j = threadIdx.y + 1; j < h + 2 * gc_y - 1; j += h) {
for (unsigned int i = threadIdx.x; i < w + 2 * gc_x; i += w) {
Qy[p][j][i] = minmodSlope(Q[p][j - 1][i], Q[p][j][i], Q[p][j + 1][i], theta_);
}
}
}
}
__device__ float monotonized_central(float r_) {
return fmaxf(0.0f, fminf(2.0f, fminf(2.0f*r_, 0.5f*(1.0f+r_))));
__device__ inline float monotonized_central(const float r_) {
return fmaxf(0.0f, fminf(2.0f, fminf(2.0f * r_, 0.5f * (1.0f + r_))));
}
__device__ float osher(float r_, float beta_) {
__device__ inline float osher(const float r_, const float beta_) {
return fmaxf(0.0f, fminf(beta_, r_));
}
__device__ float sweby(float r_, float beta_) {
return fmaxf(0.0f, fmaxf(fminf(r_, beta_), fminf(beta_*r_, 1.0f)));
__device__ inline float sweby(const float r_, const float beta_) {
return fmaxf(0.0f, fmaxf(fminf(r_, beta_), fminf(beta_ * r_, 1.0f)));
}
__device__ float minmod(float r_) {
__device__ inline float minmod(const float r_) {
return fmaxf(0.0f, fminf(1.0f, r_));
}
__device__ float generalized_minmod(float r_, float theta_) {
return fmaxf(0.0f, fminf(theta_*r_, fminf( (1.0f + r_) / 2.0f, theta_)));
__device__ inline float generalized_minmod(const float r_, const float theta_) {
return fmaxf(0.0f, fminf(theta_ * r_, fminf((1.0f + r_) / 2.0f, theta_)));
}
__device__ float superbee(float r_) {
return fmaxf(0.0f, fmaxf(fminf(2.0f*r_, 1.0f), fminf(r_, 2.0f)));
__device__ inline float superbee(const float r_) {
return fmaxf(0.0f, fmaxf(fminf(2.0f * r_, 1.0f), fminf(r_, 2.0f)));
}
__device__ float vanAlbada1(float r_) {
return (r_*r_ + r_) / (r_*r_ + 1.0f);
__device__ inline float vanAlbada1(const float r_) {
return (r_ * r_ + r_) / (r_ * r_ + 1.0f);
}
__device__ float vanAlbada2(float r_) {
return 2.0f*r_ / (r_*r_* + 1.0f);
__device__ inline float vanAlbada2(const float r_) {
return 2.0f * r_ / (r_ * r_ + 1.0f);
}
__device__ float vanLeer(float r_) {
__device__ inline float vanLeer(const float r_) {
return (r_ + fabsf(r_)) / (1.0f + fabsf(r_));
}
}

View File

@ -1,4 +1,4 @@
/*
/*
This kernel implements the Central Upwind flux function to
solve the Euler equations
@ -19,166 +19,164 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "common.h"
#include "EulerCommon.h"
#include "limiters.h"
__device__
void computeFluxF(float Q[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
float Qx[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
float F[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
void computeFluxF(float Q[4][BLOCK_HEIGHT + 4][BLOCK_WIDTH + 4],
float Qx[4][BLOCK_HEIGHT + 4][BLOCK_WIDTH + 4],
float F[4][BLOCK_HEIGHT + 4][BLOCK_WIDTH + 4],
const float gamma_, const float dx_, const float dt_) {
for (int j=threadIdx.y; j<BLOCK_HEIGHT+4; j+=BLOCK_HEIGHT) {
for (int i=threadIdx.x+1; i<BLOCK_WIDTH+2; i+=BLOCK_WIDTH) {
for (unsigned int j = threadIdx.y; j < BLOCK_HEIGHT + 4; j += BLOCK_HEIGHT) {
for (unsigned int i = threadIdx.x + 1; i < BLOCK_WIDTH + 2; i += BLOCK_WIDTH) {
// Reconstruct point values of Q at the left and right hand side
// of the cell for both the left (i) and right (i+1) cell
const float4 Q_rl = make_float4(Q[0][j][i+1] - 0.5f*Qx[0][j][i+1],
Q[1][j][i+1] - 0.5f*Qx[1][j][i+1],
Q[2][j][i+1] - 0.5f*Qx[2][j][i+1],
Q[3][j][i+1] - 0.5f*Qx[3][j][i+1]);
const float4 Q_rr = make_float4(Q[0][j][i+1] + 0.5f*Qx[0][j][i+1],
Q[1][j][i+1] + 0.5f*Qx[1][j][i+1],
Q[2][j][i+1] + 0.5f*Qx[2][j][i+1],
Q[3][j][i+1] + 0.5f*Qx[3][j][i+1]);
const float4 Q_rl = make_float4(Q[0][j][i + 1] - 0.5f * Qx[0][j][i + 1],
Q[1][j][i + 1] - 0.5f * Qx[1][j][i + 1],
Q[2][j][i + 1] - 0.5f * Qx[2][j][i + 1],
Q[3][j][i + 1] - 0.5f * Qx[3][j][i + 1]);
const float4 Q_rr = make_float4(Q[0][j][i + 1] + 0.5f * Qx[0][j][i + 1],
Q[1][j][i + 1] + 0.5f * Qx[1][j][i + 1],
Q[2][j][i + 1] + 0.5f * Qx[2][j][i + 1],
Q[3][j][i + 1] + 0.5f * Qx[3][j][i + 1]);
const float4 Q_ll = make_float4(Q[0][j][i] - 0.5f*Qx[0][j][i],
Q[1][j][i] - 0.5f*Qx[1][j][i],
Q[2][j][i] - 0.5f*Qx[2][j][i],
Q[3][j][i] - 0.5f*Qx[3][j][i]);
const float4 Q_lr = make_float4(Q[0][j][i] + 0.5f*Qx[0][j][i],
Q[1][j][i] + 0.5f*Qx[1][j][i],
Q[2][j][i] + 0.5f*Qx[2][j][i],
Q[3][j][i] + 0.5f*Qx[3][j][i]);
const float4 Q_ll = make_float4(Q[0][j][i] - 0.5f * Qx[0][j][i],
Q[1][j][i] - 0.5f * Qx[1][j][i],
Q[2][j][i] - 0.5f * Qx[2][j][i],
Q[3][j][i] - 0.5f * Qx[3][j][i]);
const float4 Q_lr = make_float4(Q[0][j][i] + 0.5f * Qx[0][j][i],
Q[1][j][i] + 0.5f * Qx[1][j][i],
Q[2][j][i] + 0.5f * Qx[2][j][i],
Q[3][j][i] + 0.5f * Qx[3][j][i]);
//Evolve half a timestep (predictor step)
const float4 Q_r_bar = Q_rl + dt_/(2.0f*dx_) * (F_func(Q_rl, gamma_) - F_func(Q_rr, gamma_));
const float4 Q_l_bar = Q_lr + dt_/(2.0f*dx_) * (F_func(Q_ll, gamma_) - F_func(Q_lr, gamma_));
// Evolve half a timestep (predictor step)
const float4 Q_r_bar = Q_rl + dt_ / (2.0f * dx_) * (F_func(Q_rl, gamma_) - F_func(Q_rr, gamma_));
const float4 Q_l_bar = Q_lr + dt_ / (2.0f * dx_) * (F_func(Q_ll, gamma_) - F_func(Q_lr, gamma_));
// Compute flux based on prediction
//const float4 flux = CentralUpwindFlux(Q_l_bar, Q_r_bar, gamma_);
const float4 flux = HLL_flux(Q_l_bar, Q_r_bar, gamma_);
//Write to shared memory
F[0][j][i] = flux.x;
F[1][j][i] = flux.y;
F[2][j][i] = flux.z;
F[3][j][i] = flux.w;
// const float4 flux = CentralUpwindFlux(Q_l_bar, Q_r_bar, gamma_);
const auto [x, y, z, w] = HLL_flux(Q_l_bar, Q_r_bar, gamma_);
// Write to shared memory
F[0][j][i] = x;
F[1][j][i] = y;
F[2][j][i] = z;
F[3][j][i] = w;
}
}
}
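The half-step evolution above is the usual second-order predictor applied to the face-reconstructed values before the Riemann flux is evaluated, e.g. for the right cell

\[ \bar{Q}_{rl} = Q_{rl} + \frac{\Delta t}{2\,\Delta x}\big( F(Q_{rl}) - F(Q_{rr}) \big), \]

and analogously for the left cell, with the limited slopes Qx entering through the plus/minus one-half Qx reconstruction of Q_rl, Q_rr, Q_ll and Q_lr.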
__device__
void computeFluxG(float Q[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
float Qy[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
float G[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
void computeFluxG(float Q[4][BLOCK_HEIGHT + 4][BLOCK_WIDTH + 4],
float Qy[4][BLOCK_HEIGHT + 4][BLOCK_WIDTH + 4],
float G[4][BLOCK_HEIGHT + 4][BLOCK_WIDTH + 4],
const float gamma_, const float dy_, const float dt_) {
for (int j=threadIdx.y+1; j<BLOCK_HEIGHT+2; j+=BLOCK_HEIGHT) {
for (int i=threadIdx.x; i<BLOCK_WIDTH+4; i+=BLOCK_WIDTH) {
for (unsigned int j = threadIdx.y + 1; j < BLOCK_HEIGHT + 2; j += BLOCK_HEIGHT) {
for (unsigned int i = threadIdx.x; i < BLOCK_WIDTH + 4; i += BLOCK_WIDTH) {
// Reconstruct point values of Q at the left and right hand side
// of the cell for both the lower (j) and upper (j+1) cell
//NOte that hu and hv are swapped ("transposing" the domain)!
const float4 Q_rl = make_float4(Q[0][j+1][i] - 0.5f*Qy[0][j+1][i],
Q[2][j+1][i] - 0.5f*Qy[2][j+1][i],
Q[1][j+1][i] - 0.5f*Qy[1][j+1][i],
Q[3][j+1][i] - 0.5f*Qy[3][j+1][i]);
const float4 Q_rr = make_float4(Q[0][j+1][i] + 0.5f*Qy[0][j+1][i],
Q[2][j+1][i] + 0.5f*Qy[2][j+1][i],
Q[1][j+1][i] + 0.5f*Qy[1][j+1][i],
Q[3][j+1][i] + 0.5f*Qy[3][j+1][i]);
// Note that hu and hv are swapped ("transposing" the domain)!
const float4 Q_rl = make_float4(Q[0][j + 1][i] - 0.5f * Qy[0][j + 1][i],
Q[2][j + 1][i] - 0.5f * Qy[2][j + 1][i],
Q[1][j + 1][i] - 0.5f * Qy[1][j + 1][i],
Q[3][j + 1][i] - 0.5f * Qy[3][j + 1][i]);
const float4 Q_rr = make_float4(Q[0][j + 1][i] + 0.5f * Qy[0][j + 1][i],
Q[2][j + 1][i] + 0.5f * Qy[2][j + 1][i],
Q[1][j + 1][i] + 0.5f * Qy[1][j + 1][i],
Q[3][j + 1][i] + 0.5f * Qy[3][j + 1][i]);
const float4 Q_ll = make_float4(Q[0][j][i] - 0.5f*Qy[0][j][i],
Q[2][j][i] - 0.5f*Qy[2][j][i],
Q[1][j][i] - 0.5f*Qy[1][j][i],
Q[3][j][i] - 0.5f*Qy[3][j][i]);
const float4 Q_lr = make_float4(Q[0][j][i] + 0.5f*Qy[0][j][i],
Q[2][j][i] + 0.5f*Qy[2][j][i],
Q[1][j][i] + 0.5f*Qy[1][j][i],
Q[3][j][i] + 0.5f*Qy[3][j][i]);
const float4 Q_ll = make_float4(Q[0][j][i] - 0.5f * Qy[0][j][i],
Q[2][j][i] - 0.5f * Qy[2][j][i],
Q[1][j][i] - 0.5f * Qy[1][j][i],
Q[3][j][i] - 0.5f * Qy[3][j][i]);
const float4 Q_lr = make_float4(Q[0][j][i] + 0.5f * Qy[0][j][i],
Q[2][j][i] + 0.5f * Qy[2][j][i],
Q[1][j][i] + 0.5f * Qy[1][j][i],
Q[3][j][i] + 0.5f * Qy[3][j][i]);
// Evolve half a timestep (predictor step)
const float4 Q_r_bar = Q_rl + dt_ / (2.0f * dy_) * (F_func(Q_rl, gamma_) - F_func(Q_rr, gamma_));
const float4 Q_l_bar = Q_lr + dt_ / (2.0f * dy_) * (F_func(Q_ll, gamma_) - F_func(Q_lr, gamma_));
//Evolve half a timestep (predictor step)
const float4 Q_r_bar = Q_rl + dt_/(2.0f*dy_) * (F_func(Q_rl, gamma_) - F_func(Q_rr, gamma_));
const float4 Q_l_bar = Q_lr + dt_/(2.0f*dy_) * (F_func(Q_ll, gamma_) - F_func(Q_lr, gamma_));
// Compute flux based on prediction
const float4 flux = CentralUpwindFlux(Q_l_bar, Q_r_bar, gamma_);
//const float4 flux = HLL_flux(Q_l_bar, Q_r_bar, gamma_);
//Write to shared memory
//Note that we here swap hu and hv back to the original
G[0][j][i] = flux.x;
G[1][j][i] = flux.z;
G[2][j][i] = flux.y;
G[3][j][i] = flux.w;
const auto [x, y, z, w] = CentralUpwindFlux(Q_l_bar, Q_r_bar, gamma_);
// const float4 flux = HLL_flux(Q_l_bar, Q_r_bar, gamma_);
// Write to shared memory
// Note that we here swap hu and hv back to the original
G[0][j][i] = x;
G[1][j][i] = z;
G[2][j][i] = y;
G[3][j][i] = w;
}
}
}
/**
* This unsplit kernel computes the 2D numerical scheme with a TVD RK2 time integration scheme
*/
extern "C" {
__global__ void KP07DimsplitKernel(
int nx_, int ny_,
float dx_, float dy_, float dt_,
float g_,
float gamma_,
float theta_,
int step_,
int boundary_conditions_,
//Input h^n
float* rho0_ptr_, int rho0_pitch_,
float* rho_u0_ptr_, int rho_u0_pitch_,
float* rho_v0_ptr_, int rho_v0_pitch_,
float* E0_ptr_, int E0_pitch_,
//Output h^{n+1}
float* rho1_ptr_, int rho1_pitch_,
float* rho_u1_ptr_, int rho_u1_pitch_,
float* rho_v1_ptr_, int rho_v1_pitch_,
float* E1_ptr_, int E1_pitch_,
//Output CFL
float* cfl_,
//Subarea of internal domain to compute
int x0=0, int y0=0,
int x1=0, int y1=0) {
const int nx_, const int ny_,
const float dx_, const float dy_, const float dt_,
const float g_,
const float gamma_,
const float theta_,
const int step_,
const int boundary_conditions_,
// Input h^n
float *rho0_ptr_, const int rho0_pitch_,
float *rho_u0_ptr_, const int rho_u0_pitch_,
float *rho_v0_ptr_, const int rho_v0_pitch_,
float *E0_ptr_, const int E0_pitch_,
// Output h^{n+1}
float *rho1_ptr_, const int rho1_pitch_,
float *rho_u1_ptr_, const int rho_u1_pitch_,
float *rho_v1_ptr_, const int rho_v1_pitch_,
float *E1_ptr_, const int E1_pitch_,
// Output CFL
float *cfl_,
// Subarea of internal domain to compute
const int x0 = 0, const int y0 = 0,
int x1 = 0, int y1 = 0) {
if(x1 == 0)
if (x1 == 0)
x1 = nx_;
if(y1 == 0)
if (y1 == 0)
y1 = ny_;
const unsigned int w = BLOCK_WIDTH;
const unsigned int h = BLOCK_HEIGHT;
const unsigned int gc_x = 2;
const unsigned int gc_y = 2;
const unsigned int vars = 4;
//Shared memory variables
__shared__ float Q[4][h+2*gc_y][w+2*gc_x];
__shared__ float Qx[4][h+2*gc_y][w+2*gc_x];
__shared__ float F[4][h+2*gc_y][w+2*gc_x];
//Read into shared memory
readBlock<w, h, gc_x, gc_y, 1, 1>( rho0_ptr_, rho0_pitch_, Q[0], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, -1, 1>(rho_u0_ptr_, rho_u0_pitch_, Q[1], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, 1, -1>(rho_v0_ptr_, rho_v0_pitch_, Q[2], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, 1, 1>( E0_ptr_, E0_pitch_, Q[3], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
//Step 0 => evolve x first, then y
constexpr unsigned int w = BLOCK_WIDTH;
constexpr unsigned int h = BLOCK_HEIGHT;
constexpr unsigned int gc_x = 2;
constexpr unsigned int gc_y = 2;
constexpr unsigned int vars = 4;
// Shared memory variables
__shared__ float Q[4][h + 2 * gc_y][w + 2 * gc_x];
__shared__ float Qx[4][h + 2 * gc_y][w + 2 * gc_x];
__shared__ float F[4][h + 2 * gc_y][w + 2 * gc_x];
// Read into shared memory
readBlock<w, h, gc_x, gc_y, 1, 1>(rho0_ptr_, rho0_pitch_, Q[0], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, -1, 1>(rho_u0_ptr_, rho_u0_pitch_, Q[1], nx_, ny_, boundary_conditions_, x0, y0, x1,
y1);
readBlock<w, h, gc_x, gc_y, 1, -1>(rho_v0_ptr_, rho_v0_pitch_, Q[2], nx_, ny_, boundary_conditions_, x0, y0, x1,
y1);
readBlock<w, h, gc_x, gc_y, 1, 1>(E0_ptr_, E0_pitch_, Q[3], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
// Step 0 => evolve x first, then y
if (step_ == 0) {
//Compute fluxes along the x axis and evolve
// Compute fluxes along the x-axis and evolve
minmodSlopeX<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
__syncthreads();
computeFluxF(Q, Qx, F, gamma_, dx_, dt_);
@@ -186,65 +184,63 @@ __global__ void KP07DimsplitKernel(
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
__syncthreads();
//Compute fluxes along the y axis and evolve
// Compute fluxes along the y-axis and evolve
minmodSlopeY<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
__syncthreads();
computeFluxG(Q, Qx, F, gamma_, dy_, dt_);
__syncthreads();
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
__syncthreads();
//Gravity source term
__syncthreads();
// Gravity source term
if (g_ > 0.0f) {
const int i = threadIdx.x + gc_x;
const int j = threadIdx.y + gc_y;
const unsigned int i = threadIdx.x + gc_x;
const unsigned int j = threadIdx.y + gc_y;
const float rho_v = Q[2][j][i];
Q[2][j][i] -= g_*Q[0][j][i]*dt_;
Q[3][j][i] -= g_*rho_v*dt_;
Q[2][j][i] -= g_ * Q[0][j][i] * dt_;
Q[3][j][i] -= g_ * rho_v * dt_;
__syncthreads();
}
}
//Step 1 => evolve y first, then x
// Step 1 => evolve y first, then x
else {
//Compute fluxes along the y axis and evolve
// Compute fluxes along the y-axis and evolve
minmodSlopeY<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
__syncthreads();
computeFluxG(Q, Qx, F, gamma_, dy_, dt_);
__syncthreads();
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
__syncthreads();
//Compute fluxes along the x axis and evolve
// Compute fluxes along the x-axis and evolve
minmodSlopeX<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
__syncthreads();
computeFluxF(Q, Qx, F, gamma_, dx_, dt_);
__syncthreads();
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
__syncthreads();
//Gravity source term
// Gravity source term
if (g_ > 0.0f) {
const int i = threadIdx.x + gc_x;
const int j = threadIdx.y + gc_y;
const unsigned int i = threadIdx.x + gc_x;
const unsigned int j = threadIdx.y + gc_y;
const float rho_v = Q[2][j][i];
Q[2][j][i] -= g_*Q[0][j][i]*dt_;
Q[3][j][i] -= g_*rho_v*dt_;
Q[2][j][i] -= g_ * Q[0][j][i] * dt_;
Q[3][j][i] -= g_ * rho_v * dt_;
__syncthreads();
}
}
// Write to main memory for all internal cells
writeBlock<w, h, gc_x, gc_y>( rho1_ptr_, rho1_pitch_, Q[0], nx_, ny_, 0, 1, x0, y0, x1, y1);
writeBlock<w, h, gc_x, gc_y>(rho1_ptr_, rho1_pitch_, Q[0], nx_, ny_, 0, 1, x0, y0, x1, y1);
writeBlock<w, h, gc_x, gc_y>(rho_u1_ptr_, rho_u1_pitch_, Q[1], nx_, ny_, 0, 1, x0, y0, x1, y1);
writeBlock<w, h, gc_x, gc_y>(rho_v1_ptr_, rho_v1_pitch_, Q[2], nx_, ny_, 0, 1, x0, y0, x1, y1);
writeBlock<w, h, gc_x, gc_y>( E1_ptr_, E1_pitch_, Q[3], nx_, ny_, 0, 1, x0, y0, x1, y1);
//Compute the CFL for this block
if (cfl_ != NULL) {
writeBlock<w, h, gc_x, gc_y>(E1_ptr_, E1_pitch_, Q[3], nx_, ny_, 0, 1, x0, y0, x1, y1);
// Compute the CFL for this block
if (cfl_ != nullptr) {
writeCfl<w, h, gc_x, gc_y, vars>(Q, F[0], nx_, ny_, dx_, dy_, gamma_, cfl_);
}
}
} // extern "C"
} // extern "C"
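The step_ argument alternates the sweep order between consecutive timesteps. Writing X(dt) and Y(dt) for the one-dimensional evolve operators used in the two branches above, the pairing is
Q^{n+1} = Y(\Delta t)\, X(\Delta t)\, Q^{n}, \qquad Q^{n+2} = X(\Delta t)\, Y(\Delta t)\, Q^{n+1},
so that the leading splitting error largely cancels over a pair of steps, in the spirit of Strang splitting.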


@@ -24,28 +24,28 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
/**
* Computes the flux along the x axis for all faces
* Computes the flux along the x-axis for all faces
*/
__device__
void computeFluxF(float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
float F[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
__device__
void computeFluxF(float Q[3][BLOCK_HEIGHT + 2][BLOCK_WIDTH + 2],
float F[3][BLOCK_HEIGHT + 2][BLOCK_WIDTH + 2],
const float g_, const float dx_, const float dt_) {
//Compute fluxes along the x axis
for (int j=threadIdx.y; j<BLOCK_HEIGHT+2; j+=BLOCK_HEIGHT) {
for (int i=threadIdx.x; i<BLOCK_WIDTH+1; i+=BLOCK_WIDTH) {
// Compute fluxes along the x-axis
for (unsigned int j = threadIdx.y; j < BLOCK_HEIGHT + 2; j += BLOCK_HEIGHT) {
for (unsigned int i = threadIdx.x; i < BLOCK_WIDTH + 1; i += BLOCK_WIDTH) {
// Q at interface from the right and left
const float3 Qp = make_float3(Q[0][j][i+1],
Q[1][j][i+1],
Q[2][j][i+1]);
const float3 Qp = make_float3(Q[0][j][i + 1],
Q[1][j][i + 1],
Q[2][j][i + 1]);
const float3 Qm = make_float3(Q[0][j][i],
Q[1][j][i],
Q[2][j][i]);
// Computed flux
const float3 flux = FORCE_1D_flux(Qm, Qp, g_, dx_, dt_);
F[0][j][i] = flux.x;
F[1][j][i] = flux.y;
F[2][j][i] = flux.z;
const auto [x, y, z] = FORCE_1D_flux(Qm, Qp, g_, dx_, dt_);
F[0][j][i] = x;
F[1][j][i] = y;
F[2][j][i] = z;
}
}
}
@@ -54,28 +54,28 @@ void computeFluxF(float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
/**
* Computes the flux along the y axis for all faces
*/
__device__
void computeFluxG(float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
float G[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
__device__
void computeFluxG(float Q[3][BLOCK_HEIGHT + 2][BLOCK_WIDTH + 2],
float G[3][BLOCK_HEIGHT + 2][BLOCK_WIDTH + 2],
const float g_, const float dy_, const float dt_) {
//Compute fluxes along the y axis
for (int j=threadIdx.y; j<BLOCK_HEIGHT+1; j+=BLOCK_HEIGHT) {
for (int i=threadIdx.x; i<BLOCK_WIDTH+2; i+=BLOCK_WIDTH) {
// Compute fluxes along the y axis
for (unsigned int j = threadIdx.y; j < BLOCK_HEIGHT + 1; j += BLOCK_HEIGHT) {
for (unsigned int i = threadIdx.x; i < BLOCK_WIDTH + 2; i += BLOCK_WIDTH) {
// Q at interface from the right and left
// Note that we swap hu and hv
const float3 Qp = make_float3(Q[0][j+1][i],
Q[2][j+1][i],
Q[1][j+1][i]);
const float3 Qp = make_float3(Q[0][j + 1][i],
Q[2][j + 1][i],
Q[1][j + 1][i]);
const float3 Qm = make_float3(Q[0][j][i],
Q[2][j][i],
Q[1][j][i]);
// Computed flux
// Note that we swap back
const float3 flux = FORCE_1D_flux(Qm, Qp, g_, dy_, dt_);
G[0][j][i] = flux.x;
G[1][j][i] = flux.z;
G[2][j][i] = flux.y;
const auto [x, y, z] = FORCE_1D_flux(Qm, Qp, g_, dy_, dt_);
G[0][j][i] = x;
G[1][j][i] = z;
G[2][j][i] = y;
}
}
}
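FORCE_1D_flux is provided by SWECommon.h and is not shown in this file; the classical FORCE flux of Toro that the name refers to is the average of the Lax-Friedrichs and Richtmyer (two-step Lax-Wendroff) fluxes:
F^{FORCE}_{i+1/2} = \tfrac{1}{2}\left( F^{LF}_{i+1/2} + F^{LW}_{i+1/2} \right),
F^{LF}_{i+1/2} = \tfrac{1}{2}\left( F(Q_i) + F(Q_{i+1}) \right) - \frac{\Delta x}{2\Delta t}\left( Q_{i+1} - Q_i \right),
F^{LW}_{i+1/2} = F\!\left( \tfrac{1}{2}(Q_i + Q_{i+1}) - \frac{\Delta t}{2\Delta x}\left( F(Q_{i+1}) - F(Q_i) \right) \right).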
@@ -83,71 +83,69 @@ void computeFluxG(float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
extern "C" {
__global__ void FORCEKernel(
int nx_, int ny_,
float dx_, float dy_, float dt_,
float g_,
int boundary_conditions_,
//Input h^n
float* h0_ptr_, int h0_pitch_,
float* hu0_ptr_, int hu0_pitch_,
float* hv0_ptr_, int hv0_pitch_,
//Output h^{n+1}
float* h1_ptr_, int h1_pitch_,
float* hu1_ptr_, int hu1_pitch_,
float* hv1_ptr_, int hv1_pitch_,
//Output CFL
float* cfl_,
//Subarea of internal domain to compute
int x0=0, int y0=0,
int x1=0, int y1=0) {
const int nx_, const int ny_,
const float dx_, const float dy_, const float dt_,
const float g_,
const int boundary_conditions_,
// Input h^n
float *h0_ptr_, const int h0_pitch_,
float *hu0_ptr_, const int hu0_pitch_,
float *hv0_ptr_, const int hv0_pitch_,
// Output h^{n+1}
float *h1_ptr_, const int h1_pitch_,
float *hu1_ptr_, const int hu1_pitch_,
float *hv1_ptr_, const int hv1_pitch_,
// Output CFL
float *cfl_,
// Subarea of internal domain to compute
const int x0 = 0, const int y0 = 0,
int x1 = 0, int y1 = 0) {
if(x1 == 0)
if (x1 == 0)
x1 = nx_;
if(y1 == 0)
y1 = ny_;
if (y1 == 0)
y1 = ny_;
const unsigned int w = BLOCK_WIDTH;
const unsigned int h = BLOCK_HEIGHT;
const unsigned int gc_x = 1;
const unsigned int gc_y = 1;
const unsigned int vars = 3;
__shared__ float Q[vars][h+2*gc_y][w+2*gc_x];
__shared__ float F[vars][h+2*gc_y][w+2*gc_x];
//Read into shared memory
readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
constexpr unsigned int w = BLOCK_WIDTH;
constexpr unsigned int h = BLOCK_HEIGHT;
constexpr unsigned int gc_x = 1;
constexpr unsigned int gc_y = 1;
constexpr unsigned int vars = 3;
__shared__ float Q[vars][h + 2 * gc_y][w + 2 * gc_x];
__shared__ float F[vars][h + 2 * gc_y][w + 2 * gc_x];
// Read into shared memory
readBlock<w, h, gc_x, gc_y, 1, 1>(h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
__syncthreads();
//Compute flux along x, and evolve
// Compute flux along x, and evolve
computeFluxF(Q, F, g_, dx_, dt_);
__syncthreads();
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
__syncthreads();
//Compute flux along y, and evolve
// Compute flux along y, and evolve
computeFluxG(Q, F, g_, dy_, dt_);
__syncthreads();
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
__syncthreads();
//Write to main memory
writeBlock<w, h, gc_x, gc_y>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1, x0, y0, x1, y1);
// Write to main memory
writeBlock<w, h, gc_x, gc_y>(h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1, x0, y0, x1, y1);
writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1, x0, y0, x1, y1);
writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1, x0, y0, x1, y1);
//Compute the CFL for this block
if (cfl_ != NULL) {
// Compute the CFL for this block
if (cfl_ != nullptr) {
writeCfl<w, h, gc_x, gc_y, vars>(Q, F[0], nx_, ny_, dx_, dy_, g_, cfl_);
}
}
} // extern "C"
} // extern "C"
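The cfl_ buffer receives one value per block. Turning it into a timestep happens outside the kernel; a minimal host-side sketch, assuming the buffer holds one float per launched block and that the reduction is done on the CPU (the function name and Courant factor below are illustrative, not part of this code base):

#include <algorithm>
#include <vector>
#include <cuda_runtime.h>

// Reduce the per-block dt limits to a single stable timestep.
float computeMaxTimestep(const float *cfl_dev, const int num_blocks, const float courant) {
    std::vector<float> cfl_host(num_blocks);
    cudaMemcpy(cfl_host.data(), cfl_dev, num_blocks * sizeof(float), cudaMemcpyDeviceToHost);
    // Each entry bounds dt for one block; the global minimum, scaled by the
    // Courant number, bounds dt for the whole domain.
    const float block_min = *std::min_element(cfl_host.begin(), cfl_host.end());
    return courant * block_min;
}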


@@ -18,154 +18,134 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "common.h"
#include "SWECommon.h"
/**
* Computes the flux along the x axis for all faces
* Computes the flux along the x-axis for all faces
*/
__device__
void computeFluxF(float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
float F[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
void computeFluxF(float Q[3][BLOCK_HEIGHT + 2][BLOCK_WIDTH + 2],
float F[3][BLOCK_HEIGHT + 2][BLOCK_WIDTH + 2],
const float g_) {
for (int j=threadIdx.y; j<BLOCK_HEIGHT+2; j+=BLOCK_HEIGHT) {
for (int i=threadIdx.x; i<BLOCK_WIDTH+1; i+=BLOCK_WIDTH) {
for (unsigned int j = threadIdx.y; j < BLOCK_HEIGHT + 2; j += BLOCK_HEIGHT) {
for (unsigned int i = threadIdx.x; i < BLOCK_WIDTH + 1; i += BLOCK_WIDTH) {
// Q at interface from the right and left
const float3 Q_r = make_float3(Q[0][j][i+1],
Q[1][j][i+1],
Q[2][j][i+1]);
const float3 Q_r = make_float3(Q[0][j][i + 1],
Q[1][j][i + 1],
Q[2][j][i + 1]);
const float3 Q_l = make_float3(Q[0][j][i],
Q[1][j][i],
Q[2][j][i]);
// Computed flux
const float3 flux = HLL_flux(Q_l, Q_r, g_);
F[0][j][i] = flux.x;
F[1][j][i] = flux.y;
F[2][j][i] = flux.z;
const auto [x, y, z] = HLL_flux(Q_l, Q_r, g_);
F[0][j][i] = x;
F[1][j][i] = y;
F[2][j][i] = z;
}
}
}
/**
* Computes the flux along the y axis for all faces
* Computes the flux along the y-axis for all faces
*/
__device__
void computeFluxG(float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
float G[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
void computeFluxG(float Q[3][BLOCK_HEIGHT + 2][BLOCK_WIDTH + 2],
float G[3][BLOCK_HEIGHT + 2][BLOCK_WIDTH + 2],
const float g_) {
//Compute fluxes along the y axis
for (int j=threadIdx.y; j<BLOCK_HEIGHT+1; j+=BLOCK_HEIGHT) {
for (int i=threadIdx.x; i<BLOCK_WIDTH+2; i+=BLOCK_WIDTH) {
// Compute fluxes along the y axis
for (unsigned int j = threadIdx.y; j < BLOCK_HEIGHT + 1; j += BLOCK_HEIGHT) {
for (unsigned int i = threadIdx.x; i < BLOCK_WIDTH + 2; i += BLOCK_WIDTH) {
// Q at interface from the right and left
// Note that we swap hu and hv
const float3 Q_r = make_float3(Q[0][j+1][i],
Q[2][j+1][i],
Q[1][j+1][i]);
const float3 Q_r = make_float3(Q[0][j + 1][i],
Q[2][j + 1][i],
Q[1][j + 1][i]);
const float3 Q_l = make_float3(Q[0][j][i],
Q[2][j][i],
Q[1][j][i]);
// Computed flux
//Note that we here swap hu and hv back to the original
const float3 flux = HLL_flux(Q_l, Q_r, g_);
G[0][j][i] = flux.x;
G[1][j][i] = flux.z;
G[2][j][i] = flux.y;
// Note that we here swap hu and hv back to the original
const auto [x, y, z] = HLL_flux(Q_l, Q_r, g_);
G[0][j][i] = x;
G[1][j][i] = z;
G[2][j][i] = y;
}
}
}
extern "C" {
__global__ void HLLKernel(
int nx_, int ny_,
float dx_, float dy_, float dt_,
float g_,
int boundary_conditions_,
//Input h^n
float* h0_ptr_, int h0_pitch_,
float* hu0_ptr_, int hu0_pitch_,
float* hv0_ptr_, int hv0_pitch_,
//Output h^{n+1}
float* h1_ptr_, int h1_pitch_,
float* hu1_ptr_, int hu1_pitch_,
float* hv1_ptr_, int hv1_pitch_,
//Output CFL
float* cfl_,
//Subarea of internal domain to compute
int x0=0, int y0=0,
int x1=0, int y1=0) {
const int nx_, const int ny_,
const float dx_, const float dy_, const float dt_,
const float g_,
const int boundary_conditions_,
// Input h^n
float *h0_ptr_, const int h0_pitch_,
float *hu0_ptr_, const int hu0_pitch_,
float *hv0_ptr_, const int hv0_pitch_,
// Output h^{n+1}
float *h1_ptr_, const int h1_pitch_,
float *hu1_ptr_, const int hu1_pitch_,
float *hv1_ptr_, const int hv1_pitch_,
// Output CFL
float *cfl_,
// Subarea of internal domain to compute
const int x0 = 0, const int y0 = 0,
int x1 = 0, int y1 = 0) {
if(x1 == 0)
if (x1 == 0)
x1 = nx_;
if(y1 == 0)
if (y1 == 0)
y1 = ny_;
const unsigned int w = BLOCK_WIDTH;
const unsigned int h = BLOCK_HEIGHT;
const unsigned int gc_x = 1;
const unsigned int gc_y = 1;
const unsigned int vars = 3;
//Shared memory variables
__shared__ float Q[vars][h+2*gc_y][w+2*gc_x];
__shared__ float F[vars][h+2*gc_y][w+2*gc_x];
//Read into shared memory
readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
//Compute F flux
constexpr unsigned int w = BLOCK_WIDTH;
constexpr unsigned int h = BLOCK_HEIGHT;
constexpr unsigned int gc_x = 1;
constexpr unsigned int gc_y = 1;
constexpr unsigned int vars = 3;
// Shared memory variables
__shared__ float Q[vars][h + 2 * gc_y][w + 2 * gc_x];
__shared__ float F[vars][h + 2 * gc_y][w + 2 * gc_x];
// Read into shared memory
readBlock<w, h, gc_x, gc_y, 1, 1>(h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
// Compute F flux
computeFluxF(Q, F, g_);
__syncthreads();
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
__syncthreads();
//Compute G flux
// Compute G flux
computeFluxG(Q, F, g_);
__syncthreads();
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
__syncthreads();
// Write to main memory for all internal cells
writeBlock<w, h, gc_x, gc_y>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1, x0, y0, x1, y1);
writeBlock<w, h, gc_x, gc_y>(h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1, x0, y0, x1, y1);
writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1, x0, y0, x1, y1);
writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1, x0, y0, x1, y1);
//Compute the CFL for this block
if (cfl_ != NULL) {
// Compute the CFL for this block
if (cfl_ != nullptr) {
writeCfl<w, h, gc_x, gc_y, vars>(Q, F[0], nx_, ny_, dx_, dy_, g_, cfl_);
}
}
} // extern "C"
} // extern "C"
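HLL_flux is defined in SWECommon.h rather than here; for reference, the Harten-Lax-van Leer flux with wave-speed estimates S_L <= S_R reads
F^{HLL}_{i+1/2} =
\begin{cases}
F(Q_L), & 0 \le S_L, \\
\dfrac{ S_R F(Q_L) - S_L F(Q_R) + S_L S_R (Q_R - Q_L) }{ S_R - S_L }, & S_L < 0 < S_R, \\
F(Q_R), & 0 \ge S_R.
\end{cases}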


@@ -23,166 +23,152 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
#include "limiters.h"
/**
* Computes the flux along the x axis for all faces
* Computes the flux along the x-axis for all faces
*/
__device__
void computeFluxF(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
float Qx[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
float F[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
void computeFluxF(float Q[3][BLOCK_HEIGHT + 4][BLOCK_WIDTH + 4],
float Qx[3][BLOCK_HEIGHT + 4][BLOCK_WIDTH + 4],
float F[3][BLOCK_HEIGHT + 4][BLOCK_WIDTH + 4],
const float g_, const float dx_, const float dt_) {
for (int j=threadIdx.y; j<BLOCK_HEIGHT+4; j+=BLOCK_HEIGHT) {
for (int i=threadIdx.x+1; i<BLOCK_WIDTH+2; i+=BLOCK_WIDTH) {
// Reconstruct point values of Q at the left and right hand side
// of the cell for both the left (i) and right (i+1) cell
const float3 Q_rl = make_float3(Q[0][j][i+1] - 0.5f*Qx[0][j][i+1],
Q[1][j][i+1] - 0.5f*Qx[1][j][i+1],
Q[2][j][i+1] - 0.5f*Qx[2][j][i+1]);
const float3 Q_rr = make_float3(Q[0][j][i+1] + 0.5f*Qx[0][j][i+1],
Q[1][j][i+1] + 0.5f*Qx[1][j][i+1],
Q[2][j][i+1] + 0.5f*Qx[2][j][i+1]);
const float3 Q_ll = make_float3(Q[0][j][i] - 0.5f*Qx[0][j][i],
Q[1][j][i] - 0.5f*Qx[1][j][i],
Q[2][j][i] - 0.5f*Qx[2][j][i]);
const float3 Q_lr = make_float3(Q[0][j][i] + 0.5f*Qx[0][j][i],
Q[1][j][i] + 0.5f*Qx[1][j][i],
Q[2][j][i] + 0.5f*Qx[2][j][i]);
for (unsigned int j = threadIdx.y; j < BLOCK_HEIGHT + 4; j += BLOCK_HEIGHT) {
for (unsigned int i = threadIdx.x + 1; i < BLOCK_WIDTH + 2; i += BLOCK_WIDTH) {
// Reconstruct point values of Q at the left and right hand side
// of the cell for both the left (i) and right (i+1) cell
const float3 Q_rl = make_float3(Q[0][j][i + 1] - 0.5f * Qx[0][j][i + 1],
Q[1][j][i + 1] - 0.5f * Qx[1][j][i + 1],
Q[2][j][i + 1] - 0.5f * Qx[2][j][i + 1]);
const float3 Q_rr = make_float3(Q[0][j][i + 1] + 0.5f * Qx[0][j][i + 1],
Q[1][j][i + 1] + 0.5f * Qx[1][j][i + 1],
Q[2][j][i + 1] + 0.5f * Qx[2][j][i + 1]);
const float3 Q_ll = make_float3(Q[0][j][i] - 0.5f * Qx[0][j][i],
Q[1][j][i] - 0.5f * Qx[1][j][i],
Q[2][j][i] - 0.5f * Qx[2][j][i]);
const float3 Q_lr = make_float3(Q[0][j][i] + 0.5f * Qx[0][j][i],
Q[1][j][i] + 0.5f * Qx[1][j][i],
Q[2][j][i] + 0.5f * Qx[2][j][i]);
//Evolve half a timestep (predictor step)
const float3 Q_r_bar = Q_rl + dt_/(2.0f*dx_) * (F_func(Q_rl, g_) - F_func(Q_rr, g_));
const float3 Q_l_bar = Q_lr + dt_/(2.0f*dx_) * (F_func(Q_ll, g_) - F_func(Q_lr, g_));
const float3 Q_r_bar = Q_rl + dt_ / (2.0f * dx_) * (F_func(Q_rl, g_) - F_func(Q_rr, g_));
const float3 Q_l_bar = Q_lr + dt_ / (2.0f * dx_) * (F_func(Q_ll, g_) - F_func(Q_lr, g_));
// Compute flux based on prediction
const float3 flux = HLL_flux(Q_l_bar, Q_r_bar, g_);
const auto [x, y, z] = HLL_flux(Q_l_bar, Q_r_bar, g_);
//Write to shared memory
F[0][j][i] = flux.x;
F[1][j][i] = flux.y;
F[2][j][i] = flux.z;
F[0][j][i] = x;
F[1][j][i] = y;
F[2][j][i] = z;
}
}
}
/**
* Computes the flux along the x axis for all faces
* Computes the flux along the y-axis for all faces
*/
__device__
void computeFluxG(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
float Qy[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
float G[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
void computeFluxG(float Q[3][BLOCK_HEIGHT + 4][BLOCK_WIDTH + 4],
float Qy[3][BLOCK_HEIGHT + 4][BLOCK_WIDTH + 4],
float G[3][BLOCK_HEIGHT + 4][BLOCK_WIDTH + 4],
const float g_, const float dy_, const float dt_) {
for (int j=threadIdx.y+1; j<BLOCK_HEIGHT+2; j+=BLOCK_HEIGHT) {
for (int i=threadIdx.x; i<BLOCK_WIDTH+4; i+=BLOCK_WIDTH) {
// Reconstruct point values of Q at the left and right hand side
// of the cell for both the left (i) and right (i+1) cell
//NOte that hu and hv are swapped ("transposing" the domain)!
const float3 Q_rl = make_float3(Q[0][j+1][i] - 0.5f*Qy[0][j+1][i],
Q[2][j+1][i] - 0.5f*Qy[2][j+1][i],
Q[1][j+1][i] - 0.5f*Qy[1][j+1][i]);
const float3 Q_rr = make_float3(Q[0][j+1][i] + 0.5f*Qy[0][j+1][i],
Q[2][j+1][i] + 0.5f*Qy[2][j+1][i],
Q[1][j+1][i] + 0.5f*Qy[1][j+1][i]);
const float3 Q_ll = make_float3(Q[0][j][i] - 0.5f*Qy[0][j][i],
Q[2][j][i] - 0.5f*Qy[2][j][i],
Q[1][j][i] - 0.5f*Qy[1][j][i]);
const float3 Q_lr = make_float3(Q[0][j][i] + 0.5f*Qy[0][j][i],
Q[2][j][i] + 0.5f*Qy[2][j][i],
Q[1][j][i] + 0.5f*Qy[1][j][i]);
//Evolve half a timestep (predictor step)
const float3 Q_r_bar = Q_rl + dt_/(2.0f*dy_) * (F_func(Q_rl, g_) - F_func(Q_rr, g_));
const float3 Q_l_bar = Q_lr + dt_/(2.0f*dy_) * (F_func(Q_ll, g_) - F_func(Q_lr, g_));
for (unsigned int j = threadIdx.y + 1; j < BLOCK_HEIGHT + 2; j += BLOCK_HEIGHT) {
for (unsigned int i = threadIdx.x; i < BLOCK_WIDTH + 4; i += BLOCK_WIDTH) {
// Reconstruct point values of Q at the left and right hand side
// of the cell for both the left (i) and right (i+1) cell
// Note that hu and hv are swapped ("transposing" the domain)!
const float3 Q_rl = make_float3(Q[0][j + 1][i] - 0.5f * Qy[0][j + 1][i],
Q[2][j + 1][i] - 0.5f * Qy[2][j + 1][i],
Q[1][j + 1][i] - 0.5f * Qy[1][j + 1][i]);
const float3 Q_rr = make_float3(Q[0][j + 1][i] + 0.5f * Qy[0][j + 1][i],
Q[2][j + 1][i] + 0.5f * Qy[2][j + 1][i],
Q[1][j + 1][i] + 0.5f * Qy[1][j + 1][i]);
const float3 Q_ll = make_float3(Q[0][j][i] - 0.5f * Qy[0][j][i],
Q[2][j][i] - 0.5f * Qy[2][j][i],
Q[1][j][i] - 0.5f * Qy[1][j][i]);
const float3 Q_lr = make_float3(Q[0][j][i] + 0.5f * Qy[0][j][i],
Q[2][j][i] + 0.5f * Qy[2][j][i],
Q[1][j][i] + 0.5f * Qy[1][j][i]);
// Evolve half a timestep (predictor step)
const float3 Q_r_bar = Q_rl + dt_ / (2.0f * dy_) * (F_func(Q_rl, g_) - F_func(Q_rr, g_));
const float3 Q_l_bar = Q_lr + dt_ / (2.0f * dy_) * (F_func(Q_ll, g_) - F_func(Q_lr, g_));
// Compute flux based on prediction
const float3 flux = HLL_flux(Q_l_bar, Q_r_bar, g_);
//Write to shared memory
//Note that we here swap hu and hv back to the original
G[0][j][i] = flux.x;
G[1][j][i] = flux.z;
G[2][j][i] = flux.y;
const auto [x, y, z] = HLL_flux(Q_l_bar, Q_r_bar, g_);
// Write to shared memory
// Note that we here swap hu and hv back to the original
G[0][j][i] = x;
G[1][j][i] = z;
G[2][j][i] = y;
}
}
}
extern "C" {
__global__ void HLL2Kernel(
int nx_, int ny_,
float dx_, float dy_, float dt_,
float g_,
float theta_,
int step_,
int boundary_conditions_,
//Input h^n
float* h0_ptr_, int h0_pitch_,
float* hu0_ptr_, int hu0_pitch_,
float* hv0_ptr_, int hv0_pitch_,
//Output h^{n+1}
float* h1_ptr_, int h1_pitch_,
float* hu1_ptr_, int hu1_pitch_,
float* hv1_ptr_, int hv1_pitch_,
//Output CFL
float* cfl_,
//Subarea of internal domain to compute
int x0=0, int y0=0,
int x1=0, int y1=0) {
const int nx_, const int ny_,
const float dx_, const float dy_, const float dt_,
const float g_,
const float theta_,
const int step_,
const int boundary_conditions_,
// Input h^n
float *h0_ptr_, const int h0_pitch_,
float *hu0_ptr_, const int hu0_pitch_,
float *hv0_ptr_, const int hv0_pitch_,
// Output h^{n+1}
float *h1_ptr_, const int h1_pitch_,
float *hu1_ptr_, const int hu1_pitch_,
float *hv1_ptr_, const int hv1_pitch_,
// Output CFL
float *cfl_,
// Subarea of internal domain to compute
const int x0 = 0, const int y0 = 0,
int x1 = 0, int y1 = 0) {
if(x1 == 0)
if (x1 == 0)
x1 = nx_;
if(y1 == 0)
if (y1 == 0)
y1 = ny_;
const unsigned int w = BLOCK_WIDTH;
const unsigned int h = BLOCK_HEIGHT;
const unsigned int gc_x = 2;
const unsigned int gc_y = 2;
const unsigned int vars = 3;
//Shared memory variables
__shared__ float Q[3][h+4][w+4];
__shared__ float Qx[3][h+4][w+4];
__shared__ float F[3][h+4][w+4];
//Read into shared memory
readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
//Step 0 => evolve x first, then y
constexpr unsigned int w = BLOCK_WIDTH;
constexpr unsigned int h = BLOCK_HEIGHT;
constexpr unsigned int gc_x = 2;
constexpr unsigned int gc_y = 2;
constexpr unsigned int vars = 3;
// Shared memory variables
__shared__ float Q[3][h + 4][w + 4];
__shared__ float Qx[3][h + 4][w + 4];
__shared__ float F[3][h + 4][w + 4];
// Read into shared memory
readBlock<w, h, gc_x, gc_y, 1, 1>(h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
// Step 0 => evolve x first, then y
if (step_ == 0) {
//Compute fluxes along the x axis and evolve
// Compute fluxes along the x-axis and evolve
minmodSlopeX<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
__syncthreads();
computeFluxF(Q, Qx, F, g_, dx_, dt_);
__syncthreads();
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
__syncthreads();
//Compute fluxes along the y axis and evolve
// Compute fluxes along the y-axis and evolve
minmodSlopeY<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
__syncthreads();
computeFluxG(Q, Qx, F, g_, dy_, dt_);
@@ -190,17 +176,17 @@ __global__ void HLL2Kernel(
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
__syncthreads();
}
//Step 1 => evolve y first, then x
// Step 1 => evolve y first, then x
else {
//Compute fluxes along the y axis and evolve
// Compute fluxes along the y-axis and evolve
minmodSlopeY<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
__syncthreads();
computeFluxG(Q, Qx, F, g_, dy_, dt_);
__syncthreads();
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
__syncthreads();
//Compute fluxes along the x axis and evolve
// Compute fluxes along the x-axis and evolve
minmodSlopeX<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
__syncthreads();
computeFluxF(Q, Qx, F, g_, dx_, dt_);
@@ -208,19 +194,16 @@ __global__ void HLL2Kernel(
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
__syncthreads();
}
// Write to main memory for all internal cells
writeBlock<w, h, gc_x, gc_y>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1, x0, y0, x1, y1);
writeBlock<w, h, gc_x, gc_y>(h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1, x0, y0, x1, y1);
writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1, x0, y0, x1, y1);
writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1, x0, y0, x1, y1);
//Compute the CFL for this block
if (cfl_ != NULL) {
// Compute the CFL for this block
if (cfl_ != nullptr) {
writeCfl<w, h, gc_x, gc_y, vars>(Q, F[0], nx_, ny_, dx_, dy_, g_, cfl_);
}
}
} // extern "C"
} // extern "C"
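These shallow-water kernels all end with the same writeCfl call, which records a per-block timestep bound. The underlying restriction is the usual CFL condition with gravity-wave speed sqrt(g h) and a scheme-dependent Courant number C:
\Delta t \le C \, \min_{i,j} \, \min\!\left( \frac{\Delta x}{|u_{ij}| + \sqrt{g h_{ij}}},\; \frac{\Delta y}{|v_{ij}| + \sqrt{g h_{ij}}} \right).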


@@ -23,88 +23,85 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "common.h"
#include "SWECommon.h"
#include "limiters.h"
__device__
void computeFluxF(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
float Qx[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
float F[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1],
void computeFluxF(float Q[3][BLOCK_HEIGHT + 4][BLOCK_WIDTH + 4],
float Qx[3][BLOCK_HEIGHT + 2][BLOCK_WIDTH + 2],
float F[3][BLOCK_HEIGHT + 1][BLOCK_WIDTH + 1],
const float g_) {
//Index of thread within block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
// Index of thread within block
{
int j=ty;
const int l = j + 2; //Skip ghost cells
for (int i=tx; i<BLOCK_WIDTH+1; i+=BLOCK_WIDTH) {
const int k = i + 1;
const unsigned int tx = threadIdx.x;
const unsigned int ty = threadIdx.y;
const unsigned int j = ty;
const unsigned int l = j + 2; // Skip ghost cells
for (unsigned int i = tx; i < BLOCK_WIDTH + 1; i += BLOCK_WIDTH) {
const unsigned int k = i + 1;
// Q at interface from the right and left
const float3 Qp = make_float3(Q[0][l][k+1] - 0.5f*Qx[0][j][i+1],
Q[1][l][k+1] - 0.5f*Qx[1][j][i+1],
Q[2][l][k+1] - 0.5f*Qx[2][j][i+1]);
const float3 Qm = make_float3(Q[0][l][k ] + 0.5f*Qx[0][j][i ],
Q[1][l][k ] + 0.5f*Qx[1][j][i ],
Q[2][l][k ] + 0.5f*Qx[2][j][i ]);
const float3 Qp = make_float3(Q[0][l][k + 1] - 0.5f * Qx[0][j][i + 1],
Q[1][l][k + 1] - 0.5f * Qx[1][j][i + 1],
Q[2][l][k + 1] - 0.5f * Qx[2][j][i + 1]);
const float3 Qm = make_float3(Q[0][l][k] + 0.5f * Qx[0][j][i],
Q[1][l][k] + 0.5f * Qx[1][j][i],
Q[2][l][k] + 0.5f * Qx[2][j][i]);
// Computed flux
const float3 flux = CentralUpwindFlux(Qm, Qp, g_);
F[0][j][i] = flux.x;
F[1][j][i] = flux.y;
F[2][j][i] = flux.z;
const auto [x, y, z] = CentralUpwindFlux(Qm, Qp, g_);
F[0][j][i] = x;
F[1][j][i] = y;
F[2][j][i] = z;
}
}
}
}
__device__
void computeFluxG(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
float Qy[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
float G[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1],
void computeFluxG(float Q[3][BLOCK_HEIGHT + 4][BLOCK_WIDTH + 4],
float Qy[3][BLOCK_HEIGHT + 2][BLOCK_WIDTH + 2],
float G[3][BLOCK_HEIGHT + 1][BLOCK_WIDTH + 1],
const float g_) {
//Index of thread within block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
for (int j=ty; j<BLOCK_HEIGHT+1; j+=BLOCK_HEIGHT) {
const int l = j + 1;
// Index of thread within block
const unsigned int tx = threadIdx.x;
const unsigned int ty = threadIdx.y;
for (unsigned int j = ty; j < BLOCK_HEIGHT + 1; j += BLOCK_HEIGHT) {
{
int i=tx;
const int k = i + 2; //Skip ghost cells
const unsigned int l = j + 1;
const unsigned int i = tx;
const unsigned int k = i + 2; // Skip ghost cells
// Q at interface from the right and left
// Note that we swap hu and hv
const float3 Qp = make_float3(Q[0][l+1][k] - 0.5f*Qy[0][j+1][i],
Q[2][l+1][k] - 0.5f*Qy[2][j+1][i],
Q[1][l+1][k] - 0.5f*Qy[1][j+1][i]);
const float3 Qm = make_float3(Q[0][l ][k] + 0.5f*Qy[0][j ][i],
Q[2][l ][k] + 0.5f*Qy[2][j ][i],
Q[1][l ][k] + 0.5f*Qy[1][j ][i]);
const float3 Qp = make_float3(Q[0][l + 1][k] - 0.5f * Qy[0][j + 1][i],
Q[2][l + 1][k] - 0.5f * Qy[2][j + 1][i],
Q[1][l + 1][k] - 0.5f * Qy[1][j + 1][i]);
const float3 Qm = make_float3(Q[0][l][k] + 0.5f * Qy[0][j][i],
Q[2][l][k] + 0.5f * Qy[2][j][i],
Q[1][l][k] + 0.5f * Qy[1][j][i]);
// Computed flux
// Note that we swap back
const float3 flux = CentralUpwindFlux(Qm, Qp, g_);
G[0][j][i] = flux.x;
G[1][j][i] = flux.z;
G[2][j][i] = flux.y;
const auto [x, y, z] = CentralUpwindFlux(Qm, Qp, g_);
G[0][j][i] = x;
G[1][j][i] = z;
G[2][j][i] = y;
}
}
}
__device__ void minmodSlopeX(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
float Qx[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
const float theta_) {
//Reconstruct slopes along x axis
for (int p=0; p<3; ++p) {
__device__ void minmodSlopeX(float Q[3][BLOCK_HEIGHT + 4][BLOCK_WIDTH + 4],
float Qx[3][BLOCK_HEIGHT + 2][BLOCK_WIDTH + 2],
const float theta_) {
// Reconstruct slopes along x-axis
for (int p = 0; p < 3; ++p) {
{
const int j = threadIdx.y+2;
for (int i=threadIdx.x+1; i<BLOCK_WIDTH+3; i+=BLOCK_WIDTH) {
Qx[p][j-2][i-1] = minmodSlope(Q[p][j][i-1], Q[p][j][i], Q[p][j][i+1], theta_);
const unsigned int j = threadIdx.y + 2;
for (unsigned int i = threadIdx.x + 1; i < BLOCK_WIDTH + 3; i += BLOCK_WIDTH) {
Qx[p][j - 2][i - 1] = minmodSlope(Q[p][j][i - 1], Q[p][j][i], Q[p][j][i + 1], theta_);
}
}
}
@@ -114,15 +111,15 @@ __device__ void minmodSlopeX(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
/**
* Reconstructs a minmod slope for a whole block along the ordinate
*/
__device__ void minmodSlopeY(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
float Qy[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
const float theta_) {
//Reconstruct slopes along y axis
for (int p=0; p<3; ++p) {
const int i = threadIdx.x + 2;
for (int j=threadIdx.y+1; j<BLOCK_HEIGHT+3; j+=BLOCK_HEIGHT) {
__device__ void minmodSlopeY(float Q[3][BLOCK_HEIGHT + 4][BLOCK_WIDTH + 4],
float Qy[3][BLOCK_HEIGHT + 2][BLOCK_WIDTH + 2],
const float theta_) {
// Reconstruct slopes along y-axis
for (int p = 0; p < 3; ++p) {
const unsigned int i = threadIdx.x + 2;
for (unsigned int j = threadIdx.y + 1; j < BLOCK_HEIGHT + 3; j += BLOCK_HEIGHT) {
{
Qy[p][j-1][i-2] = minmodSlope(Q[p][j-1][i], Q[p][j][i], Q[p][j+1][i], theta_);
Qy[p][j - 1][i - 2] = minmodSlope(Q[p][j - 1][i], Q[p][j][i], Q[p][j + 1][i], theta_);
}
}
}
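minmodSlope itself lives in limiters.h and is untouched by this change; the generalized minmod limiter it is expected to evaluate, with the parameter theta_ passed above, is
\sigma_i = \operatorname{minmod}\!\left( \theta (q_i - q_{i-1}),\; \tfrac{1}{2}(q_{i+1} - q_{i-1}),\; \theta (q_{i+1} - q_i) \right), \qquad \theta \in [1, 2],
where minmod returns the argument of smallest magnitude when all arguments share one sign, and zero otherwise.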
@@ -134,111 +131,108 @@ __device__ void minmodSlopeY(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
*/
extern "C" {
__global__ void KP07Kernel(
int nx_, int ny_,
float dx_, float dy_, float dt_,
float g_,
float theta_,
int step_order_,
int boundary_conditions_,
//Input h^n
float* h0_ptr_, int h0_pitch_,
float* hu0_ptr_, int hu0_pitch_,
float* hv0_ptr_, int hv0_pitch_,
//Output h^{n+1}
float* h1_ptr_, int h1_pitch_,
float* hu1_ptr_, int hu1_pitch_,
float* hv1_ptr_, int hv1_pitch_,
//Output CFL
float* cfl_,
//Subarea of internal domain to compute
int x0=0, int y0=0,
int x1=0, int y1=0) {
const int nx_, const int ny_,
const float dx_, const float dy_, const float dt_,
const float g_,
const float theta_,
const int step_order_,
const int boundary_conditions_,
// Input h^n
float *h0_ptr_, const int h0_pitch_,
float *hu0_ptr_, const int hu0_pitch_,
float *hv0_ptr_, const int hv0_pitch_,
// Output h^{n+1}
float *h1_ptr_, const int h1_pitch_,
float *hu1_ptr_, const int hu1_pitch_,
float *hv1_ptr_, const int hv1_pitch_,
// Output CFL
float *cfl_,
// Subarea of internal domain to compute
const int x0 = 0, const int y0 = 0,
int x1 = 0, int y1 = 0) {
if(x1 == 0)
if (x1 == 0)
x1 = nx_;
if(y1 == 0)
if (y1 == 0)
y1 = ny_;
const unsigned int w = BLOCK_WIDTH;
const unsigned int h = BLOCK_HEIGHT;
const unsigned int gc_x = 2;
const unsigned int gc_y = 2;
const unsigned int vars = 3;
//Index of thread within block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
constexpr unsigned int w = BLOCK_WIDTH;
constexpr unsigned int h = BLOCK_HEIGHT;
constexpr unsigned int gc_x = 2;
constexpr unsigned int gc_y = 2;
constexpr unsigned int vars = 3;
//Index of cell within domain
const int ti = blockDim.x*blockIdx.x + threadIdx.x + 2; //Skip global ghost cells, i.e., +2
const int tj = blockDim.y*blockIdx.y + threadIdx.y + 2;
//Shared memory variables
__shared__ float Q[3][h+4][w+4];
__shared__ float Qx[3][h+2][w+2];
__shared__ float Qy[3][h+2][w+2];
__shared__ float F[3][h+1][w+1];
__shared__ float G[3][h+1][w+1];
//Read into shared memory
readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
//Reconstruct slopes along x and axis
// Index of thread within block
const unsigned int tx = threadIdx.x;
const unsigned int ty = threadIdx.y;
// Index of cell within domain
const unsigned int ti = blockDim.x * blockIdx.x + threadIdx.x + 2; // Skip global ghost cells, i.e., +2
const unsigned int tj = blockDim.y * blockIdx.y + threadIdx.y + 2;
// Shared memory variables
__shared__ float Q[3][h + 4][w + 4];
__shared__ float Qx[3][h + 2][w + 2];
__shared__ float Qy[3][h + 2][w + 2];
__shared__ float F[3][h + 1][w + 1];
__shared__ float G[3][h + 1][w + 1];
// Read into shared memory
readBlock<w, h, gc_x, gc_y, 1, 1>(h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
// Reconstruct slopes along the x and y axis
minmodSlopeX(Q, Qx, theta_);
minmodSlopeY(Q, Qy, theta_);
__syncthreads();
//Compute fluxes along the x and y axis
// Compute fluxes along the x and y axis
computeFluxF(Q, Qx, F, g_);
computeFluxG(Q, Qy, G, g_);
__syncthreads();
//Sum fluxes and advance in time for all internal cells
if (ti > 1 && ti < nx_+2 && tj > 1 && tj < ny_+2) {
const int i = tx + 2; //Skip local ghost cells, i.e., +2
const int j = ty + 2;
Q[0][j][i] += (F[0][ty][tx] - F[0][ty ][tx+1]) * dt_ / dx_
+ (G[0][ty][tx] - G[0][ty+1][tx ]) * dt_ / dy_;
Q[1][j][i] += (F[1][ty][tx] - F[1][ty ][tx+1]) * dt_ / dx_
+ (G[1][ty][tx] - G[1][ty+1][tx ]) * dt_ / dy_;
Q[2][j][i] += (F[2][ty][tx] - F[2][ty ][tx+1]) * dt_ / dx_
+ (G[2][ty][tx] - G[2][ty+1][tx ]) * dt_ / dy_;
float* const h_row = (float*) ((char*) h1_ptr_ + h1_pitch_*tj);
float* const hu_row = (float*) ((char*) hu1_ptr_ + hu1_pitch_*tj);
float* const hv_row = (float*) ((char*) hv1_ptr_ + hv1_pitch_*tj);
// Sum fluxes and advance in time for all internal cells
if (ti > 1 && ti < nx_ + 2 && tj > 1 && tj < ny_ + 2) {
const unsigned int i = tx + 2; // Skip local ghost cells, i.e., +2
const unsigned int j = ty + 2;
Q[0][j][i] += (F[0][ty][tx] - F[0][ty][tx + 1]) * dt_ / dx_
+ (G[0][ty][tx] - G[0][ty + 1][tx]) * dt_ / dy_;
Q[1][j][i] += (F[1][ty][tx] - F[1][ty][tx + 1]) * dt_ / dx_
+ (G[1][ty][tx] - G[1][ty + 1][tx]) * dt_ / dy_;
Q[2][j][i] += (F[2][ty][tx] - F[2][ty][tx + 1]) * dt_ / dx_
+ (G[2][ty][tx] - G[2][ty + 1][tx]) * dt_ / dy_;
const auto h_row = reinterpret_cast<float *>(reinterpret_cast<char *>(h1_ptr_) + h1_pitch_ * tj);
const auto hu_row = reinterpret_cast<float *>(reinterpret_cast<char *>(hu1_ptr_) + hu1_pitch_ * tj);
const auto hv_row = reinterpret_cast<float *>(reinterpret_cast<char *>(hv1_ptr_) + hv1_pitch_ * tj);
if (getOrder(step_order_) == 2 && getStep(step_order_) == 1) {
//Write to main memory
h_row[ti] = 0.5f*(h_row[ti] + Q[0][j][i]);
hu_row[ti] = 0.5f*(hu_row[ti] + Q[1][j][i]);
hv_row[ti] = 0.5f*(hv_row[ti] + Q[2][j][i]);
}
else {
h_row[ti] = Q[0][j][i];
// Write to main memory
h_row[ti] = 0.5f * (h_row[ti] + Q[0][j][i]);
hu_row[ti] = 0.5f * (hu_row[ti] + Q[1][j][i]);
hv_row[ti] = 0.5f * (hv_row[ti] + Q[2][j][i]);
} else {
h_row[ti] = Q[0][j][i];
hu_row[ti] = Q[1][j][i];
hv_row[ti] = Q[2][j][i];
}
}
//Compute the CFL for this block
if (cfl_ != NULL) {
// Compute the CFL for this block
if (cfl_ != nullptr) {
writeCfl<w, h, gc_x, gc_y, vars>(Q, Q[0], nx_, ny_, dx_, dy_, g_, cfl_);
}
}
} //extern "C"
} // extern "C"
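The per-cell update in KP07Kernel is the standard unsplit finite-volume step; when getOrder(step_order_) == 2 and getStep(step_order_) == 1 the kernel additionally averages with the previously stored solution, which completes a TVD RK2 (Heun) step assuming the usual ping-pong arrangement where the output arrays still hold Q^n during the second stage:
E(Q)_{ij} = Q_{ij} + \frac{\Delta t}{\Delta x}\left( F_{i-1/2,j} - F_{i+1/2,j} \right) + \frac{\Delta t}{\Delta y}\left( G_{i,j-1/2} - G_{i,j+1/2} \right),
Q^{(1)} = E(Q^{n}), \qquad Q^{n+1} = \tfrac{1}{2}\left( Q^{n} + E(Q^{(1)}) \right).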


@@ -23,179 +23,170 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "common.h"
#include "SWECommon.h"
#include "limiters.h"
template <int w, int h, int gc_x, int gc_y>
template<int w, int h, int gc_x, int gc_y>
__device__
void computeFluxF(float Q[3][h+2*gc_y][w+2*gc_x],
float Qx[3][h+2*gc_y][w+2*gc_x],
float F[3][h+2*gc_y][w+2*gc_x],
void computeFluxF(float Q[3][h + 2 * gc_y][w + 2 * gc_x],
float Qx[3][h + 2 * gc_y][w + 2 * gc_x],
float F[3][h + 2 * gc_y][w + 2 * gc_x],
const float g_, const float dx_, const float dt_) {
for (int j=threadIdx.y; j<h+2*gc_y; j+=h) {
for (int i=threadIdx.x+1; i<w+2*gc_x-2; i+=w) {
// Reconstruct point values of Q at the left and right hand side
// of the cell for both the left (i) and right (i+1) cell
const float3 Q_rl = make_float3(Q[0][j][i+1] - 0.5f*Qx[0][j][i+1],
Q[1][j][i+1] - 0.5f*Qx[1][j][i+1],
Q[2][j][i+1] - 0.5f*Qx[2][j][i+1]);
const float3 Q_rr = make_float3(Q[0][j][i+1] + 0.5f*Qx[0][j][i+1],
Q[1][j][i+1] + 0.5f*Qx[1][j][i+1],
Q[2][j][i+1] + 0.5f*Qx[2][j][i+1]);
for (unsigned int j = threadIdx.y; j < h + 2 * gc_y; j += h) {
for (unsigned int i = threadIdx.x + 1; i < w + 2 * gc_x - 2; i += w) {
// Reconstruct point values of Q at the left and right hand side
// of the cell for both the left (i) and right (i+1) cell
const float3 Q_rl = make_float3(Q[0][j][i + 1] - 0.5f * Qx[0][j][i + 1],
Q[1][j][i + 1] - 0.5f * Qx[1][j][i + 1],
Q[2][j][i + 1] - 0.5f * Qx[2][j][i + 1]);
const float3 Q_rr = make_float3(Q[0][j][i + 1] + 0.5f * Qx[0][j][i + 1],
Q[1][j][i + 1] + 0.5f * Qx[1][j][i + 1],
Q[2][j][i + 1] + 0.5f * Qx[2][j][i + 1]);
const float3 Q_ll = make_float3(Q[0][j][i] - 0.5f*Qx[0][j][i],
Q[1][j][i] - 0.5f*Qx[1][j][i],
Q[2][j][i] - 0.5f*Qx[2][j][i]);
const float3 Q_lr = make_float3(Q[0][j][i] + 0.5f*Qx[0][j][i],
Q[1][j][i] + 0.5f*Qx[1][j][i],
Q[2][j][i] + 0.5f*Qx[2][j][i]);
//Evolve half a timestep (predictor step)
const float3 Q_r_bar = Q_rl + dt_/(2.0f*dx_) * (F_func(Q_rl, g_) - F_func(Q_rr, g_));
const float3 Q_l_bar = Q_lr + dt_/(2.0f*dx_) * (F_func(Q_ll, g_) - F_func(Q_lr, g_));
const float3 Q_ll = make_float3(Q[0][j][i] - 0.5f * Qx[0][j][i],
Q[1][j][i] - 0.5f * Qx[1][j][i],
Q[2][j][i] - 0.5f * Qx[2][j][i]);
const float3 Q_lr = make_float3(Q[0][j][i] + 0.5f * Qx[0][j][i],
Q[1][j][i] + 0.5f * Qx[1][j][i],
Q[2][j][i] + 0.5f * Qx[2][j][i]);
// Evolve half a timestep (predictor step)
const float3 Q_r_bar = Q_rl + dt_ / (2.0f * dx_) * (F_func(Q_rl, g_) - F_func(Q_rr, g_));
const float3 Q_l_bar = Q_lr + dt_ / (2.0f * dx_) * (F_func(Q_ll, g_) - F_func(Q_lr, g_));
// Compute flux based on prediction
const float3 flux = CentralUpwindFlux(Q_l_bar, Q_r_bar, g_);
//Write to shared memory
F[0][j][i] = flux.x;
F[1][j][i] = flux.y;
F[2][j][i] = flux.z;
}
}
}
const auto [x, y, z] = CentralUpwindFlux(Q_l_bar, Q_r_bar, g_);
template <int w, int h, int gc_x, int gc_y>
__device__
void computeFluxG(float Q[3][h+2*gc_y][w+2*gc_x],
float Qy[3][h+2*gc_y][w+2*gc_x],
float G[3][h+2*gc_y][w+2*gc_x],
const float g_, const float dy_, const float dt_) {
for (int j=threadIdx.y+1; j<h+2*gc_y-2; j+=h) {
for (int i=threadIdx.x; i<w+2*gc_x; i+=w) {
// Reconstruct point values of Q at the left and right hand side
// of the cell for both the left (i) and right (i+1) cell
//NOte that hu and hv are swapped ("transposing" the domain)!
const float3 Q_rl = make_float3(Q[0][j+1][i] - 0.5f*Qy[0][j+1][i],
Q[2][j+1][i] - 0.5f*Qy[2][j+1][i],
Q[1][j+1][i] - 0.5f*Qy[1][j+1][i]);
const float3 Q_rr = make_float3(Q[0][j+1][i] + 0.5f*Qy[0][j+1][i],
Q[2][j+1][i] + 0.5f*Qy[2][j+1][i],
Q[1][j+1][i] + 0.5f*Qy[1][j+1][i]);
const float3 Q_ll = make_float3(Q[0][j][i] - 0.5f*Qy[0][j][i],
Q[2][j][i] - 0.5f*Qy[2][j][i],
Q[1][j][i] - 0.5f*Qy[1][j][i]);
const float3 Q_lr = make_float3(Q[0][j][i] + 0.5f*Qy[0][j][i],
Q[2][j][i] + 0.5f*Qy[2][j][i],
Q[1][j][i] + 0.5f*Qy[1][j][i]);
//Evolve half a timestep (predictor step)
const float3 Q_r_bar = Q_rl + dt_/(2.0f*dy_) * (F_func(Q_rl, g_) - F_func(Q_rr, g_));
const float3 Q_l_bar = Q_lr + dt_/(2.0f*dy_) * (F_func(Q_ll, g_) - F_func(Q_lr, g_));
// Compute flux based on prediction
const float3 flux = CentralUpwindFlux(Q_l_bar, Q_r_bar, g_);
//Write to shared memory
//Note that we here swap hu and hv back to the original
G[0][j][i] = flux.x;
G[1][j][i] = flux.z;
G[2][j][i] = flux.y;
// Write to shared memory
F[0][j][i] = x;
F[1][j][i] = y;
F[2][j][i] = z;
}
}
}
template<int w, int h, int gc_x, int gc_y>
__device__
void computeFluxG(float Q[3][h + 2 * gc_y][w + 2 * gc_x],
float Qy[3][h + 2 * gc_y][w + 2 * gc_x],
float G[3][h + 2 * gc_y][w + 2 * gc_x],
const float g_, const float dy_, const float dt_) {
for (unsigned int j = threadIdx.y + 1; j < h + 2 * gc_y - 2; j += h) {
for (unsigned int i = threadIdx.x; i < w + 2 * gc_x; i += w) {
// Reconstruct point values of Q at the left and right hand side
// of the cell for both the left (i) and right (i+1) cell
// Note that hu and hv are swapped ("transposing" the domain)!
const float3 Q_rl = make_float3(Q[0][j + 1][i] - 0.5f * Qy[0][j + 1][i],
Q[2][j + 1][i] - 0.5f * Qy[2][j + 1][i],
Q[1][j + 1][i] - 0.5f * Qy[1][j + 1][i]);
const float3 Q_rr = make_float3(Q[0][j + 1][i] + 0.5f * Qy[0][j + 1][i],
Q[2][j + 1][i] + 0.5f * Qy[2][j + 1][i],
Q[1][j + 1][i] + 0.5f * Qy[1][j + 1][i]);
const float3 Q_ll = make_float3(Q[0][j][i] - 0.5f * Qy[0][j][i],
Q[2][j][i] - 0.5f * Qy[2][j][i],
Q[1][j][i] - 0.5f * Qy[1][j][i]);
const float3 Q_lr = make_float3(Q[0][j][i] + 0.5f * Qy[0][j][i],
Q[2][j][i] + 0.5f * Qy[2][j][i],
Q[1][j][i] + 0.5f * Qy[1][j][i]);
// Evolve half a timestep (predictor step)
const float3 Q_r_bar = Q_rl + dt_ / (2.0f * dy_) * (F_func(Q_rl, g_) - F_func(Q_rr, g_));
const float3 Q_l_bar = Q_lr + dt_ / (2.0f * dy_) * (F_func(Q_ll, g_) - F_func(Q_lr, g_));
// Compute flux based on prediction
const auto [x, y, z] = CentralUpwindFlux(Q_l_bar, Q_r_bar, g_);
// Write to shared memory
// Note that we here swap hu and hv back to the original
G[0][j][i] = x;
G[1][j][i] = z;
G[2][j][i] = y;
}
}
}
/**
* This unsplit kernel computes the 2D numerical scheme with a TVD RK2 time integration scheme
*/
extern "C" {
__global__ void KP07DimsplitKernel(
int nx_, int ny_,
float dx_, float dy_, float dt_,
float g_,
float theta_,
int step_,
int boundary_conditions_,
//Input h^n
float* h0_ptr_, int h0_pitch_,
float* hu0_ptr_, int hu0_pitch_,
float* hv0_ptr_, int hv0_pitch_,
//Output h^{n+1}
float* h1_ptr_, int h1_pitch_,
float* hu1_ptr_, int hu1_pitch_,
float* hv1_ptr_, int hv1_pitch_,
//Output CFL
float* cfl_,
//Subarea of internal domain to compute
int x0=0, int y0=0,
int x1=0, int y1=0) {
const int nx_, const int ny_,
const float dx_, const float dy_, const float dt_,
const float g_,
const float theta_,
const int step_,
const int boundary_conditions_,
// Input h^n
float *h0_ptr_, const int h0_pitch_,
float *hu0_ptr_, const int hu0_pitch_,
float *hv0_ptr_, const int hv0_pitch_,
// Output h^{n+1}
float *h1_ptr_, const int h1_pitch_,
float *hu1_ptr_, const int hu1_pitch_,
float *hv1_ptr_, const int hv1_pitch_,
// Output CFL
float *cfl_,
// Subarea of internal domain to compute
const int x0 = 0, const int y0 = 0,
int x1 = 0, int y1 = 0) {
if(x1 == 0)
if (x1 == 0)
x1 = nx_;
if(y1 == 0)
if (y1 == 0)
y1 = ny_;
const unsigned int w = BLOCK_WIDTH;
const unsigned int h = BLOCK_HEIGHT;
const unsigned int gc_x = 2;
const unsigned int gc_y = 2;
const unsigned int vars = 3;
//Shared memory variables
__shared__ float Q[vars][h+2*gc_y][w+2*gc_x];
__shared__ float Qx[vars][h+2*gc_y][w+2*gc_x];
__shared__ float F[vars][h+2*gc_y][w+2*gc_x];
//Read into shared memory
readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
constexpr unsigned int w = BLOCK_WIDTH;
constexpr unsigned int h = BLOCK_HEIGHT;
constexpr unsigned int gc_x = 2;
constexpr unsigned int gc_y = 2;
constexpr unsigned int vars = 3;
// Shared memory variables
__shared__ float Q[vars][h + 2 * gc_y][w + 2 * gc_x];
__shared__ float Qx[vars][h + 2 * gc_y][w + 2 * gc_x];
__shared__ float F[vars][h + 2 * gc_y][w + 2 * gc_x];
// Read into shared memory
readBlock<w, h, gc_x, gc_y, 1, 1>(h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
if (step_ == 0) {
//Along X
// Along X
minmodSlopeX<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
__syncthreads();
computeFluxF<w, h, gc_x, gc_y>(Q, Qx, F, g_, dx_, dt_);
__syncthreads();
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
__syncthreads();
//Along Y
// Along Y
minmodSlopeY<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
__syncthreads();
computeFluxG<w, h, gc_x, gc_y>(Q, Qx, F, g_, dy_, dt_);
__syncthreads();
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
__syncthreads();
}
else {
//Along Y
} else {
// Along Y
minmodSlopeY<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
__syncthreads();
computeFluxG<w, h, gc_x, gc_y>(Q, Qx, F, g_, dy_, dt_);
__syncthreads();
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
__syncthreads();
//Along X
// Along X
minmodSlopeX<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
__syncthreads();
computeFluxF<w, h, gc_x, gc_y>(Q, Qx, F, g_, dx_, dt_);
@@ -203,25 +194,15 @@ __global__ void KP07DimsplitKernel(
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
__syncthreads();
}
// Write to main memory for all internal cells
writeBlock<w, h, gc_x, gc_y>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1, x0, y0, x1, y1);
writeBlock<w, h, gc_x, gc_y>(h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1, x0, y0, x1, y1);
writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1, x0, y0, x1, y1);
writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1, x0, y0, x1, y1);
//Compute the CFL for this block
if (cfl_ != NULL) {
// Compute the CFL for this block
if (cfl_ != nullptr) {
writeCfl<w, h, gc_x, gc_y, vars>(Q, F[0], nx_, ny_, dx_, dy_, g_, cfl_);
}
}
} // extern "C"
} // extern "C"
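CentralUpwindFlux comes from SWECommon.h and is not part of this file; the Kurganov-Noelle-Petrova central-upwind flux it is presumably based on is
H_{i+1/2} = \frac{ a^{+} F(q^{-}) - a^{-} F(q^{+}) }{ a^{+} - a^{-} } + \frac{ a^{+} a^{-} }{ a^{+} - a^{-} } \left( q^{+} - q^{-} \right),
with one-sided speeds a^{+} = \max\{ \lambda_{max}(q^{-}), \lambda_{max}(q^{+}), 0 \}, a^{-} = \min\{ \lambda_{min}(q^{-}), \lambda_{min}(q^{+}), 0 \}, and q^{-}, q^{+} the reconstructed values to the left and right of the face.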


@@ -24,155 +24,153 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
/**
* Computes the flux along the x axis for all faces
* Computes the flux along the x-axis for all faces
*/
template <int block_width, int block_height>
__device__
void computeFluxF(float Q[3][block_height+2][block_width+2],
float F[3][block_height][block_width+1],
template<int block_width, int block_height>
__device__
void computeFluxF(float Q[3][block_height + 2][block_width + 2],
float F[3][block_height][block_width + 1],
const float g_, const float dx_, const float dt_) {
//Index of thread within block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
{
const int j=ty;
const int l = j + 1; //Skip ghost cells
for (int i=tx; i<block_width+1; i+=block_width) {
const int k = i;
// Index of thread within block
const unsigned int tx = threadIdx.x;
const unsigned int ty = threadIdx.y;
const unsigned int j = ty;
const unsigned int l = j + 1; // Skip ghost cells
for (unsigned int i = tx; i < block_width + 1; i += block_width) {
const unsigned int k = i;
// Q at interface from the right and left
const float3 Qp = make_float3(Q[0][l][k+1],
Q[1][l][k+1],
Q[2][l][k+1]);
const float3 Qp = make_float3(Q[0][l][k + 1],
Q[1][l][k + 1],
Q[2][l][k + 1]);
const float3 Qm = make_float3(Q[0][l][k],
Q[1][l][k],
Q[2][l][k]);
// Computed flux
const float3 flux = LxF_2D_flux(Qm, Qp, g_, dx_, dt_);
F[0][j][i] = flux.x;
F[1][j][i] = flux.y;
F[2][j][i] = flux.z;
const auto [x, y, z] = LxF_2D_flux(Qm, Qp, g_, dx_, dt_);
F[0][j][i] = x;
F[1][j][i] = y;
F[2][j][i] = z;
}
}
}
/**
* Computes the flux along the y axis for all faces
*/
template <int block_width, int block_height>
* Computes the flux along the y-axis for all faces
*/
template<int block_width, int block_height>
__device__
void computeFluxG(float Q[3][block_height+2][block_width+2],
float G[3][block_height+1][block_width],
void computeFluxG(float Q[3][block_height + 2][block_width + 2],
float G[3][block_height + 1][block_width],
const float g_, const float dy_, const float dt_) {
//Index of thread within block
const int tx = threadIdx.x;
const int ty = threadIdx.y;
for (int j=ty; j<block_height+1; j+=block_height) {
const int l = j;
// Index of thread within block
const unsigned int tx = threadIdx.x;
const unsigned int ty = threadIdx.y;
for (unsigned int j = ty; j < block_height + 1; j += block_height) {
const unsigned int l = j;
{
const int i=tx;
const int k = i + 1; //Skip ghost cells
const unsigned int i = tx;
const unsigned int k = i + 1; // Skip ghost cells
// Q at interface from the right and left
// Note that we swap hu and hv
const float3 Qp = make_float3(Q[0][l+1][k],
Q[2][l+1][k],
Q[1][l+1][k]);
const float3 Qp = make_float3(Q[0][l + 1][k],
Q[2][l + 1][k],
Q[1][l + 1][k]);
const float3 Qm = make_float3(Q[0][l][k],
Q[2][l][k],
Q[1][l][k]);
// Computed flux
// Note that we swap back
const float3 flux = LxF_2D_flux(Qm, Qp, g_, dy_, dt_);
G[0][j][i] = flux.x;
G[1][j][i] = flux.z;
G[2][j][i] = flux.y;
const auto [x, y, z] = LxF_2D_flux(Qm, Qp, g_, dy_, dt_);
G[0][j][i] = x;
G[1][j][i] = z;
G[2][j][i] = y;
}
}
}
}
extern "C" {
__global__
__global__
void LxFKernel(
int nx_, int ny_,
float dx_, float dy_, float dt_,
float g_,
int boundary_conditions_,
//Input h^n
float* h0_ptr_, int h0_pitch_,
float* hu0_ptr_, int hu0_pitch_,
float* hv0_ptr_, int hv0_pitch_,
//Output h^{n+1}
float* h1_ptr_, int h1_pitch_,
float* hu1_ptr_, int hu1_pitch_,
float* hv1_ptr_, int hv1_pitch_,
//Output CFL
float* cfl_,
//Subarea of internal domain to compute
int x0=0, int y0=0,
int x1=0, int y1=0) {
const int nx_, const int ny_,
const float dx_, const float dy_, const float dt_,
const float g_,
const int boundary_conditions_,
// Input h^n
float *h0_ptr_, const int h0_pitch_,
float *hu0_ptr_, const int hu0_pitch_,
float *hv0_ptr_, const int hv0_pitch_,
// Output h^{n+1}
float *h1_ptr_, const int h1_pitch_,
float *hu1_ptr_, const int hu1_pitch_,
float *hv1_ptr_, const int hv1_pitch_,
// Output CFL
float *cfl_,
// Subarea of internal domain to compute
const int x0 = 0, const int y0 = 0,
int x1 = 0, int y1 = 0) {
if(x1 == 0)
if (x1 == 0)
x1 = nx_;
if(y1 == 0)
if (y1 == 0)
y1 = ny_;
const unsigned int w = BLOCK_WIDTH;
const unsigned int h = BLOCK_HEIGHT;
const unsigned int gc_x = 1;
const unsigned int gc_y = 1;
const unsigned int vars = 3;
__shared__ float Q[vars][h+2][w+2];
__shared__ float F[vars][h ][w+1];
__shared__ float G[vars][h+1][w ];
//Read from global memory
readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
//Compute fluxes along the x and y axis
constexpr unsigned int w = BLOCK_WIDTH;
constexpr unsigned int h = BLOCK_HEIGHT;
constexpr unsigned int gc_x = 1;
constexpr unsigned int gc_y = 1;
constexpr unsigned int vars = 3;
__shared__ float Q[vars][h + 2][w + 2];
__shared__ float F[vars][h][w + 1];
__shared__ float G[vars][h + 1][w];
// Read from global memory
readBlock<w, h, gc_x, gc_y, 1, 1>(h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
// Compute fluxes along the x and y axis
computeFluxF<w, h>(Q, F, g_, dx_, dt_);
computeFluxG<w, h>(Q, G, g_, dy_, dt_);
__syncthreads();
//Evolve for all cells
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int i = tx + 1; //Skip local ghost cells, i.e., +1
const int j = ty + 1;
Q[0][j][i] += (F[0][ty][tx] - F[0][ty ][tx+1]) * dt_ / dx_
+ (G[0][ty][tx] - G[0][ty+1][tx ]) * dt_ / dy_;
Q[1][j][i] += (F[1][ty][tx] - F[1][ty ][tx+1]) * dt_ / dx_
+ (G[1][ty][tx] - G[1][ty+1][tx ]) * dt_ / dy_;
Q[2][j][i] += (F[2][ty][tx] - F[2][ty ][tx+1]) * dt_ / dx_
+ (G[2][ty][tx] - G[2][ty+1][tx ]) * dt_ / dy_;
// Evolve for all cells
const unsigned int tx = threadIdx.x;
const unsigned int ty = threadIdx.y;
const unsigned int i = tx + 1; // Skip local ghost cells, i.e., +1
const unsigned int j = ty + 1;
Q[0][j][i] += (F[0][ty][tx] - F[0][ty][tx + 1]) * dt_ / dx_
+ (G[0][ty][tx] - G[0][ty + 1][tx]) * dt_ / dy_;
Q[1][j][i] += (F[1][ty][tx] - F[1][ty][tx + 1]) * dt_ / dx_
+ (G[1][ty][tx] - G[1][ty + 1][tx]) * dt_ / dy_;
Q[2][j][i] += (F[2][ty][tx] - F[2][ty][tx + 1]) * dt_ / dx_
+ (G[2][ty][tx] - G[2][ty + 1][tx]) * dt_ / dy_;
__syncthreads();
//Write to main memory
writeBlock<w, h, gc_x, gc_y>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1, x0, y0, x1, y1);
// Write to main memory
writeBlock<w, h, gc_x, gc_y>(h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1, x0, y0, x1, y1);
writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1, x0, y0, x1, y1);
writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1, x0, y0, x1, y1);
//Compute the CFL for this block
if (cfl_ != NULL) {
// Compute the CFL for this block
if (cfl_ != nullptr) {
writeCfl<w, h, gc_x, gc_y, vars>(Q, Q[0], nx_, ny_, dx_, dy_, g_, cfl_);
}
}
} // extern "C"
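LxF_2D_flux is also defined in SWECommon.h; the classical one-dimensional Lax-Friedrichs flux it generalizes is
F^{LxF}_{i+1/2} = \tfrac{1}{2}\left( F(Q_i) + F(Q_{i+1}) \right) - \frac{\Delta x}{2\Delta t}\left( Q_{i+1} - Q_i \right),
though the dimension-by-dimension 2D variant commonly uses a smaller dissipation coefficient (often \Delta x / (4\Delta t)); the exact constant is in SWECommon.h, not in this diff.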


@@ -23,154 +23,131 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "common.h"
#include "SWECommon.h"
/**
* Computes the flux along the x axis for all faces
* Computes the flux along the x-axis for all faces
*/
__device__
void computeFluxF(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
float F[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
void computeFluxF(float Q[3][BLOCK_HEIGHT + 4][BLOCK_WIDTH + 4],
float F[3][BLOCK_HEIGHT + 4][BLOCK_WIDTH + 4],
const float g_, const float dx_, const float dt_) {
for (int j=threadIdx.y; j<BLOCK_HEIGHT+4; j+=BLOCK_HEIGHT) {
for (int i=threadIdx.x+1; i<BLOCK_WIDTH+2; i+=BLOCK_WIDTH) {
for (unsigned int j = threadIdx.y; j < BLOCK_HEIGHT + 4; j += BLOCK_HEIGHT) {
for (unsigned int i = threadIdx.x + 1; i < BLOCK_WIDTH + 2; i += BLOCK_WIDTH) {
// Q at interface from the right and left
const float3 Ql2 = make_float3(Q[0][j][i-1], Q[1][j][i-1], Q[2][j][i-1]);
const float3 Ql1 = make_float3(Q[0][j][i ], Q[1][j][i ], Q[2][j][i ]);
const float3 Qr1 = make_float3(Q[0][j][i+1], Q[1][j][i+1], Q[2][j][i+1]);
const float3 Qr2 = make_float3(Q[0][j][i+2], Q[1][j][i+2], Q[2][j][i+2]);
const float3 Ql2 = make_float3(Q[0][j][i - 1], Q[1][j][i - 1], Q[2][j][i - 1]);
const float3 Ql1 = make_float3(Q[0][j][i], Q[1][j][i], Q[2][j][i]);
const float3 Qr1 = make_float3(Q[0][j][i + 1], Q[1][j][i + 1], Q[2][j][i + 1]);
const float3 Qr2 = make_float3(Q[0][j][i + 2], Q[1][j][i + 2], Q[2][j][i + 2]);
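// The flux F[j][i] is reconstructed at the face between cells i and i + 1
// from this four-cell stencil (two cells on either side of the face).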
// Computed flux
const float3 flux = WAF_1D_flux(Ql2, Ql1, Qr1, Qr2, g_, dx_, dt_);
F[0][j][i] = flux.x;
F[1][j][i] = flux.y;
F[2][j][i] = flux.z;
const auto [x, y, z] = WAF_1D_flux(Ql2, Ql1, Qr1, Qr2, g_, dx_, dt_);
F[0][j][i] = x;
F[1][j][i] = y;
F[2][j][i] = z;
}
}
}
/**
* Computes the flux along the y axis for all faces
* Computes the flux along the y-axis for all faces
*/
__device__
void computeFluxG(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
float G[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
void computeFluxG(float Q[3][BLOCK_HEIGHT + 4][BLOCK_WIDTH + 4],
float G[3][BLOCK_HEIGHT + 4][BLOCK_WIDTH + 4],
const float g_, const float dy_, const float dt_) {
for (int j=threadIdx.y+1; j<BLOCK_HEIGHT+2; j+=BLOCK_HEIGHT) {
for (int i=threadIdx.x; i<BLOCK_WIDTH+4; i+=BLOCK_WIDTH) {
for (unsigned int j = threadIdx.y + 1; j < BLOCK_HEIGHT + 2; j += BLOCK_HEIGHT) {
for (unsigned int i = threadIdx.x; i < BLOCK_WIDTH + 4; i += BLOCK_WIDTH) {
// Q at interface from the right and left
// Note that we swap hu and hv
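// Swapping hu and hv lets the 1D x-direction solver be reused along y:
// by rotational invariance of the shallow-water equations, G(h, hu, hv)
// equals F(h, hv, hu) with its two momentum components swapped back,
// which is exactly what happens below.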
const float3 Ql2 = make_float3(Q[0][j-1][i], Q[2][j-1][i], Q[1][j-1][i]);
const float3 Ql1 = make_float3(Q[0][j ][i], Q[2][j ][i], Q[1][j ][i]);
const float3 Qr1 = make_float3(Q[0][j+1][i], Q[2][j+1][i], Q[1][j+1][i]);
const float3 Qr2 = make_float3(Q[0][j+2][i], Q[2][j+2][i], Q[1][j+2][i]);
const float3 Ql2 = make_float3(Q[0][j - 1][i], Q[2][j - 1][i], Q[1][j - 1][i]);
const float3 Ql1 = make_float3(Q[0][j][i], Q[2][j][i], Q[1][j][i]);
const float3 Qr1 = make_float3(Q[0][j + 1][i], Q[2][j + 1][i], Q[1][j + 1][i]);
const float3 Qr2 = make_float3(Q[0][j + 2][i], Q[2][j + 2][i], Q[1][j + 2][i]);
// Computed flux
// Note that we swap back
const float3 flux = WAF_1D_flux(Ql2, Ql1, Qr1, Qr2, g_, dy_, dt_);
G[0][j][i] = flux.x;
G[1][j][i] = flux.z;
G[2][j][i] = flux.y;
const auto [x, y, z] = WAF_1D_flux(Ql2, Ql1, Qr1, Qr2, g_, dy_, dt_);
G[0][j][i] = x;
G[1][j][i] = z;
G[2][j][i] = y;
}
}
}
extern "C" {
__global__ void WAFKernel(
int nx_, int ny_,
float dx_, float dy_, float dt_,
float g_,
int step_,
int boundary_conditions_,
//Input h^n
float* h0_ptr_, int h0_pitch_,
float* hu0_ptr_, int hu0_pitch_,
float* hv0_ptr_, int hv0_pitch_,
//Output h^{n+1}
float* h1_ptr_, int h1_pitch_,
float* hu1_ptr_, int hu1_pitch_,
float* hv1_ptr_, int hv1_pitch_,
//Subarea of internal domain to compute
int x0=0, int y0=0,
int x1=0, int y1=0) {
if(x1 == 0)
const int nx_, const int ny_,
const float dx_, const float dy_, const float dt_,
const float g_,
const int step_,
const int boundary_conditions_,
// Input h^n
float *h0_ptr_, const int h0_pitch_,
float *hu0_ptr_, const int hu0_pitch_,
float *hv0_ptr_, const int hv0_pitch_,
// Output h^{n+1}
float *h1_ptr_, const int h1_pitch_,
float *hu1_ptr_, const int hu1_pitch_,
float *hv1_ptr_, const int hv1_pitch_,
// Subarea of internal domain to compute
const int x0 = 0, const int y0 = 0,
int x1 = 0, int y1 = 0) {
if (x1 == 0)
x1 = nx_;
if(y1 == 0)
y1 = ny_;
const unsigned int w = BLOCK_WIDTH;
const unsigned int h = BLOCK_HEIGHT;
const unsigned int gc_x = 2;
const unsigned int gc_y = 2;
const unsigned int vars = 3;
//Shared memory variables
__shared__ float Q[3][h+4][w+4];
__shared__ float F[3][h+4][w+4];
//Read into shared memory Q from global memory
readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
if (y1 == 0)
y1 = ny_;
constexpr unsigned int w = BLOCK_WIDTH;
constexpr unsigned int h = BLOCK_HEIGHT;
constexpr unsigned int gc_x = 2;
constexpr unsigned int gc_y = 2;
constexpr unsigned int vars = 3;
// Shared memory variables
__shared__ float Q[3][h + 4][w + 4];
__shared__ float F[3][h + 4][w + 4];
// Read into shared memory Q from global memory
readBlock<w, h, gc_x, gc_y, 1, 1>(h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_, x0, y0, x1, y1);
__syncthreads();
//Step 0 => evolve x first, then y
// Step 0 => evolve x first, then y
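// Alternating the sweep order between consecutive steps (x-y, then y-x)
// lets the leading dimensional-splitting errors cancel over pairs of steps.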
if (step_ == 0) {
//Compute fluxes along the x axis and evolve
// Compute fluxes along the x-axis and evolve
computeFluxF(Q, F, g_, dx_, dt_);
__syncthreads();
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
__syncthreads();
//Compute fluxes along the y axis and evolve
// Compute fluxes along the y-axis and evolve
computeFluxG(Q, F, g_, dy_, dt_);
__syncthreads();
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
__syncthreads();
}
//Step 1 => evolve y first, then x
// Step 1 => evolve y first, then x
else {
//Compute fluxes along the y axis and evolve
// Compute fluxes along the y-axis and evolve
computeFluxG(Q, F, g_, dy_, dt_);
__syncthreads();
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
__syncthreads();
//Compute fluxes along the x axis and evolve
// Compute fluxes along the x-axis and evolve
computeFluxF(Q, F, g_, dx_, dt_);
__syncthreads();
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
@ -178,11 +155,9 @@ __global__ void WAFKernel(
}
// Write to main memory for all internal cells
writeBlock<w, h, gc_x, gc_y>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1, x0, y0, x1, y1);
writeBlock<w, h, gc_x, gc_y>(h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1, x0, y0, x1, y1);
writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1, x0, y0, x1, y1);
writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1, x0, y0, x1, y1);
}
} // extern "C"
} // extern "C"
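For reference, a native CUDA launch of WAFKernel might look like the sketch below. This is illustrative only: the wrapper name stepWAF, the step counter n and the boundary-condition code bc are placeholders, BLOCK_WIDTH and BLOCK_HEIGHT are assumed to come from common.h, and passing 0 for x0/y0/x1/y1 relies on the kernel treating x1 == 0 and y1 == 0 as "the full internal domain", as shown above.

// Hypothetical host-side wrapper (sketch, not part of this repository).
void stepWAF(const int nx, const int ny,
             const float dx, const float dy, const float dt,
             const float g, const int n, const int bc,
             float *h0, const int h0_pitch,
             float *hu0, const int hu0_pitch,
             float *hv0, const int hv0_pitch,
             float *h1, const int h1_pitch,
             float *hu1, const int hu1_pitch,
             float *hv1, const int hv1_pitch) {
    const dim3 block(BLOCK_WIDTH, BLOCK_HEIGHT);
    const dim3 grid((nx + BLOCK_WIDTH - 1) / BLOCK_WIDTH,
                    (ny + BLOCK_HEIGHT - 1) / BLOCK_HEIGHT);
    // The parity of the step counter selects the sweep order (x-y or y-x),
    // so consecutive calls alternate it.
    WAFKernel<<<grid, block>>>(nx, ny, dx, dy, dt, g, n % 2, bc,
                               h0, h0_pitch, hu0, hu0_pitch, hv0, hv0_pitch,
                               h1, h1_pitch, hu1, hu1_pitch, hv1, hv1_pitch,
                               0, 0, 0, 0);
}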