mirror of https://github.com/smyalygames/FiniteVolumeGPU.git (synced 2025-05-18 06:24:13 +02:00)
commit 51b54b7711

Autotuning.ipynb (263 changed lines)
@ -228,7 +228,7 @@
|
||||
"CUDA version (9, 1, 0)\n",
|
||||
"Driver version 9010\n",
|
||||
"Using 'GeForce 840M' GPU\n",
|
||||
"Created context handle <879048629408>\n",
|
||||
"Created context handle <694827722560>\n",
|
||||
"Using CUDA cache dir c:\\Users\\anbro\\Documents\\projects\\ShallowWaterGPU\\GPUSimulators\\cuda_cache\n",
|
||||
"Autotuning enabled. It may take several minutes to run the code the first time: have patience\n"
|
||||
]
|
||||
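The log output above comes from constructing the CUDA context with autotuning enabled. A minimal sketch of the corresponding notebook cell, assuming the refactored CudaContext module introduced later in this commit:

    from GPUSimulators import CudaContext

    # Constructing the context prints the CUDA/driver versions and the cache dir,
    # and runs the autotuner the first time the code is used on a machine.
    ctx = CudaContext.CudaContext(autotuning=True)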
@ -247,13 +247,13 @@
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"gen_data: 3115.227938 ms\n"
|
||||
"gen_data: 1647.211552 ms\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"data": {
|
||||
"text/plain": [
|
||||
"<matplotlib.image.AxesImage at 0xccab2d4c18>"
|
||||
"<matplotlib.image.AxesImage at 0xa1c91aa390>"
|
||||
]
|
||||
},
|
||||
"execution_count": 8,
|
||||
@ -328,14 +328,238 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 12,
|
||||
"execution_count": 9,
|
||||
"metadata": {
|
||||
"scrolled": false
|
||||
},
|
||||
"outputs": [],
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"LxF\n",
|
||||
"[63x63] => 107.3 (0.000185)\n",
|
||||
"[127x127] => 165.6 (0.000487)\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stderr",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"C:\\Users\\anbro\\AppData\\Local\\Continuum\\anaconda3\\lib\\site-packages\\ipykernel_launcher.py:22: RuntimeWarning: invalid value encountered in sqrt\n"
|
||||
]
|
||||
},
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"[191x191] => 183.4 (0.000995)\n",
|
||||
"[255x255] => 180.0 (0.001806)\n",
|
||||
"[319x319] => 185.8 (0.002738)\n",
|
||||
"[383x383] => 187.3 (0.003915)\n",
|
||||
"[447x447] => 189.7 (0.005266)\n",
|
||||
"[511x511] => 191.8 (0.006806)\n",
|
||||
"[639x639] => 193.6 (0.010548)\n",
|
||||
"[767x767] => 193.7 (0.015182)\n",
|
||||
"[895x895] => 195.6 (0.020481)\n",
|
||||
"[1023x1023] => 195.0 (0.026839)\n",
|
||||
"[1151x1151] => 195.8 (0.033822)\n",
|
||||
"[1279x1279] => 196.1 (0.041711)\n",
|
||||
"[1407x1407] => 196.2 (0.050439)\n",
|
||||
"[1535x1535] => 196.4 (0.059986)\n",
|
||||
"[1663x1663] => 196.6 (0.070330)\n",
|
||||
"[1791x1791] => 196.7 (0.081546)\n",
|
||||
"[1919x1919] => 196.9 (0.093511)\n",
|
||||
"[2047x2047] => 202.9 (0.103257)\n",
|
||||
"[2303x2303] => 210.7 (0.125838)\n",
|
||||
"[2559x2559] => 208.0 (0.157417)\n",
|
||||
"[2815x2815] => 211.6 (0.187229)\n",
|
||||
"[3071x3071] => 208.7 (0.225954)\n",
|
||||
"[3327x3327] => 214.2 (0.258395)\n",
|
||||
"[3583x3583] => 214.2 (0.299629)\n",
|
||||
"[3839x3839] => 214.2 (0.343982)\n",
|
||||
"[4095x4095] => 214.9 (0.390088)\n",
|
||||
"FORCE\n",
|
||||
"[63x63] => 94.3 (0.000210)\n",
|
||||
"[127x127] => 136.5 (0.000591)\n",
|
||||
"[191x191] => 147.0 (0.001241)\n",
|
||||
"[255x255] => 148.5 (0.002189)\n",
|
||||
"[319x319] => 151.6 (0.003357)\n",
|
||||
"[383x383] => 153.0 (0.004793)\n",
|
||||
"[447x447] => 153.9 (0.006494)\n",
|
||||
"[511x511] => 155.0 (0.008421)\n",
|
||||
"[639x639] => 156.4 (0.013056)\n",
|
||||
"[767x767] => 156.5 (0.018790)\n",
|
||||
"[895x895] => 157.0 (0.025514)\n",
|
||||
"[1023x1023] => 143.6 (0.036450)\n",
|
||||
"[1151x1151] => 143.6 (0.046115)\n",
|
||||
"[1279x1279] => 143.8 (0.056865)\n",
|
||||
"[1407x1407] => 143.9 (0.068797)\n",
|
||||
"[1535x1535] => 144.0 (0.081832)\n",
|
||||
"[1663x1663] => 144.0 (0.096007)\n",
|
||||
"[1791x1791] => 144.0 (0.111343)\n",
|
||||
"[1919x1919] => 144.2 (0.127712)\n",
|
||||
"[2047x2047] => 151.7 (0.138153)\n",
|
||||
"[2303x2303] => 147.3 (0.180021)\n",
|
||||
"[2559x2559] => 154.3 (0.212248)\n",
|
||||
"[2815x2815] => 158.3 (0.250279)\n",
|
||||
"[3071x3071] => 156.9 (0.300547)\n",
|
||||
"[3327x3327] => 158.4 (0.349353)\n",
|
||||
"[3583x3583] => 158.4 (0.405175)\n",
|
||||
"[3839x3839] => 158.4 (0.465201)\n",
|
||||
"[4095x4095] => 158.4 (0.529337)\n",
|
||||
"HLL\n",
|
||||
"[63x63] => 65.7 (0.000302)\n",
|
||||
"[127x127] => 98.6 (0.000818)\n",
|
||||
"[191x191] => 108.1 (0.001688)\n",
|
||||
"[255x255] => 109.2 (0.002977)\n",
|
||||
"[319x319] => 111.9 (0.004546)\n",
|
||||
"[383x383] => 113.2 (0.006482)\n",
|
||||
"[447x447] => 113.7 (0.008785)\n",
|
||||
"[511x511] => 114.4 (0.011411)\n",
|
||||
"[639x639] => 115.3 (0.017713)\n",
|
||||
"[767x767] => 115.6 (0.025454)\n",
|
||||
"[895x895] => 105.7 (0.037888)\n",
|
||||
"[1023x1023] => 105.8 (0.049473)\n",
|
||||
"[1151x1151] => 105.9 (0.062558)\n",
|
||||
"[1279x1279] => 106.0 (0.077148)\n",
|
||||
"[1407x1407] => 106.1 (0.093290)\n",
|
||||
"[1535x1535] => 109.8 (0.107271)\n",
|
||||
"[1663x1663] => 106.2 (0.130195)\n",
|
||||
"[1791x1791] => 107.7 (0.148973)\n",
|
||||
"[1919x1919] => 115.0 (0.160104)\n",
|
||||
"[2047x2047] => 113.3 (0.184913)\n",
|
||||
"[2303x2303] => 111.9 (0.236908)\n",
|
||||
"[2559x2559] => 116.6 (0.280840)\n",
|
||||
"[2815x2815] => 116.6 (0.339777)\n",
|
||||
"[3071x3071] => 116.6 (0.404268)\n",
|
||||
"[3327x3327] => 116.6 (0.474572)\n",
|
||||
"[3583x3583] => 116.7 (0.550240)\n",
|
||||
"[3839x3839] => 116.7 (0.631563)\n",
|
||||
"[4095x4095] => 116.7 (0.718161)\n",
|
||||
"HLL2\n",
|
||||
"[63x63] => 44.2 (0.000449)\n",
|
||||
"[127x127] => 63.0 (0.001280)\n",
|
||||
"[191x191] => 68.4 (0.002666)\n",
|
||||
"[255x255] => 69.2 (0.004698)\n",
|
||||
"[319x319] => 70.6 (0.007204)\n",
|
||||
"[383x383] => 71.1 (0.010314)\n",
|
||||
"[447x447] => 71.6 (0.013956)\n",
|
||||
"[511x511] => 72.0 (0.018146)\n",
|
||||
"[639x639] => 72.4 (0.028204)\n",
|
||||
"[767x767] => 72.5 (0.040545)\n",
|
||||
"[895x895] => 72.8 (0.055047)\n",
|
||||
"[1023x1023] => 72.8 (0.071828)\n",
|
||||
"[1151x1151] => 66.5 (0.099652)\n",
|
||||
"[1279x1279] => 69.8 (0.117195)\n",
|
||||
"[1407x1407] => 67.0 (0.147833)\n",
|
||||
"[1535x1535] => 71.3 (0.165185)\n",
|
||||
"[1663x1663] => 71.2 (0.194123)\n",
|
||||
"[1791x1791] => 72.1 (0.222351)\n",
|
||||
"[1919x1919] => 70.3 (0.261847)\n",
|
||||
"[2047x2047] => 73.2 (0.286228)\n",
|
||||
"[2303x2303] => 72.0 (0.368479)\n",
|
||||
"[2559x2559] => 73.2 (0.447096)\n",
|
||||
"[2815x2815] => 73.2 (0.541084)\n",
|
||||
"[3071x3071] => 73.2 (0.643925)\n",
|
||||
"[3327x3327] => 73.2 (0.755588)\n",
|
||||
"[3583x3583] => 73.3 (0.876222)\n",
|
||||
"[3839x3839] => 73.3 (1.005958)\n",
|
||||
"[4095x4095] => 73.3 (1.144158)\n",
|
||||
"KP07\n",
|
||||
"[63x63] => 69.9 (0.000284)\n",
|
||||
"[127x127] => 95.0 (0.000849)\n",
|
||||
"[191x191] => 101.7 (0.001794)\n",
|
||||
"[255x255] => 101.3 (0.003209)\n",
|
||||
"[319x319] => 106.9 (0.004760)\n",
|
||||
"[383x383] => 107.1 (0.006850)\n",
|
||||
"[447x447] => 109.2 (0.009150)\n",
|
||||
"[511x511] => 108.0 (0.012088)\n",
|
||||
"[639x639] => 111.6 (0.018295)\n",
|
||||
"[767x767] => 111.6 (0.026361)\n",
|
||||
"[895x895] => 102.4 (0.039123)\n",
|
||||
"[1023x1023] => 102.2 (0.051186)\n",
|
||||
"[1151x1151] => 102.3 (0.064764)\n",
|
||||
"[1279x1279] => 103.4 (0.079074)\n",
|
||||
"[1407x1407] => 103.2 (0.095876)\n",
|
||||
"[1535x1535] => 106.3 (0.110860)\n",
|
||||
"[1663x1663] => 103.1 (0.134182)\n",
|
||||
"[1791x1791] => 107.7 (0.148853)\n",
|
||||
"[1919x1919] => 105.5 (0.174575)\n",
|
||||
"[2047x2047] => 111.4 (0.188084)\n",
|
||||
"[2303x2303] => 113.5 (0.233650)\n",
|
||||
"[2559x2559] => 114.0 (0.287327)\n",
|
||||
"[2815x2815] => 113.7 (0.348536)\n",
|
||||
"[3071x3071] => 113.2 (0.416533)\n",
|
||||
"[3327x3327] => 113.7 (0.486893)\n",
|
||||
"[3583x3583] => 113.5 (0.565573)\n",
|
||||
"[3839x3839] => 113.5 (0.649058)\n",
|
||||
"[4095x4095] => 113.6 (0.738275)\n",
|
||||
"KP07_dimsplit\n",
|
||||
"[63x63] => 49.9 (0.000397)\n",
|
||||
"[127x127] => 71.7 (0.001125)\n",
|
||||
"[191x191] => 76.8 (0.002374)\n",
|
||||
"[255x255] => 77.5 (0.004197)\n",
|
||||
"[319x319] => 79.0 (0.006437)\n",
|
||||
"[383x383] => 79.8 (0.009189)\n",
|
||||
"[447x447] => 80.3 (0.012449)\n",
|
||||
"[511x511] => 80.6 (0.016191)\n",
|
||||
"[639x639] => 81.1 (0.025171)\n",
|
||||
"[767x767] => 81.3 (0.036181)\n",
|
||||
"[895x895] => 74.3 (0.053902)\n",
|
||||
"[1023x1023] => 74.4 (0.070335)\n",
|
||||
"[1151x1151] => 76.2 (0.086896)\n",
|
||||
"[1279x1279] => 74.5 (0.109725)\n",
|
||||
"[1407x1407] => 74.6 (0.132712)\n",
|
||||
"[1535x1535] => 79.4 (0.148342)\n",
|
||||
"[1663x1663] => 78.3 (0.176547)\n",
|
||||
"[1791x1791] => 81.3 (0.197279)\n",
|
||||
"[1919x1919] => 78.5 (0.234550)\n",
|
||||
"[2047x2047] => 82.0 (0.255396)\n",
|
||||
"[2303x2303] => 81.0 (0.327297)\n",
|
||||
"[2559x2559] => 82.0 (0.399197)\n",
|
||||
"[2815x2815] => 82.0 (0.483034)\n",
|
||||
"[3071x3071] => 82.0 (0.574737)\n",
|
||||
"[3327x3327] => 82.1 (0.674395)\n",
|
||||
"[3583x3583] => 82.1 (0.782180)\n",
|
||||
"[3839x3839] => 82.1 (0.897551)\n",
|
||||
"[4095x4095] => 82.1 (1.020911)\n",
|
||||
"WAF\n",
|
||||
"[63x63] => 32.8 (0.000605)\n",
|
||||
"[127x127] => 45.6 (0.001768)\n",
|
||||
"[191x191] => 53.9 (0.003381)\n",
|
||||
"[255x255] => 54.3 (0.005985)\n",
|
||||
"[319x319] => 57.7 (0.008821)\n",
|
||||
"[383x383] => 56.9 (0.012893)\n",
|
||||
"[447x447] => 59.3 (0.016840)\n",
|
||||
"[511x511] => 58.8 (0.022214)\n",
|
||||
"[639x639] => 59.6 (0.034278)\n",
|
||||
"[767x767] => 60.1 (0.048942)\n",
|
||||
"[895x895] => 55.3 (0.072483)\n",
|
||||
"[1023x1023] => 55.4 (0.094402)\n",
|
||||
"[1151x1151] => 55.7 (0.119006)\n",
|
||||
"[1279x1279] => 55.0 (0.148746)\n",
|
||||
"[1407x1407] => 55.8 (0.177399)\n",
|
||||
"[1535x1535] => 58.7 (0.200663)\n",
|
||||
"[1663x1663] => 57.8 (0.239299)\n",
|
||||
"[1791x1791] => 59.6 (0.269144)\n",
|
||||
"[1919x1919] => 61.1 (0.301218)\n",
|
||||
"[2047x2047] => 61.2 (0.342070)\n",
|
||||
"[2303x2303] => 61.3 (0.432280)\n",
|
||||
"[2559x2559] => 61.0 (0.537125)\n",
|
||||
"[2815x2815] => 61.1 (0.648336)\n",
|
||||
"[3071x3071] => 61.3 (0.769734)\n",
|
||||
"[3327x3327] => 61.4 (0.901199)\n",
|
||||
"[3583x3583] => 61.1 (1.049726)\n",
|
||||
"[3839x3839] => 61.3 (1.202961)\n",
|
||||
"[4095x4095] => 61.4 (1.366446)\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"run_simulation = False\n",
|
||||
"run_simulation = True\n",
|
||||
"sizes = list(range(64, 512, 64)) + list(range(512, 2048, 128)) + list(range(2048, 4096, 256)) + [4096]\n",
|
||||
"simulators = [LxF.LxF, FORCE.FORCE, HLL.HLL, HLL2.HLL2, KP07.KP07, KP07_dimsplit.KP07_dimsplit, WAF.WAF]\n",
|
||||
"if (run_simulation):\n",
|
||||
" megacells = {}\n",
|
||||
" for simulator in simulators:\n",
|
||||
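Each output line above reports the domain size, the measured throughput in megacells per second, and a timing in seconds. A hedged sketch of how such a throughput figure is computed; the helper name and the iteration count are illustrative assumptions, not taken from the notebook:

    def megacells_per_second(nx, ny, n_timesteps, elapsed_secs):
        """Million cell updates per second for an nx-by-ny domain."""
        return (nx * ny * n_timesteps) / (elapsed_secs * 1.0e6)

    # If the 1023x1023 LxF run above did 5 timesteps in 0.026839 s, this gives ~195.0
    print(megacells_per_second(1023, 1023, 5, 0.026839))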
@ -388,7 +612,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"execution_count": 10,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@ -412,7 +636,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 14,
|
||||
"execution_count": 11,
|
||||
"metadata": {
|
||||
"scrolled": false
|
||||
},
|
||||
@ -423,7 +647,7 @@
|
||||
"Text(0.5,0,'nx')"
|
||||
]
|
||||
},
|
||||
"execution_count": 14,
|
||||
"execution_count": 11,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
},
|
||||
@ -450,7 +674,7 @@
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 15,
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
@ -459,7 +683,7 @@
|
||||
"Text(0.5,0,'nx')"
|
||||
]
|
||||
},
|
||||
"execution_count": 15,
|
||||
"execution_count": 12,
|
||||
"metadata": {},
|
||||
"output_type": "execute_result"
|
||||
},
|
||||
@ -487,6 +711,23 @@
|
||||
"plt.xlabel(\"nx\")"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": 13,
|
||||
"metadata": {},
|
||||
"outputs": [
|
||||
{
|
||||
"name": "stdout",
|
||||
"output_type": "stream",
|
||||
"text": [
|
||||
"False\n"
|
||||
]
|
||||
}
|
||||
],
|
||||
"source": [
|
||||
"print(type(None) == None)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"cell_type": "code",
|
||||
"execution_count": null,
|
||||
|
File diff suppressed because one or more lines are too long

EulerTesting.ipynb (499239 lines, new file): diff suppressed because one or more lines are too long
@ -29,13 +29,13 @@ from socket import gethostname
|
||||
import pycuda.driver as cuda
|
||||
|
||||
|
||||
from GPUSimulators import Common, Simulator
|
||||
from GPUSimulators import Common, Simulator, CudaContext
|
||||
|
||||
class Autotuner:
|
||||
def __init__(self,
|
||||
nx=2048, ny=2048,
|
||||
block_widths=range(8, 32, 2),
|
||||
block_heights=range(8, 32, 2)):
|
||||
block_widths=range(8, 32, 1),
|
||||
block_heights=range(8, 32, 1)):
|
||||
logger = logging.getLogger(__name__)
|
||||
self.filename = "autotuning_data_" + gethostname() + ".npz"
|
||||
self.nx = nx
|
||||
@ -60,7 +60,7 @@ class Autotuner:
|
||||
return
|
||||
|
||||
# Set arguments to send to the simulators during construction
|
||||
context = Common.CudaContext(autotuning=False)
|
||||
context = CudaContext.CudaContext(autotuning=False)
|
||||
g = 9.81
|
||||
h0, hu0, hv0, dx, dy, dt = Autotuner.gen_test_data(nx=self.nx, ny=self.ny, g=g)
|
||||
arguments = {
|
||||
@ -239,19 +239,21 @@ class Autotuner:
|
||||
h = np.zeros((ny, nx), dtype=np.float32);
|
||||
hu = np.zeros((ny, nx), dtype=np.float32);
|
||||
hv = np.zeros((ny, nx), dtype=np.float32);
|
||||
|
||||
x = dx*(np.arange(0, nx, dtype=np.float32)+0.5) - x_center
|
||||
y = dy*(np.arange(0, ny, dtype=np.float32)+0.5) - y_center
|
||||
|
||||
extent = 1.0/np.sqrt(2.0)
|
||||
x = (dx*(np.arange(0, nx, dtype=np.float32)+0.5) - x_center) / size
|
||||
y = (dy*(np.arange(0, ny, dtype=np.float32)+0.5) - y_center) / size
|
||||
xv, yv = np.meshgrid(x, y, sparse=False, indexing='xy')
|
||||
r = np.sqrt(xv**2 + yv**2)
|
||||
r = np.minimum(1.0, np.sqrt(xv**2 + yv**2))
|
||||
xv = None
|
||||
yv = None
|
||||
gc.collect()
|
||||
|
||||
|
||||
#Generate highres
|
||||
h = 0.5 + 0.1*0.5*(1.0 + np.cos(np.pi*r/size)) * (r < size)
|
||||
hu = 0.1*0.5*(1.0 + np.cos(np.pi*r/size)) * (r < size)
|
||||
hv = 0.1*0.5*(1.0 + np.cos(np.pi*r/size)) * (r < size)
|
||||
cos = np.cos(np.pi*r)
|
||||
h = 0.5 + 0.1*0.5*(1.0 + cos)
|
||||
hu = 0.1*0.5*(1.0 + cos)
|
||||
hv = hu.copy()
|
||||
|
||||
scale = 0.7
|
||||
max_h_estimate = 0.6
|
||||
|
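The hunk above replaces the radially clipped bump with a cosine bump over a radius that is normalized by size and clamped to 1. A standalone sketch of the new test data generation; the function name and signature are chosen here for illustration:

    import numpy as np

    def cosine_bump(nx, ny, dx, dy, x_center, y_center, size):
        # Cell-centered coordinates, normalized so the bump has unit radius
        x = (dx * (np.arange(nx, dtype=np.float32) + 0.5) - x_center) / size
        y = (dy * (np.arange(ny, dtype=np.float32) + 0.5) - y_center) / size
        xv, yv = np.meshgrid(x, y, indexing='xy')
        r = np.minimum(1.0, np.sqrt(xv**2 + yv**2))   # clamp so cos(pi*r) flattens outside the bump
        cos = np.cos(np.pi * r)
        h = 0.5 + 0.1 * 0.5 * (1.0 + cos)             # water depth
        hu = 0.1 * 0.5 * (1.0 + cos)                  # x-momentum
        hv = hu.copy()                                # y-momentum
        return h, hu, hv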
@ -24,22 +24,171 @@ import os
|
||||
|
||||
import numpy as np
|
||||
import time
|
||||
import signal
|
||||
import subprocess
|
||||
import tempfile
|
||||
import re
|
||||
import io
|
||||
import hashlib
|
||||
import logging
|
||||
import gc
|
||||
import netCDF4
|
||||
import json
|
||||
|
||||
import pycuda.compiler as cuda_compiler
|
||||
import pycuda.gpuarray
|
||||
import pycuda.driver as cuda
|
||||
from pycuda.tools import PageLockedMemoryPool
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def safeCall(cmd):
|
||||
logger = logging.getLogger(__name__)
|
||||
try:
|
||||
#git rev-parse HEAD
|
||||
current_dir = os.path.dirname(os.path.realpath(__file__))
|
||||
params = dict()
|
||||
params['stderr'] = subprocess.STDOUT
|
||||
params['cwd'] = current_dir
|
||||
params['universal_newlines'] = True #text=True in more recent python
|
||||
params['shell'] = False
|
||||
if os.name == 'nt':
|
||||
params['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
|
||||
stdout = subprocess.check_output(cmd, **params)
|
||||
except subprocess.CalledProcessError as e:
|
||||
output = e.output
|
||||
logger.error("Git failed, \nReturn code: " + str(e.returncode) + "\nOutput: " + output)
|
||||
raise e
|
||||
|
||||
return stdout
|
||||
|
||||
def getGitHash():
|
||||
return safeCall(["git", "rev-parse", "HEAD"])
|
||||
|
||||
def getGitStatus():
|
||||
return safeCall(["git", "status", "--porcelain", "-uno"])
|
||||
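A brief hedged example of how the two git helpers above would be used to stamp simulation output (must be run from inside the repository checkout):

    print("commit:  ", getGitHash().strip())
    print("modified:", getGitStatus().strip())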
|
||||
def toJson(in_dict, compressed=True):
|
||||
"""
|
||||
Creates JSON string from a dictionary
|
||||
"""
|
||||
logger = logging.getLogger(__name__)
|
||||
out_dict = in_dict.copy()
|
||||
for key in out_dict:
|
||||
if isinstance(out_dict[key], np.ndarray):
|
||||
out_dict[key] = out_dict[key].tolist()
|
||||
else:
|
||||
try:
|
||||
json.dumps(out_dict[key])
|
||||
except:
|
||||
value = str(out_dict[key])
|
||||
logger.warning("JSON: Converting {:s} to string ({:s})".format(key, value))
|
||||
out_dict[key] = value
|
||||
return json.dumps(out_dict)
|
||||
|
||||
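A small hedged usage example for toJson above; the argument values are placeholders:

    import numpy as np

    sim_args = {'nx': 1024, 'ny': 1024, 'g': 9.81,
                'h0': np.zeros((2, 2), dtype=np.float32)}
    print(toJson(sim_args))   # the ndarray is converted to a nested list in the JSON string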
def runSimulation(simulator, simulator_args, outfile, save_times, save_var_names=[]):
|
||||
"""
|
||||
Runs a simulation, and stores output in netcdf file. Stores the times given in
|
||||
save_times, and saves all of the variables in list save_var_names. Elements in
|
||||
save_var_names can be set to None if you do not want to save them
|
||||
"""
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
assert len(save_times) > 0, "Need to specify which times to save"
|
||||
|
||||
with Timer("construct") as t:
|
||||
sim = simulator(**simulator_args)
|
||||
logger.info("Constructed in " + str(t.secs) + " seconds")
|
||||
|
||||
#Create netcdf file and simulate
|
||||
with DataDumper(outfile, mode='w', clobber=False) as outdata:
|
||||
|
||||
#Create attributes (metadata)
|
||||
outdata.ncfile.created = time.ctime(time.time())
|
||||
outdata.ncfile.git_hash = getGitHash()
|
||||
outdata.ncfile.git_status = getGitStatus()
|
||||
outdata.ncfile.simulator = str(simulator)
|
||||
outdata.ncfile.sim_args = toJson(simulator_args)
|
||||
|
||||
#Create dimensions
|
||||
outdata.ncfile.createDimension('time', len(save_times))
|
||||
outdata.ncfile.createDimension('x', simulator_args['nx'])
|
||||
outdata.ncfile.createDimension('y', simulator_args['ny'])
|
||||
|
||||
#Create variables for dimensions
|
||||
ncvars = {}
|
||||
ncvars['time'] = outdata.ncfile.createVariable('time', np.dtype('float32').char, 'time')
|
||||
ncvars['x'] = outdata.ncfile.createVariable( 'x', np.dtype('float32').char, 'x')
|
||||
ncvars['y'] = outdata.ncfile.createVariable( 'y', np.dtype('float32').char, 'y')
|
||||
|
||||
#Fill variables with proper values
|
||||
ncvars['time'][:] = save_times
|
||||
extent = sim.getExtent()
|
||||
ncvars['x'][:] = np.linspace(extent[0], extent[1], simulator_args['nx'])
|
||||
ncvars['y'][:] = np.linspace(extent[2], extent[3], simulator_args['ny'])
|
||||
|
||||
#Choose which variables to download (prune None from list, but keep the index)
|
||||
download_vars = []
|
||||
for i, var_name in enumerate(save_var_names):
|
||||
if var_name is not None:
|
||||
download_vars += [i]
|
||||
save_var_names = list(save_var_names[i] for i in download_vars)
|
||||
|
||||
#Create variables
|
||||
for var_name in save_var_names:
|
||||
ncvars[var_name] = outdata.ncfile.createVariable(var_name, np.dtype('float32').char, ('time', 'y', 'x'), zlib=True, least_significant_digit=3)
|
||||
|
||||
#Create step sizes between each save
|
||||
t_steps = np.empty_like(save_times)
|
||||
t_steps[0] = save_times[0]
|
||||
t_steps[1:] = save_times[1:] - save_times[0:-1]
|
||||
|
||||
#Start simulation loop
|
||||
progress_printer = ProgressPrinter(save_times[-1], print_every=10)
|
||||
for k in range(len(save_times)):
|
||||
#Get target time and step size there
|
||||
t_step = t_steps[k]
|
||||
t_end = save_times[k]
|
||||
|
||||
#Sanity check simulator
|
||||
try:
|
||||
sim.check()
|
||||
except AssertionError as e:
|
||||
logger.error("Error after {:d} steps (t={:f}: {:s}".format(sim.simSteps(), sim.simTime(), str(e)))
|
||||
return outdata.filename
|
||||
|
||||
#Simulate
|
||||
if (t_step > 0.0):
|
||||
sim.simulate(t_step)
|
||||
|
||||
#Download
|
||||
save_vars = sim.download(download_vars)
|
||||
|
||||
#Save to file
|
||||
for i, var_name in enumerate(save_var_names):
|
||||
ncvars[var_name][k, :] = save_vars[i]
|
||||
|
||||
#Write progress to screen
|
||||
print_string = progress_printer.getPrintString(t_end)
|
||||
if (print_string):
|
||||
logger.debug(print_string)
|
||||
|
||||
logger.debug("Simulated to t={:f} in {:d} timesteps (average dt={:f})".format(t_end, sim.simSteps(), sim.simTime() / sim.simSteps()))
|
||||
|
||||
return outdata.filename
|
||||
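A hedged example call of runSimulation above. The simulator class exists in the repository, but the argument names and values here are placeholders for illustration only:

    import numpy as np
    from GPUSimulators import LxF

    sim_args = {'nx': 512, 'ny': 512, 'dx': 1.0, 'dy': 1.0, 'dt': 0.01, 'g': 9.81}
    outfile = runSimulation(LxF.LxF, sim_args, "lxf_run.nc",
                            save_times=np.linspace(0.0, 1.0, 11),
                            save_var_names=['h', 'hu', 'hv'])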
|
||||
|
||||
|
||||
|
||||
|
||||
from GPUSimulators import Autotuner
|
||||
|
||||
"""
|
||||
Class which keeps track of time spent for a section of code
|
||||
"""
|
||||
class Timer(object):
|
||||
"""
|
||||
Class which keeps track of time spent for a section of code
|
||||
"""
|
||||
def __init__(self, tag, log_level=logging.DEBUG):
|
||||
self.tag = tag
|
||||
self.log_level = log_level
|
||||
@ -54,232 +203,261 @@ class Timer(object):
|
||||
self.secs = self.end - self.start
|
||||
self.msecs = self.secs * 1000 # millisecs
|
||||
self.logger.log(self.log_level, "%s: %f ms", self.tag, self.msecs)
|
||||
|
||||
def elapsed(self):
|
||||
return time.time() - self.start
|
||||
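A minimal hedged usage example for the Timer context manager above:

    import time
    import logging
    logging.basicConfig(level=logging.DEBUG)

    with Timer("demo") as t:
        time.sleep(0.1)
    # logs roughly "demo: 100.x ms"; t.secs and t.msecs hold the measurement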
|
||||
|
||||
|
||||
|
||||
|
||||
"""
|
||||
Class which keeps track of the CUDA context and some helper functions
|
||||
"""
|
||||
class CudaContext(object):
|
||||
class PopenFileBuffer(object):
|
||||
"""
|
||||
Simple class for holding a set of tempfiles
|
||||
for communicating with a subprocess
|
||||
"""
|
||||
def __init__(self):
|
||||
self.stdout = tempfile.TemporaryFile(mode='w+t')
|
||||
self.stderr = tempfile.TemporaryFile(mode='w+t')
|
||||
|
||||
def __del__(self):
|
||||
self.stdout.close()
|
||||
self.stderr.close()
|
||||
|
||||
def read(self):
|
||||
self.stdout.seek(0)
|
||||
cout = self.stdout.read()
|
||||
self.stdout.seek(0, 2)
|
||||
|
||||
self.stderr.seek(0)
|
||||
cerr = self.stderr.read()
|
||||
self.stderr.seek(0, 2)
|
||||
|
||||
return cout, cerr
|
||||
|
||||
class IPEngine(object):
|
||||
"""
|
||||
Class for starting IPEngines for MPI processing in IPython
|
||||
"""
|
||||
def __init__(self, n_engines):
|
||||
self.logger = logging.getLogger(__name__)
|
||||
|
||||
#Start ipcontroller
|
||||
self.logger.info("Starting IPController")
|
||||
self.c_buff = PopenFileBuffer()
|
||||
c_cmd = ["ipcontroller", "--ip='*'"]
|
||||
c_params = dict()
|
||||
c_params['stderr'] = self.c_buff.stderr
|
||||
c_params['stdout'] = self.c_buff.stdout
|
||||
c_params['shell'] = False
|
||||
if os.name == 'nt':
|
||||
c_params['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
|
||||
self.c = subprocess.Popen(c_cmd, **c_params)
|
||||
|
||||
#Wait until controller is running
|
||||
time.sleep(3)
|
||||
|
||||
#Start engines
|
||||
self.logger.info("Starting IPEngines")
|
||||
self.e_buff = PopenFileBuffer()
|
||||
e_cmd = ["mpiexec", "-n", str(n_engines), "ipengine", "--mpi"]
|
||||
e_params = dict()
|
||||
e_params['stderr'] = self.e_buff.stderr
|
||||
e_params['stdout'] = self.e_buff.stdout
|
||||
e_params['shell'] = False
|
||||
if os.name == 'nt':
|
||||
e_params['creationflags'] = subprocess.CREATE_NEW_PROCESS_GROUP
|
||||
self.e = subprocess.Popen(e_cmd, **e_params)
|
||||
|
||||
# attach to a running cluster
|
||||
import ipyparallel
|
||||
self.cluster = ipyparallel.Client()#profile='mpi')
|
||||
time.sleep(3)
|
||||
while(len(self.cluster.ids) != n_engines):
|
||||
time.sleep(0.5)
|
||||
self.logger.info("Waiting for cluster...")
|
||||
self.cluster = ipyparallel.Client()#profile='mpi')
|
||||
|
||||
self.logger.info("Done")
|
||||
|
||||
def __del__(self):
|
||||
self.shutdown()
|
||||
|
||||
def __init__(self, blocking=False, use_cache=True, autotuning=True):
|
||||
self.blocking = blocking
|
||||
self.use_cache = use_cache
|
||||
self.logger = logging.getLogger(__name__)
|
||||
self.kernels = {}
|
||||
def shutdown(self):
|
||||
if (self.e is not None):
|
||||
if (os.name == 'nt'):
|
||||
self.logger.warn("Sending CTRL+C to IPEngine")
|
||||
self.e.send_signal(signal.CTRL_C_EVENT)
|
||||
|
||||
try:
|
||||
self.e.communicate(timeout=3)
|
||||
self.e.kill()
|
||||
except subprocess.TimeoutExpired:
|
||||
self.logger.warn("Killing IPEngine")
|
||||
self.e.kill()
|
||||
self.e.communicate()
|
||||
self.e = None
|
||||
|
||||
cout, cerr = self.e_buff.read()
|
||||
self.logger.info("IPEngine cout: {:s}".format(cout))
|
||||
self.logger.info("IPEngine cerr: {:s}".format(cerr))
|
||||
self.e_buff = None
|
||||
|
||||
gc.collect()
|
||||
|
||||
if (self.c is not None):
|
||||
if (os.name == 'nt'):
|
||||
self.logger.warn("Sending CTRL+C to IPController")
|
||||
self.c.send_signal(signal.CTRL_C_EVENT)
|
||||
|
||||
try:
|
||||
self.c.communicate(timeout=3)
|
||||
self.c.kill()
|
||||
except subprocess.TimeoutExpired:
|
||||
self.logger.warn("Killing IPController")
|
||||
self.c.kill()
|
||||
self.c.communicate()
|
||||
self.c = None
|
||||
|
||||
cout, cerr = self.c_buff.read()
|
||||
self.logger.info("IPController cout: {:s}".format(cout))
|
||||
self.logger.info("IPController cerr: {:s}".format(cerr))
|
||||
self.c_buff = None
|
||||
|
||||
self.module_path = os.path.dirname(os.path.realpath(__file__))
|
||||
gc.collect()
|
||||
|
||||
#Initialize cuda (must be first call to PyCUDA)
|
||||
cuda.init(flags=0)
|
||||
|
||||
self.logger.info("PyCUDA version %s", str(pycuda.VERSION_TEXT))
|
||||
|
||||
#Print some info about CUDA
|
||||
self.logger.info("CUDA version %s", str(cuda.get_version()))
|
||||
self.logger.info("Driver version %s", str(cuda.get_driver_version()))
|
||||
|
||||
self.cuda_device = cuda.Device(0)
|
||||
self.logger.info("Using '%s' GPU", self.cuda_device.name())
|
||||
self.logger.debug(" => compute capability: %s", str(self.cuda_device.compute_capability()))
|
||||
self.logger.debug(" => memory: %d MB", self.cuda_device.total_memory() / (1024*1024))
|
||||
|
||||
|
||||
|
||||
# Create the CUDA context
|
||||
if (self.blocking):
|
||||
self.cuda_context = self.cuda_device.make_context(flags=cuda.ctx_flags.SCHED_BLOCKING_SYNC)
|
||||
self.logger.warning("Using blocking context")
|
||||
else:
|
||||
self.cuda_context = self.cuda_device.make_context(flags=cuda.ctx_flags.SCHED_AUTO)
|
||||
|
||||
class DataDumper(object):
|
||||
"""
|
||||
Simple class for holding a netCDF4 object
|
||||
(handles opening and closing in a nice way)
|
||||
Use as
|
||||
with DataDumper("filename") as data:
|
||||
...
|
||||
"""
|
||||
def __init__(self, filename, *args, **kwargs):
|
||||
self.logger = logging.getLogger(__name__)
|
||||
|
||||
self.logger.info("Created context handle <%s>", str(self.cuda_context.handle))
|
||||
#Create directory if needed
|
||||
filename = os.path.abspath(filename)
|
||||
dirname = os.path.dirname(filename)
|
||||
if dirname and not os.path.isdir(dirname):
|
||||
self.logger.info("Creating directory " + dirname)
|
||||
os.makedirs(dirname)
|
||||
|
||||
#Create cache dir for cubin files
|
||||
if (self.use_cache):
|
||||
self.cache_path = os.path.join(self.module_path, "cuda_cache")
|
||||
if not os.path.isdir(self.cache_path):
|
||||
os.mkdir(self.cache_path)
|
||||
self.logger.info("Using CUDA cache dir %s", self.cache_path)
|
||||
#Get mode of file if we have that
|
||||
mode = None
|
||||
if (args):
|
||||
mode = args[0]
|
||||
elif (kwargs and 'mode' in kwargs.keys()):
|
||||
mode = kwargs['mode']
|
||||
|
||||
self.autotuner = None
|
||||
if (autotuning):
|
||||
self.logger.info("Autotuning enabled. It may take several minutes to run the code the first time: have patience")
|
||||
self.autotuner = Autotuner.Autotuner()
|
||||
|
||||
|
||||
def __del__(self, *args):
|
||||
self.logger.info("Cleaning up CUDA context handle <%s>", str(self.cuda_context.handle))
|
||||
|
||||
# Loop over all contexts in stack, and remove "this"
|
||||
other_contexts = []
|
||||
while (cuda.Context.get_current() != None):
|
||||
context = cuda.Context.get_current()
|
||||
if (context.handle != self.cuda_context.handle):
|
||||
self.logger.debug("<%s> Popping <%s> (*not* ours)", str(self.cuda_context.handle), str(context.handle))
|
||||
other_contexts = [context] + other_contexts
|
||||
cuda.Context.pop()
|
||||
#Create new unique file if writing
|
||||
if (mode):
|
||||
if (("w" in mode) or ("+" in mode) or ("a" in mode)):
|
||||
i = 0
|
||||
stem, ext = os.path.splitext(filename)
|
||||
while (os.path.isfile(filename)):
|
||||
filename = "{:s}_{:04d}{:s}".format(stem, i, ext)
|
||||
i = i+1
|
||||
self.filename = os.path.abspath(filename)
|
||||
|
||||
#Save arguments
|
||||
self.args = args
|
||||
self.kwargs = kwargs
|
||||
|
||||
#Log output
|
||||
self.logger.info("Initialized " + self.filename)
|
||||
|
||||
|
||||
def __enter__(self):
|
||||
self.logger.info("Opening " + self.filename)
|
||||
if (self.args):
|
||||
self.logger.info("Arguments: " + str(self.args))
|
||||
if (self.kwargs):
|
||||
self.logger.info("Keyword arguments: " + str(self.kwargs))
|
||||
self.ncfile = netCDF4.Dataset(self.filename, *self.args, **self.kwargs)
|
||||
return self
|
||||
|
||||
def __exit__(self, *args):
|
||||
self.logger.info("Closing " + self.filename)
|
||||
self.ncfile.close()
|
||||
|
||||
|
||||
def toJson(in_dict):
|
||||
out_dict = in_dict.copy()
|
||||
|
||||
for key in out_dict:
|
||||
if isinstance(out_dict[key], np.ndarray):
|
||||
out_dict[key] = out_dict[key].tolist()
|
||||
else:
|
||||
self.logger.debug("<%s> Popping <%s> (ours)", str(self.cuda_context.handle), str(context.handle))
|
||||
cuda.Context.pop()
|
||||
try:
|
||||
json.dumps(out_dict[key])
|
||||
except:
|
||||
out_dict[key] = str(out_dict[key])
|
||||
|
||||
# Add all the contexts we popped that were not our own
|
||||
for context in other_contexts:
|
||||
self.logger.debug("<%s> Pushing <%s>", str(self.cuda_context.handle), str(context.handle))
|
||||
cuda.Context.push(context)
|
||||
|
||||
self.logger.debug("<%s> Detaching", str(self.cuda_context.handle))
|
||||
self.cuda_context.detach()
|
||||
|
||||
|
||||
def __str__(self):
|
||||
return "CudaContext id " + str(self.cuda_context.handle)
|
||||
|
||||
|
||||
def hash_kernel(kernel_filename, include_dirs):
|
||||
# Generate a kernel ID for our caches
|
||||
num_includes = 0
|
||||
max_includes = 100
|
||||
kernel_hasher = hashlib.md5()
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Loop over file and includes, and check if something has changed
|
||||
files = [kernel_filename]
|
||||
while len(files):
|
||||
|
||||
if (num_includes > max_includes):
|
||||
raise("Maximum number of includes reached - circular include in {:}?".format(kernel_filename))
|
||||
|
||||
filename = files.pop()
|
||||
|
||||
#logger.debug("Hashing %s", filename)
|
||||
|
||||
modified = os.path.getmtime(filename)
|
||||
|
||||
# Open the file
|
||||
with io.open(filename, "r") as file:
|
||||
|
||||
# Search for #include <something> and also hash the file
|
||||
file_str = file.read()
|
||||
kernel_hasher.update(file_str.encode('utf-8'))
|
||||
kernel_hasher.update(str(modified).encode('utf-8'))
|
||||
|
||||
#Find all includes
|
||||
includes = re.findall('^\W*#include\W+(.+?)\W*$', file_str, re.M)
|
||||
|
||||
# Loop over everything that looks like an include
|
||||
for include_file in includes:
|
||||
|
||||
#Search through include directories for the file
|
||||
file_path = os.path.dirname(filename)
|
||||
for include_path in [file_path] + include_dirs:
|
||||
|
||||
# If we find it, add it to list of files to check
|
||||
temp_path = os.path.join(include_path, include_file)
|
||||
if (os.path.isfile(temp_path)):
|
||||
files = files + [temp_path]
|
||||
num_includes = num_includes + 1 #For circular includes...
|
||||
break
|
||||
|
||||
return kernel_hasher.hexdigest()
|
||||
|
||||
"""
|
||||
Reads a text file and creates a CUDA kernel from it
|
||||
"""
|
||||
def get_prepared_kernel(self, kernel_filename, kernel_function_name, \
|
||||
prepared_call_args, \
|
||||
include_dirs=[], no_extern_c=True,
|
||||
**kwargs):
|
||||
"""
|
||||
Helper function to print compilation output
|
||||
"""
|
||||
def cuda_compile_message_handler(compile_success_bool, info_str, error_str):
|
||||
self.logger.debug("Compilation returned %s", str(compile_success_bool))
|
||||
if info_str:
|
||||
self.logger.debug("Info: %s", info_str)
|
||||
if error_str:
|
||||
self.logger.debug("Error: %s", error_str)
|
||||
|
||||
#self.logger.debug("Getting %s", kernel_filename)
|
||||
|
||||
# Create a hash of the kernel (and its includes)
|
||||
kwargs_hasher = hashlib.md5()
|
||||
kwargs_hasher.update(str(kwargs).encode('utf-8'));
|
||||
kwargs_hash = kwargs_hasher.hexdigest()
|
||||
kwargs_hasher = None
|
||||
root, ext = os.path.splitext(kernel_filename)
|
||||
kernel_hash = root \
|
||||
+ "_" + CudaContext.hash_kernel( \
|
||||
os.path.join(self.module_path, kernel_filename), \
|
||||
include_dirs=[self.module_path] + include_dirs) \
|
||||
+ "_" + kwargs_hash \
|
||||
+ ext
|
||||
cached_kernel_filename = os.path.join(self.cache_path, kernel_hash)
|
||||
|
||||
# If we have the kernel in our hashmap, return it
|
||||
if (kernel_hash in self.kernels.keys()):
|
||||
self.logger.debug("Found kernel %s cached in hashmap (%s)", kernel_filename, kernel_hash)
|
||||
return self.kernels[kernel_hash]
|
||||
|
||||
# If we have it on disk, return it
|
||||
elif (self.use_cache and os.path.isfile(cached_kernel_filename)):
|
||||
self.logger.debug("Found kernel %s cached on disk (%s)", kernel_filename, kernel_hash)
|
||||
|
||||
with io.open(cached_kernel_filename, "rb") as file:
|
||||
file_str = file.read()
|
||||
module = cuda.module_from_buffer(file_str, message_handler=cuda_compile_message_handler)
|
||||
|
||||
kernel = module.get_function(kernel_function_name)
|
||||
kernel.prepare(prepared_call_args)
|
||||
self.kernels[kernel_hash] = kernel
|
||||
return kernel
|
||||
|
||||
# Otherwise, compile it from source
|
||||
else:
|
||||
self.logger.debug("Compiling %s (%s)", kernel_filename, kernel_hash)
|
||||
|
||||
#Create kernel string
|
||||
kernel_string = ""
|
||||
for key, value in kwargs.items():
|
||||
kernel_string += "#define {:s} {:s}\n".format(str(key), str(value))
|
||||
kernel_string += '#include "{:s}"'.format(os.path.join(self.module_path, kernel_filename))
|
||||
if (self.use_cache):
|
||||
with io.open(cached_kernel_filename + ".txt", "w") as file:
|
||||
file.write(kernel_string)
|
||||
|
||||
|
||||
with Timer("compiler") as timer:
|
||||
cubin = cuda_compiler.compile(kernel_string, include_dirs=include_dirs, no_extern_c=no_extern_c, cache_dir=False)
|
||||
module = cuda.module_from_buffer(cubin, message_handler=cuda_compile_message_handler)
|
||||
if (self.use_cache):
|
||||
with io.open(cached_kernel_filename, "wb") as file:
|
||||
file.write(cubin)
|
||||
|
||||
kernel = module.get_function(kernel_function_name)
|
||||
kernel.prepare(prepared_call_args)
|
||||
self.kernels[kernel_hash] = kernel
|
||||
|
||||
|
||||
return kernel
|
||||
|
||||
"""
|
||||
Clears the kernel cache (useful for debugging & development)
|
||||
"""
|
||||
def clear_kernel_cache(self):
|
||||
self.logger.debug("Clearing cache")
|
||||
self.kernels = {}
|
||||
gc.collect()
|
||||
|
||||
"""
|
||||
Synchronizes all streams etc
|
||||
"""
|
||||
def synchronize(self):
|
||||
self.cuda_context.synchronize()
|
||||
return json.dumps(out_dict)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
class ProgressPrinter(object):
|
||||
"""
|
||||
Small helper class for printing simulation progress to screen
|
||||
"""
|
||||
def __init__(self, total_steps, print_every=5):
|
||||
self.logger = logging.getLogger(__name__)
|
||||
self.start = time.time()
|
||||
self.total_steps = total_steps
|
||||
self.print_every = print_every
|
||||
self.next_print_time = self.print_every
|
||||
self.last_step = 0
|
||||
self.secs_per_iter = None
|
||||
|
||||
def getPrintString(self, step):
|
||||
elapsed = time.time() - self.start
|
||||
if (elapsed > self.next_print_time):
|
||||
dt = elapsed - (self.next_print_time - self.print_every)
|
||||
dsteps = step - self.last_step
|
||||
steps_remaining = self.total_steps - step
|
||||
|
||||
if (dsteps == 0):
|
||||
return
|
||||
|
||||
self.last_step = step
|
||||
self.next_print_time = elapsed + self.print_every
|
||||
|
||||
if not self.secs_per_iter:
|
||||
self.secs_per_iter = dt / dsteps
|
||||
self.secs_per_iter = 0.2*self.secs_per_iter + 0.8*(dt / dsteps)
|
||||
|
||||
remaining_time = steps_remaining * self.secs_per_iter
|
||||
|
||||
return "{:s}. Total: {:s}, elapsed: {:s}, remaining: {:s}".format(
|
||||
ProgressPrinter.progressBar(step, self.total_steps),
|
||||
ProgressPrinter.timeString(elapsed + remaining_time),
|
||||
ProgressPrinter.timeString(elapsed),
|
||||
ProgressPrinter.timeString(remaining_time))
|
||||
|
||||
def timeString(seconds):
|
||||
seconds = int(max(seconds, 1))
|
||||
minutes, seconds = divmod(seconds, 60)
|
||||
hours, minutes = divmod(minutes, 60)
|
||||
periods = [('h', hours), ('m', minutes), ('s', seconds)]
|
||||
time_string = ' '.join('{}{}'.format(value, name)
|
||||
for name, value in periods
|
||||
if value)
|
||||
return time_string
|
||||
|
||||
def progressBar(step, total_steps, width=30):
|
||||
progress = np.round(width * step / total_steps).astype(np.int32)
|
||||
progressbar = "0% [" + "#"*(progress) + "="*(width-progress) + "] 100%"
|
||||
return progressbar
|
||||
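A hedged usage example for ProgressPrinter above, where progress is measured in simulated time rather than iteration count:

    import time
    import numpy as np

    progress = ProgressPrinter(10.0, print_every=5)
    for t_now in np.linspace(0.0, 10.0, 101):
        time.sleep(0.1)                       # stand-in for one simulation step
        line = progress.getPrintString(t_now)
        if line:
            print(line)                       # progress bar plus elapsed/remaining estimate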
|
||||
|
||||
|
||||
@ -288,13 +466,13 @@ class CudaContext(object):
|
||||
|
||||
|
||||
"""
|
||||
Class that holds data
|
||||
Class that holds 2D data
|
||||
"""
|
||||
class CudaArray2D:
|
||||
"""
|
||||
Uploads initial data to the CL device
|
||||
Uploads initial data to the CUDA device
|
||||
"""
|
||||
def __init__(self, stream, nx, ny, x_halo, y_halo, cpu_data):
|
||||
def __init__(self, stream, nx, ny, x_halo, y_halo, cpu_data=None, dtype=np.float32):
|
||||
self.logger = logging.getLogger(__name__)
|
||||
self.nx = nx
|
||||
self.ny = ny
|
||||
@ -305,41 +483,181 @@ class CudaArray2D:
|
||||
ny_halo = ny + 2*y_halo
|
||||
|
||||
#self.logger.debug("Allocating [%dx%d] buffer", self.nx, self.ny)
|
||||
#Should perhaps use pycuda.driver.mem_alloc_data.pitch() here
|
||||
self.data = pycuda.gpuarray.zeros((ny_halo, nx_halo), dtype)
|
||||
|
||||
#For returning to download
|
||||
self.memorypool = PageLockedMemoryPool()
|
||||
|
||||
#If we don't have any data, just allocate and return
|
||||
if cpu_data is None:
|
||||
return
|
||||
|
||||
#Make sure data is in proper format
|
||||
assert np.issubdtype(cpu_data.dtype, np.float32), "Wrong datatype: %s" % str(cpu_data.dtype)
|
||||
assert cpu_data.shape == (ny_halo, nx_halo) or cpu_data.shape == (self.ny, self.nx), "Wrong shape of data %s vs %s / %s" % (str(cpu_data.shape), str((self.ny, self.nx)), str((ny_halo, nx_halo)))
|
||||
assert cpu_data.itemsize == 4, "Wrong size of data type"
|
||||
assert not np.isfortran(cpu_data), "Wrong datatype (Fortran, expected C)"
|
||||
|
||||
#Upload data to the device
|
||||
if (cpu_data.shape == (ny_halo, nx_halo)):
|
||||
self.data = pycuda.gpuarray.to_gpu_async(cpu_data, stream=stream)
|
||||
elif (cpu_data.shape == (self.ny, self.nx)):
|
||||
#Should perhaps use pycuda.driver.mem_alloc_data.pitch() here
|
||||
self.data = pycuda.gpuarray.empty((ny_halo, nx_halo), cpu_data.dtype)
|
||||
#self.data.fill(0.0)
|
||||
|
||||
#Create copy object from host to device
|
||||
copy = cuda.Memcpy2D()
|
||||
copy.set_src_host(cpu_data)
|
||||
copy.set_dst_device(self.data.gpudata)
|
||||
|
||||
#Set offsets and pitch of destination
|
||||
copy.dst_x_in_bytes = self.x_halo*self.data.strides[1]
|
||||
copy.dst_y = self.y_halo
|
||||
copy.dst_pitch = self.data.strides[0]
|
||||
|
||||
#Set width in bytes to copy for each row and
|
||||
#number of rows to copy
|
||||
copy.width_in_bytes = self.nx*cpu_data.itemsize
|
||||
copy.height = self.ny
|
||||
|
||||
#Perform the copy
|
||||
copy(stream)
|
||||
stream.synchronize()
|
||||
|
||||
#Create copy object from host to device
|
||||
x = (nx_halo - cpu_data.shape[1]) // 2
|
||||
y = (ny_halo - cpu_data.shape[0]) // 2
|
||||
self.upload(stream, cpu_data, extent=[x, y, cpu_data.shape[1], cpu_data.shape[0]])
|
||||
#self.logger.debug("Buffer <%s> [%dx%d]: Allocated ", int(self.data.gpudata), self.nx, self.ny)
|
||||
|
||||
|
||||
def __del__(self, *args):
|
||||
#self.logger.debug("Buffer <%s> [%dx%d]: Releasing ", int(self.data.gpudata), self.nx, self.ny)
|
||||
self.data.gpudata.free()
|
||||
self.data = None
|
||||
|
||||
"""
|
||||
Enables downloading data from GPU to Python
|
||||
"""
|
||||
def download(self, stream, cpu_data=None, asynch=False, extent=None):
|
||||
if (extent is None):
|
||||
x = self.x_halo
|
||||
y = self.y_halo
|
||||
nx = self.nx
|
||||
ny = self.ny
|
||||
else:
|
||||
assert False, "Wrong data shape: %s vs %s / %s" % (str(cpu_data.shape), str((self.ny, self.nx)), str((ny_halo, nx_halo)))
|
||||
x, y, nx, ny = extent
|
||||
|
||||
if (cpu_data is None):
|
||||
#self.logger.debug("Downloading [%dx%d] buffer", self.nx, self.ny)
|
||||
#Allocate host memory
|
||||
#The following fails, don't know why (crashes python)
|
||||
#cpu_data = cuda.pagelocked_empty((self.ny, self.nx), np.float32)
|
||||
#Non-pagelocked: cpu_data = np.empty((ny, nx), dtype=np.float32)
|
||||
cpu_data = self.memorypool.allocate((ny, nx), dtype=np.float32)
|
||||
|
||||
assert nx == cpu_data.shape[1]
|
||||
assert ny == cpu_data.shape[0]
|
||||
assert x+nx <= self.nx + 2*self.x_halo
|
||||
assert y+ny <= self.ny + 2*self.y_halo
|
||||
|
||||
#Create copy object from device to host
|
||||
copy = cuda.Memcpy2D()
|
||||
copy.set_src_device(self.data.gpudata)
|
||||
copy.set_dst_host(cpu_data)
|
||||
|
||||
#Set offsets and pitch of source
|
||||
copy.src_x_in_bytes = int(x)*self.data.strides[1]
|
||||
copy.src_y = int(y)
|
||||
copy.src_pitch = self.data.strides[0]
|
||||
|
||||
#Set width in bytes to copy for each row and
|
||||
#number of rows to copy
|
||||
copy.width_in_bytes = int(nx)*cpu_data.itemsize
|
||||
copy.height = int(ny)
|
||||
|
||||
copy(stream)
|
||||
if asynch==False:
|
||||
stream.synchronize()
|
||||
|
||||
return cpu_data
|
||||
|
||||
|
||||
def upload(self, stream, cpu_data, extent=None):
|
||||
if (extent is None):
|
||||
x = self.x_halo
|
||||
y = self.y_halo
|
||||
nx = self.nx
|
||||
ny = self.ny
|
||||
else:
|
||||
x, y, nx, ny = extent
|
||||
|
||||
assert(nx == cpu_data.shape[1])
|
||||
assert(ny == cpu_data.shape[0])
|
||||
assert(x+nx <= self.nx + 2*self.x_halo)
|
||||
assert(y+ny <= self.ny + 2*self.y_halo)
|
||||
|
||||
#Create copy object from device to host
|
||||
copy = cuda.Memcpy2D()
|
||||
copy.set_dst_device(self.data.gpudata)
|
||||
copy.set_src_host(cpu_data)
|
||||
|
||||
#Set offsets and pitch of source
|
||||
copy.dst_x_in_bytes = int(x)*self.data.strides[1]
|
||||
copy.dst_y = int(y)
|
||||
copy.dst_pitch = self.data.strides[0]
|
||||
|
||||
#Set width in bytes to copy for each row and
|
||||
#number of rows to copy
|
||||
copy.width_in_bytes = int(nx)*cpu_data.itemsize
|
||||
copy.height = int(ny)
|
||||
|
||||
copy(stream)
|
||||
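A hedged round-trip example for CudaArray2D above. In the simulators the stream and context come from CudaContext; pycuda.autoinit is used here only to make the snippet self-contained:

    import numpy as np
    import pycuda.autoinit
    import pycuda.driver as cuda

    stream = cuda.Stream()
    host = np.random.rand(64, 64).astype(np.float32)

    arr = CudaArray2D(stream, 64, 64, x_halo=2, y_halo=2, cpu_data=host)
    back = arr.download(stream)     # blocking download of the 64x64 interior
    assert np.allclose(host, back)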
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
"""
|
||||
Class that holds 3D data
|
||||
"""
|
||||
class CudaArray3D:
|
||||
"""
|
||||
Uploads initial data to the CUDA device
|
||||
"""
|
||||
def __init__(self, stream, nx, ny, nz, x_halo, y_halo, z_halo, cpu_data=None, dtype=np.float32):
|
||||
self.logger = logging.getLogger(__name__)
|
||||
self.nx = nx
|
||||
self.ny = ny
|
||||
self.nz = nz
|
||||
self.x_halo = x_halo
|
||||
self.y_halo = y_halo
|
||||
self.z_halo = z_halo
|
||||
|
||||
nx_halo = nx + 2*x_halo
|
||||
ny_halo = ny + 2*y_halo
|
||||
nz_halo = nz + 2*z_halo
|
||||
|
||||
#self.logger.debug("Allocating [%dx%dx%d] buffer", self.nx, self.ny, self.nz)
|
||||
#Should perhaps use pycuda.driver.mem_alloc_data.pitch() here
|
||||
self.data = pycuda.gpuarray.zeros((nz_halo, ny_halo, nx_halo), dtype)
|
||||
|
||||
#For returning to download
|
||||
self.memorypool = PageLockedMemoryPool()
|
||||
|
||||
#If we don't have any data, just allocate and return
|
||||
if cpu_data is None:
|
||||
return
|
||||
|
||||
#Make sure data is in proper format
|
||||
assert cpu_data.shape == (nz_halo, ny_halo, nx_halo) or cpu_data.shape == (self.nz, self.ny, self.nx), "Wrong shape of data %s vs %s / %s" % (str(cpu_data.shape), str((self.nz, self.ny, self.nx)), str((nz_halo, ny_halo, nx_halo)))
|
||||
assert cpu_data.itemsize == 4, "Wrong size of data type"
|
||||
assert not np.isfortran(cpu_data), "Wrong datatype (Fortran, expected C)"
|
||||
|
||||
#Create copy object from host to device
|
||||
copy = cuda.Memcpy3D()
|
||||
copy.set_src_host(cpu_data)
|
||||
copy.set_dst_device(self.data.gpudata)
|
||||
|
||||
#Set offsets of destination
|
||||
x_offset = (nx_halo - cpu_data.shape[2]) // 2
|
||||
y_offset = (ny_halo - cpu_data.shape[1]) // 2
|
||||
z_offset = (nz_halo - cpu_data.shape[0]) // 2
|
||||
copy.dst_x_in_bytes = x_offset*self.data.strides[1]
|
||||
copy.dst_y = y_offset
|
||||
copy.dst_z = z_offset
|
||||
|
||||
#Set pitch of destination
|
||||
copy.dst_pitch = self.data.strides[0]
|
||||
|
||||
#Set width in bytes to copy for each row and
|
||||
#number of rows to copy
|
||||
width = max(self.nx, cpu_data.shape[2])
|
||||
height = max(self.ny, cpu_data.shape[1])
|
||||
depth = max(self.nz, cpu_data.shape[0])
|
||||
copy.width_in_bytes = width*cpu_data.itemsize
|
||||
copy.height = height
|
||||
copy.depth = depth
|
||||
|
||||
#Perform the copy
|
||||
copy(stream)
|
||||
|
||||
#self.logger.debug("Buffer <%s> [%dx%d]: Allocated ", int(self.data.gpudata), self.nx, self.ny)
|
||||
|
||||
@ -352,11 +670,12 @@ class CudaArray2D:
|
||||
"""
|
||||
Enables downloading data from GPU to Python
|
||||
"""
|
||||
def download(self, stream, async=False):
|
||||
def download(self, stream, asynch=False):
|
||||
#self.logger.debug("Downloading [%dx%d] buffer", self.nx, self.ny)
|
||||
#Allocate host memory
|
||||
#cpu_data = cuda.pagelocked_empty((self.ny, self.nx), np.float32)
|
||||
cpu_data = np.empty((self.ny, self.nx), dtype=np.float32)
|
||||
#cpu_data = np.empty((self.nz, self.ny, self.nx), dtype=np.float32)
|
||||
cpu_data = self.memorypool.allocate((self.nz, self.ny, self.nx), dtype=np.float32)
|
||||
|
||||
#Create copy object from device to host
|
||||
copy = cuda.Memcpy2D()
|
||||
@ -366,15 +685,17 @@ class CudaArray2D:
|
||||
#Set offsets and pitch of source
|
||||
copy.src_x_in_bytes = self.x_halo*self.data.strides[1]
|
||||
copy.src_y = self.y_halo
|
||||
copy.src_z = self.z_halo
|
||||
copy.src_pitch = self.data.strides[0]
|
||||
|
||||
#Set width in bytes to copy for each row and
|
||||
#number of rows to copy
|
||||
copy.width_in_bytes = self.nx*cpu_data.itemsize
|
||||
copy.height = self.ny
|
||||
copy.depth = self.nz
|
||||
|
||||
copy(stream)
|
||||
if async==False:
|
||||
if asynch==False:
|
||||
stream.synchronize()
|
||||
|
||||
return cpu_data
|
||||
@ -390,36 +711,43 @@ class CudaArray2D:
|
||||
"""
|
||||
A class representing an Arakawa A type (unstaggered, logically Cartesian) grid
|
||||
"""
|
||||
class SWEDataArakawaA:
|
||||
"""
|
||||
Uploads initial data to the CL device
|
||||
"""
|
||||
def __init__(self, stream, nx, ny, halo_x, halo_y, h0, hu0, hv0):
|
||||
class ArakawaA2D:
|
||||
def __init__(self, stream, nx, ny, halo_x, halo_y, cpu_variables):
|
||||
"""
|
||||
Uploads initial data to the GPU device
|
||||
"""
|
||||
self.logger = logging.getLogger(__name__)
|
||||
self.h0 = CudaArray2D(stream, nx, ny, halo_x, halo_y, h0)
|
||||
self.hu0 = CudaArray2D(stream, nx, ny, halo_x, halo_y, hu0)
|
||||
self.hv0 = CudaArray2D(stream, nx, ny, halo_x, halo_y, hv0)
|
||||
self.gpu_variables = []
|
||||
for cpu_variable in cpu_variables:
|
||||
self.gpu_variables += [CudaArray2D(stream, nx, ny, halo_x, halo_y, cpu_variable)]
|
||||
|
||||
self.h1 = CudaArray2D(stream, nx, ny, halo_x, halo_y, h0)
|
||||
self.hu1 = CudaArray2D(stream, nx, ny, halo_x, halo_y, hu0)
|
||||
self.hv1 = CudaArray2D(stream, nx, ny, halo_x, halo_y, hv0)
|
||||
def __getitem__(self, key):
|
||||
assert type(key) == int, "Indexing is int based"
|
||||
if (key > len(self.gpu_variables) or key < 0):
|
||||
raise IndexError("Out of bounds")
|
||||
return self.gpu_variables[key]
|
||||
|
||||
def download(self, stream, variables=None):
|
||||
"""
|
||||
Enables downloading data from the GPU device to Python
|
||||
"""
|
||||
if variables is None:
|
||||
variables=range(len(self.gpu_variables))
|
||||
|
||||
cpu_variables = []
|
||||
for i in variables:
|
||||
assert i < len(self.gpu_variables), "Variable {:d} is out of range".format(i)
|
||||
cpu_variables += [self.gpu_variables[i].download(stream, asynch=True)]
|
||||
|
||||
"""
|
||||
Swaps the variables after a timestep has been completed
|
||||
"""
|
||||
def swap(self):
|
||||
self.h1, self.h0 = self.h0, self.h1
|
||||
self.hu1, self.hu0 = self.hu0, self.hu1
|
||||
self.hv1, self.hv0 = self.hv0, self.hv1
|
||||
|
||||
"""
|
||||
Enables downloading data from CL device to Python
|
||||
"""
|
||||
def download(self, stream):
|
||||
h_cpu = self.h0.download(stream, async=True)
|
||||
hu_cpu = self.hu0.download(stream, async=True)
|
||||
hv_cpu = self.hv0.download(stream, async=False)
|
||||
|
||||
return h_cpu, hu_cpu, hv_cpu
|
||||
|
||||
stream.synchronize()
|
||||
return cpu_variables
|
||||
|
||||
def check(self):
|
||||
"""
|
||||
Checks that data is still sane
|
||||
"""
|
||||
for i, gpu_variable in enumerate(self.gpu_variables):
|
||||
var_sum = pycuda.gpuarray.sum(gpu_variable.data).get()
|
||||
self.logger.debug("Data %d with size [%d x %d] has average %f", i, gpu_variable.nx, gpu_variable.ny, var_sum / (gpu_variable.nx * gpu_variable.ny))
|
||||
assert np.isnan(var_sum) == False, "Data contains NaN values!"
|
||||
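A hedged usage example for ArakawaA2D above, wrapping the three conserved shallow water variables; pycuda.autoinit again only makes the snippet self-contained:

    import numpy as np
    import pycuda.autoinit
    import pycuda.driver as cuda

    stream = cuda.Stream()
    h0  = np.full((64, 64), 0.5, dtype=np.float32)
    hu0 = np.zeros((64, 64), dtype=np.float32)
    hv0 = np.zeros((64, 64), dtype=np.float32)

    data = ArakawaA2D(stream, 64, 64, halo_x=2, halo_y=2, cpu_variables=[h0, hu0, hv0])
    data.check()                          # asserts that no NaNs have appeared
    h, hu, hv = data.download(stream)     # defaults to downloading every variable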
|
GPUSimulators/CudaContext.py (272 lines, new file)
@ -0,0 +1,272 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
This python module implements Cuda context handling
|
||||
|
||||
Copyright (C) 2018 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
|
||||
|
||||
import os
|
||||
|
||||
import numpy as np
|
||||
import time
|
||||
import re
|
||||
import io
|
||||
import hashlib
|
||||
import logging
|
||||
import gc
|
||||
|
||||
import pycuda.compiler as cuda_compiler
|
||||
import pycuda.gpuarray
|
||||
import pycuda.driver as cuda
|
||||
|
||||
from GPUSimulators import Autotuner, Common
|
||||
|
||||
|
||||
|
||||
"""
|
||||
Class which keeps track of the CUDA context and some helper functions
|
||||
"""
|
||||
class CudaContext(object):
|
||||
|
||||
def __init__(self, device=None, context_flags=None, use_cache=True, autotuning=True):
|
||||
"""
|
||||
Create a new CUDA context
|
||||
Set device to an id or pci_bus_id to select a specific GPU
|
||||
Set context_flags to cuda.ctx_flags.SCHED_BLOCKING_SYNC for a blocking context
|
||||
"""
|
||||
self.use_cache = use_cache
|
||||
self.logger = logging.getLogger(__name__)
|
||||
self.modules = {}
|
||||
|
||||
self.module_path = os.path.dirname(os.path.realpath(__file__))
|
||||
|
||||
#Initialize cuda (must be first call to PyCUDA)
|
||||
cuda.init(flags=0)
|
||||
|
||||
self.logger.info("PyCUDA version %s", str(pycuda.VERSION_TEXT))
|
||||
|
||||
#Print some info about CUDA
|
||||
self.logger.info("CUDA version %s", str(cuda.get_version()))
|
||||
self.logger.info("Driver version %s", str(cuda.get_driver_version()))
|
||||
|
||||
if device is None:
|
||||
device = 0
|
||||
|
||||
self.cuda_device = cuda.Device(device)
|
||||
self.logger.info("Using device %d/%d '%s' (%s) GPU", device, cuda.Device.count(), self.cuda_device.name(), self.cuda_device.pci_bus_id())
|
||||
self.logger.debug(" => compute capability: %s", str(self.cuda_device.compute_capability()))
|
||||
|
||||
# Create the CUDA context
|
||||
if context_flags is None:
|
||||
context_flags=cuda.ctx_flags.SCHED_AUTO
|
||||
|
||||
self.cuda_context = self.cuda_device.make_context(flags=context_flags)
|
||||
|
||||
free, total = cuda.mem_get_info()
|
||||
self.logger.debug(" => memory: %d / %d MB available", int(free/(1024*1024)), int(total/(1024*1024)))
|
||||
|
||||
self.logger.info("Created context handle <%s>", str(self.cuda_context.handle))
|
||||
|
||||
#Create cache dir for cubin files
|
||||
self.cache_path = os.path.join(self.module_path, "cuda_cache")
|
||||
if (self.use_cache):
|
||||
if not os.path.isdir(self.cache_path):
|
||||
os.mkdir(self.cache_path)
|
||||
self.logger.info("Using CUDA cache dir %s", self.cache_path)
|
||||
|
||||
self.autotuner = None
|
||||
if (autotuning):
|
||||
self.logger.info("Autotuning enabled. It may take several minutes to run the code the first time: have patience")
|
||||
self.autotuner = Autotuner.Autotuner()
|
||||
|
||||
|
||||
def __del__(self, *args):
|
||||
self.logger.info("Cleaning up CUDA context handle <%s>", str(self.cuda_context.handle))
|
||||
|
||||
# Loop over all contexts in stack, and remove "this"
|
||||
other_contexts = []
|
||||
while (cuda.Context.get_current() != None):
|
||||
context = cuda.Context.get_current()
|
||||
if (context.handle != self.cuda_context.handle):
|
||||
self.logger.debug("<%s> Popping <%s> (*not* ours)", str(self.cuda_context.handle), str(context.handle))
|
||||
other_contexts = [context] + other_contexts
|
||||
cuda.Context.pop()
|
||||
else:
|
||||
self.logger.debug("<%s> Popping <%s> (ours)", str(self.cuda_context.handle), str(context.handle))
|
||||
cuda.Context.pop()
|
||||
|
||||
# Add all the contexts we popped that were not our own
|
||||
for context in other_contexts:
|
||||
self.logger.debug("<%s> Pushing <%s>", str(self.cuda_context.handle), str(context.handle))
|
||||
cuda.Context.push(context)
|
||||
|
||||
self.logger.debug("<%s> Detaching", str(self.cuda_context.handle))
|
||||
self.cuda_context.detach()
|
||||
|
||||
|
||||
def __str__(self):
|
||||
return "CudaContext id " + str(self.cuda_context.handle)
|
||||
|
||||
|
||||
def hash_kernel(kernel_filename, include_dirs):
|
||||
# Generate a kernel ID for our caches
|
||||
num_includes = 0
|
||||
max_includes = 100
|
||||
kernel_hasher = hashlib.md5()
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Loop over file and includes, and check if something has changed
|
||||
files = [kernel_filename]
|
||||
while len(files):
|
||||
|
||||
if (num_includes > max_includes):
|
||||
raise("Maximum number of includes reached - circular include in {:}?".format(kernel_filename))
|
||||
|
||||
filename = files.pop()
|
||||
|
||||
#logger.debug("Hashing %s", filename)
|
||||
|
||||
modified = os.path.getmtime(filename)
|
||||
|
||||
# Open the file
|
||||
with io.open(filename, "r") as file:
|
||||
|
||||
# Search for #inclue <something> and also hash the file
|
||||
file_str = file.read()
|
||||
kernel_hasher.update(file_str.encode('utf-8'))
|
||||
kernel_hasher.update(str(modified).encode('utf-8'))
|
||||
|
||||
#Find all includes
|
||||
includes = re.findall('^\W*#include\W+(.+?)\W*$', file_str, re.M)
|
||||
|
||||
# Loop over everything that looks like an include
|
||||
for include_file in includes:
|
||||
|
||||
#Search through include directories for the file
|
||||
file_path = os.path.dirname(filename)
|
||||
for include_path in [file_path] + include_dirs:
|
||||
|
||||
# If we find it, add it to list of files to check
|
||||
temp_path = os.path.join(include_path, include_file)
|
||||
if (os.path.isfile(temp_path)):
|
||||
files = files + [temp_path]
|
||||
num_includes = num_includes + 1 #For circular includes...
|
||||
break
|
||||
|
||||
return kernel_hasher.hexdigest()
|
||||
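A hedged usage example for hash_kernel above; the kernel file name is a placeholder, not a file from this repository:

    import os
    from GPUSimulators.CudaContext import CudaContext

    module_path = os.path.dirname(os.path.realpath(__file__))
    key = CudaContext.hash_kernel(os.path.join(module_path, "LxF_kernel.cu"),
                                  include_dirs=[module_path])
    print(key)    # md5 hex digest, used as part of the cached cubin file name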
|
||||
|
||||
"""
|
||||
Reads a text file and creates a CUDA kernel module from it
|
||||
"""
|
||||
def get_module(self, kernel_filename,
|
||||
include_dirs=[], \
|
||||
defines={}, \
|
||||
compile_args={'no_extern_c': True}, jit_compile_args={}):
|
||||
"""
|
||||
Helper function to print compilation output
|
||||
"""
|
||||
def cuda_compile_message_handler(compile_success_bool, info_str, error_str):
|
||||
self.logger.debug("Compilation returned %s", str(compile_success_bool))
|
||||
if info_str:
|
||||
self.logger.debug("Info: %s", info_str)
|
||||
if error_str:
|
||||
self.logger.debug("Error: %s", error_str)
|
||||
|
||||
kernel_filename = os.path.normpath(kernel_filename)
|
||||
kernel_path = os.path.abspath(os.path.join(self.module_path, kernel_filename))
|
||||
#self.logger.debug("Getting %s", kernel_filename)
|
||||
|
||||
# Create a hash of the kernel options
|
||||
options_hasher = hashlib.md5()
|
||||
options_hasher.update(str(defines).encode('utf-8') + str(compile_args).encode('utf-8'));
|
||||
options_hash = options_hasher.hexdigest()
|
||||
|
||||
# Create hash of kernel source
|
||||
source_hash = CudaContext.hash_kernel( \
|
||||
kernel_path, \
|
||||
include_dirs=[self.module_path] + include_dirs)
|
||||
|
||||
# Create final hash
|
||||
root, ext = os.path.splitext(kernel_filename)
|
||||
kernel_hash = root \
|
||||
+ "_" + source_hash \
|
||||
+ "_" + options_hash \
|
||||
+ ext
|
||||
cached_kernel_filename = os.path.join(self.cache_path, kernel_hash)
|
||||
|
||||
# If we have the kernel in our hashmap, return it
|
||||
if (kernel_hash in self.modules.keys()):
|
||||
self.logger.debug("Found kernel %s cached in hashmap (%s)", kernel_filename, kernel_hash)
|
||||
return self.modules[kernel_hash]
|
||||
|
||||
# If we have it on disk, return it
|
||||
elif (self.use_cache and os.path.isfile(cached_kernel_filename)):
|
||||
self.logger.debug("Found kernel %s cached on disk (%s)", kernel_filename, kernel_hash)
|
||||
|
||||
with io.open(cached_kernel_filename, "rb") as file:
|
||||
file_str = file.read()
|
||||
module = cuda.module_from_buffer(file_str, message_handler=cuda_compile_message_handler, **jit_compile_args)
|
||||
|
||||
self.modules[kernel_hash] = module
|
||||
return module
|
||||
|
||||
# Otherwise, compile it from source
|
||||
else:
|
||||
self.logger.debug("Compiling %s (%s)", kernel_filename, kernel_hash)
|
||||
|
||||
#Create kernel string
|
||||
kernel_string = ""
|
||||
for key, value in defines.items():
|
||||
kernel_string += "#define {:s} {:s}\n".format(str(key), str(value))
|
||||
kernel_string += '#include "{:s}"'.format(os.path.join(self.module_path, kernel_filename))
|
||||
if (self.use_cache):
|
||||
cached_kernel_dir = os.path.dirname(cached_kernel_filename)
|
||||
if not os.path.isdir(cached_kernel_dir):
|
||||
os.mkdir(cached_kernel_dir)
|
||||
with io.open(cached_kernel_filename + ".txt", "w") as file:
|
||||
file.write(kernel_string)
|
||||
|
||||
|
||||
with Common.Timer("compiler") as timer:
|
||||
import warnings
|
||||
with warnings.catch_warnings():
|
||||
warnings.filterwarnings("ignore", message="The CUDA compiler succeeded, but said the following:\nkernel.cu", category=UserWarning)
|
||||
cubin = cuda_compiler.compile(kernel_string, include_dirs=include_dirs, cache_dir=False, **compile_args)
|
||||
module = cuda.module_from_buffer(cubin, message_handler=cuda_compile_message_handler, **jit_compile_args)
|
||||
if (self.use_cache):
|
||||
with io.open(cached_kernel_filename, "wb") as file:
|
||||
file.write(cubin)
|
||||
|
||||
self.modules[kernel_hash] = module
|
||||
return module
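# Example (illustrative; mirrors how the simulators in this commit use it):
#
#   module = context.get_module("cuda/SWE2D_LxF.cu",
#                                defines={'BLOCK_WIDTH': 16, 'BLOCK_HEIGHT': 16},
#                                compile_args={'no_extern_c': True,
#                                              'options': ["--use_fast_math"]},
#                                jit_compile_args={})
#   kernel = module.get_function("LxFKernel")
#
# The first call compiles and caches the cubin; subsequent calls with the
# same source and options are served from the in-memory map or the on-disk cache.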
|
||||
|
||||
"""
|
||||
Clears the kernel cache (useful for debugging & development)
|
||||
"""
|
||||
def clear_kernel_cache(self):
|
||||
self.logger.debug("Clearing cache")
|
||||
self.modules = {}
|
||||
gc.collect()
|
||||
|
||||
"""
|
||||
Synchronizes all streams etc
|
||||
"""
|
||||
def synchronize(self):
|
||||
self.cuda_context.synchronize()
|
144
GPUSimulators/EE2D_KP07_dimsplit.py
Normal file
@ -0,0 +1,144 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
This python module implements the dimensionally split KP07 scheme for the 2D Euler equations
|
||||
|
||||
Copyright (C) 2016 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
#Import packages we need
|
||||
from GPUSimulators import Simulator, Common
|
||||
from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
|
||||
import numpy as np
|
||||
|
||||
from pycuda import gpuarray
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
"""
|
||||
Class that solves the Euler equations using the dimensionally split KP07 scheme
|
||||
"""
|
||||
class EE2D_KP07_dimsplit (BaseSimulator):
|
||||
|
||||
"""
|
||||
Initialization routine
|
||||
rho: Density
|
||||
rho_u: Momentum along x-axis
|
||||
rho_v: Momentum along y-axis
|
||||
E: energy
|
||||
nx: Number of cells along x-axis
|
||||
ny: Number of cells along y-axis
|
||||
dx: Grid cell spacing along x-axis
|
||||
dy: Grid cell spacing along y-axis
|
||||
dt: Size of each timestep
|
||||
g: Gravitational constant
|
||||
gamma: Ratio of specific heats (adiabatic index)
|
||||
p: pressure
|
||||
"""
|
||||
def __init__(self,
|
||||
context,
|
||||
rho, rho_u, rho_v, E,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
g,
|
||||
gamma,
|
||||
theta=1.3,
|
||||
cfl_scale=0.9,
|
||||
boundary_conditions=BoundaryCondition(),
|
||||
block_width=16, block_height=8):
|
||||
|
||||
# Call super constructor
|
||||
super().__init__(context,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
boundary_conditions,
|
||||
cfl_scale,
|
||||
2,
|
||||
block_width, block_height)
|
||||
self.g = np.float32(g)
|
||||
self.gamma = np.float32(gamma)
|
||||
self.theta = np.float32(theta)
|
||||
|
||||
#Get kernels
|
||||
module = context.get_module("cuda/EE2D_KP07_dimsplit.cu",
|
||||
defines={
|
||||
'BLOCK_WIDTH': self.block_size[0],
|
||||
'BLOCK_HEIGHT': self.block_size[1]
|
||||
},
|
||||
compile_args={
|
||||
'no_extern_c': True,
|
||||
'options': ["--use_fast_math"],
|
||||
},
|
||||
jit_compile_args={})
|
||||
self.kernel = module.get_function("KP07DimsplitKernel")
|
||||
self.kernel.prepare("iiffffffiiPiPiPiPiPiPiPiPiP")
|
||||
|
||||
|
||||
#Create data by uploading to device
|
||||
self.u0 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
2, 2,
|
||||
[rho, rho_u, rho_v, E])
|
||||
self.u1 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
2, 2,
|
||||
[None, None, None, None])
|
||||
self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
|
||||
dt_x = np.min(self.dx / (np.abs(rho_u/rho) + np.sqrt(gamma*rho)))
|
||||
dt_y = np.min(self.dy / (np.abs(rho_v/rho) + np.sqrt(gamma*rho)))
|
||||
self.dt = min(dt_x, dt_y)
|
||||
self.cfl_data.fill(self.dt, stream=self.stream)
|
||||
|
||||
|
||||
def substep(self, dt, step_number):
|
||||
self.substepDimsplit(0.5*dt, step_number)
|
||||
|
||||
def substepDimsplit(self, dt, substep):
|
||||
self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
|
||||
self.nx, self.ny,
|
||||
self.dx, self.dy, dt,
|
||||
self.g,
|
||||
self.gamma,
|
||||
self.theta,
|
||||
substep,
|
||||
self.boundary_conditions,
|
||||
self.u0[0].data.gpudata, self.u0[0].data.strides[0],
|
||||
self.u0[1].data.gpudata, self.u0[1].data.strides[0],
|
||||
self.u0[2].data.gpudata, self.u0[2].data.strides[0],
|
||||
self.u0[3].data.gpudata, self.u0[3].data.strides[0],
|
||||
self.u1[0].data.gpudata, self.u1[0].data.strides[0],
|
||||
self.u1[1].data.gpudata, self.u1[1].data.strides[0],
|
||||
self.u1[2].data.gpudata, self.u1[2].data.strides[0],
|
||||
self.u1[3].data.gpudata, self.u1[3].data.strides[0],
|
||||
self.cfl_data.gpudata)
|
||||
self.u0, self.u1 = self.u1, self.u0
|
||||
|
||||
def getOutput(self):
|
||||
return self.u0
|
||||
|
||||
def check(self):
|
||||
self.u0.check()
|
||||
self.u1.check()
|
||||
|
||||
def computeDt(self):
|
||||
max_dt = gpuarray.min(self.cfl_data, stream=self.stream).get();
|
||||
return max_dt*0.5
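# Minimal manual stepping sketch (illustrative; assumes `context` is a
# CudaContext and rho, rho_u, rho_v, E are numpy float32 arrays):
#
#   sim = EE2D_KP07_dimsplit(context, rho, rho_u, rho_v, E,
#                            nx, ny, dx, dy, g=0.0, gamma=1.4)
#   dt = sim.computeDt()
#   for step in range(2):      # two dimensionally split substeps per timestep,
#       sim.substep(dt, step)  # matching the `2` passed to the super constructor
#   sim.check()                # sanity-check the device buffers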
|
@ -21,8 +21,11 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
#Import packages we need
|
||||
from GPUSimulators import Simulator
|
||||
from GPUSimulators import Simulator, Common
|
||||
from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
|
||||
import numpy as np
|
||||
|
||||
from pycuda import gpuarray
|
||||
|
||||
|
||||
|
||||
@ -50,47 +53,77 @@ class FORCE (Simulator.BaseSimulator):
|
||||
dt: Size of each timestep (90 s)
|
||||
g: Gravitational acceleration (9.81 m/s^2)
|
||||
"""
|
||||
def __init__(self, \
|
||||
context, \
|
||||
h0, hu0, hv0, \
|
||||
nx, ny, \
|
||||
dx, dy, dt, \
|
||||
g, \
|
||||
def __init__(self,
|
||||
context,
|
||||
h0, hu0, hv0,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
g,
|
||||
cfl_scale=0.9,
|
||||
boundary_conditions=BoundaryCondition(),
|
||||
block_width=16, block_height=16):
|
||||
|
||||
# Call super constructor
|
||||
super().__init__(context, \
|
||||
h0, hu0, hv0, \
|
||||
nx, ny, \
|
||||
1, 1, \
|
||||
dx, dy, dt, \
|
||||
g, \
|
||||
block_width, block_height);
|
||||
super().__init__(context,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
boundary_conditions,
|
||||
cfl_scale,
|
||||
1,
|
||||
block_width, block_height)
|
||||
self.g = np.float32(g)
|
||||
|
||||
#Get kernels
|
||||
self.kernel = context.get_prepared_kernel("FORCE_kernel.cu", "FORCEKernel", \
|
||||
"iiffffPiPiPiPiPiPi", \
|
||||
BLOCK_WIDTH=self.local_size[0], \
|
||||
BLOCK_HEIGHT=self.local_size[1])
|
||||
module = context.get_module("cuda/SWE2D_FORCE.cu",
|
||||
defines={
|
||||
'BLOCK_WIDTH': self.block_size[0],
|
||||
'BLOCK_HEIGHT': self.block_size[1]
|
||||
},
|
||||
compile_args={
|
||||
'no_extern_c': True,
|
||||
'options': ["--use_fast_math"],
|
||||
},
|
||||
jit_compile_args={})
|
||||
self.kernel = module.get_function("FORCEKernel")
|
||||
self.kernel.prepare("iiffffiPiPiPiPiPiPiP")
|
||||
|
||||
def __str__(self):
|
||||
return "First order centered"
|
||||
#Create data by uploading to device
|
||||
self.u0 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
1, 1,
|
||||
[h0, hu0, hv0])
|
||||
self.u1 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
1, 1,
|
||||
[None, None, None])
|
||||
self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
|
||||
dt_x = np.min(self.dx / (np.abs(hu0/h0) + np.sqrt(g*h0)))
|
||||
dt_y = np.min(self.dy / (np.abs(hv0/h0) + np.sqrt(g*h0)))
|
||||
dt = min(dt_x, dt_y)
|
||||
self.cfl_data.fill(dt, stream=self.stream)
|
||||
|
||||
def simulate(self, t_end):
|
||||
return super().simulateEuler(t_end)
|
||||
def substep(self, dt, step_number):
|
||||
self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
|
||||
self.nx, self.ny,
|
||||
self.dx, self.dy, dt,
|
||||
self.g,
|
||||
self.boundary_conditions,
|
||||
self.u0[0].data.gpudata, self.u0[0].data.strides[0],
|
||||
self.u0[1].data.gpudata, self.u0[1].data.strides[0],
|
||||
self.u0[2].data.gpudata, self.u0[2].data.strides[0],
|
||||
self.u1[0].data.gpudata, self.u1[0].data.strides[0],
|
||||
self.u1[1].data.gpudata, self.u1[1].data.strides[0],
|
||||
self.u1[2].data.gpudata, self.u1[2].data.strides[0],
|
||||
self.cfl_data.gpudata)
|
||||
self.u0, self.u1 = self.u1, self.u0
|
||||
|
||||
def stepEuler(self, dt):
|
||||
self.kernel.prepared_async_call(self.global_size, self.local_size, self.stream, \
|
||||
self.nx, self.ny, \
|
||||
self.dx, self.dy, dt, \
|
||||
self.g, \
|
||||
self.data.h0.data.gpudata, self.data.h0.data.strides[0], \
|
||||
self.data.hu0.data.gpudata, self.data.hu0.data.strides[0], \
|
||||
self.data.hv0.data.gpudata, self.data.hv0.data.strides[0], \
|
||||
self.data.h1.data.gpudata, self.data.h1.data.strides[0], \
|
||||
self.data.hu1.data.gpudata, self.data.hu1.data.strides[0], \
|
||||
self.data.hv1.data.gpudata, self.data.hv1.data.strides[0])
|
||||
self.data.swap()
|
||||
self.t += dt
|
||||
def getOutput(self):
|
||||
return self.u0
|
||||
|
||||
|
||||
def check(self):
|
||||
self.u0.check()
|
||||
self.u1.check()
|
||||
|
||||
def computeDt(self):
|
||||
max_dt = gpuarray.min(self.cfl_data, stream=self.stream).get();
|
||||
return max_dt
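# Construction sketch (illustrative; h0, hu0, hv0 are numpy float32 arrays
# holding water depth and momenta):
#
#   sim = FORCE(context, h0, hu0, hv0,
#               nx, ny, dx, dy, g=9.81)
#   sim.substep(sim.computeDt(), 0)    # one step of the first-order scheme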
|
@ -20,8 +20,11 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
#Import packages we need
|
||||
from GPUSimulators import Simulator
|
||||
from GPUSimulators import Simulator, Common
|
||||
from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
|
||||
import numpy as np
|
||||
|
||||
from pycuda import gpuarray
|
||||
|
||||
|
||||
|
||||
@ -45,47 +48,77 @@ class HLL (Simulator.BaseSimulator):
|
||||
dt: Size of each timestep (90 s)
|
||||
g: Gravitational acceleration (9.81 m/s^2)
|
||||
"""
|
||||
def __init__(self, \
|
||||
context, \
|
||||
h0, hu0, hv0, \
|
||||
nx, ny, \
|
||||
dx, dy, dt, \
|
||||
g, \
|
||||
def __init__(self,
|
||||
context,
|
||||
h0, hu0, hv0,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
g,
|
||||
cfl_scale=0.9,
|
||||
boundary_conditions=BoundaryCondition(),
|
||||
block_width=16, block_height=16):
|
||||
|
||||
# Call super constructor
|
||||
super().__init__(context, \
|
||||
h0, hu0, hv0, \
|
||||
nx, ny, \
|
||||
1, 1, \
|
||||
dx, dy, dt, \
|
||||
g, \
|
||||
super().__init__(context,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
boundary_conditions,
|
||||
cfl_scale,
|
||||
1,
|
||||
block_width, block_height);
|
||||
self.g = np.float32(g)
|
||||
|
||||
#Get kernels
|
||||
self.kernel = context.get_prepared_kernel("HLL_kernel.cu", "HLLKernel", \
|
||||
"iiffffPiPiPiPiPiPi", \
|
||||
BLOCK_WIDTH=self.local_size[0], \
|
||||
BLOCK_HEIGHT=self.local_size[1])
|
||||
module = context.get_module("cuda/SWE2D_HLL.cu",
|
||||
defines={
|
||||
'BLOCK_WIDTH': self.block_size[0],
|
||||
'BLOCK_HEIGHT': self.block_size[1]
|
||||
},
|
||||
compile_args={
|
||||
'no_extern_c': True,
|
||||
'options': ["--use_fast_math"],
|
||||
},
|
||||
jit_compile_args={})
|
||||
self.kernel = module.get_function("HLLKernel")
|
||||
self.kernel.prepare("iiffffiPiPiPiPiPiPiP")
|
||||
|
||||
def __str__(self):
|
||||
return "Harten-Lax-van Leer"
|
||||
|
||||
def simulate(self, t_end):
|
||||
return super().simulateEuler(t_end)
|
||||
#Create data by uploading to device
|
||||
self.u0 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
1, 1,
|
||||
[h0, hu0, hv0])
|
||||
self.u1 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
1, 1,
|
||||
[None, None, None])
|
||||
self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
|
||||
dt_x = np.min(self.dx / (np.abs(hu0/h0) + np.sqrt(g*h0)))
|
||||
dt_y = np.min(self.dy / (np.abs(hv0/h0) + np.sqrt(g*h0)))
|
||||
dt = min(dt_x, dt_y)
|
||||
self.cfl_data.fill(dt, stream=self.stream)
|
||||
|
||||
def stepEuler(self, dt):
|
||||
self.kernel.prepared_async_call(self.global_size, self.local_size, self.stream, \
|
||||
self.nx, self.ny, \
|
||||
self.dx, self.dy, dt, \
|
||||
self.g, \
|
||||
self.data.h0.data.gpudata, self.data.h0.data.strides[0], \
|
||||
self.data.hu0.data.gpudata, self.data.hu0.data.strides[0], \
|
||||
self.data.hv0.data.gpudata, self.data.hv0.data.strides[0], \
|
||||
self.data.h1.data.gpudata, self.data.h1.data.strides[0], \
|
||||
self.data.hu1.data.gpudata, self.data.hu1.data.strides[0], \
|
||||
self.data.hv1.data.gpudata, self.data.hv1.data.strides[0])
|
||||
self.data.swap()
|
||||
self.t += dt
|
||||
def substep(self, dt, step_number):
|
||||
self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
|
||||
self.nx, self.ny,
|
||||
self.dx, self.dy, dt,
|
||||
self.g,
|
||||
self.boundary_conditions,
|
||||
self.u0[0].data.gpudata, self.u0[0].data.strides[0],
|
||||
self.u0[1].data.gpudata, self.u0[1].data.strides[0],
|
||||
self.u0[2].data.gpudata, self.u0[2].data.strides[0],
|
||||
self.u1[0].data.gpudata, self.u1[0].data.strides[0],
|
||||
self.u1[1].data.gpudata, self.u1[1].data.strides[0],
|
||||
self.u1[2].data.gpudata, self.u1[2].data.strides[0],
|
||||
self.cfl_data.gpudata)
|
||||
self.u0, self.u1 = self.u1, self.u0
|
||||
|
||||
|
||||
def getOutput(self):
|
||||
return self.u0
|
||||
|
||||
def check(self):
|
||||
self.u0.check()
|
||||
self.u1.check()
|
||||
|
||||
def computeDt(self):
|
||||
max_dt = gpuarray.min(self.cfl_data, stream=self.stream).get();
|
||||
return max_dt*0.5
|
@ -20,9 +20,11 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
#Import packages we need
|
||||
from GPUSimulators import Simulator, Common
|
||||
from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
|
||||
import numpy as np
|
||||
from GPUSimulators import Simulator
|
||||
|
||||
from pycuda import gpuarray
|
||||
|
||||
|
||||
|
||||
@ -48,70 +50,84 @@ class HLL2 (Simulator.BaseSimulator):
|
||||
dt: Size of each timestep (90 s)
|
||||
g: Gravitational acceleration (9.81 m/s^2)
|
||||
"""
|
||||
def __init__(self, \
|
||||
context, \
|
||||
h0, hu0, hv0, \
|
||||
nx, ny, \
|
||||
dx, dy, dt, \
|
||||
g, \
|
||||
theta=1.8, \
|
||||
def __init__(self,
|
||||
context,
|
||||
h0, hu0, hv0,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
g,
|
||||
theta=1.8,
|
||||
cfl_scale=0.9,
|
||||
boundary_conditions=BoundaryCondition(),
|
||||
block_width=16, block_height=16):
|
||||
|
||||
# Call super constructor
|
||||
super().__init__(context, \
|
||||
h0, hu0, hv0, \
|
||||
nx, ny, \
|
||||
2, 2, \
|
||||
dx, dy, dt, \
|
||||
g, \
|
||||
super().__init__(context,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
boundary_conditions,
|
||||
cfl_scale,
|
||||
2,
|
||||
block_width, block_height);
|
||||
|
||||
self.g = np.float32(g)
|
||||
self.theta = np.float32(theta)
|
||||
|
||||
|
||||
#Get kernels
|
||||
self.kernel = context.get_prepared_kernel("HLL2_kernel.cu", "HLL2Kernel", \
|
||||
"iifffffiPiPiPiPiPiPi", \
|
||||
BLOCK_WIDTH=self.local_size[0], \
|
||||
BLOCK_HEIGHT=self.local_size[1])
|
||||
module = context.get_module("cuda/SWE2D_HLL2.cu",
|
||||
defines={
|
||||
'BLOCK_WIDTH': self.block_size[0],
|
||||
'BLOCK_HEIGHT': self.block_size[1]
|
||||
},
|
||||
compile_args={
|
||||
'no_extern_c': True,
|
||||
'options': ["--use_fast_math"],
|
||||
},
|
||||
jit_compile_args={})
|
||||
self.kernel = module.get_function("HLL2Kernel")
|
||||
self.kernel.prepare("iifffffiiPiPiPiPiPiPiP")
|
||||
|
||||
def __str__(self):
|
||||
return "Harten-Lax-van Leer (2nd order)"
|
||||
|
||||
def simulate(self, t_end):
|
||||
return super().simulateDimsplit(t_end)
|
||||
#Create data by uploading to device
|
||||
self.u0 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
2, 2,
|
||||
[h0, hu0, hv0])
|
||||
self.u1 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
2, 2,
|
||||
[None, None, None])
|
||||
self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
|
||||
dt_x = np.min(self.dx / (np.abs(hu0/h0) + np.sqrt(g*h0)))
|
||||
dt_y = np.min(self.dy / (np.abs(hv0/h0) + np.sqrt(g*h0)))
|
||||
dt = min(dt_x, dt_y)
|
||||
self.cfl_data.fill(dt, stream=self.stream)
|
||||
|
||||
def stepEuler(self, dt):
|
||||
return self.stepDimsplitXY(dt)
|
||||
def substep(self, dt, step_number):
|
||||
self.substepDimsplit(dt*0.5, step_number)
|
||||
|
||||
def stepDimsplitXY(self, dt):
|
||||
self.kernel.prepared_async_call(self.global_size, self.local_size, self.stream, \
|
||||
self.nx, self.ny, \
|
||||
self.dx, self.dy, dt, \
|
||||
self.g, \
|
||||
self.theta, \
|
||||
np.int32(0), \
|
||||
self.data.h0.data.gpudata, self.data.h0.data.strides[0], \
|
||||
self.data.hu0.data.gpudata, self.data.hu0.data.strides[0], \
|
||||
self.data.hv0.data.gpudata, self.data.hv0.data.strides[0], \
|
||||
self.data.h1.data.gpudata, self.data.h1.data.strides[0], \
|
||||
self.data.hu1.data.gpudata, self.data.hu1.data.strides[0], \
|
||||
self.data.hv1.data.gpudata, self.data.hv1.data.strides[0])
|
||||
self.data.swap()
|
||||
self.t += dt
|
||||
|
||||
def stepDimsplitYX(self, dt):
|
||||
self.kernel.prepared_async_call(self.global_size, self.local_size, self.stream, \
|
||||
self.nx, self.ny, \
|
||||
self.dx, self.dy, dt, \
|
||||
self.g, \
|
||||
self.theta, \
|
||||
np.int32(1), \
|
||||
self.data.h0.data.gpudata, self.data.h0.data.strides[0], \
|
||||
self.data.hu0.data.gpudata, self.data.hu0.data.strides[0], \
|
||||
self.data.hv0.data.gpudata, self.data.hv0.data.strides[0], \
|
||||
self.data.h1.data.gpudata, self.data.h1.data.strides[0], \
|
||||
self.data.hu1.data.gpudata, self.data.hu1.data.strides[0], \
|
||||
self.data.hv1.data.gpudata, self.data.hv1.data.strides[0])
|
||||
self.data.swap()
|
||||
self.t += dt
|
||||
|
||||
def substepDimsplit(self, dt, substep):
|
||||
self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
|
||||
self.nx, self.ny,
|
||||
self.dx, self.dy, dt,
|
||||
self.g,
|
||||
self.theta,
|
||||
substep,
|
||||
self.boundary_conditions,
|
||||
self.u0[0].data.gpudata, self.u0[0].data.strides[0],
|
||||
self.u0[1].data.gpudata, self.u0[1].data.strides[0],
|
||||
self.u0[2].data.gpudata, self.u0[2].data.strides[0],
|
||||
self.u1[0].data.gpudata, self.u1[0].data.strides[0],
|
||||
self.u1[1].data.gpudata, self.u1[1].data.strides[0],
|
||||
self.u1[2].data.gpudata, self.u1[2].data.strides[0],
|
||||
self.cfl_data.gpudata)
|
||||
self.u0, self.u1 = self.u1, self.u0
|
||||
|
||||
def getOutput(self):
|
||||
return self.u0
|
||||
|
||||
def check(self):
|
||||
self.u0.check()
|
||||
self.u1.check()
|
||||
|
||||
def computeDt(self):
|
||||
max_dt = gpuarray.min(self.cfl_data, stream=self.stream).get();
|
||||
return max_dt*0.5
|
@ -1,170 +0,0 @@
|
||||
/*
|
||||
This GPU kernel implements the HLL flux
|
||||
|
||||
Copyright (C) 2016 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
|
||||
|
||||
#include "common.cu"
|
||||
#include "fluxes/HartenLaxVanLeer.cu"
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Computes the flux along the x axis for all faces
|
||||
*/
|
||||
__device__
|
||||
void computeFluxF(float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
float F[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1],
|
||||
const float g_) {
|
||||
//Index of thread within block
|
||||
const int tx = get_local_id(0);
|
||||
const int ty = get_local_id(1);
|
||||
|
||||
{
|
||||
const int j=ty;
|
||||
const int l = j + 1; //Skip ghost cells
|
||||
for (int i=tx; i<BLOCK_WIDTH+1; i+=BLOCK_WIDTH) {
|
||||
const int k = i;
|
||||
|
||||
const float3 Q_l = make_float3(Q[0][l][k ], Q[1][l][k ], Q[2][l][k ]);
|
||||
const float3 Q_r = make_float3(Q[0][l][k+1], Q[1][l][k+1], Q[2][l][k+1]);
|
||||
|
||||
const float3 flux = HLL_flux(Q_l, Q_r, g_);
|
||||
|
||||
//Write to shared memory
|
||||
F[0][j][i] = flux.x;
|
||||
F[1][j][i] = flux.y;
|
||||
F[2][j][i] = flux.z;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Computes the flux along the y axis for all faces
|
||||
*/
|
||||
__device__
|
||||
void computeFluxG(float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
float G[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1],
|
||||
const float g_) {
|
||||
//Index of thread within block
|
||||
const int tx = get_local_id(0);
|
||||
const int ty = get_local_id(1);
|
||||
|
||||
for (int j=ty; j<BLOCK_HEIGHT+1; j+=BLOCK_HEIGHT) {
|
||||
const int l = j;
|
||||
{
|
||||
const int i=tx;
|
||||
const int k = i + 1; //Skip ghost cells
|
||||
|
||||
//Note that hu and hv are swapped ("transposing" the domain)!
|
||||
const float3 Q_l = make_float3(Q[0][l ][k], Q[2][l ][k], Q[1][l ][k]);
|
||||
const float3 Q_r = make_float3(Q[0][l+1][k], Q[2][l+1][k], Q[1][l+1][k]);
|
||||
|
||||
// Computed flux
|
||||
const float3 flux = HLL_flux(Q_l, Q_r, g_);
|
||||
|
||||
//Write to shared memory
|
||||
//Note that here we swap hu and hv back to their original order
|
||||
G[0][j][i] = flux.x;
|
||||
G[1][j][i] = flux.z;
|
||||
G[2][j][i] = flux.y;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
extern "C" {
|
||||
|
||||
__global__ void HLLKernel(
|
||||
int nx_, int ny_,
|
||||
float dx_, float dy_, float dt_,
|
||||
float g_,
|
||||
|
||||
//Input h^n
|
||||
float* h0_ptr_, int h0_pitch_,
|
||||
float* hu0_ptr_, int hu0_pitch_,
|
||||
float* hv0_ptr_, int hv0_pitch_,
|
||||
|
||||
//Output h^{n+1}
|
||||
float* h1_ptr_, int h1_pitch_,
|
||||
float* hu1_ptr_, int hu1_pitch_,
|
||||
float* hv1_ptr_, int hv1_pitch_) {
|
||||
|
||||
const int block_width = BLOCK_WIDTH;
|
||||
const int block_height = BLOCK_HEIGHT;
|
||||
|
||||
//Shared memory variables
|
||||
__shared__ float Q[3][block_height+2][block_width+2];
|
||||
__shared__ float F[3][block_height+1][block_width+1];
|
||||
|
||||
|
||||
//Read into shared memory
|
||||
readBlock1(h0_ptr_, h0_pitch_,
|
||||
hu0_ptr_, hu0_pitch_,
|
||||
hv0_ptr_, hv0_pitch_,
|
||||
Q, nx_, ny_);
|
||||
__syncthreads();
|
||||
|
||||
noFlowBoundary1(Q, nx_, ny_);
|
||||
__syncthreads();
|
||||
|
||||
//Compute F flux
|
||||
computeFluxF(Q, F, g_);
|
||||
__syncthreads();
|
||||
evolveF1(Q, F, nx_, ny_, dx_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Set boundary conditions
|
||||
noFlowBoundary1(Q, nx_, ny_);
|
||||
__syncthreads();
|
||||
|
||||
//Compute G flux
|
||||
computeFluxG(Q, F, g_);
|
||||
__syncthreads();
|
||||
evolveG1(Q, F, nx_, ny_, dy_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
|
||||
//Q[0][get_local_id(1) + 1][get_local_id(0) + 1] += 0.1;
|
||||
|
||||
|
||||
|
||||
// Write to main memory for all internal cells
|
||||
writeBlock1(h1_ptr_, h1_pitch_,
|
||||
hu1_ptr_, hu1_pitch_,
|
||||
hv1_ptr_, hv1_pitch_,
|
||||
Q, nx_, ny_);
|
||||
}
|
||||
|
||||
} // extern "C"
|
@ -20,16 +20,17 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import gc
|
||||
|
||||
from IPython.core import magic_arguments
|
||||
from IPython.core.magic import line_magic, Magics, magics_class
|
||||
import pycuda.driver as cuda
|
||||
|
||||
from GPUSimulators import Common
|
||||
from GPUSimulators import Common, CudaContext
|
||||
|
||||
|
||||
@magics_class
|
||||
class MyIPythonMagic(Magics):
|
||||
class MagicCudaContext(Magics):
|
||||
@line_magic
|
||||
@magic_arguments.magic_arguments()
|
||||
@magic_arguments.argument(
|
||||
@ -46,6 +47,10 @@ class MyIPythonMagic(Magics):
|
||||
|
||||
self.logger.info("Registering %s in user workspace", args.name)
|
||||
|
||||
context_flags = None
|
||||
if (args.blocking):
|
||||
context_flags = cuda.ctx_flags.SCHED_BLOCKING_SYNC
|
||||
|
||||
if args.name in self.shell.user_ns.keys():
|
||||
self.logger.debug("Context already registered! Ignoring")
|
||||
return
|
||||
@ -53,7 +58,7 @@ class MyIPythonMagic(Magics):
|
||||
self.logger.debug("Creating context")
|
||||
use_cache = False if args.no_cache else True
|
||||
use_autotuning = False if args.no_autotuning else True
|
||||
self.shell.user_ns[args.name] = Common.CudaContext(blocking=args.blocking, use_cache=use_cache, autotuning=use_autotuning)
|
||||
self.shell.user_ns[args.name] = CudaContext.CudaContext(context_flags=context_flags, use_cache=use_cache, autotuning=use_autotuning)
|
||||
|
||||
# this function will be called on exceptions in any cell
|
||||
def custom_exc(shell, etype, evalue, tb, tb_offset=None):
|
||||
@ -90,8 +95,21 @@ class MyIPythonMagic(Magics):
|
||||
self.logger.debug("==================================================================")
|
||||
atexit.register(exitfunc)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@magics_class
|
||||
class MagicLogger(Magics):
|
||||
logger_initialized = False
|
||||
|
||||
@line_magic
|
||||
@magic_arguments.magic_arguments()
|
||||
@magic_arguments.argument(
|
||||
'name', type=str, help='Name of context to create')
|
||||
@magic_arguments.argument(
|
||||
'--out', '-o', type=str, default='output.log', help='The filename to store the log to')
|
||||
@magic_arguments.argument(
|
||||
@ -99,30 +117,77 @@ class MyIPythonMagic(Magics):
|
||||
@magic_arguments.argument(
|
||||
'--file_level', '-f', type=int, default=10, help='The level of logging to file [0, 50]')
|
||||
def setup_logging(self, line):
|
||||
args = magic_arguments.parse_argstring(self.setup_logging, line)
|
||||
import sys
|
||||
|
||||
#Get root logger
|
||||
logger = logging.getLogger('')
|
||||
logger.setLevel(min(args.level, args.file_level))
|
||||
if (self.logger_initialized):
|
||||
logging.getLogger('GPUSimulators').info("Global logger already initialized!")
|
||||
return;
|
||||
else:
|
||||
self.logger_initialized = True
|
||||
|
||||
args = magic_arguments.parse_argstring(self.setup_logging, line)
|
||||
import sys
|
||||
|
||||
#Get root logger
|
||||
logger = logging.getLogger('GPUSimulators')
|
||||
logger.setLevel(min(args.level, args.file_level))
|
||||
|
||||
#Add log to screen
|
||||
ch = logging.StreamHandler()
|
||||
ch.setLevel(args.level)
|
||||
logger.addHandler(ch)
|
||||
logger.log(args.level, "Console logger using level %s", logging.getLevelName(args.level))
|
||||
|
||||
#Add log to file
|
||||
logger.log(args.level, "File logger using level %s to %s", logging.getLevelName(args.file_level), args.out)
|
||||
fh = logging.FileHandler(args.out)
|
||||
formatter = logging.Formatter('%(asctime)s:%(name)s:%(levelname)s: %(message)s')
|
||||
fh.setFormatter(formatter)
|
||||
fh.setLevel(args.file_level)
|
||||
logger.addHandler(fh)
|
||||
#Add log to screen
|
||||
ch = logging.StreamHandler()
|
||||
ch.setLevel(args.level)
|
||||
logger.addHandler(ch)
|
||||
logger.log(args.level, "Console logger using level %s", logging.getLevelName(args.level))
|
||||
|
||||
#Get the outfilename (try to evaluate if Python expression...)
|
||||
try:
|
||||
outfile = eval(args.out, self.shell.user_global_ns, self.shell.user_ns)
|
||||
except:
|
||||
outfile = args.out
|
||||
|
||||
#Add log to file
|
||||
logger.log(args.level, "File logger using level %s to %s", logging.getLevelName(args.file_level), outfile)
|
||||
|
||||
fh = logging.FileHandler(outfile)
|
||||
formatter = logging.Formatter('%(asctime)s:%(name)s:%(levelname)s: %(message)s')
|
||||
fh.setFormatter(formatter)
|
||||
fh.setLevel(args.file_level)
|
||||
logger.addHandler(fh)
|
||||
|
||||
logger.info("Python version %s", sys.version)
|
||||
self.shell.user_ns[args.name] = logger
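# Typical notebook usage (illustrative):
#
#   %setup_logging --out sim.log my_logger
#
# This attaches a console handler and a file handler writing to sim.log,
# and stores the configured logger in the user namespace as 'my_logger'.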
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
@magics_class
|
||||
class MagicMPI(Magics):
|
||||
|
||||
@line_magic
|
||||
@magic_arguments.magic_arguments()
|
||||
@magic_arguments.argument(
|
||||
'name', type=str, help='Name of context to create')
|
||||
@magic_arguments.argument(
|
||||
'--num_engines', '-n', type=int, default=4, help='Number of engines to start')
|
||||
def setup_mpi(self, line):
|
||||
args = magic_arguments.parse_argstring(self.setup_mpi, line)
|
||||
logger = logging.getLogger('GPUSimulators')
|
||||
if args.name in self.shell.user_ns.keys():
|
||||
logger.warning("MPI alreay set up, resetting")
|
||||
self.shell.user_ns[args.name].shutdown()
|
||||
self.shell.user_ns[args.name] = None
|
||||
gc.collect()
|
||||
self.shell.user_ns[args.name] = Common.IPEngine(args.num_engines)
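# Typical notebook usage (illustrative):
#
#   %setup_mpi cluster --num_engines 4
#
# This starts an IPEngine with four engines (shutting down and replacing
# any previously registered one) and stores it as 'cluster'.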
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
# Register
|
||||
ip = get_ipython()
|
||||
ip.register_magics(MyIPythonMagic)
|
||||
ip.register_magics(MagicCudaContext)
|
||||
ip.register_magics(MagicLogger)
|
||||
ip.register_magics(MagicMPI)
|
||||
|
||||
|
@ -25,9 +25,11 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
#Import packages we need
|
||||
from GPUSimulators import Simulator, Common
|
||||
from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
|
||||
import numpy as np
|
||||
from GPUSimulators import Simulator
|
||||
|
||||
from pycuda import gpuarray
|
||||
|
||||
|
||||
|
||||
@ -49,64 +51,88 @@ class KP07 (Simulator.BaseSimulator):
|
||||
dt: Size of each timestep (90 s)
|
||||
g: Gravitational acceleration (9.81 m/s^2)
|
||||
"""
|
||||
def __init__(self, \
|
||||
context, \
|
||||
h0, hu0, hv0, \
|
||||
nx, ny, \
|
||||
dx, dy, dt, \
|
||||
g, \
|
||||
theta=1.3, \
|
||||
def __init__(self,
|
||||
context,
|
||||
h0, hu0, hv0,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
g,
|
||||
theta=1.3,
|
||||
cfl_scale=0.9,
|
||||
order=2,
|
||||
boundary_conditions=BoundaryCondition(),
|
||||
block_width=16, block_height=16):
|
||||
|
||||
# Call super constructor
|
||||
super().__init__(context, \
|
||||
h0, hu0, hv0, \
|
||||
nx, ny, \
|
||||
2, 2, \
|
||||
dx, dy, dt, \
|
||||
g, \
|
||||
super().__init__(context,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
boundary_conditions,
|
||||
cfl_scale,
|
||||
order,
|
||||
block_width, block_height);
|
||||
|
||||
self.theta = np.float32(theta)
|
||||
self.g = np.float32(g)
|
||||
self.theta = np.float32(theta)
|
||||
self.order = np.int32(order)
|
||||
|
||||
#Get kernels
|
||||
self.kernel = context.get_prepared_kernel("KP07_kernel.cu", "KP07Kernel", \
|
||||
"iifffffiPiPiPiPiPiPi", \
|
||||
BLOCK_WIDTH=self.local_size[0], \
|
||||
BLOCK_HEIGHT=self.local_size[1])
|
||||
module = context.get_module("cuda/SWE2D_KP07.cu",
|
||||
defines={
|
||||
'BLOCK_WIDTH': self.block_size[0],
|
||||
'BLOCK_HEIGHT': self.block_size[1]
|
||||
},
|
||||
compile_args={
|
||||
'no_extern_c': True,
|
||||
'options': ["--use_fast_math"],
|
||||
},
|
||||
jit_compile_args={})
|
||||
self.kernel = module.get_function("KP07Kernel")
|
||||
self.kernel.prepare("iifffffiiPiPiPiPiPiPiP")
|
||||
|
||||
def __str__(self):
|
||||
return "Kurganov-Petrova 2007"
|
||||
|
||||
def simulate(self, t_end):
|
||||
return super().simulateRK(t_end, 2)
|
||||
#Create data by uploading to device
|
||||
self.u0 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
2, 2,
|
||||
[h0, hu0, hv0])
|
||||
self.u1 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
2, 2,
|
||||
[None, None, None])
|
||||
self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
|
||||
dt_x = np.min(self.dx / (np.abs(hu0/h0) + np.sqrt(g*h0)))
|
||||
dt_y = np.min(self.dy / (np.abs(hv0/h0) + np.sqrt(g*h0)))
|
||||
dt = min(dt_x, dt_y)
|
||||
self.cfl_data.fill(dt, stream=self.stream)
|
||||
|
||||
|
||||
def substep(self, dt, step_number):
|
||||
self.substepRK(dt, step_number)
|
||||
|
||||
|
||||
def substepRK(self, dt, substep):
|
||||
self.kernel.prepared_async_call(self.global_size, self.local_size, self.stream, \
|
||||
self.nx, self.ny, \
|
||||
self.dx, self.dy, dt, \
|
||||
self.g, \
|
||||
self.theta, \
|
||||
np.int32(substep), \
|
||||
self.data.h0.data.gpudata, self.data.h0.data.strides[0], \
|
||||
self.data.hu0.data.gpudata, self.data.hu0.data.strides[0], \
|
||||
self.data.hv0.data.gpudata, self.data.hv0.data.strides[0], \
|
||||
self.data.h1.data.gpudata, self.data.h1.data.strides[0], \
|
||||
self.data.hu1.data.gpudata, self.data.hu1.data.strides[0], \
|
||||
self.data.hv1.data.gpudata, self.data.hv1.data.strides[0])
|
||||
self.data.swap()
|
||||
|
||||
def stepEuler(self, dt):
|
||||
self.substepRK(dt, 0)
|
||||
self.t += dt
|
||||
|
||||
def stepRK(self, dt, order):
|
||||
if (order != 2):
|
||||
raise NotImplementedError("Only second order implemented")
|
||||
self.substepRK(dt, 0)
|
||||
self.substepRK(dt, 1)
|
||||
self.t += dt
|
||||
|
||||
def download(self):
|
||||
return self.data.download(self.stream)
|
||||
self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
|
||||
self.nx, self.ny,
|
||||
self.dx, self.dy, dt,
|
||||
self.g,
|
||||
self.theta,
|
||||
Simulator.stepOrderToCodedInt(step=substep, order=self.order),
|
||||
self.boundary_conditions,
|
||||
self.u0[0].data.gpudata, self.u0[0].data.strides[0],
|
||||
self.u0[1].data.gpudata, self.u0[1].data.strides[0],
|
||||
self.u0[2].data.gpudata, self.u0[2].data.strides[0],
|
||||
self.u1[0].data.gpudata, self.u1[0].data.strides[0],
|
||||
self.u1[1].data.gpudata, self.u1[1].data.strides[0],
|
||||
self.u1[2].data.gpudata, self.u1[2].data.strides[0],
|
||||
self.cfl_data.gpudata)
|
||||
self.u0, self.u1 = self.u1, self.u0
|
||||
|
||||
def getOutput(self):
|
||||
return self.u0
|
||||
|
||||
def check(self):
|
||||
self.u0.check()
|
||||
self.u1.check()
|
||||
|
||||
def computeDt(self):
|
||||
max_dt = gpuarray.min(self.cfl_data, stream=self.stream).get();
|
||||
return max_dt*0.5**(self.order-1)
|
@ -25,8 +25,11 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
#Import packages we need
|
||||
from GPUSimulators import Simulator, Common
|
||||
from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
|
||||
import numpy as np
|
||||
from GPUSimulators import Simulator
|
||||
|
||||
from pycuda import gpuarray
|
||||
|
||||
|
||||
|
||||
@ -35,7 +38,7 @@ from GPUSimulators import Simulator
|
||||
"""
|
||||
Class that solves the SW equations using the dimensionally split KP07 scheme
|
||||
"""
|
||||
class KP07_dimsplit (Simulator.BaseSimulator):
|
||||
class KP07_dimsplit(Simulator.BaseSimulator):
|
||||
|
||||
"""
|
||||
Initialization routine
|
||||
@ -49,71 +52,86 @@ class KP07_dimsplit (Simulator.BaseSimulator):
|
||||
dt: Size of each timestep (90 s)
|
||||
g: Gravitational acceleration (9.81 m/s^2)
|
||||
"""
|
||||
def __init__(self, \
|
||||
context, \
|
||||
h0, hu0, hv0, \
|
||||
nx, ny, \
|
||||
dx, dy, dt, \
|
||||
g, \
|
||||
theta=1.3, \
|
||||
def __init__(self,
|
||||
context,
|
||||
h0, hu0, hv0,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
g,
|
||||
theta=1.3,
|
||||
cfl_scale=0.9,
|
||||
boundary_conditions=BoundaryCondition(),
|
||||
block_width=16, block_height=16):
|
||||
|
||||
# Call super constructor
|
||||
super().__init__(context, \
|
||||
h0, hu0, hv0, \
|
||||
nx, ny, \
|
||||
2, 2, \
|
||||
dx, dy, dt, \
|
||||
g, \
|
||||
block_width, block_height);
|
||||
|
||||
super().__init__(context,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
boundary_conditions,
|
||||
cfl_scale,
|
||||
2,
|
||||
block_width, block_height)
|
||||
self.gc_x = 2
|
||||
self.gc_y = 2
|
||||
self.g = np.float32(g)
|
||||
self.theta = np.float32(theta)
|
||||
|
||||
#Get kernels
|
||||
self.kernel = context.get_prepared_kernel("KP07_dimsplit_kernel.cu", "KP07DimsplitKernel", \
|
||||
"iifffffiPiPiPiPiPiPi", \
|
||||
BLOCK_WIDTH=self.local_size[0], \
|
||||
BLOCK_HEIGHT=self.local_size[1])
|
||||
module = context.get_module("cuda/SWE2D_KP07_dimsplit.cu",
|
||||
defines={
|
||||
'BLOCK_WIDTH': self.block_size[0],
|
||||
'BLOCK_HEIGHT': self.block_size[1]
|
||||
},
|
||||
compile_args={
|
||||
'no_extern_c': True,
|
||||
'options': ["--use_fast_math"],
|
||||
},
|
||||
jit_compile_args={})
|
||||
self.kernel = module.get_function("KP07DimsplitKernel")
|
||||
self.kernel.prepare("iifffffiiPiPiPiPiPiPiP")
|
||||
|
||||
def __str__(self):
|
||||
return "Kurganov-Petrova 2007 dimensionally split"
|
||||
#Create data by uploading to device
|
||||
self.u0 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
self.gc_x, self.gc_y,
|
||||
[h0, hu0, hv0])
|
||||
self.u1 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
self.gc_x, self.gc_y,
|
||||
[None, None, None])
|
||||
self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
|
||||
dt_x = np.min(self.dx / (np.abs(hu0/h0) + np.sqrt(g*h0)))
|
||||
dt_y = np.min(self.dy / (np.abs(hv0/h0) + np.sqrt(g*h0)))
|
||||
dt = min(dt_x, dt_y)
|
||||
self.cfl_data.fill(dt, stream=self.stream)
|
||||
|
||||
def simulate(self, t_end):
|
||||
return super().simulateDimsplit(t_end)
|
||||
def substep(self, dt, step_number):
|
||||
self.substepDimsplit(dt*0.5, step_number)
|
||||
|
||||
def stepEuler(self, dt):
|
||||
return self.stepDimsplitXY(dt)
|
||||
|
||||
def stepDimsplitXY(self, dt):
|
||||
self.kernel.prepared_async_call(self.global_size, self.local_size, self.stream, \
|
||||
self.nx, self.ny, \
|
||||
self.dx, self.dy, dt, \
|
||||
self.g, \
|
||||
self.theta, \
|
||||
np.int32(0), \
|
||||
self.data.h0.data.gpudata, self.data.h0.data.strides[0], \
|
||||
self.data.hu0.data.gpudata, self.data.hu0.data.strides[0], \
|
||||
self.data.hv0.data.gpudata, self.data.hv0.data.strides[0], \
|
||||
self.data.h1.data.gpudata, self.data.h1.data.strides[0], \
|
||||
self.data.hu1.data.gpudata, self.data.hu1.data.strides[0], \
|
||||
self.data.hv1.data.gpudata, self.data.hv1.data.strides[0])
|
||||
self.data.swap()
|
||||
self.t += dt
|
||||
|
||||
def stepDimsplitYX(self, dt):
|
||||
self.kernel.prepared_async_call(self.global_size, self.local_size, self.stream, \
|
||||
self.nx, self.ny, \
|
||||
self.dx, self.dy, dt, \
|
||||
self.g, \
|
||||
self.theta, \
|
||||
np.int32(1), \
|
||||
self.data.h0.data.gpudata, self.data.h0.data.strides[0], \
|
||||
self.data.hu0.data.gpudata, self.data.hu0.data.strides[0], \
|
||||
self.data.hv0.data.gpudata, self.data.hv0.data.strides[0], \
|
||||
self.data.h1.data.gpudata, self.data.h1.data.strides[0], \
|
||||
self.data.hu1.data.gpudata, self.data.hu1.data.strides[0], \
|
||||
self.data.hv1.data.gpudata, self.data.hv1.data.strides[0])
|
||||
self.data.swap()
|
||||
self.t += dt
|
||||
|
||||
|
||||
def substepDimsplit(self, dt, substep):
|
||||
self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
|
||||
self.nx, self.ny,
|
||||
self.dx, self.dy, dt,
|
||||
self.g,
|
||||
self.theta,
|
||||
substep,
|
||||
self.boundary_conditions,
|
||||
self.u0[0].data.gpudata, self.u0[0].data.strides[0],
|
||||
self.u0[1].data.gpudata, self.u0[1].data.strides[0],
|
||||
self.u0[2].data.gpudata, self.u0[2].data.strides[0],
|
||||
self.u1[0].data.gpudata, self.u1[0].data.strides[0],
|
||||
self.u1[1].data.gpudata, self.u1[1].data.strides[0],
|
||||
self.u1[2].data.gpudata, self.u1[2].data.strides[0],
|
||||
self.cfl_data.gpudata)
|
||||
self.u0, self.u1 = self.u1, self.u0
|
||||
|
||||
def getOutput(self):
|
||||
return self.u0
|
||||
|
||||
def check(self):
|
||||
self.u0.check()
|
||||
self.u1.check()
|
||||
|
||||
def computeDt(self):
|
||||
max_dt = gpuarray.min(self.cfl_data, stream=self.stream).get();
|
||||
return max_dt*0.5
|
@ -21,8 +21,11 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
#Import packages we need
|
||||
from GPUSimulators import Simulator
|
||||
from GPUSimulators import Simulator, Common
|
||||
from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
|
||||
import numpy as np
|
||||
|
||||
from pycuda import gpuarray
|
||||
|
||||
|
||||
|
||||
@ -46,47 +49,77 @@ class LxF (Simulator.BaseSimulator):
|
||||
dt: Size of each timestep (90 s)
|
||||
g: Gravitational acceleration (9.81 m/s^2)
|
||||
"""
|
||||
def __init__(self, \
|
||||
context, \
|
||||
h0, hu0, hv0, \
|
||||
nx, ny, \
|
||||
dx, dy, dt, \
|
||||
g, \
|
||||
def __init__(self,
|
||||
context,
|
||||
h0, hu0, hv0,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
g,
|
||||
cfl_scale=0.9,
|
||||
boundary_conditions=BoundaryCondition(),
|
||||
block_width=16, block_height=16):
|
||||
|
||||
# Call super constructor
|
||||
super().__init__(context, \
|
||||
h0, hu0, hv0, \
|
||||
nx, ny, \
|
||||
1, 1, \
|
||||
dx, dy, dt, \
|
||||
g, \
|
||||
super().__init__(context,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
boundary_conditions,
|
||||
cfl_scale,
|
||||
1,
|
||||
block_width, block_height);
|
||||
self.g = np.float32(g)
|
||||
|
||||
# Get kernels
|
||||
self.kernel = context.get_prepared_kernel("LxF_kernel.cu", "LxFKernel", \
|
||||
"iiffffPiPiPiPiPiPi", \
|
||||
BLOCK_WIDTH=self.local_size[0], \
|
||||
BLOCK_HEIGHT=self.local_size[1])
|
||||
|
||||
def __str__(self):
|
||||
return "Lax Friedrichs"
|
||||
|
||||
def simulate(self, t_end):
|
||||
return super().simulateEuler(t_end)
|
||||
|
||||
def stepEuler(self, dt):
|
||||
self.kernel.prepared_async_call(self.global_size, self.local_size, self.stream, \
|
||||
self.nx, self.ny, \
|
||||
self.dx, self.dy, dt, \
|
||||
self.g, \
|
||||
self.data.h0.data.gpudata, self.data.h0.data.strides[0], \
|
||||
self.data.hu0.data.gpudata, self.data.hu0.data.strides[0], \
|
||||
self.data.hv0.data.gpudata, self.data.hv0.data.strides[0], \
|
||||
self.data.h1.data.gpudata, self.data.h1.data.strides[0], \
|
||||
self.data.hu1.data.gpudata, self.data.hu1.data.strides[0], \
|
||||
self.data.hv1.data.gpudata, self.data.hv1.data.strides[0])
|
||||
self.data.swap()
|
||||
self.t += dt
|
||||
|
||||
module = context.get_module("cuda/SWE2D_LxF.cu",
|
||||
defines={
|
||||
'BLOCK_WIDTH': self.block_size[0],
|
||||
'BLOCK_HEIGHT': self.block_size[1]
|
||||
},
|
||||
compile_args={
|
||||
'no_extern_c': True,
|
||||
'options': ["--use_fast_math"],
|
||||
},
|
||||
jit_compile_args={})
|
||||
self.kernel = module.get_function("LxFKernel")
|
||||
self.kernel.prepare("iiffffiPiPiPiPiPiPiP")
|
||||
|
||||
#Create data by uploading to device
|
||||
self.u0 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
1, 1,
|
||||
[h0, hu0, hv0])
|
||||
self.u1 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
1, 1,
|
||||
[None, None, None])
|
||||
self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
|
||||
dt_x = np.min(self.dx / (np.abs(hu0/h0) + np.sqrt(g*h0)))
|
||||
dt_y = np.min(self.dy / (np.abs(hv0/h0) + np.sqrt(g*h0)))
|
||||
dt = min(dt_x, dt_y)
|
||||
self.cfl_data.fill(dt, stream=self.stream)
|
||||
|
||||
def substep(self, dt, step_number):
|
||||
self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
|
||||
self.nx, self.ny,
|
||||
self.dx, self.dy, dt,
|
||||
self.g,
|
||||
self.boundary_conditions,
|
||||
self.u0[0].data.gpudata, self.u0[0].data.strides[0],
|
||||
self.u0[1].data.gpudata, self.u0[1].data.strides[0],
|
||||
self.u0[2].data.gpudata, self.u0[2].data.strides[0],
|
||||
self.u1[0].data.gpudata, self.u1[0].data.strides[0],
|
||||
self.u1[1].data.gpudata, self.u1[1].data.strides[0],
|
||||
self.u1[2].data.gpudata, self.u1[2].data.strides[0],
|
||||
self.cfl_data.gpudata)
|
||||
self.u0, self.u1 = self.u1, self.u0
|
||||
|
||||
def getOutput(self):
|
||||
return self.u0
|
||||
|
||||
def check(self):
|
||||
self.u0.check()
|
||||
self.u1.check()
|
||||
|
||||
def computeDt(self):
|
||||
max_dt = gpuarray.min(self.cfl_data, stream=self.stream).get();
|
||||
return max_dt*0.5
|
413
GPUSimulators/MPISimulator.py
Normal file
@ -0,0 +1,413 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
This python module implements MPI simulator class
|
||||
|
||||
Copyright (C) 2018 SINTEF Digital
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
|
||||
import logging
|
||||
from GPUSimulators import Simulator
|
||||
import numpy as np
|
||||
from mpi4py import MPI
|
||||
|
||||
|
||||
|
||||
|
||||
class MPIGrid(object):
|
||||
"""
|
||||
Class which represents an MPI grid of nodes. Facilitates easy communication between
|
||||
neighboring nodes
|
||||
"""
|
||||
def __init__(self, comm, ndims=2):
|
||||
self.logger = logging.getLogger(__name__)
|
||||
|
||||
assert ndims == 2, "Unsupported number of dimensions. Must be two at the moment"
|
||||
assert comm.size >= 1, "Must have at least one node"
|
||||
|
||||
self.grid = MPIGrid.getGrid(comm.size, ndims)
|
||||
self.comm = comm
|
||||
|
||||
self.logger.debug("Created MPI grid: {:}. Rank {:d} has coordinate {:}".format(
|
||||
self.grid, self.comm.rank, self.getCoordinate()))
|
||||
|
||||
def getCoordinate(self, rank=None):
|
||||
if (rank is None):
|
||||
rank = self.comm.rank
|
||||
i = (rank % self.grid[0])
|
||||
j = (rank // self.grid[0])
|
||||
return i, j
|
||||
|
||||
def getRank(self, i, j):
|
||||
return j*self.grid[0] + i
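# Example (illustrative): on a grid of [2, 2] nodes, ranks map to
# coordinates as 0 -> (0, 0), 1 -> (1, 0), 2 -> (0, 1), 3 -> (1, 1),
# and getRank simply inverts that mapping.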
|
||||
|
||||
def getEast(self):
|
||||
i, j = self.getCoordinate(self.comm.rank)
|
||||
i = (i+1) % self.grid[0]
|
||||
return self.getRank(i, j)
|
||||
|
||||
def getWest(self):
|
||||
i, j = self.getCoordinate(self.comm.rank)
|
||||
i = (i+self.grid[0]-1) % self.grid[0]
|
||||
return self.getRank(i, j)
|
||||
|
||||
def getNorth(self):
|
||||
i, j = self.getCoordinate(self.comm.rank)
|
||||
j = (j+1) % self.grid[1]
|
||||
return self.getRank(i, j)
|
||||
|
||||
def getSouth(self):
|
||||
i, j = self.getCoordinate(self.comm.rank)
|
||||
j = (j+self.grid[1]-1) % self.grid[1]
|
||||
return self.getRank(i, j)
|
||||
|
||||
def getGrid(num_nodes, num_dims):
|
||||
assert(isinstance(num_nodes, int))
|
||||
assert(isinstance(num_dims, int))
|
||||
|
||||
# Adapted from https://stackoverflow.com/questions/28057307/factoring-a-number-into-roughly-equal-factors
|
||||
# Original code by https://stackoverflow.com/users/3928385/ishamael
|
||||
# Factorizes a number into n roughly equal factors
|
||||
|
||||
#Dictionary to remember already computed permutations
|
||||
memo = {}
|
||||
def dp(n, left): # returns tuple (cost, [factors])
|
||||
"""
|
||||
Recursively searches through all factorizations
|
||||
"""
|
||||
|
||||
#Already tried: return existing result
|
||||
if (n, left) in memo:
|
||||
return memo[(n, left)]
|
||||
|
||||
#Spent all factors: return number itself
|
||||
if left == 1:
|
||||
return (n, [n])
|
||||
|
||||
#Find new factor
|
||||
i = 2
|
||||
best = n
|
||||
bestTuple = [n]
|
||||
while i * i < n:
|
||||
#If factor found
|
||||
if n % i == 0:
|
||||
#Factorize remainder
|
||||
rem = dp(n // i, left - 1)
|
||||
|
||||
#If new permutation better, save it
|
||||
if rem[0] + i < best:
|
||||
best = rem[0] + i
|
||||
bestTuple = [i] + rem[1]
|
||||
i += 1
|
||||
|
||||
#Store calculation
|
||||
memo[(n, left)] = (best, bestTuple)
|
||||
return memo[(n, left)]
|
||||
|
||||
|
||||
grid = dp(num_nodes, num_dims)[1]
|
||||
|
||||
if (len(grid) < num_dims):
|
||||
#Split problematic 4
|
||||
if (4 in grid):
|
||||
grid.remove(4)
|
||||
grid.append(2)
|
||||
grid.append(2)
|
||||
|
||||
#Pad with ones to guarantee num_dims
|
||||
grid = grid + [1]*(num_dims - len(grid))
|
||||
|
||||
#Sort in descending order
|
||||
grid = np.sort(grid)
|
||||
grid = grid[::-1]
|
||||
|
||||
return grid
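# Examples of the resulting partitioning (illustrative, assuming the
# dp-based search above):
#   getGrid(4, 2) -> [2, 2]   (a lone factor of 4 is split into 2 x 2)
#   getGrid(6, 2) -> [3, 2]
#   getGrid(7, 2) -> [7, 1]   (primes fall back to an N x 1 grid)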
|
||||
|
||||
|
||||
def gather(self, data, root=0):
|
||||
out_data = None
|
||||
if (self.comm.rank == root):
|
||||
out_data = np.empty([self.comm.size] + list(data.shape), dtype=data.dtype)
|
||||
self.comm.Gather(data, out_data, root)
|
||||
return out_data
|
||||
|
||||
def getLocalRank(self):
|
||||
"""
|
||||
Returns the local rank on this node for this MPI process
|
||||
"""
|
||||
|
||||
# This function has been adapted from
|
||||
# https://github.com/SheffieldML/PyDeepGP/blob/master/deepgp/util/parallel.py
|
||||
# by Zhenwen Dai released under BSD 3-Clause "New" or "Revised" License:
|
||||
#
|
||||
# Copyright (c) 2016, Zhenwen Dai
|
||||
# All rights reserved.
|
||||
#
|
||||
# Redistribution and use in source and binary forms, with or without
|
||||
# modification, are permitted provided that the following conditions are met:
|
||||
#
|
||||
# * Redistributions of source code must retain the above copyright notice, this
|
||||
# list of conditions and the following disclaimer.
|
||||
#
|
||||
# * Redistributions in binary form must reproduce the above copyright notice,
|
||||
# this list of conditions and the following disclaimer in the documentation
|
||||
# and/or other materials provided with the distribution.
|
||||
#
|
||||
# * Neither the name of DGP nor the names of its
|
||||
# contributors may be used to endorse or promote products derived from
|
||||
# this software without specific prior written permission.
|
||||
#
|
||||
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
|
||||
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
|
||||
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
|
||||
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
|
||||
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
|
||||
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
|
||||
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
|
||||
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
|
||||
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
|
||||
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
#Get this rank's unique (physical) node name
|
||||
node_name = MPI.Get_processor_name()
|
||||
|
||||
#Gather the list of all node names on all nodes
|
||||
node_names = self.comm.allgather(node_name)
|
||||
|
||||
#Loop over all node names up until our rank
|
||||
#and count how many duplicates of our nodename we find
|
||||
local_rank = len([0 for name in node_names[:self.comm.rank] if name==node_name])
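# Example (illustrative): if the gathered node names are
# ['node0', 'node0', 'node1', 'node0'], then rank 3 sees two earlier
# occurrences of 'node0' and gets local_rank 2.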
|
||||
|
||||
return local_rank
|
||||
|
||||
|
||||
class MPISimulator(Simulator.BaseSimulator):
|
||||
"""
|
||||
Class which handles communication between simulators on different MPI nodes
|
||||
"""
|
||||
def __init__(self, sim, grid):
|
||||
self.logger = logging.getLogger(__name__)
|
||||
|
||||
autotuner = sim.context.autotuner
|
||||
sim.context.autotuner = None;
|
||||
boundary_conditions = sim.getBoundaryConditions()
|
||||
super().__init__(sim.context,
|
||||
sim.nx, sim.ny,
|
||||
sim.dx, sim.dy,
|
||||
boundary_conditions,
|
||||
sim.cfl_scale,
|
||||
sim.num_substeps,
|
||||
sim.block_size[0], sim.block_size[1])
|
||||
sim.context.autotuner = autotuner
|
||||
|
||||
self.sim = sim
|
||||
self.grid = grid
|
||||
|
||||
#Get neighbor node ids
|
||||
self.east = grid.getEast()
|
||||
self.west = grid.getWest()
|
||||
self.north = grid.getNorth()
|
||||
self.south = grid.getSouth()
|
||||
|
||||
#Get coordinate of this node
|
||||
#and handle global boundary conditions
|
||||
new_boundary_conditions = Simulator.BoundaryCondition({
|
||||
'north': Simulator.BoundaryCondition.Type.Dirichlet,
|
||||
'south': Simulator.BoundaryCondition.Type.Dirichlet,
|
||||
'east': Simulator.BoundaryCondition.Type.Dirichlet,
|
||||
'west': Simulator.BoundaryCondition.Type.Dirichlet
|
||||
})
|
||||
gi, gj = grid.getCoordinate()
|
||||
if (gi == 0 and boundary_conditions.west != Simulator.BoundaryCondition.Type.Periodic):
|
||||
self.west = None
|
||||
new_boundary_conditions.west = boundary_conditions.west;
|
||||
if (gj == 0 and boundary_conditions.south != Simulator.BoundaryCondition.Type.Periodic):
|
||||
self.south = None
|
||||
new_boundary_conditions.south = boundary_conditions.south;
|
||||
if (gi == grid.grid[0]-1 and boundary_conditions.east != Simulator.BoundaryCondition.Type.Periodic):
|
||||
self.east = None
|
||||
new_boundary_conditions.east = boundary_conditions.east;
|
||||
if (gj == grid.grid[1]-1 and boundary_conditions.north != Simulator.BoundaryCondition.Type.Periodic):
|
||||
self.north = None
|
||||
new_boundary_conditions.north = boundary_conditions.north;
|
||||
sim.setBoundaryConditions(new_boundary_conditions)
|
||||
|
||||
#Get number of variables
|
||||
self.nvars = len(self.getOutput().gpu_variables)
|
||||
|
||||
#Shorthands for computing extents and sizes
|
||||
gc_x = int(self.sim.getOutput()[0].x_halo)
|
||||
gc_y = int(self.sim.getOutput()[0].y_halo)
|
||||
nx = int(self.sim.nx)
|
||||
ny = int(self.sim.ny)
|
||||
|
||||
#Set regions for ghost cells to read from
|
||||
#These have the format [x0, y0, width, height]
|
||||
self.read_e = np.array([ nx, 0, gc_x, ny + 2*gc_y])
|
||||
self.read_w = np.array([gc_x, 0, gc_x, ny + 2*gc_y])
|
||||
self.read_n = np.array([gc_x, ny, nx, gc_y])
|
||||
self.read_s = np.array([gc_x, gc_y, nx, gc_y])
|
||||
|
||||
#Set regions for ghost cells to write to
|
||||
self.write_e = self.read_e + np.array([gc_x, 0, 0, 0])
|
||||
self.write_w = self.read_w - np.array([gc_x, 0, 0, 0])
|
||||
self.write_n = self.read_n + np.array([0, gc_y, 0, 0])
|
||||
self.write_s = self.read_s - np.array([0, gc_y, 0, 0])
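# Example (illustrative): with nx = ny = 16 and gc_x = gc_y = 2 the regions
# become, in [x0, y0, width, height] form,
#   read_e = [16,  0,  2, 20]    write_e = [18,  0,  2, 20]
#   read_w = [ 2,  0,  2, 20]    write_w = [ 0,  0,  2, 20]
#   read_n = [ 2, 16, 16,  2]    write_n = [ 2, 18, 16,  2]
#   read_s = [ 2,  2, 16,  2]    write_s = [ 2,  0, 16,  2]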
|
||||
|
||||
#Allocate data for receiving
|
||||
#Note that east and west also transfer ghost cells
|
||||
#whilst north/south only transfer internal cells
|
||||
#Reuses the width/height defined in the read extents above
|
||||
self.in_e = np.empty((self.nvars, self.read_e[3], self.read_e[2]), dtype=np.float32)
|
||||
self.in_w = np.empty((self.nvars, self.read_w[3], self.read_w[2]), dtype=np.float32)
|
||||
self.in_n = np.empty((self.nvars, self.read_n[3], self.read_n[2]), dtype=np.float32)
|
||||
self.in_s = np.empty((self.nvars, self.read_s[3], self.read_s[2]), dtype=np.float32)
|
||||
|
||||
#Allocate data for sending
|
||||
self.out_e = np.empty_like(self.in_e)
|
||||
self.out_w = np.empty_like(self.in_w)
|
||||
self.out_n = np.empty_like(self.in_n)
|
||||
self.out_s = np.empty_like(self.in_s)
|
||||
|
||||
self.logger.debug("Simlator rank {:d} initialized on {:s}".format(self.grid.comm.rank, MPI.Get_processor_name()))
|
||||
|
||||
|
||||
def substep(self, dt, step_number):
|
||||
self.exchange()
|
||||
self.sim.substep(dt, step_number)
|
||||
|
||||
def getOutput(self):
|
||||
return self.sim.getOutput()
|
||||
|
||||
def synchronize(self):
|
||||
self.sim.synchronize()
|
||||
|
||||
def check(self):
|
||||
return self.sim.check()
|
||||
|
||||
def computeDt(self):
|
||||
local_dt = np.array([np.float32(self.sim.computeDt())]);
|
||||
global_dt = np.empty(1, dtype=np.float32)
|
||||
self.grid.comm.Allreduce(local_dt, global_dt, op=MPI.MIN)
|
||||
self.logger.debug("Local dt: {:f}, global dt: {:f}".format(local_dt[0], global_dt[0]))
|
||||
return global_dt[0]
|
||||
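#Added note (not in the original code): the MIN Allreduce makes every rank advance with
#the same, globally admissible timestep, so all subdomains stay synchronized in time.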
|
||||
|
||||
def getExtent(self):
|
||||
"""
|
||||
Function which returns the physical extent of
|
||||
this node in the grid
|
||||
"""
|
||||
width = self.sim.nx*self.sim.dx
|
||||
height = self.sim.ny*self.sim.dy
|
||||
i, j = self.grid.getCoordinate()
|
||||
x0 = i * width
|
||||
y0 = j * height
|
||||
x1 = x0 + width
|
||||
y1 = y0 + height
|
||||
return [x0, x1, y0, y1]
|
||||
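#Illustrative example (assumed values, not from the original code): with a 2x2 grid,
#nx = ny = 100 and dx = dy = 1.0, the node at grid coordinate (1, 0) gets the extent
#[100.0, 200.0, 0.0, 100.0] in [x0, x1, y0, y1] form.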
|
||||
def exchange(self):
|
||||
####
|
||||
# FIXME: This function can be optimized using persistent communications.
|
||||
# Also by overlapping some of the north/south and east/west GPU transfers with the intra-node
|
||||
# MPI communications
|
||||
####
|
||||
|
||||
####
|
||||
# First transfer internal cells north-south
|
||||
####
|
||||
|
||||
#Download from the GPU
|
||||
if self.north is not None:
|
||||
for k in range(self.nvars):
|
||||
self.sim.u0[k].download(self.sim.stream, cpu_data=self.out_n[k,:,:], asynch=True, extent=self.read_n)
|
||||
if self.south is not None:
|
||||
for k in range(self.nvars):
|
||||
self.sim.u0[k].download(self.sim.stream, cpu_data=self.out_s[k,:,:], asynch=True, extent=self.read_s)
|
||||
self.sim.stream.synchronize()
|
||||
|
||||
#Send/receive to north/south neighbours
|
||||
comm_send = []
|
||||
comm_recv = []
|
||||
if self.north is not None:
|
||||
comm_send += [self.grid.comm.Isend(self.out_n, dest=self.north, tag=4*self.nt + 0)]
|
||||
comm_recv += [self.grid.comm.Irecv(self.in_n, source=self.north, tag=4*self.nt + 1)]
|
||||
if self.south is not None:
|
||||
comm_send += [self.grid.comm.Isend(self.out_s, dest=self.south, tag=4*self.nt + 1)]
|
||||
comm_recv += [self.grid.comm.Irecv(self.in_s, source=self.south, tag=4*self.nt + 0)]
|
||||
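#Added note (not in the original code): the message tags are derived from the timestep
#number so the exchanges of one step use distinct tags; the north/south pair above uses
#4*nt+0 and 4*nt+1, while the east/west pair below uses 4*nt+2 and 4*nt+3, which keeps
#messages travelling in different directions from being matched against each other.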
|
||||
#Wait for incoming transfers to complete
|
||||
for comm in comm_recv:
|
||||
comm.wait()
|
||||
|
||||
#Upload to the GPU
|
||||
if self.north is not None:
|
||||
for k in range(self.nvars):
|
||||
self.sim.u0[k].upload(self.sim.stream, self.in_n[k,:,:], extent=self.write_n)
|
||||
if self.south is not None:
|
||||
for k in range(self.nvars):
|
||||
self.sim.u0[k].upload(self.sim.stream, self.in_s[k,:,:], extent=self.write_s)
|
||||
|
||||
#Wait for sending to complete
|
||||
for comm in comm_send:
|
||||
comm.wait()
|
||||
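#Added note (not in the original code): waiting on the sends as well as the receives
#ensures out_n/out_s are free to be overwritten the next time exchange() downloads into them.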
|
||||
|
||||
|
||||
####
|
||||
# Then transfer east-west including ghost cells that have been filled in by north-south transfer above
|
||||
####
|
||||
|
||||
#Download from the GPU
|
||||
if self.east is not None:
|
||||
for k in range(self.nvars):
|
||||
self.sim.u0[k].download(self.sim.stream, cpu_data=self.out_e[k,:,:], asynch=True, extent=self.read_e)
|
||||
if self.west is not None:
|
||||
for k in range(self.nvars):
|
||||
self.sim.u0[k].download(self.sim.stream, cpu_data=self.out_w[k,:,:], asynch=True, extent=self.read_w)
|
||||
self.sim.stream.synchronize()
|
||||
|
||||
#Send/receive to east/west neighbours
|
||||
comm_send = []
|
||||
comm_recv = []
|
||||
if self.east is not None:
|
||||
comm_send += [self.grid.comm.Isend(self.out_e, dest=self.east, tag=4*self.nt + 2)]
|
||||
comm_recv += [self.grid.comm.Irecv(self.in_e, source=self.east, tag=4*self.nt + 3)]
|
||||
if self.west is not None:
|
||||
comm_send += [self.grid.comm.Isend(self.out_w, dest=self.west, tag=4*self.nt + 3)]
|
||||
comm_recv += [self.grid.comm.Irecv(self.in_w, source=self.west, tag=4*self.nt + 2)]
|
||||
|
||||
|
||||
#Wait for incoming transfers to complete
|
||||
for comm in comm_recv:
|
||||
comm.wait()
|
||||
|
||||
#Upload to the GPU
|
||||
if self.east is not None:
|
||||
for k in range(self.nvars):
|
||||
self.sim.u0[k].upload(self.sim.stream, self.in_e[k,:,:], extent=self.write_e)
|
||||
if self.west is not None:
|
||||
for k in range(self.nvars):
|
||||
self.sim.u0[k].upload(self.sim.stream, self.in_w[k,:,:], extent=self.write_w)
|
||||
|
||||
#Wait for sending to complete
|
||||
for comm in comm_send:
|
||||
comm.wait()
|
||||
|
||||
|
||||
|
@ -23,6 +23,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
#Import packages we need
|
||||
import numpy as np
|
||||
import logging
|
||||
from enum import IntEnum
|
||||
|
||||
import pycuda.compiler as cuda_compiler
|
||||
import pycuda.gpuarray
|
||||
@ -31,169 +32,256 @@ import pycuda.driver as cuda
|
||||
from GPUSimulators import Common
|
||||
|
||||
|
||||
class BaseSimulator:
|
||||
|
||||
|
||||
|
||||
class BoundaryCondition(object):
|
||||
"""
|
||||
Initialization routine
|
||||
context: GPU context to use
|
||||
kernel_wrapper: wrapper function of GPU kernel
|
||||
h0: Water depth incl ghost cells, (nx+1)*(ny+1) cells
|
||||
hu0: Initial momentum along x-axis incl ghost cells, (nx+1)*(ny+1) cells
|
||||
hv0: Initial momentum along y-axis incl ghost cells, (nx+1)*(ny+1) cells
|
||||
nx: Number of cells along x-axis
|
||||
ny: Number of cells along y-axis
|
||||
dx: Grid cell spacing along x-axis (20 000 m)
|
||||
dy: Grid cell spacing along y-axis (20 000 m)
|
||||
dt: Size of each timestep (90 s)
|
||||
g: Gravitational acceleration (9.81 m/s^2)
|
||||
Class for holding boundary conditions for global boundaries
|
||||
"""
|
||||
def __init__(self, \
|
||||
context, \
|
||||
h0, hu0, hv0, \
|
||||
nx, ny, \
|
||||
ghost_cells_x, ghost_cells_y, \
|
||||
dx, dy, dt, \
|
||||
g, \
|
||||
|
||||
|
||||
class Type(IntEnum):
|
||||
"""
|
||||
Enum that describes the different types of boundary conditions
|
||||
WARNING: MUST MATCH THAT OF common.h IN CUDA
|
||||
"""
|
||||
Dirichlet = 0
|
||||
Neumann = 1
|
||||
Periodic = 2
|
||||
Reflective = 3
|
||||
|
||||
def __init__(self, types={
|
||||
'north': Type.Reflective,
|
||||
'south': Type.Reflective,
|
||||
'east': Type.Reflective,
|
||||
'west': Type.Reflective
|
||||
}):
|
||||
"""
|
||||
Constructor
|
||||
"""
|
||||
self.north = types['north']
|
||||
self.south = types['south']
|
||||
self.east = types['east']
|
||||
self.west = types['west']
|
||||
|
||||
if (self.north == BoundaryCondition.Type.Neumann \
|
||||
or self.south == BoundaryCondition.Type.Neumann \
|
||||
or self.east == BoundaryCondition.Type.Neumann \
|
||||
or self.west == BoundaryCondition.Type.Neumann):
|
||||
raise(NotImplementedError("Neumann boundary condition not supported"))
|
||||
|
||||
def __str__(self):
|
||||
return '[north={:s}, south={:s}, east={:s}, west={:s}]'.format(str(self.north), str(self.south), str(self.east), str(self.west))
|
||||
|
||||
|
||||
def asCodedInt(self):
|
||||
"""
|
||||
Helper function which packs four boundary conditions into one integer
|
||||
"""
|
||||
bc = 0
|
||||
bc = bc | (self.north & 0x0000000F) << 24
|
||||
bc = bc | (self.south & 0x0000000F) << 16
|
||||
bc = bc | (self.east & 0x0000000F) << 8
|
||||
bc = bc | (self.west & 0x0000000F) << 0
|
||||
|
||||
#for t in types:
|
||||
# print("{0:s}, {1:d}, {1:032b}, {1:08b}".format(t, types[t]))
|
||||
#print("bc: {0:032b}".format(bc))
|
||||
|
||||
return np.int32(bc)
|
||||
|
||||
def getTypes(bc):
|
||||
types = {}
|
||||
types['north'] = BoundaryCondition.Type((bc >> 24) & 0x0000000F)
|
||||
types['south'] = BoundaryCondition.Type((bc >> 16) & 0x0000000F)
|
||||
types['east'] = BoundaryCondition.Type((bc >> 8) & 0x0000000F)
|
||||
types['west'] = BoundaryCondition.Type((bc >> 0) & 0x0000000F)
|
||||
return types
|
||||
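#Worked example (illustrative only): with all four boundaries Reflective (Type value 3),
#asCodedInt() packs them into 0x03030303, and getTypes(0x03030303) recovers the same four
#types again, so the two helpers form a round trip as long as each type fits in 4 bits.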
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
class BaseSimulator(object):
|
||||
|
||||
def __init__(self,
|
||||
context,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
boundary_conditions,
|
||||
cfl_scale,
|
||||
num_substeps,
|
||||
block_width, block_height):
|
||||
"""
|
||||
Initialization routine
|
||||
context: GPU context to use
|
||||
kernel_wrapper: wrapper function of GPU kernel
|
||||
h0: Water depth incl ghost cells, (nx+1)*(ny+1) cells
|
||||
hu0: Initial momentum along x-axis incl ghost cells, (nx+1)*(ny+1) cells
|
||||
hv0: Initial momentum along y-axis incl ghost cells, (nx+1)*(ny+1) cells
|
||||
nx: Number of cells along x-axis
|
||||
ny: Number of cells along y-axis
|
||||
dx: Grid cell spacing along x-axis (20 000 m)
|
||||
dy: Grid cell spacing along y-axis (20 000 m)
|
||||
dt: Size of each timestep (90 s)
|
||||
cfl_scale: Courant number
|
||||
num_substeps: Number of substeps to perform for a full step
|
||||
"""
|
||||
#Get logger
|
||||
self.logger = logging.getLogger(__name__ + "." + self.__class__.__name__)
|
||||
|
||||
#Save input parameters
|
||||
#Notice that we need to specify them in the correct data format for the
|
||||
#GPU kernel
|
||||
self.context = context
|
||||
self.nx = np.int32(nx)
|
||||
self.ny = np.int32(ny)
|
||||
self.dx = np.float32(dx)
|
||||
self.dy = np.float32(dy)
|
||||
self.setBoundaryConditions(boundary_conditions)
|
||||
self.cfl_scale = cfl_scale
|
||||
self.num_substeps = num_substeps
|
||||
|
||||
#Handle autotuning block size
|
||||
if (self.context.autotuner):
|
||||
peak_configuration = self.context.autotuner.get_peak_performance(self.__class__)
|
||||
block_width = int(peak_configuration["block_width"])
|
||||
block_height = int(peak_configuration["block_height"])
|
||||
self.logger.debug("Used autotuning to get block size [%d x %d]", block_width, block_height)
|
||||
|
||||
#Compute kernel launch parameters
|
||||
self.block_size = (block_width, block_height, 1)
|
||||
self.grid_size = (
|
||||
int(np.ceil(self.nx / float(self.block_size[0]))),
|
||||
int(np.ceil(self.ny / float(self.block_size[1])))
|
||||
)
|
||||
|
||||
#Create a CUDA stream
|
||||
self.stream = cuda.Stream()
|
||||
|
||||
#Create data by uploading to device
|
||||
free, total = cuda.mem_get_info()
|
||||
self.logger.debug("GPU memory: %d / %d MB available", int(free/(1024*1024)), int(total/(1024*1024)))
|
||||
self.data = Common.SWEDataArakawaA(self.stream, \
|
||||
nx, ny, \
|
||||
ghost_cells_x, ghost_cells_y, \
|
||||
h0, hu0, hv0)
|
||||
|
||||
#Save input parameters
|
||||
#Notice that we need to specify them in the correct data format for the
|
||||
#GPU kernel
|
||||
self.nx = np.int32(nx)
|
||||
self.ny = np.int32(ny)
|
||||
self.dx = np.float32(dx)
|
||||
self.dy = np.float32(dy)
|
||||
self.dt = np.float32(dt)
|
||||
self.g = np.float32(g)
|
||||
#Keep track of simulation time and number of timesteps
|
||||
self.t = 0.0
|
||||
self.nt = 0
|
||||
|
||||
#Keep track of simulation time
|
||||
self.t = 0.0;
|
||||
|
||||
#Compute kernel launch parameters
|
||||
self.local_size = (block_width, block_height, 1)
|
||||
self.global_size = ( \
|
||||
int(np.ceil(self.nx / float(self.local_size[0]))), \
|
||||
int(np.ceil(self.ny / float(self.local_size[1]))) \
|
||||
)
|
||||
|
||||
"""
|
||||
Function which simulates forward in time using the default simulation type
|
||||
"""
|
||||
def simulate(self, t_end):
|
||||
raise(NotImplementedError("Needs to be implemented in subclass"))
|
||||
|
||||
"""
|
||||
Function which simulates t_end seconds using forward Euler
|
||||
Requires that the stepEuler functionality is implemented in the subclasses
|
||||
"""
|
||||
def simulateEuler(self, t_end):
|
||||
with Common.Timer(self.__class__.__name__ + ".simulateEuler") as t:
|
||||
# Compute number of timesteps to perform
|
||||
n = int(t_end / self.dt + 1)
|
||||
|
||||
for i in range(0, n):
|
||||
# Compute timestep for "this" iteration
|
||||
local_dt = np.float32(min(self.dt, t_end-i*self.dt))
|
||||
|
||||
# Stop if end reached (should not happen)
|
||||
if (local_dt <= 0.0):
|
||||
break
|
||||
|
||||
# Step with forward Euler
|
||||
self.stepEuler(local_dt)
|
||||
|
||||
self.logger.info("%s simulated %f seconds to %f with %d steps in %f seconds", self.__class__.__name__, t_end, self.t, n, t.secs)
|
||||
return self.t, n
|
||||
|
||||
"""
|
||||
Function which simulates t_end seconds using Runge-Kutta 2
|
||||
Requires that the stepRK functionality is implemented in the subclasses
|
||||
"""
|
||||
def simulateRK(self, t_end, order):
|
||||
with Common.Timer(self.__class__.__name__ + ".simulateRK") as t:
|
||||
# Compute number of timesteps to perform
|
||||
n = int(t_end / self.dt + 1)
|
||||
|
||||
for i in range(0, n):
|
||||
# Compute timestep for "this" iteration
|
||||
local_dt = np.float32(min(self.dt, t_end-i*self.dt))
|
||||
|
||||
# Stop if end reached (should not happen)
|
||||
if (local_dt <= 0.0):
|
||||
break
|
||||
|
||||
# Perform all the Runge-Kutta substeps
|
||||
self.stepRK(local_dt, order)
|
||||
|
||||
self.logger.info("%s simulated %f seconds to %f with %d steps in %f seconds", self.__class__.__name__, t_end, self.t, n, t.secs)
|
||||
return self.t, n
|
||||
|
||||
"""
|
||||
Function which simulates t_end seconds using second order dimensional splitting (XYYX)
|
||||
Requires that the stepDimsplitX and stepDimsplitY functionality is implemented in the subclasses
|
||||
"""
|
||||
def simulateDimsplit(self, t_end):
|
||||
with Common.Timer(self.__class__.__name__ + ".simulateDimsplit") as t:
|
||||
# Compute number of timesteps to perform
|
||||
n = int(t_end / (2.0*self.dt) + 1)
|
||||
|
||||
for i in range(0, n):
|
||||
# Compute timestep for "this" iteration
|
||||
local_dt = np.float32(0.5*min(2*self.dt, t_end-2*i*self.dt))
|
||||
|
||||
# Stop if end reached (should not happen)
|
||||
if (local_dt <= 0.0):
|
||||
break
|
||||
|
||||
# Perform the dimensional split substeps
|
||||
self.stepDimsplitXY(local_dt)
|
||||
self.stepDimsplitYX(local_dt)
|
||||
|
||||
self.logger.info("%s simulated %f seconds to %f with %d steps in %f seconds", self.__class__.__name__, t_end, self.t, 2*n, t.secs)
|
||||
return self.t, 2*n
|
||||
|
||||
|
||||
"""
|
||||
Function which performs one single timestep of size dt using forward euler
|
||||
"""
|
||||
def stepEuler(self, dt):
|
||||
raise(NotImplementedError("Needs to be implemented in subclass"))
|
||||
|
||||
def stepRK(self, dt, substep):
|
||||
raise(NotImplementedError("Needs to be implemented in subclass"))
|
||||
|
||||
def stepDimsplitXY(self, dt):
|
||||
raise(NotImplementedError("Needs to be implemented in subclass"))
|
||||
|
||||
def stepDimsplitYX(self, dt):
|
||||
raise(NotImplementedError("Needs to be implemented in subclass"))
|
||||
|
||||
def sim_time(self):
|
||||
return self.t
|
||||
|
||||
def download(self):
|
||||
return self.data.download(self.stream)
|
||||
def __str__(self):
|
||||
return "{:s} [{:d}x{:d}]".format(self.__class__.__name__, self.nx, self.ny)
|
||||
|
||||
|
||||
def simulate(self, t, dt=None):
|
||||
"""
|
||||
Function which simulates t seconds using the step function
|
||||
Requires that the step() function is implemented in the subclasses
|
||||
"""
|
||||
|
||||
printer = Common.ProgressPrinter(t)
|
||||
|
||||
t_start = self.simTime()
|
||||
t_end = t_start + t
|
||||
|
||||
update_dt = True
|
||||
if (dt is not None):
|
||||
update_dt = False
|
||||
self.dt = dt
|
||||
|
||||
while(self.simTime() < t_end):
|
||||
# Update dt every 100 timesteps and cross your fingers it works
|
||||
# for the next 100
|
||||
if (update_dt and (self.simSteps() % 100 == 0)):
|
||||
self.dt = self.computeDt()*self.cfl_scale
|
||||
|
||||
# Compute timestep for "this" iteration (i.e., shorten last timestep)
|
||||
current_dt = np.float32(min(self.dt, t_end-self.simTime()))
|
||||
|
||||
# Stop if end reached (should not happen)
|
||||
if (current_dt <= 0.0):
|
||||
self.logger.warning("Timestep size {:d} is less than or equal to zero!".format(self.simSteps()))
|
||||
break
|
||||
|
||||
# Step forward in time
|
||||
self.step(current_dt)
|
||||
|
||||
#Print info
|
||||
print_string = printer.getPrintString(self.simTime() - t_start)
|
||||
if (print_string):
|
||||
self.logger.info("%s: %s", self, print_string)
|
||||
try:
|
||||
self.check()
|
||||
except AssertionError as e:
|
||||
e.args += ("Step={:d}, time={:f}".format(self.simSteps(), self.simTime()),)
|
||||
raise
|
||||
|
||||
|
||||
def step(self, dt):
|
||||
"""
|
||||
Function which performs one single timestep of size dt
|
||||
"""
|
||||
for i in range(self.num_substeps):
|
||||
self.substep(dt, i)
|
||||
|
||||
self.t += dt
|
||||
self.nt += 1
|
||||
|
||||
def download(self, variables=None):
|
||||
return self.getOutput().download(self.stream, variables)
|
||||
|
||||
def synchronize(self):
|
||||
self.stream.synchronize()
|
||||
|
||||
def simTime(self):
|
||||
return self.t
|
||||
|
||||
def simSteps(self):
|
||||
return self.nt
|
||||
|
||||
def getExtent(self):
|
||||
return [0, 0, self.nx*self.dx, self.ny*self.dy]
|
||||
|
||||
def setBoundaryConditions(self, boundary_conditions):
|
||||
self.logger.debug("Boundary conditions set to {:s}".format(str(boundary_conditions)))
|
||||
self.boundary_conditions = boundary_conditions.asCodedInt()
|
||||
|
||||
def getBoundaryConditions(self):
|
||||
return BoundaryCondition(BoundaryCondition.getTypes(self.boundary_conditions))
|
||||
|
||||
def substep(self, dt, step_number):
|
||||
"""
|
||||
Function which performs one single substep with stepsize dt
|
||||
"""
|
||||
raise(NotImplementedError("Needs to be implemented in subclass"))
|
||||
|
||||
def getOutput(self):
|
||||
raise(NotImplementedError("Needs to be implemented in subclass"))
|
||||
|
||||
def check(self):
|
||||
self.logger.warning("check() is not implemented - please implement")
|
||||
#raise(NotImplementedError("Needs to be implemented in subclass"))
|
||||
|
||||
def computeDt(self):
|
||||
raise(NotImplementedError("Needs to be implemented in subclass"))
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def stepOrderToCodedInt(step, order):
|
||||
"""
|
||||
Helper function which packs the step and order into a single integer
|
||||
"""
|
||||
step_order = (step << 16) | (order & 0x0000ffff)
|
||||
#print("Step: {0:032b}".format(step))
|
||||
#print("Order: {0:032b}".format(order))
|
||||
#print("Mix: {0:032b}".format(step_order))
|
||||
return np.int32(step_order)
|
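#Worked example (illustrative only): stepOrderToCodedInt(step=1, order=2) gives
#(1 << 16) | 2 = 0x00010002, so a kernel can unpack the step from the high 16 bits
#and the order from the low 16 bits of the coded integer.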
@ -21,9 +21,11 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
#Import packages we need
|
||||
from GPUSimulators import Simulator, Common
|
||||
from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
|
||||
import numpy as np
|
||||
from GPUSimulators import Simulator
|
||||
|
||||
from pycuda import gpuarray
|
||||
|
||||
|
||||
|
||||
@ -45,64 +47,81 @@ class WAF (Simulator.BaseSimulator):
|
||||
dt: Size of each timestep (90 s)
|
||||
g: Gravitational acceleration (9.81 m/s^2)
|
||||
"""
|
||||
def __init__(self, \
|
||||
context, \
|
||||
h0, hu0, hv0, \
|
||||
nx, ny, \
|
||||
dx, dy, dt, \
|
||||
g, \
|
||||
def __init__(self,
|
||||
context,
|
||||
h0, hu0, hv0,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
g,
|
||||
cfl_scale=0.9,
|
||||
boundary_conditions=BoundaryCondition(),
|
||||
block_width=16, block_height=16):
|
||||
|
||||
# Call super constructor
|
||||
super().__init__(context, \
|
||||
h0, hu0, hv0, \
|
||||
nx, ny, \
|
||||
2, 2, \
|
||||
dx, dy, dt, \
|
||||
g, \
|
||||
super().__init__(context,
|
||||
nx, ny,
|
||||
dx, dy,
|
||||
boundary_conditions,
|
||||
cfl_scale,
|
||||
2,
|
||||
block_width, block_height);
|
||||
self.g = np.float32(g)
|
||||
|
||||
#Get kernels
|
||||
self.kernel = context.get_prepared_kernel("WAF_kernel.cu", "WAFKernel", \
|
||||
"iiffffiPiPiPiPiPiPi", \
|
||||
BLOCK_WIDTH=self.local_size[0], \
|
||||
BLOCK_HEIGHT=self.local_size[1])
|
||||
module = context.get_module("cuda/SWE2D_WAF.cu",
|
||||
defines={
|
||||
'BLOCK_WIDTH': self.block_size[0],
|
||||
'BLOCK_HEIGHT': self.block_size[1]
|
||||
},
|
||||
compile_args={
|
||||
'no_extern_c': True,
|
||||
'options': ["--use_fast_math"],
|
||||
},
|
||||
jit_compile_args={})
|
||||
self.kernel = module.get_function("WAFKernel")
|
||||
self.kernel.prepare("iiffffiiPiPiPiPiPiPiP")
|
||||
|
||||
def __str__(self):
|
||||
return "Weighted average flux"
|
||||
#Create data by uploading to device
|
||||
self.u0 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
2, 2,
|
||||
[h0, hu0, hv0])
|
||||
self.u1 = Common.ArakawaA2D(self.stream,
|
||||
nx, ny,
|
||||
2, 2,
|
||||
[None, None, None])
|
||||
self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
|
||||
dt_x = np.min(self.dx / (np.abs(hu0/h0) + np.sqrt(g*h0)))
|
||||
dt_y = np.min(self.dy / (np.abs(hv0/h0) + np.sqrt(g*h0)))
|
||||
dt = min(dt_x, dt_y)
|
||||
self.cfl_data.fill(dt, stream=self.stream)
|
||||
|
||||
def simulate(self, t_end):
|
||||
return super().simulateDimsplit(t_end)
|
||||
def substep(self, dt, step_number):
|
||||
self.substepDimsplit(dt*0.5, step_number)
|
||||
|
||||
def stepEuler(self, dt):
|
||||
return self.stepDimsplitXY(dt)
|
||||
def substepDimsplit(self, dt, substep):
|
||||
self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
|
||||
self.nx, self.ny,
|
||||
self.dx, self.dy, dt,
|
||||
self.g,
|
||||
substep,
|
||||
self.boundary_conditions,
|
||||
self.u0[0].data.gpudata, self.u0[0].data.strides[0],
|
||||
self.u0[1].data.gpudata, self.u0[1].data.strides[0],
|
||||
self.u0[2].data.gpudata, self.u0[2].data.strides[0],
|
||||
self.u1[0].data.gpudata, self.u1[0].data.strides[0],
|
||||
self.u1[1].data.gpudata, self.u1[1].data.strides[0],
|
||||
self.u1[2].data.gpudata, self.u1[2].data.strides[0],
|
||||
self.cfl_data.gpudata)
|
||||
self.u0, self.u1 = self.u1, self.u0
|
||||
|
||||
def getOutput(self):
|
||||
return self.u0
|
||||
|
||||
def stepDimsplitXY(self, dt):
|
||||
self.kernel.prepared_async_call(self.global_size, self.local_size, self.stream, \
|
||||
self.nx, self.ny, \
|
||||
self.dx, self.dy, dt, \
|
||||
self.g, \
|
||||
np.int32(0), \
|
||||
self.data.h0.data.gpudata, self.data.h0.data.strides[0], \
|
||||
self.data.hu0.data.gpudata, self.data.hu0.data.strides[0], \
|
||||
self.data.hv0.data.gpudata, self.data.hv0.data.strides[0], \
|
||||
self.data.h1.data.gpudata, self.data.h1.data.strides[0], \
|
||||
self.data.hu1.data.gpudata, self.data.hu1.data.strides[0], \
|
||||
self.data.hv1.data.gpudata, self.data.hv1.data.strides[0])
|
||||
self.data.swap()
|
||||
self.t += dt
|
||||
def check(self):
|
||||
self.u0.check()
|
||||
self.u1.check()
|
||||
|
||||
def stepDimsplitYX(self, dt):
|
||||
self.kernel.prepared_async_call(self.global_size, self.local_size, self.stream, \
|
||||
self.nx, self.ny, \
|
||||
self.dx, self.dy, dt, \
|
||||
self.g, \
|
||||
np.int32(1), \
|
||||
self.data.h0.data.gpudata, self.data.h0.data.strides[0], \
|
||||
self.data.hu0.data.gpudata, self.data.hu0.data.strides[0], \
|
||||
self.data.hv0.data.gpudata, self.data.hv0.data.strides[0], \
|
||||
self.data.h1.data.gpudata, self.data.h1.data.strides[0], \
|
||||
self.data.hu1.data.gpudata, self.data.hu1.data.strides[0], \
|
||||
self.data.hv1.data.gpudata, self.data.hv1.data.strides[0])
|
||||
self.data.swap()
|
||||
self.t += dt
|
||||
def computeDt(self):
|
||||
max_dt = gpuarray.min(self.cfl_data, stream=self.stream).get();
|
||||
return max_dt*0.5
|
@ -1,500 +0,0 @@
|
||||
/*
|
||||
This OpenCL kernel implements the Kurganov-Petrova numerical scheme
|
||||
for the shallow water equations, described in
|
||||
A. Kurganov & Guergana Petrova
|
||||
A Second-Order Well-Balanced Positivity Preserving Central-Upwind
|
||||
Scheme for the Saint-Venant System Communications in Mathematical
|
||||
Sciences, 5 (2007), 133-160.
|
||||
|
||||
Copyright (C) 2016 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
|
||||
/**
|
||||
* Location of thread in block
|
||||
*/
|
||||
inline __device__ int get_local_id(int dim) {
|
||||
switch(dim) {
|
||||
case 0: return threadIdx.x;
|
||||
case 1: return threadIdx.y;
|
||||
case 2: return threadIdx.z;
|
||||
default: return -1;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Get block index
|
||||
*/
|
||||
__device__ int get_group_id(int dim) {
|
||||
switch(dim) {
|
||||
case 0: return blockIdx.x;
|
||||
case 1: return blockIdx.y;
|
||||
case 2: return blockIdx.z;
|
||||
default: return -1;
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Location of thread in global domain
|
||||
*/
|
||||
__device__ int get_global_id(int dim) {
|
||||
switch(dim) {
|
||||
case 0: return blockDim.x*blockIdx.x + threadIdx.x;
|
||||
case 1: return blockDim.y*blockIdx.y + threadIdx.y;
|
||||
case 2: return blockDim.z*blockIdx.z + threadIdx.z;
|
||||
default: return -1;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Float3 operators
|
||||
*/
|
||||
inline __device__ float3 operator*(const float a, const float3 b) {
|
||||
return make_float3(a*b.x, a*b.y, a*b.z);
|
||||
}
|
||||
|
||||
inline __device__ float3 operator/(const float3 a, const float b) {
|
||||
return make_float3(a.x/b, a.y/b, a.z/b);
|
||||
}
|
||||
|
||||
inline __device__ float3 operator-(const float3 a, const float3 b) {
|
||||
return make_float3(a.x-b.x, a.y-b.y, a.z-b.z);
|
||||
}
|
||||
|
||||
inline __device__ float3 operator+(const float3 a, const float3 b) {
|
||||
return make_float3(a.x+b.x, a.y+b.y, a.z+b.z);
|
||||
}
|
||||
|
||||
inline __device__ __host__ float clamp(const float f, const float a, const float b) {
|
||||
return fmaxf(a, fminf(f, b));
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Reads a block of data with one ghost cell for the shallow water equations
|
||||
*/
|
||||
__device__ void readBlock1(float* h_ptr_, int h_pitch_,
|
||||
float* hu_ptr_, int hu_pitch_,
|
||||
float* hv_ptr_, int hv_pitch_,
|
||||
float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
const int nx_, const int ny_) {
|
||||
//Index of thread within block
|
||||
const int tx = get_local_id(0);
|
||||
const int ty = get_local_id(1);
|
||||
|
||||
//Index of block within domain
|
||||
const int bx = BLOCK_WIDTH * get_group_id(0);
|
||||
const int by = BLOCK_HEIGHT * get_group_id(1);
|
||||
|
||||
//Read into shared memory
|
||||
for (int j=ty; j<BLOCK_HEIGHT+2; j+=BLOCK_HEIGHT) {
|
||||
const int l = clamp(by + j, 0, ny_+1); // Out of bounds
|
||||
|
||||
//Compute the pointer to current row in the arrays
|
||||
float* const h_row = (float*) ((char*) h_ptr_ + h_pitch_*l);
|
||||
float* const hu_row = (float*) ((char*) hu_ptr_ + hu_pitch_*l);
|
||||
float* const hv_row = (float*) ((char*) hv_ptr_ + hv_pitch_*l);
|
||||
|
||||
for (int i=tx; i<BLOCK_WIDTH+2; i+=BLOCK_WIDTH) {
|
||||
const int k = clamp(bx + i, 0, nx_+1); // Out of bounds
|
||||
|
||||
Q[0][j][i] = h_row[k];
|
||||
Q[1][j][i] = hu_row[k];
|
||||
Q[2][j][i] = hv_row[k];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Reads a block of data with two ghost cells for the shallow water equations
|
||||
*/
|
||||
__device__ void readBlock2(float* h_ptr_, int h_pitch_,
|
||||
float* hu_ptr_, int hu_pitch_,
|
||||
float* hv_ptr_, int hv_pitch_,
|
||||
float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
const int nx_, const int ny_) {
|
||||
//Index of thread within block
|
||||
const int tx = get_local_id(0);
|
||||
const int ty = get_local_id(1);
|
||||
|
||||
//Index of block within domain
|
||||
const int bx = BLOCK_WIDTH * get_group_id(0);
|
||||
const int by = BLOCK_HEIGHT * get_group_id(1);
|
||||
|
||||
//Read into shared memory
|
||||
for (int j=ty; j<BLOCK_HEIGHT+4; j+=BLOCK_HEIGHT) {
|
||||
const int l = clamp(by + j, 0, ny_+3); // Out of bounds
|
||||
|
||||
//Compute the pointer to current row in the arrays
|
||||
float* const h_row = (float*) ((char*) h_ptr_ + h_pitch_*l);
|
||||
float* const hu_row = (float*) ((char*) hu_ptr_ + hu_pitch_*l);
|
||||
float* const hv_row = (float*) ((char*) hv_ptr_ + hv_pitch_*l);
|
||||
|
||||
for (int i=tx; i<BLOCK_WIDTH+4; i+=BLOCK_WIDTH) {
|
||||
const int k = clamp(bx + i, 0, nx_+3); // Out of bounds
|
||||
|
||||
Q[0][j][i] = h_row[k];
|
||||
Q[1][j][i] = hu_row[k];
|
||||
Q[2][j][i] = hv_row[k];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Writes a block of data to global memory for the shallow water equations.
|
||||
*/
|
||||
__device__ void writeBlock1(float* h_ptr_, int h_pitch_,
|
||||
float* hu_ptr_, int hu_pitch_,
|
||||
float* hv_ptr_, int hv_pitch_,
|
||||
float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
const int nx_, const int ny_) {
|
||||
//Index of thread within block
|
||||
const int tx = get_local_id(0);
|
||||
const int ty = get_local_id(1);
|
||||
|
||||
//Index of cell within domain
|
||||
const int ti = get_global_id(0) + 1; //Skip global ghost cells, i.e., +1
|
||||
const int tj = get_global_id(1) + 1;
|
||||
|
||||
//Only write internal cells
|
||||
if (ti > 0 && ti < nx_+1 && tj > 0 && tj < ny_+1) {
|
||||
const int i = tx + 1; //Skip local ghost cells, i.e., +1
|
||||
const int j = ty + 1;
|
||||
|
||||
float* const h_row = (float*) ((char*) h_ptr_ + h_pitch_*tj);
|
||||
float* const hu_row = (float*) ((char*) hu_ptr_ + hu_pitch_*tj);
|
||||
float* const hv_row = (float*) ((char*) hv_ptr_ + hv_pitch_*tj);
|
||||
|
||||
h_row[ti] = Q[0][j][i];
|
||||
hu_row[ti] = Q[1][j][i];
|
||||
hv_row[ti] = Q[2][j][i];
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Writes a block of data to global memory for the shallow water equations.
|
||||
*/
|
||||
__device__ void writeBlock2(float* h_ptr_, int h_pitch_,
|
||||
float* hu_ptr_, int hu_pitch_,
|
||||
float* hv_ptr_, int hv_pitch_,
|
||||
float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
const int nx_, const int ny_) {
|
||||
//Index of thread within block
|
||||
const int tx = get_local_id(0);
|
||||
const int ty = get_local_id(1);
|
||||
|
||||
//Index of cell within domain
|
||||
const int ti = get_global_id(0) + 2; //Skip global ghost cells, i.e., +2
|
||||
const int tj = get_global_id(1) + 2;
|
||||
|
||||
//Only write internal cells
|
||||
if (ti > 1 && ti < nx_+2 && tj > 1 && tj < ny_+2) {
|
||||
const int i = tx + 2; //Skip local ghost cells, i.e., +2
|
||||
const int j = ty + 2;
|
||||
|
||||
float* const h_row = (float*) ((char*) h_ptr_ + h_pitch_*tj);
|
||||
float* const hu_row = (float*) ((char*) hu_ptr_ + hu_pitch_*tj);
|
||||
float* const hv_row = (float*) ((char*) hv_ptr_ + hv_pitch_*tj);
|
||||
|
||||
h_row[ti] = Q[0][j][i];
|
||||
hu_row[ti] = Q[1][j][i];
|
||||
hv_row[ti] = Q[2][j][i];
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* No flow boundary conditions for the shallow water equations
|
||||
* with one ghost cell in each direction
|
||||
*/
|
||||
__device__ void noFlowBoundary1(float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2], const int nx_, const int ny_) {
|
||||
//Global index
|
||||
const int ti = get_global_id(0) + 1; //Skip global ghost cells, i.e., +1
|
||||
const int tj = get_global_id(1) + 1;
|
||||
|
||||
//Block-local indices
|
||||
const int tx = get_local_id(0);
|
||||
const int ty = get_local_id(1);
|
||||
|
||||
const int i = tx + 1; //Skip local ghost cells, i.e., +1
|
||||
const int j = ty + 1;
|
||||
|
||||
//Fix boundary conditions
|
||||
if (ti == 1) {
|
||||
Q[0][j][i-1] = Q[0][j][i];
|
||||
Q[1][j][i-1] = -Q[1][j][i];
|
||||
Q[2][j][i-1] = Q[2][j][i];
|
||||
}
|
||||
if (ti == nx_) {
|
||||
Q[0][j][i+1] = Q[0][j][i];
|
||||
Q[1][j][i+1] = -Q[1][j][i];
|
||||
Q[2][j][i+1] = Q[2][j][i];
|
||||
}
|
||||
if (tj == 1) {
|
||||
Q[0][j-1][i] = Q[0][j][i];
|
||||
Q[1][j-1][i] = Q[1][j][i];
|
||||
Q[2][j-1][i] = -Q[2][j][i];
|
||||
}
|
||||
if (tj == ny_) {
|
||||
Q[0][j+1][i] = Q[0][j][i];
|
||||
Q[1][j+1][i] = Q[1][j][i];
|
||||
Q[2][j+1][i] = -Q[2][j][i];
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* No flow boundary conditions for the shallow water equations
|
||||
* with two ghost cells in each direction
|
||||
*/
|
||||
__device__ void noFlowBoundary2(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4], const int nx_, const int ny_) {
|
||||
//Global index
|
||||
const int ti = get_global_id(0) + 2; //Skip global ghost cells, i.e., +2
|
||||
const int tj = get_global_id(1) + 2;
|
||||
|
||||
//Block-local indices
|
||||
const int tx = get_local_id(0);
|
||||
const int ty = get_local_id(1);
|
||||
|
||||
const int i = tx + 2; //Skip local ghost cells, i.e., +2
|
||||
const int j = ty + 2;
|
||||
|
||||
if (ti == 2) {
|
||||
Q[0][j][i-1] = Q[0][j][i];
|
||||
Q[1][j][i-1] = -Q[1][j][i];
|
||||
Q[2][j][i-1] = Q[2][j][i];
|
||||
|
||||
Q[0][j][i-2] = Q[0][j][i+1];
|
||||
Q[1][j][i-2] = -Q[1][j][i+1];
|
||||
Q[2][j][i-2] = Q[2][j][i+1];
|
||||
}
|
||||
if (ti == nx_+1) {
|
||||
Q[0][j][i+1] = Q[0][j][i];
|
||||
Q[1][j][i+1] = -Q[1][j][i];
|
||||
Q[2][j][i+1] = Q[2][j][i];
|
||||
|
||||
Q[0][j][i+2] = Q[0][j][i-1];
|
||||
Q[1][j][i+2] = -Q[1][j][i-1];
|
||||
Q[2][j][i+2] = Q[2][j][i-1];
|
||||
}
|
||||
if (tj == 2) {
|
||||
Q[0][j-1][i] = Q[0][j][i];
|
||||
Q[1][j-1][i] = Q[1][j][i];
|
||||
Q[2][j-1][i] = -Q[2][j][i];
|
||||
|
||||
Q[0][j-2][i] = Q[0][j+1][i];
|
||||
Q[1][j-2][i] = Q[1][j+1][i];
|
||||
Q[2][j-2][i] = -Q[2][j+1][i];
|
||||
}
|
||||
if (tj == ny_+1) {
|
||||
Q[0][j+1][i] = Q[0][j][i];
|
||||
Q[1][j+1][i] = Q[1][j][i];
|
||||
Q[2][j+1][i] = -Q[2][j][i];
|
||||
|
||||
Q[0][j+2][i] = Q[0][j-1][i];
|
||||
Q[1][j+2][i] = Q[1][j-1][i];
|
||||
Q[2][j+2][i] = -Q[2][j-1][i];
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Evolves the solution in time along the x axis (dimensional splitting)
|
||||
*/
|
||||
__device__ void evolveF1(float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
float F[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1],
|
||||
const int nx_, const int ny_,
|
||||
const float dx_, const float dt_) {
|
||||
//Index of thread within block
|
||||
const int tx = get_local_id(0);
|
||||
const int ty = get_local_id(1);
|
||||
|
||||
//Index of cell within domain
|
||||
const int ti = get_global_id(0) + 1; //Skip global ghost cells, i.e., +1
|
||||
const int tj = get_global_id(1) + 1;
|
||||
|
||||
if (ti > 0 && ti < nx_+1 && tj > 0 && tj < ny_+1) {
|
||||
const int i = tx + 1; //Skip local ghost cells, i.e., +1
|
||||
const int j = ty + 1;
|
||||
|
||||
Q[0][j][i] = Q[0][j][i] + (F[0][ty][tx] - F[0][ty][tx+1]) * dt_ / dx_;
|
||||
Q[1][j][i] = Q[1][j][i] + (F[1][ty][tx] - F[1][ty][tx+1]) * dt_ / dx_;
|
||||
Q[2][j][i] = Q[2][j][i] + (F[2][ty][tx] - F[2][ty][tx+1]) * dt_ / dx_;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Evolves the solution in time along the x axis (dimensional splitting)
|
||||
*/
|
||||
__device__ void evolveF2(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float F[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1],
|
||||
const int nx_, const int ny_,
|
||||
const float dx_, const float dt_) {
|
||||
//Index of thread within block
|
||||
const int tx = get_local_id(0);
|
||||
const int ty = get_local_id(1);
|
||||
|
||||
//Index of cell within domain
|
||||
const int ti = get_global_id(0) + 2; //Skip global ghost cells, i.e., +2
|
||||
const int tj = get_global_id(1) + 2;
|
||||
|
||||
if (ti > 1 && ti < nx_+2 && tj > 1 && tj < ny_+2) {
|
||||
const int i = tx + 2; //Skip local ghost cells, i.e., +2
|
||||
const int j = ty + 2;
|
||||
|
||||
Q[0][j][i] = Q[0][j][i] + (F[0][ty][tx] - F[0][ty][tx+1]) * dt_ / dx_;
|
||||
Q[1][j][i] = Q[1][j][i] + (F[1][ty][tx] - F[1][ty][tx+1]) * dt_ / dx_;
|
||||
Q[2][j][i] = Q[2][j][i] + (F[2][ty][tx] - F[2][ty][tx+1]) * dt_ / dx_;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Evolves the solution in time along the y axis (dimensional splitting)
|
||||
*/
|
||||
__device__ void evolveG1(float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
float G[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1],
|
||||
const int nx_, const int ny_,
|
||||
const float dy_, const float dt_) {
|
||||
//Index of thread within block
|
||||
const int tx = get_local_id(0);
|
||||
const int ty = get_local_id(1);
|
||||
|
||||
//Index of cell within domain
|
||||
const int ti = get_global_id(0) + 1; //Skip global ghost cells, i.e., +1
|
||||
const int tj = get_global_id(1) + 1;
|
||||
|
||||
if (ti > 0 && ti < nx_+1 && tj > 0 && tj < ny_+1) {
|
||||
const int i = tx + 1; //Skip local ghost cells, i.e., +1
|
||||
const int j = ty + 1;
|
||||
|
||||
Q[0][j][i] = Q[0][j][i] + (G[0][ty][tx] - G[0][ty+1][tx]) * dt_ / dy_;
|
||||
Q[1][j][i] = Q[1][j][i] + (G[1][ty][tx] - G[1][ty+1][tx]) * dt_ / dy_;
|
||||
Q[2][j][i] = Q[2][j][i] + (G[2][ty][tx] - G[2][ty+1][tx]) * dt_ / dy_;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Evolves the solution in time along the y axis (dimensional splitting)
|
||||
*/
|
||||
__device__ void evolveG2(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float G[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1],
|
||||
const int nx_, const int ny_,
|
||||
const float dy_, const float dt_) {
|
||||
//Index of thread within block
|
||||
const int tx = get_local_id(0);
|
||||
const int ty = get_local_id(1);
|
||||
|
||||
//Index of cell within domain
|
||||
const int ti = get_global_id(0) + 2; //Skip global ghost cells, i.e., +2
|
||||
const int tj = get_global_id(1) + 2;
|
||||
|
||||
if (ti > 1 && ti < nx_+2 && tj > 1 && tj < ny_+2) {
|
||||
const int i = tx + 2; //Skip local ghost cells, i.e., +2
|
||||
const int j = ty + 2;
|
||||
|
||||
Q[0][j][i] = Q[0][j][i] + (G[0][ty][tx] - G[0][ty+1][tx]) * dt_ / dy_;
|
||||
Q[1][j][i] = Q[1][j][i] + (G[1][ty][tx] - G[1][ty+1][tx]) * dt_ / dy_;
|
||||
Q[2][j][i] = Q[2][j][i] + (G[2][ty][tx] - G[2][ty+1][tx]) * dt_ / dy_;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
__device__ float3 F_func(const float3 Q, const float g) {
|
||||
float3 F;
|
||||
|
||||
F.x = Q.y; //hu
|
||||
F.y = Q.y*Q.y / Q.x + 0.5f*g*Q.x*Q.x; //hu*hu/h + 0.5f*g*h*h;
|
||||
F.z = Q.y*Q.z / Q.x; //hu*hv/h;
|
||||
|
||||
return F;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
239
GPUSimulators/cuda/EE2D_KP07_dimsplit.cu
Normal file
@ -0,0 +1,239 @@
|
||||
/*
|
||||
This kernel implements the Central Upwind flux function to
|
||||
solve the Euler equations
|
||||
|
||||
Copyright (C) 2018 SINTEF Digital
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
|
||||
|
||||
#include "common.h"
|
||||
#include "EulerCommon.h"
|
||||
#include "limiters.h"
|
||||
|
||||
|
||||
__device__
|
||||
void computeFluxF(float Q[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float Qx[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float F[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
const float gamma_, const float dx_, const float dt_) {
|
||||
for (int j=threadIdx.y; j<BLOCK_HEIGHT+4; j+=BLOCK_HEIGHT) {
|
||||
for (int i=threadIdx.x+1; i<BLOCK_WIDTH+2; i+=BLOCK_WIDTH) {
|
||||
// Reconstruct point values of Q at the left and right hand side
|
||||
// of the cell for both the left (i) and right (i+1) cell
|
||||
const float4 Q_rl = make_float4(Q[0][j][i+1] - 0.5f*Qx[0][j][i+1],
|
||||
Q[1][j][i+1] - 0.5f*Qx[1][j][i+1],
|
||||
Q[2][j][i+1] - 0.5f*Qx[2][j][i+1],
|
||||
Q[3][j][i+1] - 0.5f*Qx[3][j][i+1]);
|
||||
const float4 Q_rr = make_float4(Q[0][j][i+1] + 0.5f*Qx[0][j][i+1],
|
||||
Q[1][j][i+1] + 0.5f*Qx[1][j][i+1],
|
||||
Q[2][j][i+1] + 0.5f*Qx[2][j][i+1],
|
||||
Q[3][j][i+1] + 0.5f*Qx[3][j][i+1]);
|
||||
|
||||
const float4 Q_ll = make_float4(Q[0][j][i] - 0.5f*Qx[0][j][i],
|
||||
Q[1][j][i] - 0.5f*Qx[1][j][i],
|
||||
Q[2][j][i] - 0.5f*Qx[2][j][i],
|
||||
Q[3][j][i] - 0.5f*Qx[3][j][i]);
|
||||
const float4 Q_lr = make_float4(Q[0][j][i] + 0.5f*Qx[0][j][i],
|
||||
Q[1][j][i] + 0.5f*Qx[1][j][i],
|
||||
Q[2][j][i] + 0.5f*Qx[2][j][i],
|
||||
Q[3][j][i] + 0.5f*Qx[3][j][i]);
|
||||
|
||||
|
||||
//Evolve half a timestep (predictor step)
|
||||
const float4 Q_r_bar = Q_rl + dt_/(2.0f*dx_) * (F_func(Q_rl, gamma_) - F_func(Q_rr, gamma_));
|
||||
const float4 Q_l_bar = Q_lr + dt_/(2.0f*dx_) * (F_func(Q_ll, gamma_) - F_func(Q_lr, gamma_));
|
||||
|
||||
// Compute flux based on prediction
|
||||
//const float4 flux = CentralUpwindFlux(Q_l_bar, Q_r_bar, gamma_);
|
||||
const float4 flux = HLL_flux(Q_l_bar, Q_r_bar, gamma_);
|
||||
|
||||
//Write to shared memory
|
||||
F[0][j][i] = flux.x;
|
||||
F[1][j][i] = flux.y;
|
||||
F[2][j][i] = flux.z;
|
||||
F[3][j][i] = flux.w;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
__device__
|
||||
void computeFluxG(float Q[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float Qy[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float G[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
const float gamma_, const float dy_, const float dt_) {
|
||||
for (int j=threadIdx.y+1; j<BLOCK_HEIGHT+2; j+=BLOCK_HEIGHT) {
|
||||
for (int i=threadIdx.x; i<BLOCK_WIDTH+4; i+=BLOCK_WIDTH) {
|
||||
// Reconstruct point values of Q at the left and right hand side
|
||||
// of the cell for both the left (i) and right (i+1) cell
|
||||
//Note that rho_u and rho_v are swapped ("transposing" the domain)!
|
||||
const float4 Q_rl = make_float4(Q[0][j+1][i] - 0.5f*Qy[0][j+1][i],
|
||||
Q[2][j+1][i] - 0.5f*Qy[2][j+1][i],
|
||||
Q[1][j+1][i] - 0.5f*Qy[1][j+1][i],
|
||||
Q[3][j+1][i] - 0.5f*Qy[3][j+1][i]);
|
||||
const float4 Q_rr = make_float4(Q[0][j+1][i] + 0.5f*Qy[0][j+1][i],
|
||||
Q[2][j+1][i] + 0.5f*Qy[2][j+1][i],
|
||||
Q[1][j+1][i] + 0.5f*Qy[1][j+1][i],
|
||||
Q[3][j+1][i] + 0.5f*Qy[3][j+1][i]);
|
||||
|
||||
const float4 Q_ll = make_float4(Q[0][j][i] - 0.5f*Qy[0][j][i],
|
||||
Q[2][j][i] - 0.5f*Qy[2][j][i],
|
||||
Q[1][j][i] - 0.5f*Qy[1][j][i],
|
||||
Q[3][j][i] - 0.5f*Qy[3][j][i]);
|
||||
const float4 Q_lr = make_float4(Q[0][j][i] + 0.5f*Qy[0][j][i],
|
||||
Q[2][j][i] + 0.5f*Qy[2][j][i],
|
||||
Q[1][j][i] + 0.5f*Qy[1][j][i],
|
||||
Q[3][j][i] + 0.5f*Qy[3][j][i]);
|
||||
|
||||
//Evolve half a timestep (predictor step)
|
||||
const float4 Q_r_bar = Q_rl + dt_/(2.0f*dy_) * (F_func(Q_rl, gamma_) - F_func(Q_rr, gamma_));
|
||||
const float4 Q_l_bar = Q_lr + dt_/(2.0f*dy_) * (F_func(Q_ll, gamma_) - F_func(Q_lr, gamma_));
|
||||
|
||||
// Compute flux based on prediction
|
||||
const float4 flux = CentralUpwindFlux(Q_l_bar, Q_r_bar, gamma_);
|
||||
//const float4 flux = HLL_flux(Q_l_bar, Q_r_bar, gamma_);
|
||||
|
||||
//Write to shared memory
|
||||
//Note that we here swap rho_u and rho_v back to the original order
|
||||
G[0][j][i] = flux.x;
|
||||
G[1][j][i] = flux.z;
|
||||
G[2][j][i] = flux.y;
|
||||
G[3][j][i] = flux.w;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* This unsplit kernel computes the 2D numerical scheme with a TVD RK2 time integration scheme
|
||||
*/
|
||||
extern "C" {
|
||||
|
||||
__global__ void KP07DimsplitKernel(
|
||||
int nx_, int ny_,
|
||||
float dx_, float dy_, float dt_,
|
||||
float g_,
|
||||
float gamma_,
|
||||
|
||||
float theta_,
|
||||
|
||||
int step_,
|
||||
int boundary_conditions_,
|
||||
|
||||
//Input h^n
|
||||
float* rho0_ptr_, int rho0_pitch_,
|
||||
float* rho_u0_ptr_, int rho_u0_pitch_,
|
||||
float* rho_v0_ptr_, int rho_v0_pitch_,
|
||||
float* E0_ptr_, int E0_pitch_,
|
||||
|
||||
//Output h^{n+1}
|
||||
float* rho1_ptr_, int rho1_pitch_,
|
||||
float* rho_u1_ptr_, int rho_u1_pitch_,
|
||||
float* rho_v1_ptr_, int rho_v1_pitch_,
|
||||
float* E1_ptr_, int E1_pitch_,
|
||||
|
||||
//Output CFL
|
||||
float* cfl_) {
|
||||
const unsigned int w = BLOCK_WIDTH;
|
||||
const unsigned int h = BLOCK_HEIGHT;
|
||||
const unsigned int gc_x = 2;
|
||||
const unsigned int gc_y = 2;
|
||||
const unsigned int vars = 4;
|
||||
|
||||
//Shared memory variables
|
||||
__shared__ float Q[4][h+2*gc_y][w+2*gc_x];
|
||||
__shared__ float Qx[4][h+2*gc_y][w+2*gc_x];
|
||||
__shared__ float F[4][h+2*gc_y][w+2*gc_x];
|
||||
|
||||
//Read into shared memory
|
||||
readBlock<w, h, gc_x, gc_y, 1, 1>( rho0_ptr_, rho0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, -1, 1>(rho_u0_ptr_, rho_u0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, 1, -1>(rho_v0_ptr_, rho_v0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, 1, 1>( E0_ptr_, E0_pitch_, Q[3], nx_, ny_, boundary_conditions_);
|
||||
|
||||
//Step 0 => evolve x first, then y
|
||||
if (step_ == 0) {
|
||||
//Compute fluxes along the x axis and evolve
|
||||
minmodSlopeX<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxF(Q, Qx, F, gamma_, dx_, dt_);
|
||||
__syncthreads();
|
||||
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Compute fluxes along the y axis and evolve
|
||||
minmodSlopeY<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxG(Q, Qx, F, gamma_, dy_, dt_);
|
||||
__syncthreads();
|
||||
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Gravity source term
|
||||
if (g_ > 0.0f) {
|
||||
const int i = threadIdx.x + gc_x;
|
||||
const int j = threadIdx.y + gc_y;
|
||||
const float rho_v = Q[2][j][i];
|
||||
Q[2][j][i] -= g_*Q[0][j][i]*dt_;
|
||||
Q[3][j][i] -= g_*rho_v*dt_;
|
||||
__syncthreads();
|
||||
}
|
||||
}
|
||||
//Step 1 => evolve y first, then x
|
||||
else {
|
||||
//Compute fluxes along the y axis and evolve
|
||||
minmodSlopeY<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxG(Q, Qx, F, gamma_, dy_, dt_);
|
||||
__syncthreads();
|
||||
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Compute fluxes along the x axis and evolve
|
||||
minmodSlopeX<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxF(Q, Qx, F, gamma_, dx_, dt_);
|
||||
__syncthreads();
|
||||
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Gravity source term
|
||||
if (g_ > 0.0f) {
|
||||
const int i = threadIdx.x + gc_x;
|
||||
const int j = threadIdx.y + gc_y;
|
||||
const float rho_v = Q[2][j][i];
|
||||
Q[2][j][i] -= g_*Q[0][j][i]*dt_;
|
||||
Q[3][j][i] -= g_*rho_v*dt_;
|
||||
__syncthreads();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
// Write to main memory for all internal cells
|
||||
writeBlock<w, h, gc_x, gc_y>( rho1_ptr_, rho1_pitch_, Q[0], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(rho_u1_ptr_, rho_u1_pitch_, Q[1], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(rho_v1_ptr_, rho_v1_pitch_, Q[2], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>( E1_ptr_, E1_pitch_, Q[3], nx_, ny_, 0, 1);
|
||||
|
||||
//Compute the CFL for this block
|
||||
if (cfl_ != NULL) {
|
||||
writeCfl<w, h, gc_x, gc_y, vars>(Q, F[0], nx_, ny_, dx_, dy_, gamma_, cfl_);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
} // extern "C"
|
187
GPUSimulators/cuda/EulerCommon.h
Normal file
@ -0,0 +1,187 @@
|
||||
/*
|
||||
These CUDA functions implement different types of numerical flux
|
||||
functions for the shallow water equations
|
||||
|
||||
Copyright (C) 2016, 2017, 2018 SINTEF Digital
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
#include "limiters.h"
|
||||
|
||||
|
||||
|
||||
template<int w, int h, int gc_x, int gc_y, int vars>
|
||||
__device__ void writeCfl(float Q[vars][h+2*gc_y][w+2*gc_x],
|
||||
float shmem[h+2*gc_y][w+2*gc_x],
|
||||
const int nx_, const int ny_,
|
||||
const float dx_, const float dy_, const float gamma_,
|
||||
float* output_) {
|
||||
//Index of thread within block
|
||||
const int tx = threadIdx.x + gc_x;
|
||||
const int ty = threadIdx.y + gc_y;
|
||||
|
||||
//Index of cell within domain
|
||||
const int ti = blockDim.x*blockIdx.x + tx;
|
||||
const int tj = blockDim.y*blockIdx.y + ty;
|
||||
|
||||
//Only internal cells
|
||||
if (ti < nx_+gc_x && tj < ny_+gc_y) {
|
||||
const float rho = Q[0][ty][tx];
|
||||
const float u = Q[1][ty][tx] / rho;
|
||||
const float v = Q[2][ty][tx] / rho;
|
||||
|
||||
const float max_u = dx_ / (fabsf(u) + sqrtf(gamma_*rho));
|
||||
const float max_v = dy_ / (fabsf(v) + sqrtf(gamma_*rho));
|
||||
|
||||
shmem[ty][tx] = fminf(max_u, max_v);
|
||||
}
|
||||
__syncthreads();
|
||||
|
||||
//One row of threads loops over all rows to find the per-column minimum
|
||||
if (ti < nx_+gc_x && tj < ny_+gc_y) {
|
||||
if (ty == gc_y) {
|
||||
float min_val = shmem[ty][tx];
|
||||
const int max_y = min(h, ny_+gc_y - tj);
|
||||
for (int j=gc_y; j<max_y+gc_y; j++) {
|
||||
min_val = fminf(min_val, shmem[j][tx]);
|
||||
}
|
||||
shmem[ty][tx] = min_val;
|
||||
}
|
||||
}
|
||||
__syncthreads();
|
||||
|
||||
//One thread loops over the first row to find the minimum for this block
|
||||
if (tx == gc_x && ty == gc_y) {
|
||||
float min_val = shmem[ty][tx];
|
||||
const int max_x = min(w, nx_+gc_x - ti);
|
||||
for (int i=gc_x; i<max_x+gc_x; ++i) {
|
||||
min_val = fminf(min_val, shmem[ty][i]);
|
||||
}
|
||||
|
||||
const int idx = gridDim.x*blockIdx.y + blockIdx.x;
|
||||
output_[idx] = min_val;
|
||||
}
|
||||
}
|
||||
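// Added note (not part of the original code): writeCfl stores one per-block minimum of
// the per-cell admissible-dt estimates in output_[blockIdx]; the host side is then
// expected to reduce over all blocks, e.g. with a gpuarray.min over the CFL buffer
// as the Python simulators do in computeDt().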
|
||||
|
||||
|
||||
|
||||
|
||||
inline __device__ float pressure(float4 Q, float gamma) {
|
||||
const float rho = Q.x;
|
||||
const float rho_u = Q.y;
|
||||
const float rho_v = Q.z;
|
||||
const float E = Q.w;
|
||||
|
||||
return (gamma-1.0f)*(E-0.5f*(rho_u*rho_u + rho_v*rho_v)/rho);
|
||||
}
|
||||
|
||||
|
||||
__device__ float4 F_func(const float4 Q, float P) {
|
||||
const float rho = Q.x;
|
||||
const float rho_u = Q.y;
|
||||
const float rho_v = Q.z;
|
||||
const float E = Q.w;
|
||||
|
||||
const float u = rho_u/rho;
|
||||
|
||||
float4 F;
|
||||
|
||||
F.x = rho_u;
|
||||
F.y = rho_u*u + P;
|
||||
F.z = rho_v*u;
|
||||
F.w = u*(E+P);
|
||||
|
||||
return F;
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Harten-Lax-van Leer with contact discontinuity (Toro 2001, p 180)
|
||||
*/
|
||||
__device__ float4 HLL_flux(const float4 Q_l, const float4 Q_r, const float gamma) {
|
||||
const float h_l = Q_l.x;
|
||||
const float h_r = Q_r.x;
|
||||
|
||||
// Calculate velocities
|
||||
const float u_l = Q_l.y / h_l;
|
||||
const float u_r = Q_r.y / h_r;
|
||||
|
||||
// Calculate pressures
|
||||
const float P_l = pressure(Q_l, gamma);
|
||||
const float P_r = pressure(Q_r, gamma);
|
||||
|
||||
// Estimate the potential wave speeds
|
||||
const float c_l = sqrt(gamma*P_l/Q_l.x);
|
||||
const float c_r = sqrt(gamma*P_r/Q_r.x);
|
||||
|
||||
// Compute h in the "star region", h^dagger
|
||||
const float h_dag = 0.5f * (h_l+h_r) - 0.25f * (u_r-u_l)*(h_l+h_r)/(c_l+c_r);
|
||||
|
||||
const float q_l_tmp = sqrt(0.5f * ( (h_dag+h_l)*h_dag / (h_l*h_l) ) );
|
||||
const float q_r_tmp = sqrt(0.5f * ( (h_dag+h_r)*h_dag / (h_r*h_r) ) );
|
||||
|
||||
const float q_l = (h_dag > h_l) ? q_l_tmp : 1.0f;
|
||||
const float q_r = (h_dag > h_r) ? q_r_tmp : 1.0f;
|
||||
|
||||
// Compute wave speed estimates
|
||||
const float S_l = u_l - c_l*q_l;
|
||||
const float S_r = u_r + c_r*q_r;
|
||||
|
||||
//Upwind selection
|
||||
if (S_l >= 0.0f) {
|
||||
return F_func(Q_l, P_l);
|
||||
}
|
||||
else if (S_r <= 0.0f) {
|
||||
return F_func(Q_r, P_r);
|
||||
}
|
||||
//Or estimate flux in the star region
|
||||
else {
|
||||
const float4 F_l = F_func(Q_l, P_l);
|
||||
const float4 F_r = F_func(Q_r, P_r);
|
||||
const float4 flux = (S_r*F_l - S_l*F_r + S_r*S_l*(Q_r - Q_l)) / (S_r-S_l);
|
||||
return flux;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Central upwind flux function
|
||||
*/
|
||||
__device__ float4 CentralUpwindFlux(const float4 Qm, const float4 Qp, const float gamma) {
|
||||
|
||||
const float Pp = pressure(Qp, gamma);
|
||||
const float4 Fp = F_func(Qp, Pp);
|
||||
const float up = Qp.y / Qp.x; // rho*u / rho
|
||||
const float cp = sqrt(gamma*Pp/Qp.x); // sqrt(gamma*P/rho)
|
||||
|
||||
const float Pm = pressure(Qm, gamma);
|
||||
const float4 Fm = F_func(Qm, Pm);
|
||||
const float um = Qm.y / Qm.x; // rho*u / rho
|
||||
const float cm = sqrt(gamma*Pm/Qm.x); // sqrt(gamma*P/rho)
|
||||
|
||||
const float am = min(min(um-cm, up-cp), 0.0f); // largest negative wave speed
|
||||
const float ap = max(max(um+cm, up+cp), 0.0f); // largest positive wave speed
|
||||
|
||||
return ((ap*Fm - am*Fp) + ap*am*(Qp-Qm))/(ap-am);
|
||||
}
|
@ -19,8 +19,8 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
*/

#include "common.cu"
#include "fluxes/FirstOrderCentered.cu"
#include "common.h"
#include "SWECommon.h"

/**
@ -28,27 +28,18 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
__device__
void computeFluxF(float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
float F[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1],
float F[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
const float g_, const float dx_, const float dt_) {

//Index of thread within block
const int tx = get_local_id(0);
const int ty = get_local_id(1);

//Compute fluxes along the x axis
{
int j=ty;
const int l = j + 1; //Skip ghost cells
for (int i=tx; i<BLOCK_WIDTH+1; i+=BLOCK_WIDTH) {
const int k = i;

for (int j=threadIdx.y; j<BLOCK_HEIGHT+2; j+=BLOCK_HEIGHT) {
for (int i=threadIdx.x; i<BLOCK_WIDTH+1; i+=BLOCK_WIDTH) {
// Q at interface from the right and left
const float3 Qp = make_float3(Q[0][l][k+1],
Q[1][l][k+1],
Q[2][l][k+1]);
const float3 Qm = make_float3(Q[0][l][k],
Q[1][l][k],
Q[2][l][k]);
const float3 Qp = make_float3(Q[0][j][i+1],
Q[1][j][i+1],
Q[2][j][i+1]);
const float3 Qm = make_float3(Q[0][j][i],
Q[1][j][i],
Q[2][j][i]);

// Computed flux
const float3 flux = FORCE_1D_flux(Qm, Qp, g_, dx_, dt_);
@ -65,27 +56,19 @@ void computeFluxF(float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
*/
__device__
void computeFluxG(float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
float G[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1],
float G[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
const float g_, const float dy_, const float dt_) {
//Index of thread within block
const int tx = get_local_id(0);
const int ty = get_local_id(1);

//Compute fluxes along the y axis
for (int j=ty; j<BLOCK_HEIGHT+1; j+=BLOCK_HEIGHT) {
const int l = j;
{
int i=tx;
const int k = i + 1; //Skip ghost cells

for (int j=threadIdx.y; j<BLOCK_HEIGHT+1; j+=BLOCK_HEIGHT) {
for (int i=threadIdx.x; i<BLOCK_WIDTH+2; i+=BLOCK_WIDTH) {
// Q at interface from the right and left
// Note that we swap hu and hv
const float3 Qp = make_float3(Q[0][l+1][k],
Q[2][l+1][k],
Q[1][l+1][k]);
const float3 Qm = make_float3(Q[0][l][k],
Q[2][l][k],
Q[1][l][k]);
const float3 Qp = make_float3(Q[0][j+1][i],
Q[2][j+1][i],
Q[1][j+1][i]);
const float3 Qm = make_float3(Q[0][j][i],
Q[2][j][i],
Q[1][j][i]);

// Computed flux
// Note that we swap back
@ -104,6 +87,8 @@ __global__ void FORCEKernel(
float dx_, float dy_, float dt_,
float g_,

int boundary_conditions_,

//Input h^n
float* h0_ptr_, int h0_pitch_,
float* hu0_ptr_, int hu0_pitch_,
@ -112,45 +97,47 @@ __global__ void FORCEKernel(
//Output h^{n+1}
float* h1_ptr_, int h1_pitch_,
float* hu1_ptr_, int hu1_pitch_,
float* hv1_ptr_, int hv1_pitch_) {
float* hv1_ptr_, int hv1_pitch_,

//Output CFL
float* cfl_) {

__shared__ float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2];
__shared__ float F[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1];
const unsigned int w = BLOCK_WIDTH;
const unsigned int h = BLOCK_HEIGHT;
const unsigned int gc_x = 1;
const unsigned int gc_y = 1;
const unsigned int vars = 3;

__shared__ float Q[vars][h+2*gc_y][w+2*gc_x];
__shared__ float F[vars][h+2*gc_y][w+2*gc_x];

//Read into shared memory
readBlock1(h0_ptr_, h0_pitch_,
hu0_ptr_, hu0_pitch_,
hv0_ptr_, hv0_pitch_,
Q, nx_, ny_);
__syncthreads();

//Set boundary conditions
noFlowBoundary1(Q, nx_, ny_);
readBlock<w, h, gc_x, gc_y,  1,  1>( h0_ptr_,  h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
readBlock<w, h, gc_x, gc_y, -1,  1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
readBlock<w, h, gc_x, gc_y,  1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
__syncthreads();

//Compute flux along x, and evolve
computeFluxF(Q, F, g_, dx_, dt_);
__syncthreads();
evolveF1(Q, F, nx_, ny_, dx_, dt_);
__syncthreads();

//Set boundary conditions
noFlowBoundary1(Q, nx_, ny_);
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
__syncthreads();

//Compute flux along y, and evolve
computeFluxG(Q, F, g_, dy_, dt_);
__syncthreads();
evolveG1(Q, F, nx_, ny_, dy_, dt_);
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
__syncthreads();

//Write to main memory
writeBlock1(h1_ptr_, h1_pitch_,
hu1_ptr_, hu1_pitch_,
hv1_ptr_, hv1_pitch_,
Q, nx_, ny_);
writeBlock<w, h, gc_x, gc_y>( h1_ptr_,  h1_pitch_, Q[0], nx_, ny_, 0, 1);
writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);

//Compute the CFL for this block
if (cfl_ != NULL) {
writeCfl<w, h, gc_x, gc_y, vars>(Q, F[0], nx_, ny_, dx_, dy_, g_, cfl_);
}
}

} // extern "C"
161  GPUSimulators/cuda/SWE2D_HLL.cu  Normal file
@ -0,0 +1,161 @@
/*
This GPU kernel implements the HLL flux

Copyright (C) 2016 SINTEF ICT

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
*/


#include "common.h"
#include "SWECommon.h"


/**
 * Computes the flux along the x axis for all faces
 */
__device__
void computeFluxF(float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
                  float F[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
                  const float g_) {
    for (int j=threadIdx.y; j<BLOCK_HEIGHT+2; j+=BLOCK_HEIGHT) {
        for (int i=threadIdx.x; i<BLOCK_WIDTH+1; i+=BLOCK_WIDTH) {
            // Q at interface from the right and left
            const float3 Q_r = make_float3(Q[0][j][i+1],
                                           Q[1][j][i+1],
                                           Q[2][j][i+1]);
            const float3 Q_l = make_float3(Q[0][j][i],
                                           Q[1][j][i],
                                           Q[2][j][i]);

            // Computed flux
            const float3 flux = HLL_flux(Q_l, Q_r, g_);
            F[0][j][i] = flux.x;
            F[1][j][i] = flux.y;
            F[2][j][i] = flux.z;
        }
    }
}


/**
 * Computes the flux along the y axis for all faces
 */
__device__
void computeFluxG(float Q[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
                  float G[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
                  const float g_) {
    //Compute fluxes along the y axis
    for (int j=threadIdx.y; j<BLOCK_HEIGHT+1; j+=BLOCK_HEIGHT) {
        for (int i=threadIdx.x; i<BLOCK_WIDTH+2; i+=BLOCK_WIDTH) {
            // Q at interface from the right and left
            // Note that we swap hu and hv
            const float3 Q_r = make_float3(Q[0][j+1][i],
                                           Q[2][j+1][i],
                                           Q[1][j+1][i]);
            const float3 Q_l = make_float3(Q[0][j][i],
                                           Q[2][j][i],
                                           Q[1][j][i]);

            // Computed flux
            //Note that we here swap hu and hv back to the original
            const float3 flux = HLL_flux(Q_l, Q_r, g_);
            G[0][j][i] = flux.x;
            G[1][j][i] = flux.z;
            G[2][j][i] = flux.y;
        }
    }
}


extern "C" {

__global__ void HLLKernel(
        int nx_, int ny_,
        float dx_, float dy_, float dt_,
        float g_,

        int boundary_conditions_,

        //Input h^n
        float* h0_ptr_, int h0_pitch_,
        float* hu0_ptr_, int hu0_pitch_,
        float* hv0_ptr_, int hv0_pitch_,

        //Output h^{n+1}
        float* h1_ptr_, int h1_pitch_,
        float* hu1_ptr_, int hu1_pitch_,
        float* hv1_ptr_, int hv1_pitch_,

        //Output CFL
        float* cfl_) {

    const unsigned int w = BLOCK_WIDTH;
    const unsigned int h = BLOCK_HEIGHT;
    const unsigned int gc_x = 1;
    const unsigned int gc_y = 1;
    const unsigned int vars = 3;

    //Shared memory variables
    __shared__ float Q[vars][h+2*gc_y][w+2*gc_x];
    __shared__ float F[vars][h+2*gc_y][w+2*gc_x];

    //Read into shared memory
    readBlock<w, h, gc_x, gc_y,  1,  1>( h0_ptr_,  h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
    readBlock<w, h, gc_x, gc_y, -1,  1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
    readBlock<w, h, gc_x, gc_y,  1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);

    //Compute F flux
    computeFluxF(Q, F, g_);
    __syncthreads();

    evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
    __syncthreads();

    //Compute G flux
    computeFluxG(Q, F, g_);
    __syncthreads();

    evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
    __syncthreads();

    // Write to main memory for all internal cells
    writeBlock<w, h, gc_x, gc_y>( h1_ptr_,  h1_pitch_, Q[0], nx_, ny_, 0, 1);
    writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
    writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);

    //Compute the CFL for this block
    if (cfl_ != NULL) {
        writeCfl<w, h, gc_x, gc_y, vars>(Q, F[0], nx_, ny_, dx_, dy_, g_, cfl_);
    }
}

} // extern "C"
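As a rough illustration of how the new kernel signature is driven: in the project itself the kernels are compiled and launched from Python, so the standalone host-side call below is only a hypothetical sketch; the 16x16 block size and the helper name launchHLL are assumptions.

// Hypothetical host-side launch; assumes pitched device buffers allocated with
// cudaMallocPitch and a kernel built with BLOCK_WIDTH=BLOCK_HEIGHT=16.
void launchHLL(int nx, int ny, float dx, float dy, float dt, float g, int bc,
               float* h0, int h0_pitch, float* hu0, int hu0_pitch, float* hv0, int hv0_pitch,
               float* h1, int h1_pitch, float* hu1, int hu1_pitch, float* hv1, int hv1_pitch,
               float* cfl) {
    const dim3 block(16, 16);
    const dim3 grid((nx + block.x - 1) / block.x, (ny + block.y - 1) / block.y);
    HLLKernel<<<grid, block>>>(nx, ny, dx, dy, dt, g, bc,
                               h0, h0_pitch, hu0, hu0_pitch, hv0, hv0_pitch,
                               h1, h1_pitch, hu1, hu1_pitch, hv1, hv1_pitch,
                               cfl);
}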
@ -18,9 +18,9 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
|
||||
#include "common.cu"
|
||||
#include "limiters.cu"
|
||||
#include "fluxes/HartenLaxVanLeer.cu"
|
||||
#include "common.h"
|
||||
#include "SWECommon.h"
|
||||
#include "limiters.h"
|
||||
|
||||
|
||||
|
||||
@ -33,33 +33,26 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
__device__
|
||||
void computeFluxF(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float Qx[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
float F[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1],
|
||||
float Qx[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float F[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
const float g_, const float dx_, const float dt_) {
|
||||
//Index of thread within block
|
||||
const int tx = get_local_id(0);
|
||||
const int ty = get_local_id(1);
|
||||
|
||||
{
|
||||
const int j=ty;
|
||||
const int l = j + 2; //Skip ghost cells
|
||||
for (int i=tx; i<BLOCK_WIDTH+1; i+=BLOCK_WIDTH) {
|
||||
const int k = i + 1;
|
||||
for (int j=threadIdx.y; j<BLOCK_HEIGHT+4; j+=BLOCK_HEIGHT) {
|
||||
for (int i=threadIdx.x+1; i<BLOCK_WIDTH+2; i+=BLOCK_WIDTH) {
|
||||
// Reconstruct point values of Q at the left and right hand side
|
||||
// of the cell for both the left (i) and right (i+1) cell
|
||||
const float3 Q_rl = make_float3(Q[0][l][k+1] - 0.5f*Qx[0][j][i+1],
|
||||
Q[1][l][k+1] - 0.5f*Qx[1][j][i+1],
|
||||
Q[2][l][k+1] - 0.5f*Qx[2][j][i+1]);
|
||||
const float3 Q_rr = make_float3(Q[0][l][k+1] + 0.5f*Qx[0][j][i+1],
|
||||
Q[1][l][k+1] + 0.5f*Qx[1][j][i+1],
|
||||
Q[2][l][k+1] + 0.5f*Qx[2][j][i+1]);
|
||||
const float3 Q_rl = make_float3(Q[0][j][i+1] - 0.5f*Qx[0][j][i+1],
|
||||
Q[1][j][i+1] - 0.5f*Qx[1][j][i+1],
|
||||
Q[2][j][i+1] - 0.5f*Qx[2][j][i+1]);
|
||||
const float3 Q_rr = make_float3(Q[0][j][i+1] + 0.5f*Qx[0][j][i+1],
|
||||
Q[1][j][i+1] + 0.5f*Qx[1][j][i+1],
|
||||
Q[2][j][i+1] + 0.5f*Qx[2][j][i+1]);
|
||||
|
||||
const float3 Q_ll = make_float3(Q[0][l][k] - 0.5f*Qx[0][j][i],
|
||||
Q[1][l][k] - 0.5f*Qx[1][j][i],
|
||||
Q[2][l][k] - 0.5f*Qx[2][j][i]);
|
||||
const float3 Q_lr = make_float3(Q[0][l][k] + 0.5f*Qx[0][j][i],
|
||||
Q[1][l][k] + 0.5f*Qx[1][j][i],
|
||||
Q[2][l][k] + 0.5f*Qx[2][j][i]);
|
||||
const float3 Q_ll = make_float3(Q[0][j][i] - 0.5f*Qx[0][j][i],
|
||||
Q[1][j][i] - 0.5f*Qx[1][j][i],
|
||||
Q[2][j][i] - 0.5f*Qx[2][j][i]);
|
||||
const float3 Q_lr = make_float3(Q[0][j][i] + 0.5f*Qx[0][j][i],
|
||||
Q[1][j][i] + 0.5f*Qx[1][j][i],
|
||||
Q[2][j][i] + 0.5f*Qx[2][j][i]);
|
||||
|
||||
//Evolve half a timestep (predictor step)
|
||||
const float3 Q_r_bar = Q_rl + dt_/(2.0f*dx_) * (F_func(Q_rl, g_) - F_func(Q_rr, g_));
|
||||
@ -85,34 +78,27 @@ void computeFluxF(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
*/
|
||||
__device__
|
||||
void computeFluxG(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float Qy[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
float G[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1],
|
||||
float Qy[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float G[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
const float g_, const float dy_, const float dt_) {
|
||||
//Index of thread within block
|
||||
const int tx = get_local_id(0);
|
||||
const int ty = get_local_id(1);
|
||||
|
||||
for (int j=ty; j<BLOCK_HEIGHT+1; j+=BLOCK_HEIGHT) {
|
||||
const int l = j + 1;
|
||||
{
|
||||
int i=tx;
|
||||
const int k = i + 2; //Skip ghost cells
|
||||
for (int j=threadIdx.y+1; j<BLOCK_HEIGHT+2; j+=BLOCK_HEIGHT) {
|
||||
for (int i=threadIdx.x; i<BLOCK_WIDTH+4; i+=BLOCK_WIDTH) {
|
||||
// Reconstruct point values of Q at the left and right hand side
|
||||
// of the cell for both the left (i) and right (i+1) cell
|
||||
//Note that hu and hv are swapped ("transposing" the domain)!
|
||||
const float3 Q_rl = make_float3(Q[0][l+1][k] - 0.5f*Qy[0][j+1][i],
|
||||
Q[2][l+1][k] - 0.5f*Qy[2][j+1][i],
|
||||
Q[1][l+1][k] - 0.5f*Qy[1][j+1][i]);
|
||||
const float3 Q_rr = make_float3(Q[0][l+1][k] + 0.5f*Qy[0][j+1][i],
|
||||
Q[2][l+1][k] + 0.5f*Qy[2][j+1][i],
|
||||
Q[1][l+1][k] + 0.5f*Qy[1][j+1][i]);
|
||||
const float3 Q_rl = make_float3(Q[0][j+1][i] - 0.5f*Qy[0][j+1][i],
|
||||
Q[2][j+1][i] - 0.5f*Qy[2][j+1][i],
|
||||
Q[1][j+1][i] - 0.5f*Qy[1][j+1][i]);
|
||||
const float3 Q_rr = make_float3(Q[0][j+1][i] + 0.5f*Qy[0][j+1][i],
|
||||
Q[2][j+1][i] + 0.5f*Qy[2][j+1][i],
|
||||
Q[1][j+1][i] + 0.5f*Qy[1][j+1][i]);
|
||||
|
||||
const float3 Q_ll = make_float3(Q[0][l][k] - 0.5f*Qy[0][j][i],
|
||||
Q[2][l][k] - 0.5f*Qy[2][j][i],
|
||||
Q[1][l][k] - 0.5f*Qy[1][j][i]);
|
||||
const float3 Q_lr = make_float3(Q[0][l][k] + 0.5f*Qy[0][j][i],
|
||||
Q[2][l][k] + 0.5f*Qy[2][j][i],
|
||||
Q[1][l][k] + 0.5f*Qy[1][j][i]);
|
||||
const float3 Q_ll = make_float3(Q[0][j][i] - 0.5f*Qy[0][j][i],
|
||||
Q[2][j][i] - 0.5f*Qy[2][j][i],
|
||||
Q[1][j][i] - 0.5f*Qy[1][j][i]);
|
||||
const float3 Q_lr = make_float3(Q[0][j][i] + 0.5f*Qy[0][j][i],
|
||||
Q[2][j][i] + 0.5f*Qy[2][j][i],
|
||||
Q[1][j][i] + 0.5f*Qy[1][j][i]);
|
||||
|
||||
//Evolve half a timestep (predictor step)
|
||||
const float3 Q_r_bar = Q_rl + dt_/(2.0f*dy_) * (F_func(Q_rl, g_) - F_func(Q_rr, g_));
|
||||
@ -145,6 +131,7 @@ __global__ void HLL2Kernel(
|
||||
float theta_,
|
||||
|
||||
int step_,
|
||||
int boundary_conditions_,
|
||||
|
||||
//Input h^n
|
||||
float* h0_ptr_, int h0_pitch_,
|
||||
@ -154,69 +141,61 @@ __global__ void HLL2Kernel(
|
||||
//Output h^{n+1}
|
||||
float* h1_ptr_, int h1_pitch_,
|
||||
float* hu1_ptr_, int hu1_pitch_,
|
||||
float* hv1_ptr_, int hv1_pitch_) {
|
||||
float* hv1_ptr_, int hv1_pitch_,
|
||||
|
||||
//Output CFL
|
||||
float* cfl_) {
|
||||
|
||||
const unsigned int w = BLOCK_WIDTH;
|
||||
const unsigned int h = BLOCK_HEIGHT;
|
||||
const unsigned int gc_x = 2;
|
||||
const unsigned int gc_y = 2;
|
||||
const unsigned int vars = 3;
|
||||
|
||||
//Shared memory variables
|
||||
__shared__ float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4];
|
||||
__shared__ float Qx[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2];
|
||||
__shared__ float F[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1];
|
||||
|
||||
|
||||
|
||||
__shared__ float Q[3][h+4][w+4];
|
||||
__shared__ float Qx[3][h+4][w+4];
|
||||
__shared__ float F[3][h+4][w+4];
|
||||
|
||||
//Read into shared memory
|
||||
readBlock2(h0_ptr_, h0_pitch_,
|
||||
hu0_ptr_, hu0_pitch_,
|
||||
hv0_ptr_, hv0_pitch_,
|
||||
Q, nx_, ny_);
|
||||
__syncthreads();
|
||||
|
||||
//Set boundary conditions
|
||||
noFlowBoundary2(Q, nx_, ny_);
|
||||
__syncthreads();
|
||||
readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
|
||||
|
||||
//Step 0 => evolve x first, then y
|
||||
if (step_ == 0) {
|
||||
//Compute fluxes along the x axis and evolve
|
||||
minmodSlopeX(Q, Qx, theta_);
|
||||
minmodSlopeX<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxF(Q, Qx, F, g_, dx_, dt_);
|
||||
__syncthreads();
|
||||
evolveF2(Q, F, nx_, ny_, dx_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Set boundary conditions
|
||||
noFlowBoundary2(Q, nx_, ny_);
|
||||
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Compute fluxes along the y axis and evolve
|
||||
minmodSlopeY(Q, Qx, theta_);
|
||||
minmodSlopeY<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxG(Q, Qx, F, g_, dy_, dt_);
|
||||
__syncthreads();
|
||||
evolveG2(Q, F, nx_, ny_, dy_, dt_);
|
||||
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
|
||||
__syncthreads();
|
||||
}
|
||||
//Step 1 => evolve y first, then x
|
||||
else {
|
||||
//Compute fluxes along the y axis and evolve
|
||||
minmodSlopeY(Q, Qx, theta_);
|
||||
minmodSlopeY<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxG(Q, Qx, F, g_, dy_, dt_);
|
||||
__syncthreads();
|
||||
evolveG2(Q, F, nx_, ny_, dy_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Set boundary conditions
|
||||
noFlowBoundary2(Q, nx_, ny_);
|
||||
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Compute fluxes along the x axis and evolve
|
||||
minmodSlopeX(Q, Qx, theta_);
|
||||
minmodSlopeX<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxF(Q, Qx, F, g_, dx_, dt_);
|
||||
__syncthreads();
|
||||
evolveF2(Q, F, nx_, ny_, dx_, dt_);
|
||||
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
|
||||
__syncthreads();
|
||||
}
|
||||
|
||||
@ -224,10 +203,14 @@ __global__ void HLL2Kernel(
|
||||
|
||||
|
||||
// Write to main memory for all internal cells
|
||||
writeBlock2(h1_ptr_, h1_pitch_,
|
||||
hu1_ptr_, hu1_pitch_,
|
||||
hv1_ptr_, hv1_pitch_,
|
||||
Q, nx_, ny_);
|
||||
writeBlock<w, h, gc_x, gc_y>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
|
||||
|
||||
//Compute the CFL for this block
|
||||
if (cfl_ != NULL) {
|
||||
writeCfl<w, h, gc_x, gc_y, vars>(Q, F[0], nx_, ny_, dx_, dy_, g_, cfl_);
|
||||
}
|
||||
}
|
||||
|
||||
} // extern "C"
|
@ -24,9 +24,9 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
|
||||
#include "common.cu"
|
||||
#include "limiters.cu"
|
||||
#include "fluxes/CentralUpwind.cu"
|
||||
#include "common.h"
|
||||
#include "SWECommon.h"
|
||||
#include "limiters.h"
|
||||
|
||||
|
||||
__device__
|
||||
@ -35,8 +35,8 @@ void computeFluxF(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float F[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1],
|
||||
const float g_) {
|
||||
//Index of thread within block
|
||||
const int tx = get_local_id(0);
|
||||
const int ty = get_local_id(1);
|
||||
const int tx = threadIdx.x;
|
||||
const int ty = threadIdx.y;
|
||||
|
||||
{
|
||||
int j=ty;
|
||||
@ -66,8 +66,8 @@ void computeFluxG(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float G[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1],
|
||||
const float g_) {
|
||||
//Index of thread within block
|
||||
const int tx = get_local_id(0);
|
||||
const int ty = get_local_id(1);
|
||||
const int tx = threadIdx.x;
|
||||
const int ty = threadIdx.y;
|
||||
|
||||
for (int j=ty; j<BLOCK_HEIGHT+1; j+=BLOCK_HEIGHT) {
|
||||
const int l = j + 1;
|
||||
@ -96,6 +96,39 @@ void computeFluxG(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
|
||||
|
||||
|
||||
__device__ void minmodSlopeX(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float Qx[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
const float theta_) {
|
||||
//Reconstruct slopes along x axis
|
||||
for (int p=0; p<3; ++p) {
|
||||
{
|
||||
const int j = threadIdx.y+2;
|
||||
for (int i=threadIdx.x+1; i<BLOCK_WIDTH+3; i+=BLOCK_WIDTH) {
|
||||
Qx[p][j-2][i-1] = minmodSlope(Q[p][j][i-1], Q[p][j][i], Q[p][j][i+1], theta_);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* Reconstructs a minmod slope for a whole block along the ordinate
|
||||
*/
|
||||
__device__ void minmodSlopeY(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float Qy[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
const float theta_) {
|
||||
//Reconstruct slopes along y axis
|
||||
for (int p=0; p<3; ++p) {
|
||||
const int i = threadIdx.x + 2;
|
||||
for (int j=threadIdx.y+1; j<BLOCK_HEIGHT+3; j+=BLOCK_HEIGHT) {
|
||||
{
|
||||
Qy[p][j-1][i-2] = minmodSlope(Q[p][j-1][i], Q[p][j][i], Q[p][j+1][i], theta_);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
/**
|
||||
* This unsplit kernel computes the 2D numerical scheme with a TVD RK2 time integration scheme
|
||||
*/
|
||||
@ -107,7 +140,8 @@ __global__ void KP07Kernel(
|
||||
|
||||
float theta_,
|
||||
|
||||
int step_,
|
||||
int step_order_,
|
||||
int boundary_conditions_,
|
||||
|
||||
//Input h^n
|
||||
float* h0_ptr_, int h0_pitch_,
|
||||
@ -117,39 +151,37 @@ __global__ void KP07Kernel(
|
||||
//Output h^{n+1}
|
||||
float* h1_ptr_, int h1_pitch_,
|
||||
float* hu1_ptr_, int hu1_pitch_,
|
||||
float* hv1_ptr_, int hv1_pitch_) {
|
||||
float* hv1_ptr_, int hv1_pitch_,
|
||||
|
||||
//Output CFL
|
||||
float* cfl_) {
|
||||
const unsigned int w = BLOCK_WIDTH;
|
||||
const unsigned int h = BLOCK_HEIGHT;
|
||||
const unsigned int gc_x = 2;
|
||||
const unsigned int gc_y = 2;
|
||||
const unsigned int vars = 3;
|
||||
|
||||
//Index of thread within block
|
||||
const int tx = get_local_id(0);
|
||||
const int ty = get_local_id(1);
|
||||
const int tx = threadIdx.x;
|
||||
const int ty = threadIdx.y;
|
||||
|
||||
//Index of cell within domain
|
||||
const int ti = get_global_id(0) + 2; //Skip global ghost cells, i.e., +2
|
||||
const int tj = get_global_id(1) + 2;
|
||||
const int ti = blockDim.x*blockIdx.x + threadIdx.x + 2; //Skip global ghost cells, i.e., +2
|
||||
const int tj = blockDim.y*blockIdx.y + threadIdx.y + 2;
|
||||
|
||||
//Shared memory variables
|
||||
__shared__ float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4];
|
||||
|
||||
//The following slightly wastes memory, but enables us to reuse the
|
||||
//funcitons in common.opencl
|
||||
__shared__ float Qx[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2];
|
||||
__shared__ float Qy[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2];
|
||||
__shared__ float F[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1];
|
||||
__shared__ float G[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1];
|
||||
__shared__ float Q[3][h+4][w+4];
|
||||
__shared__ float Qx[3][h+2][w+2];
|
||||
__shared__ float Qy[3][h+2][w+2];
|
||||
__shared__ float F[3][h+1][w+1];
|
||||
__shared__ float G[3][h+1][w+1];
|
||||
|
||||
|
||||
|
||||
//Read into shared memory
|
||||
readBlock2(h0_ptr_, h0_pitch_,
|
||||
hu0_ptr_, hu0_pitch_,
|
||||
hv0_ptr_, hv0_pitch_,
|
||||
Q, nx_, ny_);
|
||||
__syncthreads();
|
||||
|
||||
|
||||
//Fix boundary conditions
|
||||
noFlowBoundary2(Q, nx_, ny_);
|
||||
__syncthreads();
|
||||
readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
|
||||
|
||||
|
||||
//Reconstruct slopes along x and axis
|
||||
@ -169,42 +201,33 @@ __global__ void KP07Kernel(
|
||||
const int i = tx + 2; //Skip local ghost cells, i.e., +2
|
||||
const int j = ty + 2;
|
||||
|
||||
const float h1 = Q[0][j][i] + (F[0][ty][tx] - F[0][ty ][tx+1]) * dt_ / dx_
|
||||
+ (G[0][ty][tx] - G[0][ty+1][tx ]) * dt_ / dy_;
|
||||
const float hu1 = Q[1][j][i] + (F[1][ty][tx] - F[1][ty ][tx+1]) * dt_ / dx_
|
||||
+ (G[1][ty][tx] - G[1][ty+1][tx ]) * dt_ / dy_;
|
||||
const float hv1 = Q[2][j][i] + (F[2][ty][tx] - F[2][ty ][tx+1]) * dt_ / dx_
|
||||
+ (G[2][ty][tx] - G[2][ty+1][tx ]) * dt_ / dy_;
|
||||
Q[0][j][i] += (F[0][ty][tx] - F[0][ty ][tx+1]) * dt_ / dx_
|
||||
+ (G[0][ty][tx] - G[0][ty+1][tx ]) * dt_ / dy_;
|
||||
Q[1][j][i] += (F[1][ty][tx] - F[1][ty ][tx+1]) * dt_ / dx_
|
||||
+ (G[1][ty][tx] - G[1][ty+1][tx ]) * dt_ / dy_;
|
||||
Q[2][j][i] += (F[2][ty][tx] - F[2][ty ][tx+1]) * dt_ / dx_
|
||||
+ (G[2][ty][tx] - G[2][ty+1][tx ]) * dt_ / dy_;
|
||||
|
||||
float* const h_row = (float*) ((char*) h1_ptr_ + h1_pitch_*tj);
|
||||
float* const hu_row = (float*) ((char*) hu1_ptr_ + hu1_pitch_*tj);
|
||||
float* const hv_row = (float*) ((char*) hv1_ptr_ + hv1_pitch_*tj);
|
||||
|
||||
if (step_ == 0) {
|
||||
//First step of RK2 ODE integrator
|
||||
|
||||
h_row[ti] = h1;
|
||||
hu_row[ti] = hu1;
|
||||
hv_row[ti] = hv1;
|
||||
}
|
||||
else if (step_ == 1) {
|
||||
//Second step of RK2 ODE integrator
|
||||
|
||||
//First read Q^n
|
||||
const float h_a = h_row[ti];
|
||||
const float hu_a = hu_row[ti];
|
||||
const float hv_a = hv_row[ti];
|
||||
|
||||
//Compute Q^n+1
|
||||
const float h_b = 0.5f*(h_a + h1);
|
||||
const float hu_b = 0.5f*(hu_a + hu1);
|
||||
const float hv_b = 0.5f*(hv_a + hv1);
|
||||
|
||||
|
||||
if (getOrder(step_order_) == 2 && getStep(step_order_) == 1) {
|
||||
//Write to main memory
|
||||
h_row[ti] = h_b;
|
||||
hu_row[ti] = hu_b;
|
||||
hv_row[ti] = hv_b;
|
||||
h_row[ti] = 0.5f*(h_row[ti] + Q[0][j][i]);
|
||||
hu_row[ti] = 0.5f*(hu_row[ti] + Q[1][j][i]);
|
||||
hv_row[ti] = 0.5f*(hv_row[ti] + Q[2][j][i]);
|
||||
}
|
||||
else {
|
||||
h_row[ti] = Q[0][j][i];
|
||||
hu_row[ti] = Q[1][j][i];
|
||||
hv_row[ti] = Q[2][j][i];
|
||||
}
|
||||
}
|
||||
|
||||
//Compute the CFL for this block
|
||||
if (cfl_ != NULL) {
|
||||
writeCfl<w, h, gc_x, gc_y, vars>(Q, Q[0], nx_, ny_, dx_, dy_, g_, cfl_);
|
||||
}
|
||||
}
|
||||
} //extern "C"
|
@ -24,40 +24,34 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
|
||||
#include "common.cu"
|
||||
#include "limiters.cu"
|
||||
#include "fluxes/CentralUpwind.cu"
|
||||
#include "common.h"
|
||||
#include "SWECommon.h"
|
||||
#include "limiters.h"
|
||||
|
||||
|
||||
template <int w, int h, int gc_x, int gc_y>
|
||||
__device__
|
||||
void computeFluxF(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float Qx[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
float F[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1],
|
||||
void computeFluxF(float Q[3][h+2*gc_y][w+2*gc_x],
|
||||
float Qx[3][h+2*gc_y][w+2*gc_x],
|
||||
float F[3][h+2*gc_y][w+2*gc_x],
|
||||
const float g_, const float dx_, const float dt_) {
|
||||
//Index of thread within block
|
||||
const int tx = get_local_id(0);
|
||||
const int ty = get_local_id(1);
|
||||
|
||||
{
|
||||
int j=ty;
|
||||
const int l = j + 2; //Skip ghost cells
|
||||
for (int i=tx; i<BLOCK_WIDTH+1; i+=BLOCK_WIDTH) {
|
||||
const int k = i + 1;
|
||||
for (int j=threadIdx.y; j<h+2*gc_y; j+=h) {
|
||||
for (int i=threadIdx.x+1; i<w+2*gc_x-2; i+=w) {
|
||||
// Reconstruct point values of Q at the left and right hand side
|
||||
// of the cell for both the left (i) and right (i+1) cell
|
||||
const float3 Q_rl = make_float3(Q[0][l][k+1] - 0.5f*Qx[0][j][i+1],
|
||||
Q[1][l][k+1] - 0.5f*Qx[1][j][i+1],
|
||||
Q[2][l][k+1] - 0.5f*Qx[2][j][i+1]);
|
||||
const float3 Q_rr = make_float3(Q[0][l][k+1] + 0.5f*Qx[0][j][i+1],
|
||||
Q[1][l][k+1] + 0.5f*Qx[1][j][i+1],
|
||||
Q[2][l][k+1] + 0.5f*Qx[2][j][i+1]);
|
||||
|
||||
const float3 Q_ll = make_float3(Q[0][l][k] - 0.5f*Qx[0][j][i],
|
||||
Q[1][l][k] - 0.5f*Qx[1][j][i],
|
||||
Q[2][l][k] - 0.5f*Qx[2][j][i]);
|
||||
const float3 Q_lr = make_float3(Q[0][l][k] + 0.5f*Qx[0][j][i],
|
||||
Q[1][l][k] + 0.5f*Qx[1][j][i],
|
||||
Q[2][l][k] + 0.5f*Qx[2][j][i]);
|
||||
const float3 Q_rl = make_float3(Q[0][j][i+1] - 0.5f*Qx[0][j][i+1],
|
||||
Q[1][j][i+1] - 0.5f*Qx[1][j][i+1],
|
||||
Q[2][j][i+1] - 0.5f*Qx[2][j][i+1]);
|
||||
const float3 Q_rr = make_float3(Q[0][j][i+1] + 0.5f*Qx[0][j][i+1],
|
||||
Q[1][j][i+1] + 0.5f*Qx[1][j][i+1],
|
||||
Q[2][j][i+1] + 0.5f*Qx[2][j][i+1]);
|
||||
|
||||
const float3 Q_ll = make_float3(Q[0][j][i] - 0.5f*Qx[0][j][i],
|
||||
Q[1][j][i] - 0.5f*Qx[1][j][i],
|
||||
Q[2][j][i] - 0.5f*Qx[2][j][i]);
|
||||
const float3 Q_lr = make_float3(Q[0][j][i] + 0.5f*Qx[0][j][i],
|
||||
Q[1][j][i] + 0.5f*Qx[1][j][i],
|
||||
Q[2][j][i] + 0.5f*Qx[2][j][i]);
|
||||
|
||||
//Evolve half a timestep (predictor step)
|
||||
const float3 Q_r_bar = Q_rl + dt_/(2.0f*dx_) * (F_func(Q_rl, g_) - F_func(Q_rr, g_));
|
||||
@ -74,36 +68,30 @@ void computeFluxF(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
}
|
||||
}
|
||||
|
||||
template <int w, int h, int gc_x, int gc_y>
|
||||
__device__
|
||||
void computeFluxG(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float Qy[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
float G[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1],
|
||||
void computeFluxG(float Q[3][h+2*gc_y][w+2*gc_x],
|
||||
float Qy[3][h+2*gc_y][w+2*gc_x],
|
||||
float G[3][h+2*gc_y][w+2*gc_x],
|
||||
const float g_, const float dy_, const float dt_) {
|
||||
//Index of thread within block
|
||||
const int tx = get_local_id(0);
|
||||
const int ty = get_local_id(1);
|
||||
|
||||
for (int j=ty; j<BLOCK_HEIGHT+1; j+=BLOCK_HEIGHT) {
|
||||
const int l = j + 1;
|
||||
{
|
||||
int i=tx;
|
||||
const int k = i + 2; //Skip ghost cells
|
||||
for (int j=threadIdx.y+1; j<h+2*gc_y-2; j+=h) {
|
||||
for (int i=threadIdx.x; i<w+2*gc_x; i+=w) {
|
||||
// Reconstruct point values of Q at the left and right hand side
|
||||
// of the cell for both the left (i) and right (i+1) cell
|
||||
//Note that hu and hv are swapped ("transposing" the domain)!
|
||||
const float3 Q_rl = make_float3(Q[0][l+1][k] - 0.5f*Qy[0][j+1][i],
|
||||
Q[2][l+1][k] - 0.5f*Qy[2][j+1][i],
|
||||
Q[1][l+1][k] - 0.5f*Qy[1][j+1][i]);
|
||||
const float3 Q_rr = make_float3(Q[0][l+1][k] + 0.5f*Qy[0][j+1][i],
|
||||
Q[2][l+1][k] + 0.5f*Qy[2][j+1][i],
|
||||
Q[1][l+1][k] + 0.5f*Qy[1][j+1][i]);
|
||||
const float3 Q_rl = make_float3(Q[0][j+1][i] - 0.5f*Qy[0][j+1][i],
|
||||
Q[2][j+1][i] - 0.5f*Qy[2][j+1][i],
|
||||
Q[1][j+1][i] - 0.5f*Qy[1][j+1][i]);
|
||||
const float3 Q_rr = make_float3(Q[0][j+1][i] + 0.5f*Qy[0][j+1][i],
|
||||
Q[2][j+1][i] + 0.5f*Qy[2][j+1][i],
|
||||
Q[1][j+1][i] + 0.5f*Qy[1][j+1][i]);
|
||||
|
||||
const float3 Q_ll = make_float3(Q[0][l][k] - 0.5f*Qy[0][j][i],
|
||||
Q[2][l][k] - 0.5f*Qy[2][j][i],
|
||||
Q[1][l][k] - 0.5f*Qy[1][j][i]);
|
||||
const float3 Q_lr = make_float3(Q[0][l][k] + 0.5f*Qy[0][j][i],
|
||||
Q[2][l][k] + 0.5f*Qy[2][j][i],
|
||||
Q[1][l][k] + 0.5f*Qy[1][j][i]);
|
||||
const float3 Q_ll = make_float3(Q[0][j][i] - 0.5f*Qy[0][j][i],
|
||||
Q[2][j][i] - 0.5f*Qy[2][j][i],
|
||||
Q[1][j][i] - 0.5f*Qy[1][j][i]);
|
||||
const float3 Q_lr = make_float3(Q[0][j][i] + 0.5f*Qy[0][j][i],
|
||||
Q[2][j][i] + 0.5f*Qy[2][j][i],
|
||||
Q[1][j][i] + 0.5f*Qy[1][j][i]);
|
||||
|
||||
//Evolve half a timestep (predictor step)
|
||||
const float3 Q_r_bar = Q_rl + dt_/(2.0f*dy_) * (F_func(Q_rl, g_) - F_func(Q_rr, g_));
|
||||
@ -128,6 +116,10 @@ void computeFluxG(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
* This unsplit kernel computes the 2D numerical scheme with a TVD RK2 time integration scheme
|
||||
*/
|
||||
extern "C" {
|
||||
|
||||
|
||||
|
||||
|
||||
__global__ void KP07DimsplitKernel(
|
||||
int nx_, int ny_,
|
||||
float dx_, float dy_, float dt_,
|
||||
@ -136,6 +128,7 @@ __global__ void KP07DimsplitKernel(
|
||||
float theta_,
|
||||
|
||||
int step_,
|
||||
int boundary_conditions_,
|
||||
|
||||
//Input h^n
|
||||
float* h0_ptr_, int h0_pitch_,
|
||||
@ -145,81 +138,79 @@ __global__ void KP07DimsplitKernel(
|
||||
//Output h^{n+1}
|
||||
float* h1_ptr_, int h1_pitch_,
|
||||
float* hu1_ptr_, int hu1_pitch_,
|
||||
float* hv1_ptr_, int hv1_pitch_) {
|
||||
float* hv1_ptr_, int hv1_pitch_,
|
||||
|
||||
//Output CFL
|
||||
float* cfl_) {
|
||||
const unsigned int w = BLOCK_WIDTH;
|
||||
const unsigned int h = BLOCK_HEIGHT;
|
||||
const unsigned int gc_x = 2;
|
||||
const unsigned int gc_y = 2;
|
||||
const unsigned int vars = 3;
|
||||
|
||||
//Shared memory variables
|
||||
__shared__ float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4];
|
||||
__shared__ float Qx[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2];
|
||||
__shared__ float F[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1];
|
||||
|
||||
|
||||
__shared__ float Q[vars][h+2*gc_y][w+2*gc_x];
|
||||
__shared__ float Qx[vars][h+2*gc_y][w+2*gc_x];
|
||||
__shared__ float F[vars][h+2*gc_y][w+2*gc_x];
|
||||
|
||||
//Read into shared memory
|
||||
readBlock2(h0_ptr_, h0_pitch_,
|
||||
hu0_ptr_, hu0_pitch_,
|
||||
hv0_ptr_, hv0_pitch_,
|
||||
Q, nx_, ny_);
|
||||
__syncthreads();
|
||||
readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
|
||||
|
||||
|
||||
//Fix boundary conditions
|
||||
noFlowBoundary2(Q, nx_, ny_);
|
||||
__syncthreads();
|
||||
|
||||
|
||||
|
||||
//Step 0 => evolve x first, then y
|
||||
if (step_ == 0) {
|
||||
//Compute fluxes along the x axis and evolve
|
||||
minmodSlopeX(Q, Qx, theta_);
|
||||
//Along X
|
||||
minmodSlopeX<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxF(Q, Qx, F, g_, dx_, dt_);
|
||||
computeFluxF<w, h, gc_x, gc_y>(Q, Qx, F, g_, dx_, dt_);
|
||||
__syncthreads();
|
||||
evolveF2(Q, F, nx_, ny_, dx_, dt_);
|
||||
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Set boundary conditions
|
||||
noFlowBoundary2(Q, nx_, ny_);
|
||||
//Along Y
|
||||
minmodSlopeY<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
|
||||
//Compute fluxes along the y axis and evolve
|
||||
minmodSlopeY(Q, Qx, theta_);
|
||||
computeFluxG<w, h, gc_x, gc_y>(Q, Qx, F, g_, dy_, dt_);
|
||||
__syncthreads();
|
||||
computeFluxG(Q, Qx, F, g_, dy_, dt_);
|
||||
__syncthreads();
|
||||
evolveG2(Q, F, nx_, ny_, dy_, dt_);
|
||||
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
|
||||
__syncthreads();
|
||||
}
|
||||
//Step 1 => evolve y first, then x
|
||||
else {
|
||||
//Compute fluxes along the y axis and evolve
|
||||
minmodSlopeY(Q, Qx, theta_);
|
||||
//Along Y
|
||||
minmodSlopeY<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
computeFluxG(Q, Qx, F, g_, dy_, dt_);
|
||||
computeFluxG<w, h, gc_x, gc_y>(Q, Qx, F, g_, dy_, dt_);
|
||||
__syncthreads();
|
||||
evolveG2(Q, F, nx_, ny_, dy_, dt_);
|
||||
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Set boundary conditions
|
||||
noFlowBoundary2(Q, nx_, ny_);
|
||||
//Along X
|
||||
minmodSlopeX<w, h, gc_x, gc_y, vars>(Q, Qx, theta_);
|
||||
__syncthreads();
|
||||
|
||||
//Compute fluxes along the x axis and evolve
|
||||
minmodSlopeX(Q, Qx, theta_);
|
||||
computeFluxF<w, h, gc_x, gc_y>(Q, Qx, F, g_, dx_, dt_);
|
||||
__syncthreads();
|
||||
computeFluxF(Q, Qx, F, g_, dx_, dt_);
|
||||
__syncthreads();
|
||||
evolveF2(Q, F, nx_, ny_, dx_, dt_);
|
||||
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
|
||||
__syncthreads();
|
||||
}
|
||||
|
||||
|
||||
// Write to main memory for all internal cells
|
||||
writeBlock2(h1_ptr_, h1_pitch_,
|
||||
hu1_ptr_, hu1_pitch_,
|
||||
hv1_ptr_, hv1_pitch_,
|
||||
Q, nx_, ny_);
|
||||
writeBlock<w, h, gc_x, gc_y>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
|
||||
|
||||
//Compute the CFL for this block
|
||||
if (cfl_ != NULL) {
|
||||
writeCfl<w, h, gc_x, gc_y, vars>(Q, F[0], nx_, ny_, dx_, dy_, g_, cfl_);
|
||||
}
|
||||
}
} // extern "C"
|
@ -19,8 +19,8 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
|
||||
#include "common.cu"
|
||||
#include "fluxes/LaxFriedrichs.cu"
|
||||
#include "common.h"
|
||||
#include "SWECommon.h"
|
||||
|
||||
|
||||
/**
|
||||
@ -32,8 +32,8 @@ void computeFluxF(float Q[3][block_height+2][block_width+2],
|
||||
float F[3][block_height][block_width+1],
|
||||
const float g_, const float dx_, const float dt_) {
|
||||
//Index of thread within block
|
||||
const int tx = get_local_id(0);
|
||||
const int ty = get_local_id(1);
|
||||
const int tx = threadIdx.x;
|
||||
const int ty = threadIdx.y;
|
||||
|
||||
{
|
||||
const int j=ty;
|
||||
@ -68,8 +68,8 @@ void computeFluxG(float Q[3][block_height+2][block_width+2],
|
||||
float G[3][block_height+1][block_width],
|
||||
const float g_, const float dy_, const float dt_) {
|
||||
//Index of thread within block
|
||||
const int tx = get_local_id(0);
|
||||
const int ty = get_local_id(1);
|
||||
const int tx = threadIdx.x;
|
||||
const int ty = threadIdx.y;
|
||||
|
||||
for (int j=ty; j<block_height+1; j+=block_height) {
|
||||
const int l = j;
|
||||
@ -104,6 +104,8 @@ void LxFKernel(
|
||||
float dx_, float dy_, float dt_,
|
||||
float g_,
|
||||
|
||||
int boundary_conditions_,
|
||||
|
||||
//Input h^n
|
||||
float* h0_ptr_, int h0_pitch_,
|
||||
float* hu0_ptr_, int hu0_pitch_,
|
||||
@ -112,60 +114,53 @@ void LxFKernel(
|
||||
//Output h^{n+1}
|
||||
float* h1_ptr_, int h1_pitch_,
|
||||
float* hu1_ptr_, int hu1_pitch_,
|
||||
float* hv1_ptr_, int hv1_pitch_) {
|
||||
|
||||
const int block_width = BLOCK_WIDTH;
|
||||
const int block_height = BLOCK_HEIGHT;
|
||||
|
||||
//Index of cell within domain
|
||||
const int ti = get_global_id(0) + 1; //Skip global ghost cells, i.e., +1
|
||||
const int tj = get_global_id(1) + 1;
|
||||
float* hv1_ptr_, int hv1_pitch_,
|
||||
|
||||
//Output CFL
|
||||
float* cfl_) {
|
||||
|
||||
__shared__ float Q[3][block_height+2][block_width+2];
|
||||
__shared__ float F[3][block_height][block_width+1];
|
||||
__shared__ float G[3][block_height+1][block_width];
|
||||
const unsigned int w = BLOCK_WIDTH;
|
||||
const unsigned int h = BLOCK_HEIGHT;
|
||||
const unsigned int gc_x = 1;
|
||||
const unsigned int gc_y = 1;
|
||||
const unsigned int vars = 3;
|
||||
|
||||
//Read into shared memory
|
||||
readBlock1(h0_ptr_, h0_pitch_,
|
||||
hu0_ptr_, hu0_pitch_,
|
||||
hv0_ptr_, hv0_pitch_,
|
||||
Q, nx_, ny_);
|
||||
__syncthreads();
|
||||
|
||||
//Set boundary conditions
|
||||
noFlowBoundary1(Q, nx_, ny_);
|
||||
__syncthreads();
|
||||
__shared__ float Q[vars][h+2][w+2];
|
||||
__shared__ float F[vars][h ][w+1];
|
||||
__shared__ float G[vars][h+1][w ];
|
||||
|
||||
//Read from global memory
|
||||
readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
|
||||
|
||||
//Compute fluxes along the x and y axis
|
||||
computeFluxF<block_width, block_height>(Q, F, g_, dx_, dt_);
|
||||
computeFluxG<block_width, block_height>(Q, G, g_, dy_, dt_);
|
||||
computeFluxF<w, h>(Q, F, g_, dx_, dt_);
|
||||
computeFluxG<w, h>(Q, G, g_, dy_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
|
||||
//Evolve for all internal cells
|
||||
if (ti > 0 && ti < nx_+1 && tj > 0 && tj < ny_+1) {
|
||||
//Index of thread within block
|
||||
const int tx = get_local_id(0);
|
||||
const int ty = get_local_id(1);
|
||||
|
||||
const int i = tx + 1; //Skip local ghost cells, i.e., +1
|
||||
const int j = ty + 1;
|
||||
|
||||
const float h1 = Q[0][j][i] + (F[0][ty][tx] - F[0][ty ][tx+1]) * dt_ / dx_
|
||||
+ (G[0][ty][tx] - G[0][ty+1][tx ]) * dt_ / dy_;
|
||||
const float hu1 = Q[1][j][i] + (F[1][ty][tx] - F[1][ty ][tx+1]) * dt_ / dx_
|
||||
+ (G[1][ty][tx] - G[1][ty+1][tx ]) * dt_ / dy_;
|
||||
const float hv1 = Q[2][j][i] + (F[2][ty][tx] - F[2][ty ][tx+1]) * dt_ / dx_
|
||||
+ (G[2][ty][tx] - G[2][ty+1][tx ]) * dt_ / dy_;
|
||||
|
||||
float* const h_row = (float*) ((char*) h1_ptr_ + h1_pitch_*tj);
|
||||
float* const hu_row = (float*) ((char*) hu1_ptr_ + hu1_pitch_*tj);
|
||||
float* const hv_row = (float*) ((char*) hv1_ptr_ + hv1_pitch_*tj);
|
||||
|
||||
h_row[ti] = h1;
|
||||
hu_row[ti] = hu1;
|
||||
hv_row[ti] = hv1;
|
||||
//Evolve for all cells
|
||||
const int tx = threadIdx.x;
|
||||
const int ty = threadIdx.y;
|
||||
const int i = tx + 1; //Skip local ghost cells, i.e., +1
|
||||
const int j = ty + 1;
|
||||
|
||||
Q[0][j][i] += (F[0][ty][tx] - F[0][ty ][tx+1]) * dt_ / dx_
|
||||
+ (G[0][ty][tx] - G[0][ty+1][tx ]) * dt_ / dy_;
|
||||
Q[1][j][i] += (F[1][ty][tx] - F[1][ty ][tx+1]) * dt_ / dx_
|
||||
+ (G[1][ty][tx] - G[1][ty+1][tx ]) * dt_ / dy_;
|
||||
Q[2][j][i] += (F[2][ty][tx] - F[2][ty ][tx+1]) * dt_ / dx_
|
||||
+ (G[2][ty][tx] - G[2][ty+1][tx ]) * dt_ / dy_;
|
||||
__syncthreads();
|
||||
|
||||
//Write to main memory
|
||||
writeBlock<w, h, gc_x, gc_y>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
|
||||
|
||||
//Compute the CFL for this block
|
||||
if (cfl_ != NULL) {
|
||||
writeCfl<w, h, gc_x, gc_y, vars>(Q, Q[0], nx_, ny_, dx_, dy_, g_, cfl_);
|
||||
}
|
||||
}
|
||||
|
@ -24,8 +24,8 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
|
||||
|
||||
|
||||
#include "common.cu"
|
||||
#include "fluxes/WeightedAverageFlux.cu"
|
||||
#include "common.h"
|
||||
#include "SWECommon.h"
|
||||
|
||||
|
||||
|
||||
@ -34,23 +34,15 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
__device__
|
||||
void computeFluxF(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float F[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1],
|
||||
float F[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
const float g_, const float dx_, const float dt_) {
|
||||
//Index of thread within block
|
||||
const int tx = get_local_id(0);
|
||||
const int ty = get_local_id(1);
|
||||
|
||||
{
|
||||
int j=ty;
|
||||
const int l = j + 2; //Skip ghost cells
|
||||
for (int i=tx; i<BLOCK_WIDTH+1; i+=BLOCK_WIDTH) {
|
||||
const int k = i + 1;
|
||||
|
||||
for (int j=threadIdx.y; j<BLOCK_HEIGHT+4; j+=BLOCK_HEIGHT) {
|
||||
for (int i=threadIdx.x+1; i<BLOCK_WIDTH+2; i+=BLOCK_WIDTH) {
|
||||
// Q at interface from the right and left
|
||||
const float3 Ql2 = make_float3(Q[0][l][k-1], Q[1][l][k-1], Q[2][l][k-1]);
|
||||
const float3 Ql1 = make_float3(Q[0][l][k ], Q[1][l][k ], Q[2][l][k ]);
|
||||
const float3 Qr1 = make_float3(Q[0][l][k+1], Q[1][l][k+1], Q[2][l][k+1]);
|
||||
const float3 Qr2 = make_float3(Q[0][l][k+2], Q[1][l][k+2], Q[2][l][k+2]);
|
||||
const float3 Ql2 = make_float3(Q[0][j][i-1], Q[1][j][i-1], Q[2][j][i-1]);
|
||||
const float3 Ql1 = make_float3(Q[0][j][i ], Q[1][j][i ], Q[2][j][i ]);
|
||||
const float3 Qr1 = make_float3(Q[0][j][i+1], Q[1][j][i+1], Q[2][j][i+1]);
|
||||
const float3 Qr2 = make_float3(Q[0][j][i+2], Q[1][j][i+2], Q[2][j][i+2]);
|
||||
|
||||
// Computed flux
|
||||
const float3 flux = WAF_1D_flux(Ql2, Ql1, Qr1, Qr2, g_, dx_, dt_);
|
||||
@ -73,24 +65,16 @@ void computeFluxF(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
*/
|
||||
__device__
|
||||
void computeFluxG(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float G[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1],
|
||||
float G[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
const float g_, const float dy_, const float dt_) {
|
||||
//Index of thread within block
|
||||
const int tx = get_local_id(0);
|
||||
const int ty = get_local_id(1);
|
||||
|
||||
//Compute fluxes along the y axis
|
||||
for (int j=ty; j<BLOCK_HEIGHT+1; j+=BLOCK_HEIGHT) {
|
||||
const int l = j + 1;
|
||||
{
|
||||
int i=tx;
|
||||
const int k = i + 2; //Skip ghost cells
|
||||
for (int j=threadIdx.y+1; j<BLOCK_HEIGHT+2; j+=BLOCK_HEIGHT) {
|
||||
for (int i=threadIdx.x; i<BLOCK_WIDTH+4; i+=BLOCK_WIDTH) {
|
||||
// Q at interface from the right and left
|
||||
// Note that we swap hu and hv
|
||||
const float3 Ql2 = make_float3(Q[0][l-1][k], Q[2][l-1][k], Q[1][l-1][k]);
|
||||
const float3 Ql1 = make_float3(Q[0][l ][k], Q[2][l ][k], Q[1][l ][k]);
|
||||
const float3 Qr1 = make_float3(Q[0][l+1][k], Q[2][l+1][k], Q[1][l+1][k]);
|
||||
const float3 Qr2 = make_float3(Q[0][l+2][k], Q[2][l+2][k], Q[1][l+2][k]);
|
||||
const float3 Ql2 = make_float3(Q[0][j-1][i], Q[2][j-1][i], Q[1][j-1][i]);
|
||||
const float3 Ql1 = make_float3(Q[0][j ][i], Q[2][j ][i], Q[1][j ][i]);
|
||||
const float3 Qr1 = make_float3(Q[0][j+1][i], Q[2][j+1][i], Q[1][j+1][i]);
|
||||
const float3 Qr2 = make_float3(Q[0][j+2][i], Q[2][j+2][i], Q[1][j+2][i]);
|
||||
|
||||
// Computed flux
|
||||
// Note that we swap back
|
||||
@ -119,7 +103,10 @@ extern "C" {
|
||||
__global__ void WAFKernel(
|
||||
int nx_, int ny_,
|
||||
float dx_, float dy_, float dt_,
|
||||
float g_, int step_,
|
||||
float g_,
|
||||
|
||||
int step_,
|
||||
int boundary_conditions_,
|
||||
|
||||
//Input h^n
|
||||
float* h0_ptr_, int h0_pitch_,
|
||||
@ -129,23 +116,24 @@ __global__ void WAFKernel(
|
||||
//Output h^{n+1}
|
||||
float* h1_ptr_, int h1_pitch_,
|
||||
float* hu1_ptr_, int hu1_pitch_,
|
||||
float* hv1_ptr_, int hv1_pitch_) {
|
||||
float* hv1_ptr_, int hv1_pitch_) {
|
||||
|
||||
const unsigned int w = BLOCK_WIDTH;
|
||||
const unsigned int h = BLOCK_HEIGHT;
|
||||
const unsigned int gc_x = 2;
|
||||
const unsigned int gc_y = 2;
|
||||
const unsigned int vars = 3;
|
||||
|
||||
//Shared memory variables
|
||||
__shared__ float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4];
|
||||
__shared__ float F[3][BLOCK_HEIGHT+1][BLOCK_WIDTH+1];
|
||||
__shared__ float Q[3][h+4][w+4];
|
||||
__shared__ float F[3][h+4][w+4];
|
||||
|
||||
|
||||
|
||||
//Read into shared memory Q from global memory
|
||||
readBlock2(h0_ptr_, h0_pitch_,
|
||||
hu0_ptr_, hu0_pitch_,
|
||||
hv0_ptr_, hv0_pitch_,
|
||||
Q, nx_, ny_);
|
||||
__syncthreads();
|
||||
|
||||
|
||||
//Set boundary conditions
|
||||
noFlowBoundary2(Q, nx_, ny_);
|
||||
readBlock<w, h, gc_x, gc_y, 1, 1>( h0_ptr_, h0_pitch_, Q[0], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, -1, 1>(hu0_ptr_, hu0_pitch_, Q[1], nx_, ny_, boundary_conditions_);
|
||||
readBlock<w, h, gc_x, gc_y, 1, -1>(hv0_ptr_, hv0_pitch_, Q[2], nx_, ny_, boundary_conditions_);
|
||||
__syncthreads();
|
||||
|
||||
|
||||
@ -155,17 +143,13 @@ __global__ void WAFKernel(
|
||||
//Compute fluxes along the x axis and evolve
|
||||
computeFluxF(Q, F, g_, dx_, dt_);
|
||||
__syncthreads();
|
||||
evolveF2(Q, F, nx_, ny_, dx_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Fix boundary conditions
|
||||
noFlowBoundary2(Q, nx_, ny_);
|
||||
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Compute fluxes along the y axis and evolve
|
||||
computeFluxG(Q, F, g_, dy_, dt_);
|
||||
__syncthreads();
|
||||
evolveG2(Q, F, nx_, ny_, dy_, dt_);
|
||||
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
|
||||
__syncthreads();
|
||||
}
|
||||
//Step 1 => evolve y first, then x
|
||||
@ -173,27 +157,22 @@ __global__ void WAFKernel(
|
||||
//Compute fluxes along the y axis and evolve
|
||||
computeFluxG(Q, F, g_, dy_, dt_);
|
||||
__syncthreads();
|
||||
evolveG2(Q, F, nx_, ny_, dy_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Fix boundary conditions
|
||||
noFlowBoundary2(Q, nx_, ny_);
|
||||
evolveG<w, h, gc_x, gc_y, vars>(Q, F, dy_, dt_);
|
||||
__syncthreads();
|
||||
|
||||
//Compute fluxes along the x axis and evolve
|
||||
computeFluxF(Q, F, g_, dx_, dt_);
|
||||
__syncthreads();
|
||||
evolveF2(Q, F, nx_, ny_, dx_, dt_);
|
||||
evolveF<w, h, gc_x, gc_y, vars>(Q, F, dx_, dt_);
|
||||
__syncthreads();
|
||||
}
|
||||
|
||||
|
||||
|
||||
// Write to main memory for all internal cells
|
||||
writeBlock2(h1_ptr_, h1_pitch_,
|
||||
hu1_ptr_, hu1_pitch_,
|
||||
hv1_ptr_, hv1_pitch_,
|
||||
Q, nx_, ny_);
|
||||
writeBlock<w, h, gc_x, gc_y>( h1_ptr_, h1_pitch_, Q[0], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hu1_ptr_, hu1_pitch_, Q[1], nx_, ny_, 0, 1);
|
||||
writeBlock<w, h, gc_x, gc_y>(hv1_ptr_, hv1_pitch_, Q[2], nx_, ny_, 0, 1);
|
||||
}
|
||||
|
||||
} // extern "C"
|
533  GPUSimulators/cuda/SWECommon.h  Normal file
@ -0,0 +1,533 @@
|
||||
/*
|
||||
These CUDA functions implement different types of numerical flux
|
||||
functions for the shallow water equations
|
||||
|
||||
Copyright (C) 2016, 2017, 2018 SINTEF Digital
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
#include "limiters.h"
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
__device__ float3 F_func(const float3 Q, const float g) {
|
||||
float3 F;
|
||||
|
||||
F.x = Q.y; //hu
|
||||
F.y = Q.y*Q.y / Q.x + 0.5f*g*Q.x*Q.x; //hu*hu/h + 0.5f*g*h*h;
|
||||
F.z = Q.y*Q.z / Q.x; //hu*hv/h;
|
||||
|
||||
return F;
|
||||
}
/**
|
||||
* Superbee flux limiter for WAF.
|
||||
* Related to superbee limiter so that WAF_superbee(r, c) = 1 - (1-|c|)*superbee(r)
|
||||
* @param r_ the ratio of upwind change (see Toro 2001, p. 203/204)
|
||||
* @param c_ the courant number for wave k, dt*S_k/dx
|
||||
*/
|
||||
__device__ float WAF_superbee(float r_, float c_) {
|
||||
// r <= 0.0
|
||||
if (r_ <= 0.0f) {
|
||||
return 1.0f;
|
||||
}
|
||||
// 0.0 <= r <= 1/2
|
||||
else if (r_ <= 0.5f) {
|
||||
return 1.0f - 2.0f*(1.0f - fabsf(c_))*r_;
|
||||
}
|
||||
// 1/2 <= r <= 1
|
||||
else if (r_ <= 1.0f) {
|
||||
return fabs(c_);
|
||||
}
|
||||
// 1 <= r <= 2
|
||||
else if (r_ <= 2.0f) {
|
||||
return 1.0f - (1.0f - fabsf(c_))*r_;
|
||||
}
|
||||
// r >= 2
|
||||
else {
|
||||
return 2.0f*fabsf(c_) - 1.0f;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
__device__ float WAF_albada(float r_, float c_) {
|
||||
if (r_ <= 0.0f) {
|
||||
return 1.0f;
|
||||
}
|
||||
else {
|
||||
return 1.0f - (1.0f - fabsf(c_)) * r_ * (1.0f + r_) / (1.0f + r_*r_);
|
||||
}
|
||||
}
|
||||
|
||||
__device__ float WAF_minbee(float r_, float c_) {
|
||||
r_ = fmaxf(-1.0f, fminf(2.0f, r_));
|
||||
if (r_ <= 0.0f) {
|
||||
return 1.0f;
|
||||
}
|
||||
if (r_ >= 0.0f && r_ <= 1.0f) {
|
||||
return 1.0f - (1.0f - fabsf(c_)) * r_;
|
||||
}
|
||||
else {
|
||||
return fabsf(c_);
|
||||
}
|
||||
}
|
||||
|
||||
__device__ float WAF_minmod(float r_, float c_) {
|
||||
return 1.0f - (1.0f - fabsf(c_)) * fmaxf(0.0f, fminf(1.0f, r_));
|
||||
}
|
||||
|
||||
__device__ float limiterToWAFLimiter(float r_, float c_) {
|
||||
return 1.0f - (1.0f - fabsf(c_))*r_;
|
||||
}
|
||||
|
||||
// Compute h in the "star region", h^dagger
|
||||
__device__ __inline__ float computeHStar(float h_l, float h_r, float u_l, float u_r, float c_l, float c_r, float g_) {
|
||||
|
||||
//This estimate for the h* gives rise to spurious oscillations.
|
||||
//return 0.5f * (h_l+h_r) - 0.25f * (u_r-u_l)*(h_l+h_r)/(c_l+c_r);
|
||||
|
||||
const float h_tmp = 0.5f * (c_l + c_r) + 0.25f * (u_l - u_r);
|
||||
return h_tmp*h_tmp / g_;
|
||||
}
|
||||
|
||||
/**
|
||||
* Weighted average flux (Toro 2001, p 200) for interface {i+1/2}
|
||||
* @param r_ The flux limiter parameter (see Toro 2001, p. 203)
|
||||
* @param Q_l2 Q_{i-1}
|
||||
* @param Q_l1 Q_{i}
|
||||
* @param Q_r1 Q_{i+1}
|
||||
* @param Q_r2 Q_{i+2}
|
||||
*/
|
||||
__device__ float3 WAF_1D_flux(const float3 Q_l2, const float3 Q_l1, const float3 Q_r1, const float3 Q_r2, const float g_, const float dx_, const float dt_) {
|
||||
const float h_l = Q_l1.x;
|
||||
const float h_r = Q_r1.x;
|
||||
|
||||
const float h_l2 = Q_l2.x;
|
||||
const float h_r2 = Q_r2.x;
|
||||
|
||||
// Calculate velocities
|
||||
const float u_l = Q_l1.y / h_l;
|
||||
const float u_r = Q_r1.y / h_r;
|
||||
|
||||
const float u_l2 = Q_l2.y / h_l2;
|
||||
const float u_r2 = Q_r2.y / h_r2;
|
||||
|
||||
const float v_l = Q_l1.z / h_l;
|
||||
const float v_r = Q_r1.z / h_r;
|
||||
|
||||
const float v_l2 = Q_l2.z / h_l2;
|
||||
const float v_r2 = Q_r2.z / h_r2;
|
||||
|
||||
// Estimate the potential wave speeds
|
||||
const float c_l = sqrt(g_*h_l);
|
||||
const float c_r = sqrt(g_*h_r);
|
||||
|
||||
const float c_l2 = sqrt(g_*h_l2);
|
||||
const float c_r2 = sqrt(g_*h_r2);
|
||||
|
||||
// Compute h in the "star region", h^dagger
|
||||
const float h_dag_l = computeHStar(h_l2, h_l, u_l2, u_l, c_l2, c_l, g_);
|
||||
const float h_dag = computeHStar( h_l, h_r, u_l, u_r, c_l, c_r, g_);
|
||||
const float h_dag_r = computeHStar( h_r, h_r2, u_r, u_r2, c_r, c_r2, g_);
|
||||
|
||||
const float q_l_tmp = sqrt(0.5f * ( (h_dag+h_l)*h_dag ) ) / h_l;
|
||||
const float q_r_tmp = sqrt(0.5f * ( (h_dag+h_r)*h_dag ) ) / h_r;
|
||||
|
||||
const float q_l = (h_dag > h_l) ? q_l_tmp : 1.0f;
|
||||
const float q_r = (h_dag > h_r) ? q_r_tmp : 1.0f;
|
||||
|
||||
// Compute wave speed estimates
|
||||
const float S_l = u_l - c_l*q_l;
|
||||
const float S_r = u_r + c_r*q_r;
|
||||
const float S_star = ( S_l*h_r*(u_r - S_r) - S_r*h_l*(u_l - S_l) ) / ( h_r*(u_r - S_r) - h_l*(u_l - S_l) );
|
||||
|
||||
const float3 Q_star_l = h_l * (S_l - u_l) / (S_l - S_star) * make_float3(1.0, S_star, v_l);
|
||||
const float3 Q_star_r = h_r * (S_r - u_r) / (S_r - S_star) * make_float3(1.0, S_star, v_r);
|
||||
|
||||
// Estimate the fluxes in the four regions
|
||||
const float3 F_1 = F_func(Q_l1, g_);
|
||||
const float3 F_4 = F_func(Q_r1, g_);
|
||||
|
||||
const float3 F_2 = F_1 + S_l*(Q_star_l - Q_l1);
|
||||
const float3 F_3 = F_4 + S_r*(Q_star_r - Q_r1);
|
||||
//const float3 F_2 = F_func(Q_star_l, g_);
|
||||
//const float3 F_3 = F_func(Q_star_r, g_);
|
||||
|
||||
// Compute the courant numbers for the waves
|
||||
const float c_1 = S_l * dt_ / dx_;
|
||||
const float c_2 = S_star * dt_ / dx_;
|
||||
const float c_3 = S_r * dt_ / dx_;
|
||||
|
||||
// Compute the "upwind change" vectors for the i-3/2 and i+3/2 interfaces
|
||||
const float eps = 1.0e-6f;
|
||||
const float r_1 = desingularize( (c_1 > 0.0f) ? (h_dag_l - h_l2) : (h_dag_r - h_r), eps) / desingularize((h_dag - h_l), eps);
|
||||
const float r_2 = desingularize( (c_2 > 0.0f) ? (v_l - v_l2) : (v_r2 - v_r), eps ) / desingularize((v_r - v_l), eps);
|
||||
const float r_3 = desingularize( (c_3 > 0.0f) ? (h_l - h_dag_l) : (h_r2 - h_dag_r), eps ) / desingularize((h_r - h_dag), eps);
|
||||
|
||||
// Compute the limiter
|
||||
// We use h for the nonlinear waves, and v for the middle shear wave
|
||||
const float A_1 = copysign(1.0f, c_1) * limiterToWAFLimiter(generalized_minmod(r_1, 1.9f), c_1);
|
||||
const float A_2 = copysign(1.0f, c_2) * limiterToWAFLimiter(generalized_minmod(r_2, 1.9f), c_2);
|
||||
const float A_3 = copysign(1.0f, c_3) * limiterToWAFLimiter(generalized_minmod(r_3, 1.9f), c_3);
|
||||
|
||||
//Average the fluxes
|
||||
const float3 flux = 0.5f*( F_1 + F_4 )
|
||||
- 0.5f*( A_1 * (F_2 - F_1)
|
||||
+ A_2 * (F_3 - F_2)
|
||||
+ A_3 * (F_4 - F_3) );
|
||||
|
||||
return flux;
|
||||
}
/**
|
||||
* Central upwind flux function
|
||||
*/
|
||||
__device__ float3 CentralUpwindFlux(const float3 Qm, float3 Qp, const float g) {
|
||||
const float3 Fp = F_func(Qp, g);
|
||||
const float up = Qp.y / Qp.x; // hu / h
|
||||
const float cp = sqrt(g*Qp.x); // sqrt(g*h)
|
||||
|
||||
const float3 Fm = F_func(Qm, g);
|
||||
const float um = Qm.y / Qm.x; // hu / h
|
||||
const float cm = sqrt(g*Qm.x); // sqrt(g*h)
|
||||
|
||||
const float am = min(min(um-cm, up-cp), 0.0f); // largest negative wave speed
|
||||
const float ap = max(max(um+cm, up+cp), 0.0f); // largest positive wave speed
|
||||
|
||||
return ((ap*Fm - am*Fp) + ap*am*(Qp-Qm))/(ap-am);
|
||||
}
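A quick way to sanity-check a flux like this is the symmetric "lake at rest" state, for which the mass flux must vanish and the momentum flux must reduce to the hydrostatic pressure term 0.5*g*h*h. The test kernel below is a hypothetical sketch, not part of this commit; launch it with a single thread.

// Hypothetical sanity check: for Qm == Qp == (h, 0, 0) the central upwind flux
// reduces to F_func(Q, g), i.e. (0, 0.5*g*h*h, 0).
__global__ void testCentralUpwindFlux(float* out, const float h, const float g) {
    const float3 Q = make_float3(h, 0.0f, 0.0f);
    const float3 flux = CentralUpwindFlux(Q, Q, g);
    out[0] = flux.x;               // expected: 0 (no mass flux)
    out[1] = flux.y - 0.5f*g*h*h;  // expected: 0 (only hydrostatic pressure)
    out[2] = flux.z;               // expected: 0 (no transverse momentum flux)
}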
/**
|
||||
* Godunovs centered scheme (Toro 2001, p 165)
|
||||
*/
|
||||
__device__ float3 GodC_1D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_, const float dt_) {
|
||||
const float3 F_l = F_func(Q_l, g_);
|
||||
const float3 F_r = F_func(Q_r, g_);
|
||||
|
||||
const float3 Q_godc = 0.5f*(Q_l + Q_r) + (dt_/dx_)*(F_l - F_r);
|
||||
|
||||
return F_func(Q_godc, g_);
|
||||
}
/**
|
||||
* Harten-Lax-van Leer with contact discontinuity (Toro 2001, p 180)
|
||||
*/
|
||||
__device__ float3 HLL_flux(const float3 Q_l, const float3 Q_r, const float g_) {
|
||||
const float h_l = Q_l.x;
|
||||
const float h_r = Q_r.x;
|
||||
|
||||
// Calculate velocities
|
||||
const float u_l = Q_l.y / h_l;
|
||||
const float u_r = Q_r.y / h_r;
|
||||
|
||||
// Estimate the potential wave speeds
|
||||
const float c_l = sqrt(g_*h_l);
|
||||
const float c_r = sqrt(g_*h_r);
|
||||
|
||||
// Compute h in the "star region", h^dagger
|
||||
const float h_dag = 0.5f * (h_l+h_r) - 0.25f * (u_r-u_l)*(h_l+h_r)/(c_l+c_r);
|
||||
|
||||
const float q_l_tmp = sqrt(0.5f * ( (h_dag+h_l)*h_dag / (h_l*h_l) ) );
|
||||
const float q_r_tmp = sqrt(0.5f * ( (h_dag+h_r)*h_dag / (h_r*h_r) ) );
|
||||
|
||||
const float q_l = (h_dag > h_l) ? q_l_tmp : 1.0f;
|
||||
const float q_r = (h_dag > h_r) ? q_r_tmp : 1.0f;
|
||||
|
||||
// Compute wave speed estimates
|
||||
const float S_l = u_l - c_l*q_l;
|
||||
const float S_r = u_r + c_r*q_r;
|
||||
|
||||
//Upwind selection
|
||||
if (S_l >= 0.0f) {
|
||||
return F_func(Q_l, g_);
|
||||
}
|
||||
else if (S_r <= 0.0f) {
|
||||
return F_func(Q_r, g_);
|
||||
}
|
||||
//Or estimate flux in the star region
|
||||
else {
|
||||
const float3 F_l = F_func(Q_l, g_);
|
||||
const float3 F_r = F_func(Q_r, g_);
|
||||
const float3 flux = (S_r*F_l - S_l*F_r + S_r*S_l*(Q_r - Q_l)) / (S_r-S_l);
|
||||
return flux;
|
||||
}
|
||||
}
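The three branches above implement the usual HLL upwind selection; in formula form,

F^{\mathrm{HLL}}_{i+1/2} =
\begin{cases}
F(Q_l), & S_l \ge 0,\\
\dfrac{S_r F(Q_l) - S_l F(Q_r) + S_l S_r (Q_r - Q_l)}{S_r - S_l}, & S_l < 0 < S_r,\\
F(Q_r), & S_r \le 0,
\end{cases}
\qquad S_l = u_l - c_l q_l, \quad S_r = u_r + c_r q_r,

with q_l, q_r the depth-based correction factors computed from h^\dagger above.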
/**
|
||||
* Harten-Lax-van Leer with contact discontinuity (Toro 2001, p 181)
|
||||
*/
|
||||
__device__ float3 HLLC_flux(const float3 Q_l, const float3 Q_r, const float g_) {
|
||||
const float h_l = Q_l.x;
|
||||
const float h_r = Q_r.x;
|
||||
|
||||
// Calculate velocities
|
||||
const float u_l = Q_l.y / h_l;
|
||||
const float u_r = Q_r.y / h_r;
|
||||
|
||||
// Estimate the potential wave speeds
|
||||
const float c_l = sqrt(g_*h_l);
|
||||
const float c_r = sqrt(g_*h_r);
|
||||
|
||||
// Compute h in the "star region", h^dagger
|
||||
const float h_dag = 0.5f * (h_l+h_r) - 0.25f * (u_r-u_l)*(h_l+h_r)/(c_l+c_r);
|
||||
|
||||
const float q_l_tmp = sqrt(0.5f * ( (h_dag+h_l)*h_dag / (h_l*h_l) ) );
|
||||
const float q_r_tmp = sqrt(0.5f * ( (h_dag+h_r)*h_dag / (h_r*h_r) ) );
|
||||
|
||||
const float q_l = (h_dag > h_l) ? q_l_tmp : 1.0f;
|
||||
const float q_r = (h_dag > h_r) ? q_r_tmp : 1.0f;
|
||||
|
||||
// Compute wave speed estimates
|
||||
const float S_l = u_l - c_l*q_l;
|
||||
const float S_r = u_r + c_r*q_r;
|
||||
const float S_star = ( S_l*h_r*(u_r - S_r) - S_r*h_l*(u_l - S_l) ) / ( h_r*(u_r - S_r) - h_l*(u_l - S_l) );
|
||||
|
||||
const float3 F_l = F_func(Q_l, g_);
|
||||
const float3 F_r = F_func(Q_r, g_);
|
||||
|
||||
//Upwind selection
|
||||
if (S_l >= 0.0f) {
|
||||
return F_l;
|
||||
}
|
||||
else if (S_r <= 0.0f) {
|
||||
return F_r;
|
||||
}
|
||||
//Or estimate flux in the "left star" region
else if (S_l <= 0.0f && 0.0f <= S_star) {
const float v_l = Q_l.z / h_l;
const float3 Q_star_l = h_l * (S_l - u_l) / (S_l - S_star) * make_float3(1, S_star, v_l);
const float3 flux = F_l + S_l*(Q_star_l - Q_l);
return flux;
}
//Or estimate flux in the "right star" region
else if (S_star <= 0.0f && 0.0f <= S_r) {
const float v_r = Q_r.z / h_r;
const float3 Q_star_r = h_r * (S_r - u_r) / (S_r - S_star) * make_float3(1, S_star, v_r);
const float3 flux = F_r + S_r*(Q_star_r - Q_r);
return flux;
}
else {
return make_float3(-99999.9f, -99999.9f, -99999.9f); //Something wrong here
}
}
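For reference, the star-region quantities used in the two middle branches are, for K \in \{l, r\},

S_* = \frac{S_l h_r (u_r - S_r) - S_r h_l (u_l - S_l)}{h_r (u_r - S_r) - h_l (u_l - S_l)},
\qquad
Q_{*K} = h_K \frac{S_K - u_K}{S_K - S_*}
\begin{pmatrix} 1 \\ S_* \\ v_K \end{pmatrix},
\qquad
F_{*K} = F(Q_K) + S_K \bigl(Q_{*K} - Q_K\bigr).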
/**
|
||||
* Lax-Friedrichs flux (Toro 2001, p 163)
|
||||
*/
|
||||
__device__ float3 LxF_1D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_, const float dt_) {
|
||||
const float3 F_l = F_func(Q_l, g_);
|
||||
const float3 F_r = F_func(Q_r, g_);
|
||||
|
||||
return 0.5f*(F_l + F_r) + (dx_/(2.0f*dt_))*(Q_l - Q_r);
|
||||
}
/**
|
||||
* Lax-Friedrichs extended to 2D
|
||||
*/
|
||||
__device__ float3 LxF_2D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_, const float dt_) {
|
||||
const float3 F_l = F_func(Q_l, g_);
|
||||
const float3 F_r = F_func(Q_r, g_);
|
||||
|
||||
//Note numerical diffusion for 2D here (0.25)
|
||||
return 0.5f*(F_l + F_r) + (dx_/(4.0f*dt_))*(Q_l - Q_r);
|
||||
}
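Written out, the two Lax-Friedrichs variants above are

F^{\mathrm{LxF}}_{i+1/2} = \tfrac{1}{2}\bigl(F(Q_l) + F(Q_r)\bigr) + \frac{\Delta x}{2\Delta t}(Q_l - Q_r) \quad\text{(1D)},
\qquad
F^{\mathrm{LxF}}_{i+1/2} = \tfrac{1}{2}\bigl(F(Q_l) + F(Q_r)\bigr) + \frac{\Delta x}{4\Delta t}(Q_l - Q_r) \quad\text{(2D)},

where the 0.25 factor in the 2D variant reduces the numerical diffusion added per direction when the flux is applied dimension by dimension (see the code comment).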
/**
 * Richtmyer / Two-step Lax-Wendroff flux (Toro 2001, p 164)
 */
__device__ float3 LxW2_1D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_, const float dt_) {
const float3 F_l = F_func(Q_l, g_);
const float3 F_r = F_func(Q_r, g_);

const float3 Q_lw2 = 0.5f*(Q_l + Q_r) + (dt_/(2.0f*dx_))*(F_l - F_r);

return F_func(Q_lw2, g_);
}
/**
 * First ORder CEntered (FORCE) flux (Toro 2001, p. 163)
 */
__device__ float3 FORCE_1D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_, const float dt_) {
const float3 F_lf = LxF_1D_flux(Q_l, Q_r, g_, dx_, dt_);
const float3 F_lw2 = LxW2_1D_flux(Q_l, Q_r, g_, dx_, dt_);
return 0.5f*(F_lf + F_lw2);
}
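The FORCE flux above is the arithmetic mean of the Lax-Friedrichs and Richtmyer (two-step Lax-Wendroff) fluxes defined earlier in this file:

F^{\mathrm{FORCE}}_{i+1/2} = \tfrac{1}{2}\Bigl(F^{\mathrm{LxF}}_{i+1/2} + F^{\mathrm{LxW2}}_{i+1/2}\Bigr),
\qquad
F^{\mathrm{LxW2}}_{i+1/2} = F\Bigl(\tfrac{1}{2}(Q_l + Q_r) + \frac{\Delta t}{2\Delta x}\bigl(F(Q_l) - F(Q_r)\bigr)\Bigr).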
template<int w, int h, int gc_x, int gc_y, int vars>
|
||||
__device__ void writeCfl(float Q[vars][h+2*gc_y][w+2*gc_x],
|
||||
float shmem[h+2*gc_y][w+2*gc_x],
|
||||
const int nx_, const int ny_,
|
||||
const float dx_, const float dy_, const float g_,
|
||||
float* output_) {
|
||||
//Index of thread within block
|
||||
const int tx = threadIdx.x + gc_x;
|
||||
const int ty = threadIdx.y + gc_y;
|
||||
|
||||
//Index of cell within domain
|
||||
const int ti = blockDim.x*blockIdx.x + tx;
|
||||
const int tj = blockDim.y*blockIdx.y + ty;
|
||||
|
||||
//Only internal cells
|
||||
if (ti < nx_+gc_x && tj < ny_+gc_y) {
|
||||
const float h = Q[0][ty][tx];
|
||||
const float u = Q[1][ty][tx] / h;
|
||||
const float v = Q[2][ty][tx] / h;
|
||||
|
||||
const float max_u = dx_ / (fabsf(u) + sqrtf(g_*h));
|
||||
const float max_v = dy_ / (fabsf(v) + sqrtf(g_*h));
|
||||
|
||||
shmem[ty][tx] = fminf(max_u, max_v);
|
||||
}
|
||||
__syncthreads();
|
||||
|
||||
//One row of threads loops over all rows
|
||||
if (ti < nx_+gc_x && tj < ny_+gc_y) {
|
||||
if (ty == gc_y) {
|
||||
float min_val = shmem[ty][tx];
|
||||
const int max_y = min(h, ny_+gc_y - tj);
|
||||
for (int j=gc_y; j<max_y+gc_y; j++) {
|
||||
min_val = fminf(min_val, shmem[j][tx]);
|
||||
}
|
||||
shmem[ty][tx] = min_val;
|
||||
}
|
||||
}
|
||||
__syncthreads();
|
||||
|
||||
//One thread loops over the first row to find the per-block minimum
|
||||
if (tx == gc_x && ty == gc_y) {
|
||||
float min_val = shmem[ty][tx];
|
||||
const int max_x = min(w, nx_+gc_x - ti);
|
||||
for (int i=gc_x; i<max_x+gc_x; ++i) {
|
||||
min_val = fminf(min_val, shmem[ty][i]);
|
||||
}
|
||||
|
||||
const int idx = gridDim.x*blockIdx.y + blockIdx.x;
|
||||
output_[idx] = min_val;
|
||||
}
|
||||
}
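writeCfl stores, per CUDA block, the minimum over the block's interior cells of the largest stable time step; a further reduction over output_ (assumed to happen on the host or in a separate kernel, not shown in this file) then yields the global constraint

\Delta t \le C \cdot \min_{i,j} \min\!\left( \frac{\Delta x}{|u_{ij}| + \sqrt{g h_{ij}}},\; \frac{\Delta y}{|v_{ij}| + \sqrt{g h_{ij}}} \right),

with C \le 1 a Courant number chosen by the caller.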
|
||||
|
546
GPUSimulators/cuda/common.h
Normal file
546
GPUSimulators/cuda/common.h
Normal file
@ -0,0 +1,546 @@
|
||||
/*
|
||||
This OpenCL kernel implements the Kurganov-Petrova numerical scheme
|
||||
for the shallow water equations, described in
|
||||
A. Kurganov & Guergana Petrova
|
||||
A Second-Order Well-Balanced Positivity Preserving Central-Upwind
|
||||
Scheme for the Saint-Venant System Communications in Mathematical
|
||||
Sciences, 5 (2007), 133-160.
|
||||
|
||||
Copyright (C) 2016 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
|
||||
|
||||
/**
|
||||
* Float3 operators
|
||||
*/
|
||||
inline __device__ float3 operator*(const float a, const float3 b) {
|
||||
return make_float3(a*b.x, a*b.y, a*b.z);
|
||||
}
|
||||
|
||||
inline __device__ float3 operator/(const float3 a, const float b) {
|
||||
return make_float3(a.x/b, a.y/b, a.z/b);
|
||||
}
|
||||
|
||||
inline __device__ float3 operator-(const float3 a, const float3 b) {
|
||||
return make_float3(a.x-b.x, a.y-b.y, a.z-b.z);
|
||||
}
|
||||
|
||||
inline __device__ float3 operator+(const float3 a, const float3 b) {
|
||||
return make_float3(a.x+b.x, a.y+b.y, a.z+b.z);
|
||||
}
|
||||
|
||||
/**
|
||||
* Float4 operators
|
||||
*/
|
||||
inline __device__ float4 operator*(const float a, const float4 b) {
|
||||
return make_float4(a*b.x, a*b.y, a*b.z, a*b.w);
|
||||
}
|
||||
|
||||
inline __device__ float4 operator/(const float4 a, const float b) {
|
||||
return make_float4(a.x/b, a.y/b, a.z/b, a.w/b);
|
||||
}
|
||||
|
||||
inline __device__ float4 operator-(const float4 a, const float4 b) {
|
||||
return make_float4(a.x-b.x, a.y-b.y, a.z-b.z, a.w-b.w);
|
||||
}
|
||||
|
||||
inline __device__ float4 operator+(const float4 a, const float4 b) {
|
||||
return make_float4(a.x+b.x, a.y+b.y, a.z+b.z, a.w+b.w);
|
||||
}
inline __device__ __host__ float clamp(const float f, const float a, const float b) {
|
||||
return fmaxf(a, fminf(f, b));
|
||||
}
|
||||
|
||||
inline __device__ __host__ int clamp(const int f, const int a, const int b) {
|
||||
return (f < b) ? ( (f > a) ? f : a) : b;
|
||||
}
__device__ float desingularize(float x_, float eps_) {
|
||||
return copysign(1.0f, x_)*fmaxf(fabsf(x_), fminf(x_*x_/(2.0f*eps_)+0.5f*eps_, eps_));
|
||||
}
/**
|
||||
* Returns the step stored in the leftmost 16 bits
|
||||
* of the 32 bit step-order integer
|
||||
*/
|
||||
inline __device__ int getStep(int step_order_) {
|
||||
return step_order_ >> 16;
|
||||
}
|
||||
|
||||
/**
|
||||
* Returns the order stored in the rightmost 16 bits
|
||||
* of the 32 bit step-order integer
|
||||
*/
|
||||
inline __device__ int getOrder(int step_order_) {
|
||||
return step_order_ & 0x0000FFFF;
|
||||
}
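A minimal sketch of the inverse operation, i.e. how a caller could pack the two 16-bit fields that getStep()/getOrder() decode. The helper name packStepOrder is hypothetical and not part of this commit:

// Hypothetical host-side helper (assumption): packs a Runge-Kutta substep and
// scheme order into the single 32-bit integer decoded by getStep()/getOrder().
inline int packStepOrder(int step, int order) {
    return (step << 16) | (order & 0x0000FFFF);
}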
enum BoundaryCondition {
|
||||
Dirichlet = 0,
|
||||
Neumann = 1,
|
||||
Periodic = 2,
|
||||
Reflective = 3
|
||||
};
|
||||
|
||||
inline __device__ BoundaryCondition getBCNorth(int bc_) {
|
||||
return static_cast<BoundaryCondition>((bc_ >> 24) & 0x0000000F);
|
||||
}
|
||||
|
||||
inline __device__ BoundaryCondition getBCSouth(int bc_) {
|
||||
return static_cast<BoundaryCondition>((bc_ >> 16) & 0x0000000F);
|
||||
}
|
||||
|
||||
inline __device__ BoundaryCondition getBCEast(int bc_) {
|
||||
return static_cast<BoundaryCondition>((bc_ >> 8) & 0x0000000F);
|
||||
}
|
||||
|
||||
inline __device__ BoundaryCondition getBCWest(int bc_) {
|
||||
return static_cast<BoundaryCondition>((bc_ >> 0) & 0x0000000F);
|
||||
}
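Correspondingly, a minimal sketch of how the four per-edge boundary conditions could be packed into the integer that the getBC* helpers decode. The name packBoundaryConditions is hypothetical and not part of this commit:

// Hypothetical helper (assumption): mirrors the bit layout read by
// getBCNorth/getBCSouth/getBCEast/getBCWest above (4 bits per edge).
inline int packBoundaryConditions(BoundaryCondition north, BoundaryCondition south,
                                  BoundaryCondition east, BoundaryCondition west) {
    return (north << 24) | (south << 16) | (east << 8) | (west << 0);
}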
// West boundary
|
||||
template<int w, int h, int gc_x, int gc_y, int sign>
|
||||
__device__ void bcWestReflective(float Q[h+2*gc_y][w+2*gc_x],
|
||||
const int nx_, const int ny_) {
|
||||
for (int j=threadIdx.y; j<h+2*gc_y; j+=h) {
|
||||
const int i = threadIdx.x + gc_x;
|
||||
const int ti = blockDim.x*blockIdx.x + i;
|
||||
|
||||
if (gc_x >= 1 && ti == gc_x) {
|
||||
Q[j][i-1] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_x >= 2 && ti == gc_x + 1) {
|
||||
Q[j][i-3] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_x >= 3 && ti == gc_x + 2) {
|
||||
Q[j][i-5] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_x >= 4 && ti == gc_x + 3) {
|
||||
Q[j][i-7] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_x >= 5 && ti == gc_x + 4) {
|
||||
Q[j][i-9] = sign*Q[j][i];
|
||||
}
|
||||
}
|
||||
}
// East boundary
|
||||
template<int w, int h, int gc_x, int gc_y, int sign>
|
||||
__device__ void bcEastReflective(float Q[h+2*gc_y][w+2*gc_x],
|
||||
const int nx_, const int ny_) {
|
||||
for (int j=threadIdx.y; j<h+2*gc_y; j+=h) {
|
||||
const int i = threadIdx.x + gc_x;
|
||||
const int ti = blockDim.x*blockIdx.x + i;
|
||||
|
||||
if (gc_x >= 1 && ti == nx_ + gc_x - 1) {
|
||||
Q[j][i+1] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_x >= 2 && ti == nx_ + gc_x - 2) {
|
||||
Q[j][i+3] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_x >= 3 && ti == nx_ + gc_x - 3) {
|
||||
Q[j][i+5] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_x >= 4 && ti == nx_ + gc_x - 4) {
|
||||
Q[j][i+7] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_x >= 5 && ti == nx_ + gc_x - 5) {
|
||||
Q[j][i+9] = sign*Q[j][i];
|
||||
}
|
||||
}
|
||||
}
// South boundary
|
||||
template<int w, int h, int gc_x, int gc_y, int sign>
|
||||
__device__ void bcSouthReflective(float Q[h+2*gc_y][w+2*gc_x],
|
||||
const int nx_, const int ny_) {
|
||||
for (int i=threadIdx.x; i<w+2*gc_x; i+=w) {
|
||||
const int j = threadIdx.y + gc_y;
|
||||
const int tj = blockDim.y*blockIdx.y + j;
|
||||
|
||||
if (gc_y >= 1 && tj == gc_y) {
|
||||
Q[j-1][i] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_y >= 2 && tj == gc_y + 1) {
|
||||
Q[j-3][i] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_y >= 3 && tj == gc_y + 2) {
|
||||
Q[j-5][i] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_y >= 4 && tj == gc_y + 3) {
|
||||
Q[j-7][i] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_y >= 5 && tj == gc_y + 4) {
|
||||
Q[j-9][i] = sign*Q[j][i];
|
||||
}
|
||||
}
|
||||
}
// North boundary
|
||||
template<int w, int h, int gc_x, int gc_y, int sign>
|
||||
__device__ void bcNorthReflective(float Q[h+2*gc_y][w+2*gc_x], const int nx_, const int ny_) {
|
||||
for (int i=threadIdx.x; i<w+2*gc_x; i+=w) {
|
||||
const int j = threadIdx.y + gc_y;
|
||||
const int tj = blockDim.y*blockIdx.y + j;
|
||||
|
||||
if (gc_y >= 1 && tj == ny_ + gc_y - 1) {
|
||||
Q[j+1][i] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_y >= 2 && tj == ny_ + gc_y - 2) {
|
||||
Q[j+3][i] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_y >= 3 && tj == ny_ + gc_y - 3) {
|
||||
Q[j+5][i] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_y >= 4 && tj == ny_ + gc_y - 4) {
|
||||
Q[j+7][i] = sign*Q[j][i];
|
||||
}
|
||||
if (gc_y >= 5 && tj == ny_ + gc_y - 5) {
|
||||
Q[j+9][i] = sign*Q[j][i];
|
||||
}
|
||||
}
|
||||
}
/**
|
||||
* Alter the index k so that it gives periodic boundary conditions when reading
|
||||
*/
|
||||
template<int gc_x>
|
||||
inline __device__ int handlePeriodicBoundaryX(int k, int nx_, int boundary_conditions_) {
|
||||
const int gc_pad = gc_x;
|
||||
|
||||
//West boundary: add an offset to read from east of domain
|
||||
if (gc_x > 0) {
|
||||
if ((k < gc_pad)
|
||||
&& getBCWest(boundary_conditions_) == Periodic) {
|
||||
k += (nx_+2*gc_x - 2*gc_pad);
|
||||
}
|
||||
//East boundary: subtract an offset to read from west of domain
|
||||
else if ((k >= nx_+2*gc_x-gc_pad)
|
||||
&& getBCEast(boundary_conditions_) == Periodic) {
|
||||
k -= (nx_+2*gc_x - 2*gc_pad);
|
||||
}
|
||||
}
|
||||
|
||||
return k;
|
||||
}
|
||||
|
||||
/**
|
||||
* Alter the index l so that it gives periodic boundary conditions when reading
|
||||
*/
|
||||
template<int gc_y>
|
||||
inline __device__ int handlePeriodicBoundaryY(int l, int ny_, int boundary_conditions_) {
|
||||
const int gc_pad = gc_y;
|
||||
|
||||
//South boundary: add an offset to read from north of domain
|
||||
if (gc_y > 0) {
|
||||
if ((l < gc_pad)
|
||||
&& getBCSouth(boundary_conditions_) == Periodic) {
|
||||
l += (ny_+2*gc_y - 2*gc_pad);
|
||||
}
|
||||
//North boundary: subtract an offset to read from south of domain
|
||||
else if ((l >= ny_+2*gc_y-gc_pad)
|
||||
&& getBCNorth(boundary_conditions_) == Periodic) {
|
||||
l -= (ny_+2*gc_y - 2*gc_pad);
|
||||
}
|
||||
}
|
||||
|
||||
return l;
|
||||
}
|
||||
|
||||
|
||||
template<int w, int h, int gc_x, int gc_y, int sign_x, int sign_y>
|
||||
inline __device__
|
||||
void handleReflectiveBoundary(
|
||||
float Q[h+2*gc_y][w+2*gc_x],
|
||||
const int nx_, const int ny_,
|
||||
const int boundary_conditions_) {
|
||||
|
||||
//Handle reflective boundary conditions
|
||||
if (getBCNorth(boundary_conditions_) == Reflective) {
|
||||
bcNorthReflective<w, h, gc_x, gc_y, sign_y>(Q, nx_, ny_);
|
||||
__syncthreads();
|
||||
}
|
||||
if (getBCSouth(boundary_conditions_) == Reflective) {
|
||||
bcSouthReflective<w, h, gc_x, gc_y, sign_y>(Q, nx_, ny_);
|
||||
__syncthreads();
|
||||
}
|
||||
if (getBCEast(boundary_conditions_) == Reflective) {
|
||||
bcEastReflective<w, h, gc_x, gc_y, sign_x>(Q, nx_, ny_);
|
||||
__syncthreads();
|
||||
}
|
||||
if (getBCWest(boundary_conditions_) == Reflective) {
|
||||
bcWestReflective<w, h, gc_x, gc_y, sign_x>(Q, nx_, ny_);
|
||||
__syncthreads();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Reads a block of data with ghost cells
|
||||
*/
|
||||
template<int w, int h, int gc_x, int gc_y, int sign_x, int sign_y>
|
||||
inline __device__ void readBlock(float* ptr_, int pitch_,
|
||||
float Q[h+2*gc_y][w+2*gc_x],
|
||||
const int nx_, const int ny_,
|
||||
const int boundary_conditions_) {
|
||||
//Index of block within domain
|
||||
const int bx = blockDim.x * blockIdx.x;
|
||||
const int by = blockDim.y * blockIdx.y;
|
||||
|
||||
//Read into shared memory
|
||||
//Loop over all variables
|
||||
for (int j=threadIdx.y; j<h+2*gc_y; j+=h) {
|
||||
//Handle periodic boundary conditions here
|
||||
int l = handlePeriodicBoundaryY<gc_y>(by + j, ny_, boundary_conditions_);
|
||||
l = min(l, ny_+2*gc_y-1);
|
||||
float* row = (float*) ((char*) ptr_ + pitch_*l);
|
||||
|
||||
for (int i=threadIdx.x; i<w+2*gc_x; i+=w) {
|
||||
//Handle periodic boundary conditions here
|
||||
int k = handlePeriodicBoundaryX<gc_x>(bx + i, nx_, boundary_conditions_);
|
||||
k = min(k, nx_+2*gc_x-1);
|
||||
|
||||
//Read from global memory
|
||||
Q[j][i] = row[k];
|
||||
}
|
||||
}
|
||||
__syncthreads();
|
||||
|
||||
handleReflectiveBoundary<w, h, gc_x, gc_y, sign_x, sign_y>(Q, nx_, ny_, boundary_conditions_);
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Writes a block of data to global memory for the shallow water equations.
|
||||
*/
|
||||
template<int w, int h, int gc_x, int gc_y>
|
||||
inline __device__ void writeBlock(float* ptr_, int pitch_,
|
||||
float shmem[h+2*gc_y][w+2*gc_x],
|
||||
const int nx_, const int ny_,
|
||||
int rk_step_, int rk_order_) {
|
||||
|
||||
//Index of cell within domain
|
||||
const int ti = blockDim.x*blockIdx.x + threadIdx.x + gc_x;
|
||||
const int tj = blockDim.y*blockIdx.y + threadIdx.y + gc_y;
|
||||
|
||||
//Only write internal cells
|
||||
if (ti < nx_+gc_x && tj < ny_+gc_y) {
|
||||
//Index of thread within block
|
||||
const int tx = threadIdx.x + gc_x;
|
||||
const int ty = threadIdx.y + gc_y;
|
||||
|
||||
float* const row = (float*) ((char*) ptr_ + pitch_*tj);
|
||||
|
||||
//Handle runge-kutta timestepping here
|
||||
row[ti] = shmem[ty][tx];
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* SSPRK1 (forward Euler)
|
||||
* u^1 = u^n + dt*f(u^n)
|
||||
*/
|
||||
if (rk_order_ == 1) {
|
||||
row[ti] = shmem[ty][tx];
|
||||
}
|
||||
/**
|
||||
* SSPRK2
|
||||
* u^1 = u^n + dt*f(u^n)
|
||||
* u^n+1 = 1/2*u^n + 1/2*(u^1 + dt*f(u^1))
|
||||
*/
|
||||
else if (rk_order_ == 2) {
|
||||
if (rk_step_ == 0) {
|
||||
row[ti] = shmem[ty][tx];
|
||||
}
|
||||
else if (rk_step_ == 1) {
|
||||
row[ti] = 0.5f*row[ti] + 0.5f*shmem[ty][tx];
|
||||
}
|
||||
}
|
||||
/**
|
||||
* SSPRK3
|
||||
* u^1 = u^n + dt*f(u^n)
|
||||
* u^2 = 3/4 * u^n + 1/4 * (u^1 + dt*f(u^1))
|
||||
* u^n+1 = 1/3 * u^n + 2/3 * (u^2 + dt*f(u^2))
|
||||
* FIXME: This is not correct now, need a temporary to hold intermediate step u^2
|
||||
*/
|
||||
else if (rk_order_ == 3) {
|
||||
if (rk_step_ == 0) {
|
||||
row[ti] = shmem[ty][tx];
|
||||
}
|
||||
else if (rk_step_ == 1) {
|
||||
row[ti] = 0.75f*row[ti] + 0.25f*shmem[ty][tx];
|
||||
}
|
||||
else if (rk_step_ == 2) {
|
||||
const float t = 1.0f / 3.0f; //Not representable in base 2
|
||||
row[ti] = t*row[ti] + (1.0f-t)*shmem[ty][tx];
|
||||
}
|
||||
}
|
||||
}
|
||||
}
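The in-place blends above correspond to the usual convex-combination form of the SSP Runge-Kutta schemes, with row[ti] holding the previously stored value and shmem the freshly advanced stage:

u^{n+1} = \tfrac{1}{2} u^{n} + \tfrac{1}{2}\bigl(u^{(1)} + \Delta t\, f(u^{(1)})\bigr) \quad\text{(SSPRK2)},
\qquad
u^{(2)} = \tfrac{3}{4} u^{n} + \tfrac{1}{4}\bigl(u^{(1)} + \Delta t\, f(u^{(1)})\bigr),\;
u^{n+1} = \tfrac{1}{3} u^{n} + \tfrac{2}{3}\bigl(u^{(2)} + \Delta t\, f(u^{(2)})\bigr) \quad\text{(SSPRK3)}.

As the FIXME in the code notes, the third-order variant additionally needs a temporary buffer for the intermediate stage u^{(2)}, which the current in-place update does not provide.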
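/**
 * Evolves the solution in time along the x axis (dimensional splitting)
 */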
template<int w, int h, int gc_x, int gc_y, int vars>
|
||||
__device__ void evolveF(float Q[vars][h+2*gc_y][w+2*gc_x],
|
||||
float F[vars][h+2*gc_y][w+2*gc_x],
|
||||
const float dx_, const float dt_) {
|
||||
for (int var=0; var < vars; ++var) {
|
||||
for (int j=threadIdx.y; j<h+2*gc_y; j+=h) {
|
||||
for (int i=threadIdx.x+gc_x; i<w+gc_x; i+=w) {
|
||||
Q[var][j][i] = Q[var][j][i] + (F[var][j][i-1] - F[var][j][i]) * dt_ / dx_;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Evolves the solution in time along the y axis (dimensional splitting)
|
||||
*/
|
||||
template<int w, int h, int gc_x, int gc_y, int vars>
|
||||
__device__ void evolveG(float Q[vars][h+2*gc_y][w+2*gc_x],
|
||||
float G[vars][h+2*gc_y][w+2*gc_x],
|
||||
const float dy_, const float dt_) {
|
||||
for (int var=0; var < vars; ++var) {
|
||||
for (int j=threadIdx.y+gc_y; j<h+gc_y; j+=h) {
|
||||
for (int i=threadIdx.x; i<w+2*gc_x; i+=w) {
|
||||
Q[var][j][i] = Q[var][j][i] + (G[var][j-1][i] - G[var][j][i]) * dt_ / dy_;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
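Both evolve functions apply the same conservative, dimensionally split update, one direction at a time:

Q_{i,j} \leftarrow Q_{i,j} + \frac{\Delta t}{\Delta x}\bigl(F_{i-1/2,j} - F_{i+1/2,j}\bigr),
\qquad
Q_{i,j} \leftarrow Q_{i,j} + \frac{\Delta t}{\Delta y}\bigl(G_{i,j-1/2} - G_{i,j+1/2}\bigr),

where, by the indexing convention above, F[var][j][i-1] and F[var][j][i] hold the fluxes through the west and east faces of cell (i, j), and similarly for G along y.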
/**
|
||||
* Helper function for debugging etc.
|
||||
*/
|
||||
template<int shmem_width, int shmem_height, int vars>
|
||||
__device__ void memset(float Q[vars][shmem_height][shmem_width], float value) {
|
||||
for (int k=0; k<vars; ++k) {
|
||||
for (int j=threadIdx.y; j<shmem_height; ++j) {
|
||||
for (int i=threadIdx.x; i<shmem_width; ++i) {
|
||||
Q[k][j][i] = value;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
template <unsigned int threads>
|
||||
__device__ void reduce_max(float* data, unsigned int n) {
|
||||
__shared__ float sdata[threads];
|
||||
unsigned int tid = threadIdx.x;
|
||||
|
||||
//Reduce to "threads" elements
|
||||
sdata[tid] = FLT_MIN;
|
||||
for (unsigned int i=tid; i<n; i += threads) {
|
||||
sdata[tid] = max(sdata[tid], data[i]);
|
||||
}
|
||||
__syncthreads();
|
||||
|
||||
//Now, reduce all elements into a single element
|
||||
if (threads >= 512) {
|
||||
if (tid < 256) {
|
||||
sdata[tid] = max(sdata[tid], sdata[tid + 256]);
|
||||
}
|
||||
__syncthreads();
|
||||
}
|
||||
if (threads >= 256) {
|
||||
if (tid < 128) {
|
||||
sdata[tid] = max(sdata[tid], sdata[tid + 128]);
|
||||
}
|
||||
__syncthreads();
|
||||
}
|
||||
if (threads >= 128) {
|
||||
if (tid < 64) {
|
||||
sdata[tid] = max(sdata[tid], sdata[tid + 64]);
|
||||
}
|
||||
__syncthreads();
|
||||
}
|
||||
if (tid < 32) {
|
||||
volatile float* sdata_volatile = sdata;
|
||||
if (threads >= 64) {
|
||||
sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 32]);
|
||||
}
|
||||
if (tid < 16) {
|
||||
if (threads >= 32) sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 16]);
|
||||
if (threads >= 16) sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 8]);
|
||||
if (threads >= 8) sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 4]);
|
||||
if (threads >= 4) sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 2]);
|
||||
if (threads >= 2) sdata_volatile[tid] = max(sdata_volatile[tid], sdata_volatile[tid + 1]);
|
||||
}
|
||||
|
||||
if (tid == 0) {
|
||||
data[0] = sdata_volatile[0];
|
||||
}
|
||||
}
|
||||
}
@ -17,7 +17,7 @@ You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
|
||||
#pragma once
|
||||
|
||||
|
||||
|
||||
@ -46,21 +46,15 @@ __device__ __inline__ float minmodSlope(float left, float center, float right, f
|
||||
/**
|
||||
* Reconstructs a minmod slope for a whole block along the abscissa
|
||||
*/
|
||||
__device__ void minmodSlopeX(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float Qx[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
template<int w, int h, int gc_x, int gc_y, int vars>
|
||||
__device__ void minmodSlopeX(float Q[vars][h+2*gc_y][w+2*gc_x],
|
||||
float Qx[vars][h+2*gc_y][w+2*gc_x],
|
||||
const float theta_) {
|
||||
//Index of thread within block
|
||||
const int tx = get_local_id(0);
|
||||
const int ty = get_local_id(1);
|
||||
|
||||
//Reconstruct slopes along x axis
|
||||
{
|
||||
const int j = ty;
|
||||
const int l = j + 2; //Skip ghost cells
|
||||
for (int i=tx; i<BLOCK_WIDTH+2; i+=BLOCK_WIDTH) {
|
||||
const int k = i + 1;
|
||||
for (int p=0; p<3; ++p) {
|
||||
Qx[p][j][i] = minmodSlope(Q[p][l][k-1], Q[p][l][k], Q[p][l][k+1], theta_);
|
||||
for (int p=0; p<vars; ++p) {
|
||||
for (int j=threadIdx.y; j<h+2*gc_y; j+=h) {
|
||||
for (int i=threadIdx.x+1; i<w+2*gc_x-1; i+=w) {
|
||||
Qx[p][j][i] = minmodSlope(Q[p][j][i-1], Q[p][j][i], Q[p][j][i+1], theta_);
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -70,20 +64,15 @@ __device__ void minmodSlopeX(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
/**
|
||||
* Reconstructs a minmod slope for a whole block along the ordinate
|
||||
*/
|
||||
__device__ void minmodSlopeY(float Q[3][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
|
||||
float Qy[3][BLOCK_HEIGHT+2][BLOCK_WIDTH+2],
|
||||
template<int w, int h, int gc_x, int gc_y, int vars>
|
||||
__device__ void minmodSlopeY(float Q[vars][h+2*gc_y][w+2*gc_x],
|
||||
float Qy[vars][h+2*gc_y][w+2*gc_x],
|
||||
const float theta_) {
|
||||
//Index of thread within block
|
||||
const int tx = get_local_id(0);
|
||||
const int ty = get_local_id(1);
|
||||
|
||||
for (int j=ty; j<BLOCK_HEIGHT+2; j+=BLOCK_HEIGHT) {
|
||||
const int l = j + 1;
|
||||
{
|
||||
const int i = tx;
|
||||
const int k = i + 2; //Skip ghost cells
|
||||
for (int p=0; p<3; ++p) {
|
||||
Qy[p][j][i] = minmodSlope(Q[p][l-1][k], Q[p][l][k], Q[p][l+1][k], theta_);
|
||||
//Reconstruct slopes along y axis
|
||||
for (int p=0; p<vars; ++p) {
|
||||
for (int j=threadIdx.y+1; j<h+2*gc_y-1; j+=h) {
|
||||
for (int i=threadIdx.x; i<w+2*gc_x; i+=w) {
|
||||
Qy[p][j][i] = minmodSlope(Q[p][j-1][i], Q[p][j][i], Q[p][j+1][i], theta_);
|
||||
}
|
||||
}
|
||||
}
|
@ -1,39 +0,0 @@
|
||||
/*
|
||||
This file implements the Central upwind flux
|
||||
|
||||
Copyright (C) 2016, 2017, 2018 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Central upwind flux function
|
||||
*/
|
||||
__device__ float3 CentralUpwindFlux(const float3 Qm, float3 Qp, const float g) {
|
||||
const float3 Fp = F_func(Qp, g);
|
||||
const float up = Qp.y / Qp.x; // hu / h
|
||||
const float cp = sqrt(g*Qp.x); // sqrt(g*h)
|
||||
|
||||
const float3 Fm = F_func(Qm, g);
|
||||
const float um = Qm.y / Qm.x; // hu / h
|
||||
const float cm = sqrt(g*Qm.x); // sqrt(g*h)
|
||||
|
||||
const float am = min(min(um-cm, up-cp), 0.0f); // largest negative wave speed
|
||||
const float ap = max(max(um+cm, up+cp), 0.0f); // largest positive wave speed
|
||||
|
||||
return ((ap*Fm - am*Fp) + ap*am*(Qp-Qm))/(ap-am);
|
||||
}
|
||||
|
@ -1,33 +0,0 @@
|
||||
/*
|
||||
This file implements the First ORder CEntered flux
|
||||
|
||||
Copyright (C) 2016, 2017, 2018 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "./LaxFriedrichs.cu"
|
||||
#include "./LaxWendroff.cu"
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* First ORder CEntered (FORCE) flux (Toro 2001, p. 163)
|
||||
*/
|
||||
__device__ float3 FORCE_1D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_, const float dt_) {
|
||||
const float3 F_lf = LxF_1D_flux(Q_l, Q_r, g_, dx_, dt_);
|
||||
const float3 F_lw2 = LxW2_1D_flux(Q_l, Q_r, g_, dx_, dt_);
|
||||
return 0.5f*(F_lf + F_lw2);
|
||||
}
|
@ -1,36 +0,0 @@
|
||||
/*
|
||||
This file implements the Godunov flux
|
||||
|
||||
Copyright (C) 2016, 2017, 2018 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Godunov's centered scheme (Toro 2001, p 165)
|
||||
*/
|
||||
__device__ float3 GodC_1D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_, const float dt_) {
|
||||
const float3 F_l = F_func(Q_l, g_);
|
||||
const float3 F_r = F_func(Q_r, g_);
|
||||
|
||||
const float3 Q_godc = 0.5f*(Q_l + Q_r) + (dt_/dx_)*(F_l - F_r);
|
||||
|
||||
return F_func(Q_godc, g_);
|
||||
}
|
||||
|
||||
|
||||
|
@ -1,63 +0,0 @@
|
||||
/*
|
||||
This file implements the Harten-Lax-van Leer flux
|
||||
|
||||
Copyright (C) 2016, 2017, 2018 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
|
||||
/**
|
||||
* Harten-Lax-van Leer (Toro 2001, p 180)
|
||||
*/
|
||||
__device__ float3 HLL_flux(const float3 Q_l, const float3 Q_r, const float g_) {
|
||||
const float h_l = Q_l.x;
|
||||
const float h_r = Q_r.x;
|
||||
|
||||
// Calculate velocities
|
||||
const float u_l = Q_l.y / h_l;
|
||||
const float u_r = Q_r.y / h_r;
|
||||
|
||||
// Estimate the potential wave speeds
|
||||
const float c_l = sqrt(g_*h_l);
|
||||
const float c_r = sqrt(g_*h_r);
|
||||
|
||||
// Compute h in the "star region", h^dagger
|
||||
const float h_dag = 0.5f * (h_l+h_r) - 0.25f * (u_r-u_l)*(h_l+h_r)/(c_l+c_r);
|
||||
|
||||
const float q_l_tmp = sqrt(0.5f * ( (h_dag+h_l)*h_dag / (h_l*h_l) ) );
|
||||
const float q_r_tmp = sqrt(0.5f * ( (h_dag+h_r)*h_dag / (h_r*h_r) ) );
|
||||
|
||||
const float q_l = (h_dag > h_l) ? q_l_tmp : 1.0f;
|
||||
const float q_r = (h_dag > h_r) ? q_r_tmp : 1.0f;
|
||||
|
||||
// Compute wave speed estimates
|
||||
const float S_l = u_l - c_l*q_l;
|
||||
const float S_r = u_r + c_r*q_r;
|
||||
|
||||
//Upwind selection
|
||||
if (S_l >= 0.0f) {
|
||||
return F_func(Q_l, g_);
|
||||
}
|
||||
else if (S_r <= 0.0f) {
|
||||
return F_func(Q_r, g_);
|
||||
}
|
||||
//Or estimate flux in the star region
|
||||
else {
|
||||
const float3 F_l = F_func(Q_l, g_);
|
||||
const float3 F_r = F_func(Q_r, g_);
|
||||
const float3 flux = (S_r*F_l - S_l*F_r + S_r*S_l*(Q_r - Q_l)) / (S_r-S_l);
|
||||
return flux;
|
||||
}
|
||||
}
|
@ -1,80 +0,0 @@
|
||||
/*
|
||||
This file implements the Harten-Lax-van Leer flux with contact discontinuity
|
||||
|
||||
Copyright (C) 2016, 2017, 2018 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Harten-Lax-van Leer with contact discontinuity (Toro 2001, p 181)
|
||||
*/
|
||||
__device__ float3 HLLC_flux(const float3 Q_l, const float3 Q_r, const float g_) {
|
||||
const float h_l = Q_l.x;
|
||||
const float h_r = Q_r.x;
|
||||
|
||||
// Calculate velocities
|
||||
const float u_l = Q_l.y / h_l;
|
||||
const float u_r = Q_r.y / h_r;
|
||||
|
||||
// Estimate the potential wave speeds
|
||||
const float c_l = sqrt(g_*h_l);
|
||||
const float c_r = sqrt(g_*h_r);
|
||||
|
||||
// Compute h in the "star region", h^dagger
|
||||
const float h_dag = 0.5f * (h_l+h_r) - 0.25f * (u_r-u_l)*(h_l+h_r)/(c_l+c_r);
|
||||
|
||||
const float q_l_tmp = sqrt(0.5f * ( (h_dag+h_l)*h_dag / (h_l*h_l) ) );
|
||||
const float q_r_tmp = sqrt(0.5f * ( (h_dag+h_r)*h_dag / (h_r*h_r) ) );
|
||||
|
||||
const float q_l = (h_dag > h_l) ? q_l_tmp : 1.0f;
|
||||
const float q_r = (h_dag > h_r) ? q_r_tmp : 1.0f;
|
||||
|
||||
// Compute wave speed estimates
|
||||
const float S_l = u_l - c_l*q_l;
|
||||
const float S_r = u_r + c_r*q_r;
|
||||
const float S_star = ( S_l*h_r*(u_r - S_r) - S_r*h_l*(u_l - S_l) ) / ( h_r*(u_r - S_r) - h_l*(u_l - S_l) );
|
||||
|
||||
const float3 F_l = F_func(Q_l, g_);
|
||||
const float3 F_r = F_func(Q_r, g_);
|
||||
|
||||
//Upwind selection
|
||||
if (S_l >= 0.0f) {
|
||||
return F_l;
|
||||
}
|
||||
else if (S_r <= 0.0f) {
|
||||
return F_r;
|
||||
}
|
||||
//Or estimate flux in the "left star" region
|
||||
else if (S_l <= 0.0f && 0.0f <=S_star) {
|
||||
const float v_l = Q_l.z / h_l;
|
||||
const float3 Q_star_l = h_l * (S_l - u_l) / (S_l - S_star) * make_float3(1, S_star, v_l);
|
||||
const float3 flux = F_l + S_l*(Q_star_l - Q_l);
|
||||
return flux;
|
||||
}
|
||||
//Or estimate flux in the "right star" region
|
||||
else if (S_star <= 0.0f && 0.0f <=S_r) {
|
||||
const float v_r = Q_r.z / h_r;
|
||||
const float3 Q_star_r = h_r * (S_r - u_r) / (S_r - S_star) * make_float3(1, S_star, v_r);
|
||||
const float3 flux = F_r + S_r*(Q_star_r - Q_r);
|
||||
return flux;
|
||||
}
|
||||
else {
|
||||
return make_float3(-99999.9f, -99999.9f, -99999.9f); //Something wrong here
|
||||
}
|
||||
}
|
||||
|
@ -1,24 +0,0 @@
|
||||
|
||||
|
||||
/**
|
||||
* Lax-Friedrichs flux (Toro 2001, p 163)
|
||||
*/
|
||||
__device__ float3 LxF_1D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_, const float dt_) {
|
||||
const float3 F_l = F_func(Q_l, g_);
|
||||
const float3 F_r = F_func(Q_r, g_);
|
||||
|
||||
return 0.5f*(F_l + F_r) + (dx_/(2.0f*dt_))*(Q_l - Q_r);
|
||||
}
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Lax-Friedrichs extended to 2D
|
||||
*/
|
||||
__device__ float3 LxF_2D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_, const float dt_) {
|
||||
const float3 F_l = F_func(Q_l, g_);
|
||||
const float3 F_r = F_func(Q_r, g_);
|
||||
|
||||
//Note numerical diffusion for 2D here (0.25)
|
||||
return 0.5f*(F_l + F_r) + (dx_/(4.0f*dt_))*(Q_l - Q_r);
|
||||
}
|
@ -1,33 +0,0 @@
|
||||
/*
|
||||
This file implements the Lax-Wendroff flux
|
||||
|
||||
Copyright (C) 2016, 2017, 2018 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
|
||||
|
||||
/**
|
||||
* Richtmyer / Two-step Lax-Wendroff flux (Toro 2001, p 164)
|
||||
*/
|
||||
__device__ float3 LxW2_1D_flux(const float3 Q_l, const float3 Q_r, const float g_, const float dx_, const float dt_) {
|
||||
const float3 F_l = F_func(Q_l, g_);
|
||||
const float3 F_r = F_func(Q_r, g_);
|
||||
|
||||
const float3 Q_lw2 = 0.5f*(Q_l + Q_r) + (dt_/(2.0f*dx_))*(F_l - F_r);
|
||||
|
||||
return F_func(Q_lw2, g_);
|
||||
}
|
||||
|
@ -1,187 +0,0 @@
|
||||
/*
|
||||
This file implements the Weighted Average Flux
|
||||
|
||||
Copyright (C) 2016, 2017, 2018 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
*/
|
||||
|
||||
#include "limiters.cu"
|
||||
|
||||
/**
|
||||
* Superbee flux limiter for WAF.
|
||||
* Related to superbee limiter so that WAF_superbee(r, c) = 1 - (1-|c|)*superbee(r)
|
||||
* @param r_ the ratio of upwind change (see Toro 2001, p. 203/204)
|
||||
* @param c_ the courant number for wave k, dt*S_k/dx
|
||||
*/
|
||||
__device__ float WAF_superbee(float r_, float c_) {
|
||||
// r <= 0.0
|
||||
if (r_ <= 0.0f) {
|
||||
return 1.0f;
|
||||
}
|
||||
// 0.0 <= r <= 1/2
|
||||
else if (r_ <= 0.5f) {
|
||||
return 1.0f - 2.0f*(1.0f - fabsf(c_))*r_;
|
||||
}
|
||||
// 1/2 <= r <= 1
|
||||
else if (r_ <= 1.0f) {
|
||||
return fabs(c_);
|
||||
}
|
||||
// 1 <= r <= 2
|
||||
else if (r_ <= 2.0f) {
|
||||
return 1.0f - (1.0f - fabsf(c_))*r_;
|
||||
}
|
||||
// r >= 2
|
||||
else {
|
||||
return 2.0f*fabsf(c_) - 1.0f;
|
||||
}
|
||||
}
__device__ float WAF_albada(float r_, float c_) {
|
||||
if (r_ <= 0.0f) {
|
||||
return 1.0f;
|
||||
}
|
||||
else {
|
||||
return 1.0f - (1.0f - fabsf(c_)) * r_ * (1.0f + r_) / (1.0f + r_*r_);
|
||||
}
|
||||
}
|
||||
|
||||
__device__ float WAF_minbee(float r_, float c_) {
|
||||
r_ = fmaxf(-1.0f, fminf(2.0f, r_));
|
||||
if (r_ <= 0.0f) {
|
||||
return 1.0f;
|
||||
}
|
||||
if (r_ >= 0.0f && r_ <= 1.0f) {
|
||||
return 1.0f - (1.0f - fabsf(c_)) * r_;
|
||||
}
|
||||
else {
|
||||
return fabsf(c_);
|
||||
}
|
||||
}
|
||||
|
||||
__device__ float WAF_minmod(float r_, float c_) {
|
||||
return 1.0f - (1.0f - fabsf(c_)) * fmaxf(0.0f, fminf(1.0f, r_));
|
||||
}
|
||||
|
||||
|
||||
|
||||
__device__ float limiterToWAFLimiter(float r_, float c_) {
|
||||
return 1.0f - (1.0f - fabsf(c_))*r_;
|
||||
}
|
||||
|
||||
__device__ float desingularize(float x_, float eps_) {
|
||||
return copysign(1.0f, x_)*fmaxf(fabsf(x_), fminf(x_*x_/(2.0f*eps_)+0.5f*eps_, eps_));
|
||||
}
|
||||
|
||||
// Compute h in the "star region", h^dagger
|
||||
__device__ __inline__ float computeHStar(float h_l, float h_r, float u_l, float u_r, float c_l, float c_r, float g_) {
|
||||
|
||||
//This estimate for the h* gives rise to spurious oscillations.
|
||||
//return 0.5f * (h_l+h_r) - 0.25f * (u_r-u_l)*(h_l+h_r)/(c_l+c_r);
|
||||
|
||||
const float h_tmp = 0.5f * (c_l + c_r) + 0.25f * (u_l - u_r);
|
||||
return h_tmp*h_tmp / g_;
|
||||
}
|
||||
|
||||
/**
|
||||
* Weighted average flux (Toro 2001, p 200) for interface {i+1/2}
|
||||
* @param r_ The flux limiter parameter (see Toro 2001, p. 203)
|
||||
* @param Q_l2 Q_{i-1}
|
||||
* @param Q_l1 Q_{i}
|
||||
* @param Q_r1 Q_{i+1}
|
||||
* @param Q_r2 Q_{i+2}
|
||||
*/
|
||||
__device__ float3 WAF_1D_flux(const float3 Q_l2, const float3 Q_l1, const float3 Q_r1, const float3 Q_r2, const float g_, const float dx_, const float dt_) {
|
||||
const float h_l = Q_l1.x;
|
||||
const float h_r = Q_r1.x;
|
||||
|
||||
const float h_l2 = Q_l2.x;
|
||||
const float h_r2 = Q_r2.x;
|
||||
|
||||
// Calculate velocities
|
||||
const float u_l = Q_l1.y / h_l;
|
||||
const float u_r = Q_r1.y / h_r;
|
||||
|
||||
const float u_l2 = Q_l2.y / h_l2;
|
||||
const float u_r2 = Q_r2.y / h_r2;
|
||||
|
||||
const float v_l = Q_l1.z / h_l;
|
||||
const float v_r = Q_r1.z / h_r;
|
||||
|
||||
const float v_l2 = Q_l2.z / h_l2;
|
||||
const float v_r2 = Q_r2.z / h_r2;
|
||||
|
||||
// Estimate the potential wave speeds
|
||||
const float c_l = sqrt(g_*h_l);
|
||||
const float c_r = sqrt(g_*h_r);
|
||||
|
||||
const float c_l2 = sqrt(g_*h_l2);
|
||||
const float c_r2 = sqrt(g_*h_r2);
|
||||
|
||||
// Compute h in the "star region", h^dagger
|
||||
const float h_dag_l = computeHStar(h_l2, h_l, u_l2, u_l, c_l2, c_l, g_);
|
||||
const float h_dag = computeHStar( h_l, h_r, u_l, u_r, c_l, c_r, g_);
|
||||
const float h_dag_r = computeHStar( h_r, h_r2, u_r, u_r2, c_r, c_r2, g_);
|
||||
|
||||
const float q_l_tmp = sqrt(0.5f * ( (h_dag+h_l)*h_dag ) ) / h_l;
|
||||
const float q_r_tmp = sqrt(0.5f * ( (h_dag+h_r)*h_dag ) ) / h_r;
|
||||
|
||||
const float q_l = (h_dag > h_l) ? q_l_tmp : 1.0f;
|
||||
const float q_r = (h_dag > h_r) ? q_r_tmp : 1.0f;
|
||||
|
||||
// Compute wave speed estimates
|
||||
const float S_l = u_l - c_l*q_l;
|
||||
const float S_r = u_r + c_r*q_r;
|
||||
const float S_star = ( S_l*h_r*(u_r - S_r) - S_r*h_l*(u_l - S_l) ) / ( h_r*(u_r - S_r) - h_l*(u_l - S_l) );
|
||||
|
||||
const float3 Q_star_l = h_l * (S_l - u_l) / (S_l - S_star) * make_float3(1.0, S_star, v_l);
|
||||
const float3 Q_star_r = h_r * (S_r - u_r) / (S_r - S_star) * make_float3(1.0, S_star, v_r);
|
||||
|
||||
// Estimate the fluxes in the four regions
|
||||
const float3 F_1 = F_func(Q_l1, g_);
|
||||
const float3 F_4 = F_func(Q_r1, g_);
|
||||
|
||||
const float3 F_2 = F_1 + S_l*(Q_star_l - Q_l1);
|
||||
const float3 F_3 = F_4 + S_r*(Q_star_r - Q_r1);
|
||||
//const float3 F_2 = F_func(Q_star_l, g_);
|
||||
//const float3 F_3 = F_func(Q_star_r, g_);
|
||||
|
||||
// Compute the courant numbers for the waves
|
||||
const float c_1 = S_l * dt_ / dx_;
|
||||
const float c_2 = S_star * dt_ / dx_;
|
||||
const float c_3 = S_r * dt_ / dx_;
|
||||
|
||||
// Compute the "upwind change" vectors for the i-3/2 and i+3/2 interfaces
|
||||
const float eps = 1.0e-6f;
|
||||
const float r_1 = desingularize( (c_1 > 0.0f) ? (h_dag_l - h_l2) : (h_dag_r - h_r), eps) / desingularize((h_dag - h_l), eps);
|
||||
const float r_2 = desingularize( (c_2 > 0.0f) ? (v_l - v_l2) : (v_r2 - v_r), eps ) / desingularize((v_r - v_l), eps);
|
||||
const float r_3 = desingularize( (c_3 > 0.0f) ? (h_l - h_dag_l) : (h_r2 - h_dag_r), eps ) / desingularize((h_r - h_dag), eps);
|
||||
|
||||
// Compute the limiter
|
||||
// We use h for the nonlinear waves, and v for the middle shear wave
|
||||
const float A_1 = copysign(1.0f, c_1) * limiterToWAFLimiter(generalized_minmod(r_1, 1.9f), c_1);
|
||||
const float A_2 = copysign(1.0f, c_2) * limiterToWAFLimiter(generalized_minmod(r_2, 1.9f), c_2);
|
||||
const float A_3 = copysign(1.0f, c_3) * limiterToWAFLimiter(generalized_minmod(r_3, 1.9f), c_3);
|
||||
|
||||
//Average the fluxes
|
||||
const float3 flux = 0.5f*( F_1 + F_4 )
|
||||
- 0.5f*( A_1 * (F_2 - F_1)
|
||||
+ A_2 * (F_3 - F_2)
|
||||
+ A_3 * (F_4 - F_3) );
|
||||
|
||||
return flux;
|
||||
}
|
352
GPUSimulators/helpers/InitialConditions.py
Normal file
352
GPUSimulators/helpers/InitialConditions.py
Normal file
@ -0,0 +1,352 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
This python module implements helpers for generating initial conditions
|
||||
|
||||
Copyright (C) 2018 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
|
||||
from GPUSimulators.Simulator import BoundaryCondition
|
||||
import numpy as np
|
||||
import gc
|
||||
|
||||
|
||||
def getExtent(width, height, nx, ny, grid):
|
||||
if grid is not None:
|
||||
gx = grid.grid[0]
|
||||
gy = grid.grid[1]
|
||||
i, j = grid.getCoordinate()
|
||||
|
||||
dx = (width / gx) / nx
|
||||
dy = (height / gy) / ny
|
||||
|
||||
x0 = width*i/gx + 0.5*dx
|
||||
y0 = height*j/gy + 0.5*dy
|
||||
x1 = width*(i+1)/gx - 0.5*dx
|
||||
y1 = height*(j+1)/gy - 0.5*dy
|
||||
|
||||
else:
|
||||
dx = width / nx
|
||||
dy = height / ny
|
||||
|
||||
x0 = 0.5*dx
|
||||
y0 = 0.5*dy
|
||||
x1 = width-0.5*dx
|
||||
y1 = height-0.5*dy
|
||||
|
||||
return [x0, x1, y0, y1, dx, dy]
|
||||
|
||||
|
||||
def downsample(highres_solution, x_factor, y_factor=None):
|
||||
if (y_factor == None):
|
||||
y_factor = x_factor
|
||||
|
||||
assert(highres_solution.shape[1] % x_factor == 0)
|
||||
assert(highres_solution.shape[0] % y_factor == 0)
|
||||
|
||||
if (x_factor*y_factor == 1):
|
||||
return highres_solution
|
||||
|
||||
if (len(highres_solution.shape) == 1):
|
||||
highres_solution = highres_solution.reshape((1, highres_solution.size))
|
||||
|
||||
nx = highres_solution.shape[1] / x_factor
|
||||
ny = highres_solution.shape[0] / y_factor
|
||||
|
||||
return highres_solution.reshape([int(ny), int(y_factor), int(nx), int(x_factor)]).mean(3).mean(1)
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def bump(nx, ny, width, height,
|
||||
bump_size=None,
|
||||
ref_nx=None, ref_ny=None,
|
||||
x_center=0.5, y_center=0.5,
|
||||
h_ref=0.5, h_amp=0.1, u_ref=0.0, u_amp=0.1, v_ref=0.0, v_amp=0.1):
|
||||
|
||||
if (ref_nx == None):
|
||||
ref_nx = nx
|
||||
assert(ref_nx >= nx)
|
||||
|
||||
if (ref_ny == None):
|
||||
ref_ny = ny
|
||||
assert(ref_ny >= ny)
|
||||
|
||||
if (bump_size == None):
|
||||
bump_size = width/5.0
|
||||
|
||||
ref_dx = width / float(ref_nx)
|
||||
ref_dy = height / float(ref_ny)
|
||||
|
||||
x_center = ref_dx*ref_nx*x_center
|
||||
y_center = ref_dy*ref_ny*y_center
|
||||
|
||||
x = ref_dx*(np.arange(0, ref_nx, dtype=np.float32)+0.5) - x_center
|
||||
y = ref_dy*(np.arange(0, ref_ny, dtype=np.float32)+0.5) - y_center
|
||||
xv, yv = np.meshgrid(x, y, sparse=False, indexing='xy')
|
||||
r = np.sqrt(xv**2 + yv**2)
|
||||
xv = None
|
||||
yv = None
|
||||
gc.collect()
|
||||
|
||||
#Generate highres then downsample
|
||||
#h_highres = 0.5 + 0.1*np.exp(-(xv**2/size + yv**2/size))
|
||||
h_highres = h_ref + h_amp*0.5*(1.0 + np.cos(np.pi*r/bump_size)) * (r < bump_size)
|
||||
h = downsample(h_highres, ref_nx/nx, ref_ny/ny)
|
||||
h_highres = None
|
||||
gc.collect()
|
||||
|
||||
#hu_highres = 0.1*np.exp(-(xv**2/size + yv**2/size))
|
||||
u_highres = u_ref + u_amp*0.5*(1.0 + np.cos(np.pi*r/bump_size)) * (r < bump_size)
|
||||
hu = downsample(u_highres, ref_nx/nx, ref_ny/ny)*h
|
||||
u_highres = None
|
||||
gc.collect()
|
||||
|
||||
#hu_highres = 0.1*np.exp(-(xv**2/size + yv**2/size))
|
||||
v_highres = v_ref + v_amp*0.5*(1.0 + np.cos(np.pi*r/bump_size)) * (r < bump_size)
|
||||
hv = downsample(v_highres, ref_nx/nx, ref_ny/ny)*h
|
||||
v_highres = None
|
||||
gc.collect()
|
||||
|
||||
dx = width/nx
|
||||
dy = height/ny
|
||||
|
||||
return h, hu, hv, dx, dy
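The cosine bump generated above has the closed form (the same profile is used for h, u and v, each with its own reference value and amplitude):

h(x, y) = h_{\mathrm{ref}} + h_{\mathrm{amp}} \cdot \tfrac{1}{2}\Bigl(1 + \cos\frac{\pi r}{R}\Bigr) \cdot \mathbf{1}_{\{r < R\}},
\qquad
r = \sqrt{(x - x_c)^2 + (y - y_c)^2},

where R is bump_size and (x_c, y_c) is the bump centre; the high-resolution fields are then block-averaged down to the requested grid by downsample().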
|
||||
|
||||
|
||||
def genShockBubble(nx, ny, gamma, grid=None):
|
||||
"""
|
||||
Generate Shock-bubble interaction case for the Euler equations
|
||||
"""
|
||||
|
||||
width = 4.0
|
||||
height = 1.0
|
||||
g = 0.0
|
||||
|
||||
|
||||
rho = np.ones((ny, nx), dtype=np.float32)
|
||||
u = np.zeros((ny, nx), dtype=np.float32)
|
||||
v = np.zeros((ny, nx), dtype=np.float32)
|
||||
E = np.zeros((ny, nx), dtype=np.float32)
|
||||
p = np.ones((ny, nx), dtype=np.float32)
|
||||
|
||||
|
||||
x0, x1, y0, y1, dx, dy = getExtent(width, height, nx, ny, grid)
|
||||
x = np.linspace(x0, x1, nx, dtype=np.float32)
|
||||
y = np.linspace(y0, y1, ny, dtype=np.float32)
|
||||
xv, yv = np.meshgrid(x, y, sparse=False, indexing='xy')
|
||||
|
||||
#Bubble
|
||||
radius = 0.25
|
||||
x_center = 0.5
|
||||
y_center = 0.5
|
||||
bubble = np.sqrt((xv-x_center)**2+(yv-y_center)**2) <= radius
|
||||
rho = np.where(bubble, 0.1, rho)
|
||||
|
||||
#Left boundary
|
||||
left = (xv < 0.1)
|
||||
rho = np.where(left, 3.81250, rho)
|
||||
u = np.where(left, 2.57669, u)
|
||||
|
||||
#Energy
|
||||
p = np.where(left, 10.0, p)
|
||||
E = 0.5*rho*(u**2+v**2) + p/(gamma-1.0)
|
||||
|
||||
|
||||
bc = BoundaryCondition({
|
||||
'north': BoundaryCondition.Type.Reflective,
|
||||
'south': BoundaryCondition.Type.Reflective,
|
||||
'east': BoundaryCondition.Type.Periodic,
|
||||
'west': BoundaryCondition.Type.Periodic
|
||||
})
|
||||
|
||||
#Construct simulator
|
||||
arguments = {
|
||||
'rho': rho, 'rho_u': rho*u, 'rho_v': rho*v, 'E': E,
|
||||
'nx': nx, 'ny': ny,
|
||||
'dx': dx, 'dy': dy,
|
||||
'g': g,
|
||||
'gamma': gamma,
|
||||
'boundary_conditions': bc
|
||||
}
|
||||
return arguments
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
def genKelvinHelmholtz(nx, ny, gamma, roughness=0.125, grid=None):
|
||||
"""
|
||||
Roughness parameter in (0, 1.0] determines how "squiggly"
|
||||
the interface between the zones is
|
||||
"""
|
||||
|
||||
def genZones(nx, ny, n):
|
||||
"""
|
||||
Generates the zones of the two fluids of K-H
|
||||
"""
|
||||
zone = np.zeros((ny, nx), dtype=np.int32)
|
||||
|
||||
|
||||
def genSmoothRandom(nx, n):
|
||||
n = max(1, min(n, nx))
|
||||
|
||||
if n == nx:
|
||||
return np.random.random(nx)-0.5
|
||||
else:
|
||||
from scipy.interpolate import interp1d
|
||||
|
||||
#Control points and interpolator
|
||||
xp = np.linspace(0.0, 1.0, n)
|
||||
yp = np.random.random(n) - 0.5
|
||||
|
||||
if (n == 1):
|
||||
kind = 'nearest'
|
||||
elif (n == 2):
|
||||
kind = 'linear'
|
||||
elif (n == 3):
|
||||
kind = 'quadratic'
|
||||
else:
|
||||
kind = 'cubic'
|
||||
|
||||
f = interp1d(xp, yp, kind=kind)
|
||||
|
||||
#Interpolation points
|
||||
x = np.linspace(0.0, 1.0, nx)
|
||||
return f(x)
|
||||
|
||||
|
||||
|
||||
x0, x1, y0, y1, _, dy = getExtent(1.0, 1.0, nx, ny, grid)
|
||||
x = np.linspace(x0, x1, nx)
|
||||
y = np.linspace(y0, y1, ny)
|
||||
_, y = np.meshgrid(x, y)
|
||||
|
||||
#print(y+a[0])
|
||||
|
||||
a = genSmoothRandom(nx, n)*dy
|
||||
zone = np.where(y > 0.25+a, zone, 1)
|
||||
|
||||
a = genSmoothRandom(nx, n)*dy
|
||||
zone = np.where(y < 0.75+a, zone, 1)
|
||||
|
||||
return zone
|
||||
|
||||
width = 2.0
|
||||
height = 1.0
|
||||
g = 0.0
|
||||
gamma = 1.4
|
||||
|
||||
rho = np.empty((ny, nx), dtype=np.float32)
|
||||
u = np.empty((ny, nx), dtype=np.float32)
|
||||
v = np.zeros((ny, nx), dtype=np.float32)
|
||||
p = 2.5*np.ones((ny, nx), dtype=np.float32)
|
||||
|
||||
#Generate the different zones
|
||||
zones = genZones(nx, ny, max(1, min(nx, int(nx*roughness))))
|
||||
|
||||
#Zone 0
|
||||
zone0 = zones == 0
|
||||
rho = np.where(zone0, 1.0, rho)
|
||||
u = np.where(zone0, 0.5, u)
|
||||
|
||||
#Zone 1
|
||||
zone1 = zones == 1
|
||||
rho = np.where(zone1, 2.0, rho)
|
||||
u = np.where(zone1, -0.5, u)
|
||||
|
||||
E = 0.5*rho*(u**2+v**2) + p/(gamma-1.0)
|
||||
|
||||
_, _, _, _, dx, dy = getExtent(width, height, nx, ny, grid)
|
||||
|
||||
|
||||
bc = BoundaryCondition({
|
||||
'north': BoundaryCondition.Type.Periodic,
|
||||
'south': BoundaryCondition.Type.Periodic,
|
||||
'east': BoundaryCondition.Type.Periodic,
|
||||
'west': BoundaryCondition.Type.Periodic
|
||||
})
|
||||
|
||||
#Construct simulator
|
||||
arguments = {
|
||||
'rho': rho, 'rho_u': rho*u, 'rho_v': rho*v, 'E': E,
|
||||
'nx': nx, 'ny': ny,
|
||||
'dx': dx, 'dy': dy,
|
||||
'g': g,
|
||||
'gamma': gamma,
|
||||
'boundary_conditions': bc
|
||||
}
|
||||
|
||||
return arguments
|
||||
|
||||
|
||||
|
||||
def genRayleighTaylor(nx, ny, gamma, version=0, grid=None):
|
||||
"""
|
||||
Generates Rayleigh-Taylor instability case
|
||||
"""
|
||||
width = 0.5
|
||||
height = 1.5
|
||||
g = 0.1
|
||||
|
||||
rho = np.zeros((ny, nx), dtype=np.float32)
|
||||
u = np.zeros((ny, nx), dtype=np.float32)
|
||||
v = np.zeros((ny, nx), dtype=np.float32)
|
||||
p = np.zeros((ny, nx), dtype=np.float32)
|
||||
|
||||
|
||||
x0, x1, y0, y1, dx, dy = getExtent(width, height, nx, ny, grid)
|
||||
x = np.linspace(x0, x1, nx, dtype=np.float32)-width*0.5
|
||||
y = np.linspace(y0, y1, ny, dtype=np.float32)-height*0.5
|
||||
xv, yv = np.meshgrid(x, y, sparse=False, indexing='xy')
|
||||
|
||||
#This gives a squiggly interface
|
||||
if (version == 0):
|
||||
y_threshold = 0.01*np.cos(2*np.pi*np.abs(x)/0.5)
|
||||
rho = np.where(yv <= y_threshold, 1.0, rho)
|
||||
rho = np.where(yv > y_threshold, 2.0, rho)
|
||||
elif (version == 1):
|
||||
rho = np.where(yv <= 0.0, 1.0, rho)
|
||||
rho = np.where(yv > 0.0, 2.0, rho)
|
||||
v = 0.01*(1.0 + np.cos(2*np.pi*xv/0.5))/4
|
||||
else:
|
||||
assert False, "Invalid version"
|
||||
|
||||
p = 2.5 - rho*g*yv
|
||||
E = 0.5*rho*(u**2+v**2) + p/(gamma-1.0)
|
||||
|
||||
bc = BoundaryCondition({
|
||||
'north': BoundaryCondition.Type.Reflective,
|
||||
'south': BoundaryCondition.Type.Reflective,
|
||||
'east': BoundaryCondition.Type.Reflective,
|
||||
'west': BoundaryCondition.Type.Reflective
|
||||
})
|
||||
|
||||
#Construct simulator
|
||||
arguments = {
|
||||
'rho': rho, 'rho_u': rho*u, 'rho_v': rho*v, 'E': E,
|
||||
'nx': nx, 'ny': ny,
|
||||
'dx': dx, 'dy': dy,
|
||||
'g': g,
|
||||
'gamma': gamma,
|
||||
'boundary_conditions': bc
|
||||
}
|
||||
|
||||
return arguments
|
61
GPUSimulators/helpers/Visualization.py
Normal file
61
GPUSimulators/helpers/Visualization.py
Normal file
@ -0,0 +1,61 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
|
||||
"""
|
||||
This python module implements helpers for visualizing simulation results
|
||||
|
||||
Copyright (C) 2018 SINTEF ICT
|
||||
|
||||
This program is free software: you can redistribute it and/or modify
|
||||
it under the terms of the GNU General Public License as published by
|
||||
the Free Software Foundation, either version 3 of the License, or
|
||||
(at your option) any later version.
|
||||
|
||||
This program is distributed in the hope that it will be useful,
|
||||
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
GNU General Public License for more details.
|
||||
|
||||
You should have received a copy of the GNU General Public License
|
||||
along with this program. If not, see <http://www.gnu.org/licenses/>.
|
||||
"""
|
||||
|
||||
|
||||
|
||||
import numpy as np
|
||||
|
||||
from matplotlib.colors import Normalize
|
||||
|
||||
|
||||
|
||||
def genSchlieren(rho):
    #Schlieren-like shading: the z-component of the normalized gradient vector [drho/dx, drho/dy, 1]
    normal = np.gradient(rho)
    length = 1.0 / np.sqrt(normal[0]**2 + normal[1]**2 + 1.0)
    schlieren = np.power(length, 128)
    return schlieren


def genVorticity(rho, rho_u, rho_v):
    u = rho_u / rho
    v = rho_v / rho

    #Arrays are indexed [y, x], so np.gradient returns the derivative along y first, then x
    du_dy, _ = np.gradient(u)
    _, dv_dx = np.gradient(v)

    #z-component of the curl (the vorticity)
    curl = dv_dx - du_dy
    return curl


def genColors(rho, rho_u, rho_v, cmap, vmax, vmin):
    schlieren = genSchlieren(rho)
    curl = genVorticity(rho, rho_u, rho_v)

    colors = Normalize(vmin, vmax, clip=True)(curl)
    colors = cmap(colors)
    for k in range(3):
        colors[:,:,k] = colors[:,:,k]*schlieren

    return colors
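As an illustration of how genColors might be used to render a frame, here is a minimal sketch. The conserved-variable arrays are synthetic stand-ins (in practice they would be downloaded from a simulator), and the colormap and vorticity range are arbitrary choices.

# Hypothetical usage of genColors with matplotlib (illustrative sketch only)
import numpy as np
from matplotlib import pyplot as plt
from GPUSimulators.helpers import Visualization

#Synthetic stand-ins for simulator output, indexed [y, x]
ny, nx = 256, 256
rho = 1.0 + 0.1*np.random.rand(ny, nx).astype(np.float32)
rho_u = (np.random.rand(ny, nx).astype(np.float32) - 0.5)
rho_v = (np.random.rand(ny, nx).astype(np.float32) - 0.5)

rgba = Visualization.genColors(rho, rho_u, rho_v, plt.cm.RdBu, vmax=1.0, vmin=-1.0)
plt.figure()
plt.imshow(rgba, origin='lower')
plt.show()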
914
MPITest.ipynb
Normal file
File diff suppressed because one or more lines are too long
387
TestSchemes.ipynb
Normal file
File diff suppressed because one or more lines are too long
137
mpiTesting.py
Normal file
@ -0,0 +1,137 @@
# -*- coding: utf-8 -*-

"""
This python module implements MPI simulations for benchmarking

Copyright (C) 2018 SINTEF ICT

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""


import numpy as np
import gc
import time
import json
import logging

#MPI
from mpi4py import MPI

#CUDA
import pycuda.driver as cuda

#Simulator engine etc
from GPUSimulators import MPISimulator, Common, CudaContext
from GPUSimulators import EE2D_KP07_dimsplit
from GPUSimulators.helpers import InitialConditions as IC
from GPUSimulators.Simulator import BoundaryCondition as BC


#Get MPI COMM to use
comm = MPI.COMM_WORLD


####
#Initialize logging
####
log_level_console = 20
log_level_file = 10
log_filename = 'mpi_' + str(comm.rank) + '.log'
logger = logging.getLogger('GPUSimulators')
logger.setLevel(min(log_level_console, log_level_file))

ch = logging.StreamHandler()
ch.setLevel(log_level_console)
logger.addHandler(ch)
logger.info("Console logger using level %s", logging.getLevelName(log_level_console))

fh = logging.FileHandler(log_filename)
formatter = logging.Formatter('%(asctime)s:%(name)s:%(levelname)s: %(message)s')
fh.setFormatter(formatter)
fh.setLevel(log_level_file)
logger.addHandler(fh)
logger.info("File logger using level %s to %s", logging.getLevelName(log_level_file), log_filename)


####
# Initialize MPI grid etc
####
logger.info("Creating MPI grid")
grid = MPISimulator.MPIGrid(MPI.COMM_WORLD)


####
# Initialize CUDA
####
cuda.init(flags=0)
logger.info("Initializing CUDA")
local_rank = grid.getLocalRank()
num_cuda_devices = cuda.Device.count()
cuda_device = local_rank % num_cuda_devices
cuda_context = CudaContext.CudaContext(device=cuda_device, autotuning=False)


####
# Set initial conditions
####
logger.info("Generating initial conditions")
nx = 128
ny = 128
gamma = 1.4
save_times = np.linspace(0, 5.0, 10)
outfile = "mpi_out_" + str(MPI.COMM_WORLD.rank) + ".nc"
save_var_names = ['rho', 'rho_u', 'rho_v', 'E']

arguments = IC.genKelvinHelmholtz(nx, ny, gamma, grid=grid)
arguments['context'] = cuda_context
arguments['theta'] = 1.2
arguments['grid'] = grid


####
# Run simulation
####
logger.info("Running simulation")
#Helper function to create MPI simulator
def genSim(grid, **kwargs):
    local_sim = EE2D_KP07_dimsplit.EE2D_KP07_dimsplit(**kwargs)
    sim = MPISimulator.MPISimulator(local_sim, grid)
    return sim
outfile = Common.runSimulation(genSim, arguments, outfile, save_times, save_var_names)


####
# Clean shutdown
####
sim = None
local_sim = None
cuda_context = None
arguments = None
logging.shutdown()
gc.collect()


####
# Print completion and exit
####
print("Completed!")
exit(0)
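For reference, this benchmark would typically be launched with one MPI process per GPU; the launcher name and process count below are assumptions that depend on the system, for example:

mpirun -np 4 python mpiTesting.py

Each rank then writes its own log file (mpi_0.log, mpi_1.log, ...) and its own NetCDF output (mpi_out_0.nc, mpi_out_1.nc, ...), following the file names constructed above.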