Mirror of https://github.com/smyalygames/FiniteVolumeGPU_HIP.git (synced 2025-12-24 13:29:17 +01:00)

Compare commits: porting-to...6d9f36968d (20 commits)
| SHA1 |
|---|
| 6d9f36968d |
| 5b925cdb42 |
| b054a4dbcd |
| 2e5cf88eef |
| 80afd31286 |
| e2306406a7 |
| aa21733806 |
| 5a27445de8 |
| cd69f69080 |
| 9761ff4924 |
| 5931cee93f |
| 208d82ab0b |
| 31bf80c6f0 |
| 4df5e5853f |
| dc78082f74 |
| b8603e939e |
| b1dc2938ad |
| 4ddf19bef7 |
| 918111df25 |
| 2a7a8c6258 |
.gitignore (vendored, new file, 276 lines)

@@ -0,0 +1,276 @@
.vscode/settings.json

/data

# Numpy Zipped
*.npz

# NetCDF
*.nc

# Python Related files
# Taken from: https://github.com/github/gitignore/blob/main/Python.gitignore
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py,cover
.hypothesis/
.pytest_cache/
cover/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
.pybuilder/
target/

# Jupyter Notebook
.ipynb_checkpoints

# IPython
profile_default/
ipython_config.py

# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version

# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
#Pipfile.lock

# UV
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
#uv.lock

# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
#poetry.lock

# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
#pdm.lock
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
# in version control.
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
.pdm.toml
.pdm-python
.pdm-build/

# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/

# Celery stuff
celerybeat-schedule
celerybeat.pid

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/
.dmypy.json
dmypy.json

# Pyre type checker
.pyre/

# pytype static type analyzer
.pytype/

# Cython debug symbols
cython_debug/

# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
#.idea/

# Ruff stuff:
.ruff_cache/

# PyPI configuration file
.pypirc

# CUDA
cuda_cache/

# Taken from: https://github.com/github/gitignore/blob/main/CUDA.gitignore
*.i
*.ii
*.gpu
*.ptx
*.cubin
*.fatbin

# Covers JetBrains IDEs: IntelliJ, RubyMine, PhpStorm, AppCode, PyCharm, CLion, Android Studio, WebStorm and Rider
# Reference: https://intellij-support.jetbrains.com/hc/en-us/articles/206544839
# Taken from: https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore

# User-specific stuff
.idea/**/workspace.xml
.idea/**/tasks.xml
.idea/**/usage.statistics.xml
.idea/**/dictionaries
.idea/**/shelf

# AWS User-specific
.idea/**/aws.xml

# Generated files
.idea/**/contentModel.xml

# Sensitive or high-churn files
.idea/**/dataSources/
.idea/**/dataSources.ids
.idea/**/dataSources.local.xml
.idea/**/sqlDataSources.xml
.idea/**/dynamic.xml
.idea/**/uiDesigner.xml
.idea/**/dbnavigator.xml

# Gradle
.idea/**/gradle.xml
.idea/**/libraries

# Gradle and Maven with auto-import
# When using Gradle or Maven with auto-import, you should exclude module files,
# since they will be recreated, and may cause churn. Uncomment if using
# auto-import.
# .idea/artifacts
# .idea/compiler.xml
# .idea/jarRepositories.xml
# .idea/modules.xml
# .idea/*.iml
# .idea/modules
# *.iml
# *.ipr

# CMake
cmake-build-*/

# Mongo Explorer plugin
.idea/**/mongoSettings.xml

# File-based project format
*.iws

# IntelliJ
out/

# mpeltonen/sbt-idea plugin
.idea_modules/

# JIRA plugin
atlassian-ide-plugin.xml

# Cursive Clojure plugin
.idea/replstate.xml

# SonarLint plugin
.idea/sonarlint/

# Crashlytics plugin (for Android Studio and IntelliJ)
com_crashlytics_export_strings.xml
crashlytics.properties
crashlytics-build.properties
fabric.properties

# Editor-based Rest Client
.idea/httpRequests

# Android studio 3.1+ serialized cache file
.idea/caches/build_file_checksums.ser
@@ -31,7 +31,6 @@ from hip import hip,hiprtc

 from GPUSimulators import Common, Simulator, CudaContext

-class Autotuner:
 def hip_check(call_result):
     err = call_result[0]
     result = call_result[1:]

@@ -46,6 +45,8 @@ class Autotuner:
         raise RuntimeError(str(err))
     return result

+
+class Autotuner:
     def __init__(self,
                  nx=2048, ny=2048,
                  block_widths=range(8, 32, 1),
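The `hip_check` helper being hoisted to module level above exists because hip-python does not raise on errors: every binding returns a tuple whose first element is the status code. A minimal sketch of the convention it wraps (the device-count query is just an illustrative call):

```python
from hip import hip

# hip-python calls return a tuple: (error_code, *results).
err, count = hip.hipGetDeviceCount()
if err != hip.hipError_t.hipSuccess:
    raise RuntimeError(str(err))

# hip_check() folds that pattern into one expression:
count = hip_check(hip.hipGetDeviceCount())
```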
@@ -35,6 +35,8 @@ import gc
 import netCDF4
 import json

+from tqdm import tqdm
+
 #import pycuda.compiler as cuda_compiler
 #import pycuda.gpuarray
 #import pycuda.driver as cuda
@@ -178,11 +180,11 @@ def runSimulation(simulator, simulator_args, outfile, save_times, save_var_names
    profiling_data_sim_runner["end"]["t_sim_init"] = time.time()

    #Start simulation loop
-   progress_printer = ProgressPrinter(save_times[-1], print_every=10)
-   for k in range(len(save_times)):
+   # progress_printer = ProgressPrinter(save_times[-1], print_every=10)
+   for k, t_step in tqdm(enumerate(t_steps), desc="Simulation Loop"):
        #Get target time and step size there
-       t_step = t_steps[k]
-       t_end = save_times[k]
+       # t_step = t_steps[k]
+       # t_end = save_times[k]

        #Sanity check simulator
        try:
@@ -194,7 +196,7 @@ def runSimulation(simulator, simulator_args, outfile, save_times, save_var_names
        profiling_data_sim_runner["start"]["t_full_step"] += time.time()

        #Simulate
-       if (t_step > 0.0):
+       if t_step > 0.0:
            sim.simulate(t_step, dt)

        profiling_data_sim_runner["end"]["t_full_step"] += time.time()
@@ -211,17 +213,14 @@ def runSimulation(simulator, simulator_args, outfile, save_times, save_var_names
        profiling_data_sim_runner["end"]["t_nc_write"] += time.time()

        #Write progress to screen
-       print_string = progress_printer.getPrintString(t_end)
-       if (print_string):
-           logger.debug(print_string)
+       # print_string = progress_printer.getPrintString(t_end)
+       # if (print_string):
+       #     logger.debug(print_string)

-   logger.debug("Simulated to t={:f} in {:d} timesteps (average dt={:f})".format(t_end, sim.simSteps(), sim.simTime() / sim.simSteps()))
+   logger.debug("Simulated to t={:f} in {:d} timesteps (average dt={:f})".format(save_times[-1], sim.simSteps(), sim.simTime() / sim.simSteps()))

    return outdata.filename, profiling_data_sim_runner, sim.profiling_data_mpi
-
-
-
    #return outdata.filename


 class Timer(object):
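The hunks above swap the homegrown `ProgressPrinter` for `tqdm`. A minimal standalone sketch of the new loop shape (the step values here are made up for illustration): note that wrapping a bare `enumerate` gives tqdm no length, so the diff's version shows a running counter without a percentage bar; passing `total=` would restore it.

```python
from tqdm import tqdm

t_steps = [0.1] * 50  # hypothetical step sizes

# tqdm wraps any iterable and renders a progress bar on stderr;
# enumerate() keeps the index k that the old range-based loop used.
for k, t_step in tqdm(enumerate(t_steps), total=len(t_steps),
                      desc="Simulation Loop"):
    pass  # sim.simulate(t_step, dt) would go here
```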
@@ -247,9 +246,6 @@ class Timer(object):
        return time.time() - self.start

-
-
-
 class PopenFileBuffer(object):
    """
    Simple class for holding a set of tempfiles
@@ -312,7 +308,7 @@ class IPEngine(object):
        import ipyparallel
        self.cluster = ipyparallel.Client()#profile='mpi')
        time.sleep(3)
-       while(len(self.cluster.ids) != n_engines):
+       while len(self.cluster.ids) != n_engines:
            time.sleep(0.5)
            self.logger.info("Waiting for cluster...")
            self.cluster = ipyparallel.Client()#profile='mpi')
@@ -366,10 +362,6 @@ class IPEngine(object):
        gc.collect()

-
-
-
-
 class DataDumper(object):
    """
    Simple class for holding a netCDF4 object
@@ -443,65 +435,58 @@ class DataDumper(object):


-class ProgressPrinter(object):
-    """
-    Small helper class for
-    """
-    def __init__(self, total_steps, print_every=5):
-        self.logger = logging.getLogger(__name__)
-        self.start = time.time()
-        self.total_steps = total_steps
-        self.print_every = print_every
-        self.next_print_time = self.print_every
-        self.last_step = 0
-        self.secs_per_iter = None
-
-    def getPrintString(self, step):
-        elapsed = time.time() - self.start
-        if (elapsed > self.next_print_time):
-            dt = elapsed - (self.next_print_time - self.print_every)
-            dsteps = step - self.last_step
-            steps_remaining = self.total_steps - step
-
-            if (dsteps == 0):
-                return
-
-            self.last_step = step
-            self.next_print_time = elapsed + self.print_every
-
-            if not self.secs_per_iter:
-                self.secs_per_iter = dt / dsteps
-            self.secs_per_iter = 0.2*self.secs_per_iter + 0.8*(dt / dsteps)
-
-            remaining_time = steps_remaining * self.secs_per_iter
-
-            return "{:s}. Total: {:s}, elapsed: {:s}, remaining: {:s}".format(
-                ProgressPrinter.progressBar(step, self.total_steps),
-                ProgressPrinter.timeString(elapsed + remaining_time),
-                ProgressPrinter.timeString(elapsed),
-                ProgressPrinter.timeString(remaining_time))
-
-    def timeString(seconds):
-        seconds = int(max(seconds, 1))
-        minutes, seconds = divmod(seconds, 60)
-        hours, minutes = divmod(minutes, 60)
-        periods = [('h', hours), ('m', minutes), ('s', seconds)]
-        time_string = ' '.join('{}{}'.format(value, name)
-                               for name, value in periods
-                               if value)
-        return time_string
-
-    def progressBar(step, total_steps, width=30):
-        progress = np.round(width * step / total_steps).astype(np.int32)
-        progressbar = "0% [" + "#"*(progress) + "="*(width-progress) + "] 100%"
-        return progressbar
+# class ProgressPrinter(object):
+#     """
+#     Small helper class for
+#     """
+#     def __init__(self, total_steps, print_every=5):
+#         self.logger = logging.getLogger(__name__)
+#         self.start = time.time()
+#         self.total_steps = total_steps
+#         self.print_every = print_every
+#         self.next_print_time = self.print_every
+#         self.last_step = 0
+#         self.secs_per_iter = None
+
+#     def getPrintString(self, step):
+#         elapsed = time.time() - self.start
+#         if (elapsed > self.next_print_time):
+#             dt = elapsed - (self.next_print_time - self.print_every)
+#             dsteps = step - self.last_step
+#             steps_remaining = self.total_steps - step
+
+#             if (dsteps == 0):
+#                 return
+
+#             self.last_step = step
+#             self.next_print_time = elapsed + self.print_every
+
+#             if not self.secs_per_iter:
+#                 self.secs_per_iter = dt / dsteps
+#             self.secs_per_iter = 0.2*self.secs_per_iter + 0.8*(dt / dsteps)
+
+#             remaining_time = steps_remaining * self.secs_per_iter
+
+#             return "{:s}. Total: {:s}, elapsed: {:s}, remaining: {:s}".format(
+#                 ProgressPrinter.progressBar(step, self.total_steps),
+#                 ProgressPrinter.timeString(elapsed + remaining_time),
+#                 ProgressPrinter.timeString(elapsed),
+#                 ProgressPrinter.timeString(remaining_time))
+
+#     def timeString(seconds):
+#         seconds = int(max(seconds, 1))
+#         minutes, seconds = divmod(seconds, 60)
+#         hours, minutes = divmod(minutes, 60)
+#         periods = [('h', hours), ('m', minutes), ('s', seconds)]
+#         time_string = ' '.join('{}{}'.format(value, name)
+#                                for name, value in periods
+#                                if value)
+#         return time_string
+
+#     def progressBar(step, total_steps, width=30):
+#         progress = np.round(width * step / total_steps).astype(np.int32)
+#         progressbar = "0% [" + "#"*(progress) + "="*(width-progress) + "] 100%"
+#         return progressbar


 """
@@ -525,17 +510,21 @@ class CudaArray2D:
        #Should perhaps use pycuda.driver.mem_alloc_data.pitch() here
        #Initialize an array on GPU with zeros
        #self.data = pycuda.gpuarray.zeros((ny_halo, nx_halo), dtype)
-       self.data_h = np.zeros((ny_halo, nx_halo), dtype="float32")
-       num_bytes = self.data_h.size * self.data_h.itemsize
+       #data.strides[0] == nx_halo*np.float32().itemsize
+       #data.strides[1] == np.float32().itemsize
+       num_bytes = ny_halo*nx_halo * np.float32().itemsize
+
+       #data_h = np.zeros((ny_halo, nx_halo), dtype)
        # init device array and upload host data
        self.data = hip_check(hip.hipMalloc(num_bytes)).configure(
            typestr="float32",shape=(ny_halo, nx_halo))

+       #num_bytes = ny*nx * np.float32().itemsize
+       #cpu_data = hip_check(hip.hipHostMalloc(num_bytes,hip.hipHostMallocPortable))
        # copy data from host to device
-       hip_check(hip.hipMemcpy(self.data,self.data_h,num_bytes,hip.hipMemcpyKind.hipMemcpyHostToDevice))
+       #hip_check(hip.hipMemcpy(self.data,data_h,num_bytes,hip.hipMemcpyKind.hipMemcpyHostToDevice))

        #For returning to download (No counterpart in hip-python)
        #https://rocm.docs.amd.com/projects/hip-python/en/latest/python_api/hip.html#hip.hip.hipMemPoolCreate
        #self.memorypool = PageLockedMemoryPool()

        #If we don't have any data, just allocate and return
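The allocation idiom above relies on hip-python returning a `DeviceArray` whose `configure()` method attaches a dtype and shape so it can later be treated like an array. A standalone sketch of the same idiom, assuming the module-level `hip_check` defined earlier and a hypothetical padded shape:

```python
import numpy as np
from hip import hip

ny_halo, nx_halo = 66, 66  # hypothetical shape including halo cells
num_bytes = ny_halo * nx_halo * np.float32().itemsize

# hipMalloc returns a DeviceArray; configure() records typestr/shape metadata.
data = hip_check(hip.hipMalloc(num_bytes)).configure(
    typestr="float32", shape=(ny_halo, nx_halo))

# Zero-initialize on the host and copy up (blocking copy).
host = np.zeros((ny_halo, nx_halo), dtype=np.float32)
hip_check(hip.hipMemcpy(data, host, num_bytes,
                        hip.hipMemcpyKind.hipMemcpyHostToDevice))
```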
@@ -547,16 +536,21 @@ class CudaArray2D:
        assert cpu_data.itemsize == 4, "Wrong size of data type"
        assert not np.isfortran(cpu_data), "Wrong datatype (Fortran, expected C)"

        #Create copy object from host to device
        x = (nx_halo - cpu_data.shape[1]) // 2
        y = (ny_halo - cpu_data.shape[0]) // 2
        self.upload(stream, cpu_data, extent=[x, y, cpu_data.shape[1], cpu_data.shape[0]])

        #self.logger.debug("Buffer <%s> [%dx%d]: Allocated ", int(self.data.gpudata), self.nx, self.ny)


    def __del__(self, *args):
-       #self.logger.debug("Buffer <%s> [%dx%d]: Releasing ", int(self.data.gpudata), self.nx, self.ny)
-       self.data.gpudata.free()
+       #self.data.gpudata.free()
+       #self.logger.debug("Buffer <%s> [%dx%d]: Releasing ", int(self.data), self.nx, self.ny)
+       hip_check(hip.hipFree(self.data))
+       #hip_check(hip.hipFreeAsync(self.data, self.stream))
        self.data = None

    """
@@ -575,14 +569,15 @@ class CudaArray2D:
        #self.logger.debug("Downloading [%dx%d] buffer", self.nx, self.ny)
        #Allocate host memory
        #The following fails, don't know why (crashes python)
        #allocate a pinned (page-locked) memory array
        #cpu_data = cuda.pagelocked_empty((int(ny), int(nx)), dtype=np.float32, mem_flags=cuda.host_alloc_flags.PORTABLE)
-       cpu_data = np.empty((ny, nx), dtype=np.float32)
-       num_bytes = cpu_data.size * cpu_data.itemsize
+       #see here type of memory: https://rocm.docs.amd.com/projects/hip-python/en/latest/python_api/hip.html#hip.hip.hipMemoryType
+       cpu_data = np.zeros((ny, nx), dtype=np.float32)
+       #num_bytes = cpu_data.size * cpu_data.itemsize
        #hipHostMalloc allocates pinned host memory which is mapped into the address space of all GPUs in the system, the memory can #be accessed directly by the GPU device
        #hipHostMallocDefault:Memory is mapped and portable (default allocation)
        #hipHostMallocPortable: memory is explicitely portable across different devices
-       cpu_data = hip_check(hip.hipHostMalloc(num_bytes,hip.hipHostMallocPortable))
+       #cpu_data = hip_check(hip.hipHostMalloc(num_bytes,hip.hipHostMallocPortable))
        #Non-pagelocked: cpu_data = np.empty((ny, nx), dtype=np.float32)
        #cpu_data = self.memorypool.allocate((ny, nx), dtype=np.float32)
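The now-commented `hipHostMalloc` line is the pinned-memory variant the port experimented with before falling back to a plain NumPy buffer. A sketch of how pinned (page-locked) host memory would be allocated and released with hip-python, per the flags described in the comments above (sizes hypothetical):

```python
import numpy as np
from hip import hip

ny, nx = 64, 64
num_bytes = ny * nx * np.float32().itemsize

# hipHostMalloc returns pinned host memory, mapped so the GPU can access
# it directly; hipHostMallocPortable makes it usable across all devices.
pinned = hip_check(hip.hipHostMalloc(num_bytes, hip.hipHostMallocPortable))

# ... use as the source/target of (async) copies ...

# Pinned memory must be released with hipHostFree, not hipFree.
hip_check(hip.hipHostFree(pinned))
```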
@@ -591,50 +586,62 @@ class CudaArray2D:
        assert x+nx <= self.nx + 2*self.x_halo
        assert y+ny <= self.ny + 2*self.y_halo

+       #Cuda
+       """
        #Create copy object from device to host
-       #copy = cuda.Memcpy2D()
-       #copy.set_src_device(self.data.gpudata)
-       #copy.set_dst_host(cpu_data)
+       copy = cuda.Memcpy2D()
+       copy.set_src_device(self.data.gpudata)
+       copy.set_dst_host(cpu_data)

        #Set offsets and pitch of source
-       #copy.src_x_in_bytes = int(x)*self.data.strides[1]
-       #copy.src_y = int(y)
-       #copy.src_pitch = self.data.strides[0]
+       copy.src_x_in_bytes = int(x)*self.data.strides[1]
+       copy.src_y = int(y)
+       copy.src_pitch = self.data.strides[0]

        #Set width in bytes to copy for each row and
        #number of rows to copy
-       #copy.width_in_bytes = int(nx)*cpu_data.itemsize
-       #copy.height = int(ny)
+       copy.width_in_bytes = int(nx)*cpu_data.itemsize
+       copy.height = int(ny)
+       """

        #The equivalent of cuda.Memcpy2D in hip-python would be: but it fails with an error pointing to cpu_data
        #and a message: "RuntimeError: hipError_t.hipErrorInvalidValue"
        #shape = (nx,ny)
        #num_bytes = cpu_data.size * cpu_data.itemsize
        #dst_pitch_bytes = cpu_data.strides[0]
        #src_pitch_bytes = num_bytes // shape[0]
        #src_pitch_bytes = data.strides[0]
        #width_bytes = int(nx)*cpu_data.itemsize
        #height_Nrows = int(ny)
        #hipMemcpy2D(dst, unsigned long dpitch, src, unsigned long spitch, unsigned long width, unsigned long height, kind)
        #copy = hip_check(hip.hipMemcpy2D(cpu_data, #pointer to destination
        #                                 dst_pitch_bytes, #pitch of destination array
        #                                 data, #pointer to source
        #                                 src_pitch_bytes, #pitch of source array
        #                                 width_bytes, #number of bytes in each row
        #                                 height_Nrows, #number of rows to copy
        #                                 hip.hipMemcpyKind.hipMemcpyDeviceToHost)) #kind

        #this is an alternative:
        #copy from device to host
        cpu_data = np.empty((ny, nx), dtype=np.float32)
        num_bytes = cpu_data.size * cpu_data.itemsize
        #hip.hipMemcpy(dst, src, unsigned long sizeBytes, kind)
        copy = hip_check(hip.hipMemcpy(cpu_data,self.data,num_bytes,hip.hipMemcpyKind.hipMemcpyDeviceToHost))

-       copy(stream)
+       #host_array_pinned = hip_check(hip.hipHostMalloc(cpu_data.size * cpu_data.itemsize, hip.hipHostMallocDefault))
+       #device_pointer = hip_check(hip.hipHostGetDevicePointer(host_array_pinned,hip.hipHostMallocDefault))

+       copy_download = {
+           'srcXInBytes': int(x)*np.float32().itemsize,
+           'srcY': int(y),
+           'srcMemoryType': hip.hipMemoryType.hipMemoryTypeDevice,#hipMemoryTypeManaged
+           'srcDevice': self.data,
+           'srcPitch': self.data.shape[0]*np.float32().itemsize,
+
+           'dstXInBytes': 0,
+           'dstY': 0,
+           'dstMemoryType': hip.hipMemoryType.hipMemoryTypeHost,
+           'dstHost': cpu_data, #device_pointer,
+           'dstPitch': cpu_data.strides[0],
+
+           'WidthInBytes': int(nx)*cpu_data.itemsize,
+           'Height': int(ny)
+       }
+
+       # Perform the copy back to host
+       Copy = hip.hip_Memcpy2D(**copy_download)
+
+       #err = hip.hipMemcpyParam2D(Copy)
+       err = hip.hipMemcpyParam2DAsync(Copy, stream)
+       if err is None:
+           print("--download - DtoH: Failed to copy 2D data to Host")
+           print("--I stop:", err)
+           exit()
+
+       #copy(stream)

-       if asynch==False:
-           stream.synchronize()
+       #stream.synchronize()
+       hip_check(hip.hipStreamSynchronize(stream))
        return cpu_data
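The `copy_download` dict above mirrors the C `hip_Memcpy2D` struct field-for-field. A condensed sketch of the same strided device-to-host copy, with hypothetical sizes, assuming `data` is a configured `DeviceArray` and `stream` a HIP stream as elsewhere in this file:

```python
import numpy as np
from hip import hip

nx, ny, x, y = 64, 64, 2, 2             # hypothetical interior size, halo offset
cpu_data = np.empty((ny, nx), dtype=np.float32)

params = hip.hip_Memcpy2D(
    srcXInBytes=x * np.float32().itemsize, srcY=y,
    srcMemoryType=hip.hipMemoryType.hipMemoryTypeDevice,
    srcDevice=data,
    srcPitch=(nx + 2 * x) * np.float32().itemsize,  # bytes per source row
    dstXInBytes=0, dstY=0,
    dstMemoryType=hip.hipMemoryType.hipMemoryTypeHost,
    dstHost=cpu_data, dstPitch=cpu_data.strides[0],
    WidthInBytes=nx * cpu_data.itemsize, Height=ny)

hip_check(hip.hipMemcpyParam2DAsync(params, stream))
hip_check(hip.hipStreamSynchronize(stream))
```

Two details in the diff worth flagging: `srcPitch` there is taken from `self.data.shape[0]` (the row count), while the pitch of a C-ordered array is bytes per row (`shape[1] * itemsize`), so it appears to rely on the array being square; and `hipMemcpyParam2DAsync` returns a `(hipError_t,)` tuple, never `None`, so the `if err is None` failure branch can never fire — wrapping the call in `hip_check` would surface errors instead.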
@@ -652,31 +659,61 @@ class CudaArray2D:
        assert(x+nx <= self.nx + 2*self.x_halo)
        assert(y+ny <= self.ny + 2*self.y_halo)

+       #Cuda
+       """
        #Create copy object from device to host
        #Well this copy from src:host to dst:device AND NOT from device to host
-       #copy = cuda.Memcpy2D()
-       #copy.set_dst_device(self.data.gpudata)
-       #copy.set_src_host(cpu_data)
+       copy = cuda.Memcpy2D()
+       copy.set_dst_device(self.data.gpudata)
+       copy.set_src_host(cpu_data)

        #Set offsets and pitch of source
-       #copy.dst_x_in_bytes = int(x)*self.data.strides[1]
-       #copy.dst_y = int(y)
-       #copy.dst_pitch = self.data.strides[0]
+       copy.dst_x_in_bytes = int(x)*self.data.strides[1]
+       copy.dst_y = int(y)
+       copy.dst_pitch = self.data.strides[0]

        #Set width in bytes to copy for each row and
        #number of rows to copy
-       #copy.width_in_bytes = int(nx)*cpu_data.itemsize
-       #copy.height = int(ny)
+       copy.width_in_bytes = int(nx)*cpu_data.itemsize
+       copy.height = int(ny)
+       """

-       #copy from host de device
-       num_bytes = cpu_data.size * cpu_data.itemsize
-       self.data = hip_check(hip.hipMalloc(num_bytes)).configure(
-           typestr="float32",shape=cpu_data.shape)
-       #hip.hipMemcpy(dst, src, unsigned long sizeBytes, kind)
-       copy = hip_check(hip.hipMemcpy(self.data,cpu_data,num_bytes,hip.hipMemcpyKind.hipMemcpyHostToDevice))
-
-       copy(stream)
+       #Copy from host to device
+
+       #host_array_pinned = hip_check(hip.hipHostMalloc(cpu_data.size * cpu_data.itemsize, hip.hipHostMallocDefault))
+       #device_pointer = hip_check(hip.hipHostGetDevicePointer(host_array_pinned,hip.hipHostMallocDefault))
+
+       copy_upload = {
+           'srcXInBytes': 0,
+           'srcY': 0,
+           'srcMemoryType': hip.hipMemoryType.hipMemoryTypeHost,
+           'srcHost': cpu_data, #device_pointer
+           'srcPitch': cpu_data.strides[0], # assuming float32 (4 bytes)
+
+           'dstXInBytes': int(x)*np.float32().itemsize,
+           'dstY': int(y),
+           'dstMemoryType': hip.hipMemoryType.hipMemoryTypeDevice, #hipMemoryTypeManaged
+           'dstDevice': self.data,
+           'dstPitch': self.data.shape[0]*np.float32().itemsize,
+
+           'WidthInBytes': int(nx)*cpu_data.itemsize,
+           'Height': int(ny)
+       }
+
+       # Perform the copy HtoD
+       Copy = hip.hip_Memcpy2D(**copy_upload)
+
+       #err = hip.hipMemcpyParam2D(Copy)
+       err = hip.hipMemcpyParam2DAsync(Copy, stream)
+
+       if err is None:
+           print("--Upload - HtoD: Failed to copy 2D data to Device")
+           print("--I stop:", err)
+           exit()
+
+       #copy(stream)
@@ -704,15 +741,12 @@ class CudaArray3D:
        #Should perhaps use pycuda.driver.mem_alloc_data.pitch() here
        #self.data = pycuda.gpuarray.zeros((nz_halo, ny_halo, nx_halo), dtype)

-       self.data_h = np.zeros((nz_halo, ny_halo, nx_halo), dtype="float32")
-       num_bytes = self.data_h.size * self.data_h.itemsize
-
+       """
        num_bytes = nz_halo*ny_halo*nx_halo * np.float32().itemsize
        # init device array and upload host data
        self.data = hip_check(hip.hipMalloc(num_bytes)).configure(
            typestr="float32",shape=(nz_halo, ny_halo, nx_halo))

        # copy data from host to device
        hip_check(hip.hipMemcpy(self.data,self.data_h,num_bytes,hip.hipMemcpyKind.hipMemcpyHostToDevice))
+       """

        #For returning to download
        #self.memorypool = PageLockedMemoryPool()
@@ -726,47 +760,84 @@ class CudaArray3D:
        assert cpu_data.itemsize == 4, "Wrong size of data type"
        assert not np.isfortran(cpu_data), "Wrong datatype (Fortran, expected C)"

+       #Cuda
+       """
        #Create copy object from host to device
-       #copy = cuda.Memcpy3D()
-       #copy.set_src_host(cpu_data)
-       #copy.set_dst_device(self.data.gpudata)
+       copy = cuda.Memcpy3D()
+       copy.set_src_host(cpu_data)
+       copy.set_dst_device(self.data.gpudata)

        #Set offsets of destination
-       #x_offset = (nx_halo - cpu_data.shape[2]) // 2
-       #y_offset = (ny_halo - cpu_data.shape[1]) // 2
-       #z_offset = (nz_halo - cpu_data.shape[0]) // 2
-       #copy.dst_x_in_bytes = x_offset*self.data.strides[1]
-       #copy.dst_y = y_offset
-       #copy.dst_z = z_offset
+       x_offset = (nx_halo - cpu_data.shape[2]) // 2
+       y_offset = (ny_halo - cpu_data.shape[1]) // 2
+       z_offset = (nz_halo - cpu_data.shape[0]) // 2
+       copy.dst_x_in_bytes = x_offset*self.data.strides[1]
+       copy.dst_y = y_offset
+       copy.dst_z = z_offset

        #Set pitch of destination
-       #copy.dst_pitch = self.data.strides[0]
+       copy.dst_pitch = self.data.strides[0]

        #Set width in bytes to copy for each row and
        #number of rows to copy
-       #width = max(self.nx, cpu_data.shape[2])
-       #height = max(self.ny, cpu_data.shape[1])
-       #depth = max(self.nz, cpu-data.shape[0])
-       #copy.width_in_bytes = width*cpu_data.itemsize
-       #copy.height = height
-       #copy.depth = depth
-
-       #copy from host to device
-       num_bytes = cpu_data.size * cpu_data.itemsize
-       self.data = hip_check(hip.hipMalloc(num_bytes)).configure(
-           typestr="float32",shape=cpu_data.shape)
-       #hip.hipMemcpy(dst, src, unsigned long sizeBytes, kind)
-       copy = hip_check(hip.hipMemcpy(self.data,cpu_data,num_bytes,hip.hipMemcpyKind.hipMemcpyHostToDevice))
+       width = max(self.nx, cpu_data.shape[2])
+       height = max(self.ny, cpu_data.shape[1])
+       depth = max(self.nz, cpu-data.shape[0])
+       copy.width_in_bytes = width*cpu_data.itemsize
+       copy.height = height
+       copy.depth = depth

        #Perform the copy
        copy(stream)

        #self.logger.debug("Buffer <%s> [%dx%d]: Allocated ", int(self.data.gpudata), self.nx, self.ny)
+       """

+       #copy from host to device
+       #src
+       host_array_pinned = hip_check(hip.hipHostMalloc(cpu_data.size * cpu_data.itemsize, hip.hipHostMallocDefault))
+       src_ptr = hip_check(hip.hipHostGetDevicePointer(host_array_pinned,hip.hipHostMallocDefault))
+       #src_ptr = hip.hipPitchedPtr()
+
+       #dst
+       # Allocate 3D pitched memory on the device
+       self.data = hip.hipPitchedPtr()
+       c_extent = hip.hipExtent(nx_halo*np.float32().itemsize, ny_halo, nz_halo)
+       #hip.hipMalloc3D(pitchedDevPtr-OUT, extent-IN)
+       err, = hip.hipMalloc3D(self.data, c_extent)
+       dst_pitch = nx_halo * np.float32().itemsize
+
+       #include offset: do we need make_hipPitchedPtr
+       x_offset = (nx_halo - cpu_data.shape[2]) // 2
+       y_offset = (ny_halo - cpu_data.shape[1]) // 2
+       z_offset = (nz_halo - cpu_data.shape[0]) // 2
+
+       if err != hip.hipError_t.hipSuccess:
+           raise RuntimeError(f"Error from hipMalloc3D: {hip.hipGetErrorString(err)}")
+
+       copy_upload = {
+           'srcPos': hip.hipPos(0, 0, 0),
+           'srcPtr': src_ptr,
+           'dstPos': hip.hipPos(0, 0, 0),
+           'dstPtr': self.data,
+           'extent': c_extent,
+           'kind': hip.hipMemcpyKind.hipMemcpyHostToDevice
+       }
+
+       # Perform the copy
+       copy = hip.hipMemcpy3DParms(**copy_upload)
+       err = hip.hipMemcpy3DAsync(copy, stream)
+       #copy = hip_check(hip.hipMemcpyAsync(self.data,cpu_data,num_bytes,hip.hipMemcpyKind.hipMemcpyHostToDevice,stream))

        #self.logger.debug("Buffer <%s> [%dx%d]: Allocated ", int(self.data), self.nx, self.ny)


    def __del__(self, *args):
-       #self.logger.debug("Buffer <%s> [%dx%d]: Releasing ", int(self.data.gpudata), self.nx, self.ny)
-       self.data.gpudata.free()
+       #self.logger.debug("Buffer <%s> [%dx%d]: Releasing ", int(self.data), self.nx, self.ny)
+       #self.data.gpudata.free()
+       hip_check(hip.hipFree(self.data))
+       #hip_check(hip.hipFreeAsync(self.data, self.stream))
        self.data = None

    """
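For the 3D case the port switches to `hipMalloc3D`, which returns pitched memory described by a `hipPitchedPtr`, and drives the copy through a `hipMemcpy3DParms` struct. A sketch of that allocate-then-copy sequence with hypothetical extents, where `src_ptr` (a pitched host/device pointer) and `stream` are assumed to exist as in the hunk above:

```python
import numpy as np
from hip import hip

nx_halo, ny_halo, nz_halo = 68, 68, 4   # hypothetical padded dimensions

# Describe the region: width is in BYTES, height/depth in elements.
extent = hip.hipExtent(nx_halo * np.float32().itemsize, ny_halo, nz_halo)

dev_ptr = hip.hipPitchedPtr()
err, = hip.hipMalloc3D(dev_ptr, extent)   # fills dev_ptr in place
if err != hip.hipError_t.hipSuccess:
    raise RuntimeError(f"hipMalloc3D failed: {err}")

# A 3D copy is described by hipMemcpy3DParms, then run asynchronously.
params = hip.hipMemcpy3DParms(
    srcPos=hip.hipPos(0, 0, 0), srcPtr=src_ptr,
    dstPos=hip.hipPos(0, 0, 0), dstPtr=dev_ptr,
    extent=extent, kind=hip.hipMemcpyKind.hipMemcpyHostToDevice)
err = hip.hipMemcpy3DAsync(params, stream)
```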
@@ -779,31 +850,35 @@ class CudaArray3D:
        cpu_data = np.empty((self.nz, self.ny, self.nx), dtype=np.float32)
        #cpu_data = self.memorypool.allocate((self.nz, self.ny, self.nx), dtype=np.float32)

+       #Cuda
+       """
        #Create copy object from device to host
-       #copy = cuda.Memcpy2D()
-       #copy.set_src_device(self.data.gpudata)
-       #copy.set_dst_host(cpu_data)
+       copy = cuda.Memcpy2D()
+       copy.set_src_device(self.data.gpudata)
+       copy.set_dst_host(cpu_data)

        #Set offsets and pitch of source
-       #copy.src_x_in_bytes = self.x_halo*self.data.strides[1]
-       #copy.src_y = self.y_halo
-       #copy.src_z = self.z_halo
-       #copy.src_pitch = self.data.strides[0]
+       copy.src_x_in_bytes = self.x_halo*self.data.strides[1]
+       copy.src_y = self.y_halo
+       copy.src_z = self.z_halo
+       copy.src_pitch = self.data.strides[0]

        #Set width in bytes to copy for each row and
        #number of rows to copy
-       #copy.width_in_bytes = self.nx*cpu_data.itemsize
-       #copy.height = self.ny
-       #copy.depth = self.nz
+       copy.width_in_bytes = self.nx*cpu_data.itemsize
+       copy.height = self.ny
+       copy.depth = self.nz

+       copy(stream)
+       """
        #copy from device to host
        num_bytes = cpu_data.size * cpu_data.itemsize
        #hip.hipMemcpy(dst, src, unsigned long sizeBytes, kind)
-       copy = hip_check(hip.hipMemcpy(cpu_data,self.data,num_bytes,hip.hipMemcpyKind.hipMemcpyDeviceToHost))
+       copy = hip_check(hip.hipMemcpyAsync(cpu_data,self.data,num_bytes,hip.hipMemcpyKind.hipMemcpyDeviceToHost,stream))

-       copy(stream)
-       if asynch==False:
-           stream.synchronize()
+       #stream.synchronize()
+       hip_check(hip.hipStreamSynchronize(stream))

        return cpu_data
@@ -818,9 +893,11 @@ class ArakawaA2D:
        """
        self.logger = logging.getLogger(__name__)
        self.gpu_variables = []
+
        for cpu_variable in cpu_variables:
            self.gpu_variables += [CudaArray2D(stream, nx, ny, halo_x, halo_y, cpu_variable)]
+

    def __getitem__(self, key):
        assert type(key) == int, "Indexing is int based"
        if (key > len(self.gpu_variables) or key < 0):
@@ -839,15 +916,17 @@ class ArakawaA2D:
            assert i < len(self.gpu_variables), "Variable {:d} is out of range".format(i)
            cpu_variables += [self.gpu_variables[i].download(stream, asynch=True)]

+       #print("--FIN: sum:", np.array(cpu_variables).sum())
+
        #stream.synchronize()
        hip_check(hip.hipStreamSynchronize(stream))
        return cpu_variables

    #hipblas
    def sum_hipblas(self, num_elements, data):
        num_bytes_r = np.dtype(np.float32).itemsize
        result_d = hip_check(hip.hipMalloc(num_bytes_r))
-       result_h = np.zeros(1, dtype=np.float32)
-       print("--bytes:", num_bytes_r)
+       result_h0 = np.zeros(1, dtype=np.float32)

        # call hipblasSaxpy + initialization
        handle = hip_check(hipblas.hipblasCreate())
@@ -859,10 +938,12 @@ class ArakawaA2D:
        hip_check(hipblas.hipblasDestroy(handle))

        # copy result (stored in result_d) back to host (store in result_h)
-       hip_check(hip.hipMemcpy(result_h,result_d,num_bytes_r,hip.hipMemcpyKind.hipMemcpyDeviceToHost))
+       hip_check(hip.hipMemcpy(result_h0,result_d,num_bytes_r,hip.hipMemcpyKind.hipMemcpyDeviceToHost))
+
+       result_h = result_h0[0]

        # clean up
-       hip_check(hip.hipFree(data))
+       #hip_check(hip.hipFree(data))
        return result_h

    def check(self):
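The actual reduction between `hipblasCreate` and `hipblasDestroy` falls outside this hunk, and the surviving comment mentions Saxpy while the method computes a sum. A sketch of how such a device-side sum could be done with `hipblasSasum` (sum of absolute values) — the choice of `Sasum` and the pointer-mode call are assumptions, not confirmed by this diff:

```python
import numpy as np
from hip import hip, hipblas

def sum_hipblas(num_elements, data):
    """Sketch: sum |x_i| of a float32 device array via hipBLAS."""
    result_d = hip_check(hip.hipMalloc(np.float32().itemsize))
    result_h0 = np.zeros(1, dtype=np.float32)

    handle = hip_check(hipblas.hipblasCreate())
    # Write the scalar result to a device pointer rather than the
    # default host pointer.
    hip_check(hipblas.hipblasSetPointerMode(
        handle, hipblas.hipblasPointerMode_t.HIPBLAS_POINTER_MODE_DEVICE))
    hip_check(hipblas.hipblasSasum(handle, num_elements, data, 1, result_d))
    hip_check(hipblas.hipblasDestroy(handle))

    hip_check(hip.hipMemcpy(result_h0, result_d, result_h0.itemsize,
                            hip.hipMemcpyKind.hipMemcpyDeviceToHost))
    hip_check(hip.hipFree(result_d))
    return result_h0[0]
```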
@@ -872,8 +953,8 @@ class ArakawaA2D:
        for i, gpu_variable in enumerate(self.gpu_variables):
            #compute sum with hipblas
            #var_sum = pycuda.gpuarray.sum(gpu_variable.data).get()
-           var_sum = self.sum_hipblas(gpu_variable.ny,gpu_variable.data)
+           var_sum = self.sum_hipblas(gpu_variable.data.size,gpu_variable.data)
            #print(f"GPU: Sum for column {i}: {var_sum}")

            self.logger.debug("Data %d with size [%d x %d] has average %f", i, gpu_variable.nx, gpu_variable.ny, var_sum / (gpu_variable.nx * gpu_variable.ny))
            assert np.isnan(var_sum) == False, "Data contains NaN values!"
@@ -86,7 +86,8 @@ class CudaContext(object):
        if device is None:
            device = 0

-       hip_check(hip.hipSetDevice(device))
+       num_gpus = hip_check(hip.hipGetDeviceCount())
+       hip.hipSetDevice(device)
        props = hip.hipDeviceProp_t()
        hip_check(hip.hipGetDeviceProperties(props,device))
        arch = props.gcnArchName
@@ -97,9 +98,12 @@ class CudaContext(object):
-       # Allocate memory to store the PCI BusID
-       pciBusId = ctypes.create_string_buffer(64)
        # PCI Bus Id
-       hip_check(hip.hipDeviceGetPCIBusId(pciBusId, 64, device))
+       #hip_check(hip.hipDeviceGetPCIBusId(pciBusId, 64, device))
+       pciBusId = hip_check(hip.hipDeviceGetPCIBusId(64, device))

-       self.logger.info("Using device %d/%d with --arch: '%s', --BusID: %s ", device, hip_check(hip.hipGetDeviceCount()),arch,pciBusId.value.decode('utf-8')[5:7])
+       #self.logger.info("Using device %d/%d with --arch: '%s', --BusID: %s ", device, num_gpus,arch,pciBusId.value.decode('utf-8')[5:7])
+       self.logger.info("Using device %d/%d with --arch: '%s', --BusID: %s ", device, num_gpus,arch,pciBusId[5:7])
        #self.logger.debug(" => compute capability: %s", str(self.cuda_device.compute_capability()))
        self.logger.debug(" => compute capability: %s", hip_check(hip.hipDeviceComputeCapability(device)))
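The corrected `hipDeviceGetPCIBusId` call illustrates a hip-python quirk: the output buffer is managed by the binding, so only the length and device index are passed and the ID comes back as the result. A sketch of the device-query sequence this hunk converges on:

```python
from hip import hip

device = 0
num_gpus = hip_check(hip.hipGetDeviceCount())
hip_check(hip.hipSetDevice(device))

# Properties are filled into a struct passed by reference.
props = hip.hipDeviceProp_t()
hip_check(hip.hipGetDeviceProperties(props, device))
arch = props.gcnArchName            # e.g. something like b'gfx90a:...'

# hip-python allocates and returns the PCI bus ID string itself.
pci_bus_id = hip_check(hip.hipDeviceGetPCIBusId(64, device))
major, minor = hip_check(hip.hipDeviceComputeCapability(device))
```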
@@ -116,6 +120,7 @@ class CudaContext(object):
        self.logger.debug(" => Total memory: %d MB available", int(total/(1024*1024)))

+       ##self.logger.info("Created context handle <%s>", str(self.cuda_context.handle))
        self.logger.info("Created context handle <%s>", str(self.cuda_context))

        #Create cache dir for cubin files
        self.cache_path = os.path.join(self.module_path, "cuda_cache")
@@ -125,42 +130,51 @@ class CudaContext(object):
            self.logger.info("Using CUDA cache dir %s", self.cache_path)

        self.autotuner = None
+       """
        if (autotuning):
            self.logger.info("Autotuning enabled. It may take several minutes to run the code the first time: have patience")
            self.autotuner = Autotuner.Autotuner()
+       """

    def __del__(self, *args):
-       self.logger.info("Cleaning up CUDA context handle <%s>", str(self.cuda_context.handle))
+       #self.logger.info("Cleaning up CUDA context handle <%s>", str(self.cuda_context.handle))
+       #self.logger.info("Cleaning up CUDA context handle <%s>", str(self.cuda_context))
+       """
        # Loop over all contexts in stack, and remove "this"
        other_contexts = []
        #while (cuda.Context.get_current() != None):
        while (hip.hipCtxGetCurrent() != None):
            #context = cuda.Context.get_current()
            context = hip_check(hip.hipCtxGetCurrent())
-           if (context.handle != self.cuda_context.handle):
-               self.logger.debug("<%s> Popping <%s> (*not* ours)", str(self.cuda_context.handle), str(context.handle))
+           #if (context.handle != self.cuda_context.handle):
+           if (context != self.cuda_context):
+               #self.logger.debug("<%s> Popping <%s> (*not* ours)", str(self.cuda_context.handle), str(context.handle))
+               #self.logger.debug("<%s> Popping <%s> (*not* ours)", str(self.cuda_context), str(context))
                other_contexts = [context] + other_contexts
                #cuda.Context.pop()
                hip.hipCtxPopCurrent()
            else:
-               self.logger.debug("<%s> Popping <%s> (ours)", str(self.cuda_context.handle), str(context.handle))
+               #self.logger.debug("<%s> Popping <%s> (ours)", str(self.cuda_context.handle), str(context.handle))
+               self.logger.debug("<%s> Popping <%s> (ours)", str(self.cuda_context), str(context))
                #cuda.Context.pop()
                hip.hipCtxPopCurrent()

        # Add all the contexts we popped that were not our own
        for context in other_contexts:
-           self.logger.debug("<%s> Pushing <%s>", str(self.cuda_context.handle), str(context.handle))
+           #self.logger.debug("<%s> Pushing <%s>", str(self.cuda_context.handle), str(context.handle))
+           self.logger.debug("<%s> Pushing <%s>", str(self.cuda_context), str(context))
            #cuda.Context.push(context)
            hip_check(hip.hipCtxPushCurrent(context))

-       self.logger.debug("<%s> Detaching", str(self.cuda_context.handle))
-       self.cuda_context.detach()
+       #self.logger.debug("<%s> Detaching", str(self.cuda_context.handle))
+       self.logger.debug("<%s> Detaching", str(self.cuda_context))
+       #self.cuda_context.detach()
+       hip_check(hip.hipCtxDestroy(self.cuda_context))
+       """

    def __str__(self):
-       return "CudaContext id " + str(self.cuda_context.handle)
+       #return "CudaContext id " + str(self.cuda_context.handle)
+       return "CudaContext id " + str(self.cuda_context)


    def hash_kernel(kernel_filename, include_dirs):
@@ -283,33 +297,15 @@ class CudaContext(object):
                with io.open(cached_kernel_filename + ".txt", "w") as file:
                    file.write(kernel_string)

+           """cuda
            with Common.Timer("compiler") as timer:

                import warnings
                with warnings.catch_warnings():
                    warnings.filterwarnings("ignore", message="The CUDA compiler succeeded, but said the following:\nkernel.cu", category=UserWarning)

                    cubin = cuda_compiler.compile(kernel_string, include_dirs=include_dirs, cache_dir=False, **compile_args)
-                   #module = cuda.module_from_buffer(cubin, message_handler=cuda_compile_message_handler, **jit_compile_args)
-
-                   #cubin = hip_check(hiprtc.hiprtcCreateProgram(kernel_string.encode(), b"Kernel-Name", 0, [], []))
-                   props = hip.hipDeviceProp_t()
-                   hip_check(hip.hipGetDeviceProperties(props,0))
-                   arch = props.gcnArchName
-
-                   print(f"Compiling kernel for {arch}")
-
-                   cflags = [b"--offload-arch="+arch]
-                   err, = hiprtc.hiprtcCompileProgram(cubin, len(cflags), cflags)
-                   if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
-                       log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(cubin))
-                       log = bytearray(log_size)
-                       hip_check(hiprtc.hiprtcGetProgramLog(cubin, log))
-                       raise RuntimeError(log.decode())
-                   code_size = hip_check(hiprtc.hiprtcGetCodeSize(cubin))
-                   code = bytearray(code_size)
-                   hip_check(hiprtc.hiprtcGetCode(cubin, code))
-                   module = hip_check(hip.hipModuleLoadData(code))
+                   module = cuda.module_from_buffer(cubin, message_handler=cuda_compile_message_handler, **jit_compile_args)

            if (self.use_cache):
                with io.open(cached_kernel_filename, "wb") as file:
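The hiprtc sequence deleted here (and reintroduced in the simulator class further down) is the standard runtime-compilation pipeline. A compact sketch of that pipeline, assuming `kernel_source` holds HIP C++ source and `hip_check` as before (the kernel name is a placeholder):

```python
from hip import hip, hiprtc

prog = hip_check(hiprtc.hiprtcCreateProgram(
    kernel_source.encode(), b"kernel_name", 0, [], []))

# Target the arch of the present device.
props = hip.hipDeviceProp_t()
hip_check(hip.hipGetDeviceProperties(props, 0))
cflags = [b"--offload-arch=" + props.gcnArchName]

err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
    # On failure, the build log says what went wrong.
    log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
    log = bytearray(log_size)
    hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
    raise RuntimeError(log.decode())

# Extract the compiled code object and load it as a module.
code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
code = bytearray(code_size)
hip_check(hiprtc.hiprtcGetCode(prog, code))
module = hip_check(hip.hipModuleLoadData(code))
```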
@@ -317,6 +313,7 @@ class CudaContext(object):

            self.modules[kernel_hash] = module
            return module
+           """

    """
    Clears the kernel cache (useful for debugging & development)
@@ -330,4 +327,5 @@ class CudaContext(object):
    Synchronizes all streams etc
    """
    def synchronize(self):
-       self.cuda_context.synchronize()
+       #self.cuda_context.synchronize()
+       hip_check(hip.hipCtxSynchronize())
(deleted file, 272 lines: the original PyCUDA-based CUDA context module)

@@ -1,272 +0,0 @@
# -*- coding: utf-8 -*-

"""
This python module implements Cuda context handling

Copyright (C) 2018 SINTEF ICT

This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with this program.  If not, see <http://www.gnu.org/licenses/>.
"""


import os

import numpy as np
import time
import re
import io
import hashlib
import logging
import gc

import pycuda.compiler as cuda_compiler
import pycuda.gpuarray
import pycuda.driver as cuda

from GPUSimulators import Autotuner, Common


"""
Class which keeps track of the CUDA context and some helper functions
"""
class CudaContext(object):

    def __init__(self, device=None, context_flags=None, use_cache=True, autotuning=True):
        """
        Create a new CUDA context
        Set device to an id or pci_bus_id to select a specific GPU
        Set context_flags to cuda.ctx_flags.SCHED_BLOCKING_SYNC for a blocking context
        """
        self.use_cache = use_cache
        self.logger = logging.getLogger(__name__)
        self.modules = {}

        self.module_path = os.path.dirname(os.path.realpath(__file__))

        #Initialize cuda (must be first call to PyCUDA)
        cuda.init(flags=0)

        self.logger.info("PyCUDA version %s", str(pycuda.VERSION_TEXT))

        #Print some info about CUDA
        self.logger.info("CUDA version %s", str(cuda.get_version()))
        self.logger.info("Driver version %s", str(cuda.get_driver_version()))

        if device is None:
            device = 0

        self.cuda_device = cuda.Device(device)
        self.logger.info("Using device %d/%d '%s' (%s) GPU", device, cuda.Device.count(), self.cuda_device.name(), self.cuda_device.pci_bus_id())
        self.logger.debug(" => compute capability: %s", str(self.cuda_device.compute_capability()))

        # Create the CUDA context
        if context_flags is None:
            context_flags=cuda.ctx_flags.SCHED_AUTO

        self.cuda_context = self.cuda_device.make_context(flags=context_flags)

        free, total = cuda.mem_get_info()
        self.logger.debug(" => memory: %d / %d MB available", int(free/(1024*1024)), int(total/(1024*1024)))

        self.logger.info("Created context handle <%s>", str(self.cuda_context.handle))

        #Create cache dir for cubin files
        self.cache_path = os.path.join(self.module_path, "cuda_cache")
        if (self.use_cache):
            if not os.path.isdir(self.cache_path):
                os.mkdir(self.cache_path)
            self.logger.info("Using CUDA cache dir %s", self.cache_path)

        self.autotuner = None
        if (autotuning):
            self.logger.info("Autotuning enabled. It may take several minutes to run the code the first time: have patience")
            self.autotuner = Autotuner.Autotuner()


    def __del__(self, *args):
        self.logger.info("Cleaning up CUDA context handle <%s>", str(self.cuda_context.handle))

        # Loop over all contexts in stack, and remove "this"
        other_contexts = []
        while (cuda.Context.get_current() != None):
            context = cuda.Context.get_current()
            if (context.handle != self.cuda_context.handle):
                self.logger.debug("<%s> Popping <%s> (*not* ours)", str(self.cuda_context.handle), str(context.handle))
                other_contexts = [context] + other_contexts
                cuda.Context.pop()
            else:
                self.logger.debug("<%s> Popping <%s> (ours)", str(self.cuda_context.handle), str(context.handle))
                cuda.Context.pop()

        # Add all the contexts we popped that were not our own
        for context in other_contexts:
            self.logger.debug("<%s> Pushing <%s>", str(self.cuda_context.handle), str(context.handle))
            cuda.Context.push(context)

        self.logger.debug("<%s> Detaching", str(self.cuda_context.handle))
        self.cuda_context.detach()


    def __str__(self):
        return "CudaContext id " + str(self.cuda_context.handle)


    def hash_kernel(kernel_filename, include_dirs):
        # Generate a kernel ID for our caches
        num_includes = 0
        max_includes = 100
        kernel_hasher = hashlib.md5()
        logger = logging.getLogger(__name__)

        # Loop over file and includes, and check if something has changed
        files = [kernel_filename]
        while len(files):

            if (num_includes > max_includes):
                raise("Maximum number of includes reached - circular include in {:}?".format(kernel_filename))

            filename = files.pop()

            #logger.debug("Hashing %s", filename)

            modified = os.path.getmtime(filename)

            # Open the file
            with io.open(filename, "r") as file:

                # Search for #inclue <something> and also hash the file
                file_str = file.read()
                kernel_hasher.update(file_str.encode('utf-8'))
                kernel_hasher.update(str(modified).encode('utf-8'))

                #Find all includes
                includes = re.findall('^\W*#include\W+(.+?)\W*$', file_str, re.M)

            # Loop over everything that looks like an include
            for include_file in includes:

                #Search through include directories for the file
                file_path = os.path.dirname(filename)
                for include_path in [file_path] + include_dirs:

                    # If we find it, add it to list of files to check
                    temp_path = os.path.join(include_path, include_file)
                    if (os.path.isfile(temp_path)):
                        files = files + [temp_path]
                        num_includes = num_includes + 1 #For circular includes...
                        break

        return kernel_hasher.hexdigest()


    """
    Reads a text file and creates an OpenCL kernel from that
    """
    def get_module(self, kernel_filename,
                   include_dirs=[], \
                   defines={}, \
                   compile_args={'no_extern_c', True}, jit_compile_args={}):
        """
        Helper function to print compilation output
        """
        def cuda_compile_message_handler(compile_success_bool, info_str, error_str):
            self.logger.debug("Compilation returned %s", str(compile_success_bool))
            if info_str:
                self.logger.debug("Info: %s", info_str)
            if error_str:
                self.logger.debug("Error: %s", error_str)

        kernel_filename = os.path.normpath(kernel_filename)
        kernel_path = os.path.abspath(os.path.join(self.module_path, kernel_filename))
        #self.logger.debug("Getting %s", kernel_filename)

        # Create a hash of the kernel options
        options_hasher = hashlib.md5()
        options_hasher.update(str(defines).encode('utf-8') + str(compile_args).encode('utf-8'));
        options_hash = options_hasher.hexdigest()

        # Create hash of kernel souce
        source_hash = CudaContext.hash_kernel( \
            kernel_path, \
            include_dirs=[self.module_path] + include_dirs)

        # Create final hash
        root, ext = os.path.splitext(kernel_filename)
        kernel_hash = root \
            + "_" + source_hash \
            + "_" + options_hash \
            + ext
        cached_kernel_filename = os.path.join(self.cache_path, kernel_hash)

        # If we have the kernel in our hashmap, return it
        if (kernel_hash in self.modules.keys()):
            self.logger.debug("Found kernel %s cached in hashmap (%s)", kernel_filename, kernel_hash)
            return self.modules[kernel_hash]

        # If we have it on disk, return it
        elif (self.use_cache and os.path.isfile(cached_kernel_filename)):
            self.logger.debug("Found kernel %s cached on disk (%s)", kernel_filename, kernel_hash)

            with io.open(cached_kernel_filename, "rb") as file:
                file_str = file.read()
                module = cuda.module_from_buffer(file_str, message_handler=cuda_compile_message_handler, **jit_compile_args)

            self.modules[kernel_hash] = module
            return module

        # Otherwise, compile it from source
        else:
            self.logger.debug("Compiling %s (%s)", kernel_filename, kernel_hash)

            #Create kernel string
            kernel_string = ""
            for key, value in defines.items():
                kernel_string += "#define {:s} {:s}\n".format(str(key), str(value))
            kernel_string += '#include "{:s}"'.format(os.path.join(self.module_path, kernel_filename))
            if (self.use_cache):
                cached_kernel_dir = os.path.dirname(cached_kernel_filename)
                if not os.path.isdir(cached_kernel_dir):
                    os.mkdir(cached_kernel_dir)
                with io.open(cached_kernel_filename + ".txt", "w") as file:
                    file.write(kernel_string)


            with Common.Timer("compiler") as timer:
                import warnings
                with warnings.catch_warnings():
                    warnings.filterwarnings("ignore", message="The CUDA compiler succeeded, but said the following:\nkernel.cu", category=UserWarning)
                    cubin = cuda_compiler.compile(kernel_string, include_dirs=include_dirs, cache_dir=False, **compile_args)
                module = cuda.module_from_buffer(cubin, message_handler=cuda_compile_message_handler, **jit_compile_args)
            if (self.use_cache):
                with io.open(cached_kernel_filename, "wb") as file:
                    file.write(cubin)

            self.modules[kernel_hash] = module
            return module

    """
    Clears the kernel cache (useful for debugging & development)
    """
    def clear_kernel_cache(self):
        self.logger.debug("Clearing cache")
        self.modules = {}
        gc.collect()

    """
    Synchronizes all streams etc
    """
    def synchronize(self):
        self.cuda_context.synchronize()
@@ -19,6 +19,9 @@ You should have received a copy of the GNU General Public License
 along with this program.  If not, see <http://www.gnu.org/licenses/>.
 """

+import os
+import sys
+
 #Import packages we need
 from GPUSimulators import Simulator, Common
 from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
@@ -27,13 +30,21 @@ import ctypes

 #from pycuda import gpuarray
 from hip import hip,hiprtc
+from hip import hipblas



+def hip_check(call_result):
+    err = call_result[0]
+    result = call_result[1:]
+    if len(result) == 1:
+        result = result[0]
+    if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
+        raise RuntimeError(str(err))
+    elif (
+        isinstance(err, hiprtc.hiprtcResult)
+        and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
+    ):
+        raise RuntimeError(str(err))
+    return result

 """
 Class that solves the SW equations using the Forward-Backward linear scheme
@@ -56,20 +67,6 @@ class EE2D_KP07_dimsplit (BaseSimulator):
    p: pressure
    """

-   def hip_check(call_result):
-       err = call_result[0]
-       result = call_result[1:]
-       if len(result) == 1:
-           result = result[0]
-       if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
-           raise RuntimeError(str(err))
-       elif (
-           isinstance(err, hiprtc.hiprtcResult)
-           and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
-       ):
-           raise RuntimeError(str(err))
-       return result
-
    def __init__(self,
                 context,
                 rho, rho_u, rho_v, E,
@@ -94,133 +91,210 @@ class EE2D_KP07_dimsplit (BaseSimulator):
|
||||
self.gamma = np.float32(gamma)
|
||||
self.theta = np.float32(theta)
|
||||
|
||||
#Get kernels
|
||||
#module = context.get_module("cuda/EE2D_KP07_dimsplit.cu",
|
||||
# defines={
|
||||
# 'BLOCK_WIDTH': self.block_size[0],
|
||||
# 'BLOCK_HEIGHT': self.block_size[1]
|
||||
# },
|
||||
# compile_args={
|
||||
# 'no_extern_c': True,
|
||||
# 'options': ["--use_fast_math"],
|
||||
# },
|
||||
# jit_compile_args={})
|
||||
#self.kernel = module.get_function("KP07DimsplitKernel")
|
||||
#self.kernel.prepare("iiffffffiiPiPiPiPiPiPiPiPiPiiii")
|
||||
#
|
||||
kernel_file_path = os.path.abspath(os.path.join('cuda', 'EE2D_KP07_dimsplit.cu.hip'))
|
||||
|
||||
#Get cuda kernels
|
||||
""" Cuda
|
||||
module = context.get_module("cuda/EE2D_KP07_dimsplit.cu.hip",
|
||||
defines={
|
||||
'BLOCK_WIDTH': self.block_size[0],
|
||||
'BLOCK_HEIGHT': self.block_size[1]
|
||||
},
|
||||
compile_args={
|
||||
'no_extern_c': True,
|
||||
'options': ["--use_fast_math"],
|
||||
},
|
||||
jit_compile_args={})
|
||||
#compile and load to the device
|
||||
self.kernel = module.get_function("KP07DimsplitKernel")
|
||||
self.kernel.prepare("iiffffffiiPiPiPiPiPiPiPiPiPiiii")
|
||||
"""
|
||||
|
||||
current_dir = os.path.dirname(os.path.abspath(__file__))
|
||||
# Specify the relative path to the "cuda" directory
|
||||
cuda_dir = os.path.join(current_dir, 'cuda')
|
||||
|
||||
|
||||
#source code
|
||||
kernel_file_path = os.path.abspath(os.path.join(cuda_dir, 'EE2D_KP07_dimsplit.cu.hip'))
|
||||
with open(kernel_file_path, 'r') as file:
|
||||
kernel_source = file.read()
|
||||
        #headers
        #common.h
        header_file_path = os.path.abspath(os.path.join(cuda_dir, 'common.h'))
        with open(header_file_path, 'r') as file:
            header_common = file.read()

        #EulerCommon.h
        header_file_path = os.path.abspath(os.path.join(cuda_dir, 'EulerCommon.h'))
        with open(header_file_path, 'r') as file:
            header_EulerCommon = file.read()

        #limiters.h
        header_file_path = os.path.abspath(os.path.join(cuda_dir, 'limiters.h'))
        with open(header_file_path, 'r') as file:
            header_limiters = file.read()

        #hip.hiprtc.hiprtcCreateProgram(const char *src, const char *name, int numHeaders, headers, includeNames)
        prog = hip_check(hiprtc.hiprtcCreateProgram(
            kernel_source.encode(), b"KP07DimsplitKernel", 3,
            [header_common.encode(), header_EulerCommon.encode(), header_limiters.encode()],
            [b"common.h", b"EulerCommon.h", b"limiters.h"]))
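
        # Because hiprtc compiles from memory rather than from disk, every
        # header the kernel #includes must be handed over as a (source,
        # include-name) pair, as done above. A self-contained sketch of the
        # same pattern with a dummy header (all names are illustrative):
        #
        #   src = b'#include "defs.h"\nextern "C" __global__ void k() {}'
        #   hdr = b"#define N 42\n"
        #   demo = hip_check(hiprtc.hiprtcCreateProgram(src, b"k", 1,
        #                                               [hdr], [b"defs.h"]))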
        # hip_check has already raised on failure, so prog is valid here
        if prog is not None:
            print("--This is <EE2D_KP07_dimsplit.cu.hip>")
            print("--HIPRTC program created successfully")
            print()
        else:
            print("--Failed to create HIPRTC program")
            exit()
        #extract the arch of the device
        props = hip.hipDeviceProp_t()
        hip_check(hip.hipGetDeviceProperties(props, 0)) #only one device 0
        arch = props.gcnArchName

        print(f"Compiling kernel for {arch}")

        cflags = [b"--offload-arch="+arch, b"-O2",
                  b"-D BLOCK_WIDTH="+str(self.block_size[0]).encode(),
                  b"-D BLOCK_HEIGHT="+str(self.block_size[1]).encode()]
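
        # The -D flags above play the role of the defines={...} dict in the
        # old CUDA path. A hedged sketch of building them generically (the
        # helper name make_define_flags is illustrative, not part of the code):
        #
        #   def make_define_flags(defines):
        #       return [("-D %s=%s" % (k, v)).encode() for k, v in defines.items()]
        #   # make_define_flags({'BLOCK_WIDTH': 16}) -> [b"-D BLOCK_WIDTH=16"]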
        err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
        # On failure, fetch and report the build log
        if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
            log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
            log = bytearray(log_size)
            hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
            raise RuntimeError(log.decode())
        print("--Compilation:", err)
        print("--The program is compiled successfully")
        code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
        code = bytearray(code_size)
        hip_check(hiprtc.hiprtcGetCode(prog, code))

        #Load the code as a module
        self.module = hip_check(hip.hipModuleLoadData(code))

        #Get the device kernel named "KP07DimsplitKernel"
        self.kernel = hip_check(hip.hipModuleGetFunction(self.module, b"KP07DimsplitKernel"))

        print()
        print("--The device kernel *KP07DimsplitKernel* was created successfully--")
        print("--kernel", self.kernel)
        print()
        #Create data by uploading to device
        self.u0 = Common.ArakawaA2D(self.stream,
                        nx, ny,
                        2, 2,
                        [rho, rho_u, rho_v, E])
        self.u1 = Common.ArakawaA2D(self.stream,
                        nx, ny,
                        2, 2,
                        [None, None, None, None])
        #self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
        dt_x = np.min(self.dx / (np.abs(rho_u/rho) + np.sqrt(gamma*rho)))
        dt_y = np.min(self.dy / (np.abs(rho_v/rho) + np.sqrt(gamma*rho)))
        self.dt = min(dt_x, dt_y)

        #in HIP, the "DeviceArray" object doesn't have a 'fill' attribute,
        #so fill a host buffer with dt and copy it to the device instead
        #self.cfl_data.fill(self.dt, stream=self.stream)
        grid_dim_x, grid_dim_y, grid_dim_z = self.grid_size

        data_h = np.zeros((grid_dim_x, grid_dim_y), dtype=np.float32)
        num_bytes = data_h.size * data_h.itemsize
        data_h.fill(self.dt)

        self.cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
            typestr="float32", shape=(grid_dim_x, grid_dim_y))

        hip_check(hip.hipMemcpyAsync(self.cfl_data, data_h, num_bytes,
                  hip.hipMemcpyKind.hipMemcpyHostToDevice, self.stream))
        #an asynchronous memset would zero the region instead:
        #hip_check(hip.hipMemsetAsync(self.cfl_data, 0, num_bytes, self.stream))
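
        # The timestep above is the usual CFL bound
        # dt <= min(dx/(|u|+c), dy/(|v|+c)) reduced over all cells with np.min.
        # A tiny sketch with made-up 2x2 fields, just to show the reduction
        # pattern (all numbers are illustrative):
        #
        #   rho   = np.array([[1.0, 1.2], [0.9, 1.1]], dtype=np.float32)
        #   rho_u = np.array([[0.5, 0.2], [0.1, 0.4]], dtype=np.float32)
        #   dt_x  = np.min(0.01 / (np.abs(rho_u/rho) + np.sqrt(1.4*rho)))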

    def substep(self, dt, step_number, external=True, internal=True):
        self.substepDimsplit(0.5*dt, step_number, external, internal)

    def substepDimsplit(self, dt, substep, external, internal):
        #row pitch in bytes of each 2D field: shape[0] * sizeof(float32)
        u00_strides0 = self.u0[0].data.shape[0]*np.float32().itemsize
        u01_strides0 = self.u0[1].data.shape[0]*np.float32().itemsize
        u02_strides0 = self.u0[2].data.shape[0]*np.float32().itemsize
        u03_strides0 = self.u0[3].data.shape[0]*np.float32().itemsize

        u10_strides0 = self.u1[0].data.shape[0]*np.float32().itemsize
        u11_strides0 = self.u1[1].data.shape[0]*np.float32().itemsize
        u12_strides0 = self.u1[2].data.shape[0]*np.float32().itemsize
        u13_strides0 = self.u1[3].data.shape[0]*np.float32().itemsize
        if external and internal:
            #print("COMPLETE DOMAIN (dt=" + str(dt) + ")")

            """ Cuda
            self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
                    self.nx, self.ny,
                    self.dx, self.dy, dt,
                    self.g,
                    self.gamma,
                    self.theta,
                    substep,
                    self.boundary_conditions,
                    self.u0[0].data.gpudata, self.u0[0].data.strides[0],
                    self.u0[1].data.gpudata, self.u0[1].data.strides[0],
                    self.u0[2].data.gpudata, self.u0[2].data.strides[0],
                    self.u0[3].data.gpudata, self.u0[3].data.strides[0],
                    self.u1[0].data.gpudata, self.u1[0].data.strides[0],
                    self.u1[1].data.gpudata, self.u1[1].data.strides[0],
                    self.u1[2].data.gpudata, self.u1[2].data.strides[0],
                    self.u1[3].data.gpudata, self.u1[3].data.strides[0],
                    self.cfl_data.gpudata,
                    0, 0,
                    self.nx, self.ny)
            """
            #launch kernel
            #hip.hipModuleLaunchKernel(f, gridDimX, gridDimY, gridDimZ, blockDimX, blockDimY, blockDimZ, sharedMemBytes, stream, kernelParams, extra)

            #The grid/block arguments take 3 components x, y and z; in 2D, z=1.
            hip_check(
                hip.hipModuleLaunchKernel(
                    self.kernel,
                    *self.grid_size,  #grid
                    *self.block_size, #block
                    sharedMemBytes=0,
                    stream=self.stream,
                    kernelParams=None,
                    extra=(  # pass kernel's arguments
                        ctypes.c_int(self.nx), ctypes.c_int(self.ny),
                        ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
                        ctypes.c_float(self.g),
                        ctypes.c_float(self.gamma),
                        ctypes.c_float(self.theta),
                        ctypes.c_int(substep),
                        ctypes.c_int(self.boundary_conditions),
                        self.u0[0].data, ctypes.c_int(u00_strides0),
                        self.u0[1].data, ctypes.c_int(u01_strides0),
                        self.u0[2].data, ctypes.c_int(u02_strides0),
                        self.u0[3].data, ctypes.c_int(u03_strides0),
                        self.u1[0].data, ctypes.c_int(u10_strides0),
                        self.u1[1].data, ctypes.c_int(u11_strides0),
                        self.u1[2].data, ctypes.c_int(u12_strides0),
                        self.u1[3].data, ctypes.c_int(u13_strides0),
                        self.cfl_data,
                        ctypes.c_int(0), ctypes.c_int(0),
                        ctypes.c_int(self.nx), ctypes.c_int(self.ny),
                    )
                )
            )
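
            # The extra= tuple is the raw argument list of the kernel: each
            # scalar must be wrapped in the ctypes type matching the C
            # signature, in declaration order, while device allocations are
            # passed as-is and arrive as pointers. For a hypothetical kernel
            # void axpy(int n, float a, float *x) the packing would be:
            #
            #   extra=(ctypes.c_int(n), ctypes.c_float(a), x_d)
            #
            # A mismatched wrapper (e.g. c_int where the kernel reads a float)
            # silently corrupts every argument that follows it.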
            hip_check(hip.hipDeviceSynchronize())
            #Unloading the module or freeing cfl_data here would break the
            #next substep, so both stay alive for the simulator's lifetime:
            #hip_check(hip.hipModuleUnload(self.module))
            #hip_check(hip.hipFree(self.cfl_data))

            #print("--External & Internal: Launching Kernel is ok")

            return
@@ -229,236 +303,242 @@ class EE2D_KP07_dimsplit (BaseSimulator):
            # XXX: Corners are treated twice! #
            ###################################
            ns_grid_size = (self.grid_size[0], 1, 1)

            # NORTH
            # (x0, y0) x (x1, y1)
            # (0, ny-y_halo) x (nx, ny)
            """ Cuda
            self.kernel.prepared_async_call(ns_grid_size, self.block_size, self.stream,
                    self.nx, self.ny,
                    self.dx, self.dy, dt,
                    self.g,
                    self.gamma,
                    self.theta,
                    substep,
                    self.boundary_conditions,
                    self.u0[0].data.gpudata, self.u0[0].data.strides[0],
                    self.u0[1].data.gpudata, self.u0[1].data.strides[0],
                    self.u0[2].data.gpudata, self.u0[2].data.strides[0],
                    self.u0[3].data.gpudata, self.u0[3].data.strides[0],
                    self.u1[0].data.gpudata, self.u1[0].data.strides[0],
                    self.u1[1].data.gpudata, self.u1[1].data.strides[0],
                    self.u1[2].data.gpudata, self.u1[2].data.strides[0],
                    self.u1[3].data.gpudata, self.u1[3].data.strides[0],
                    self.cfl_data.gpudata,
                    0, self.ny - int(self.u0[0].y_halo),
                    self.nx, self.ny)
            """
            hip_check(
                hip.hipModuleLaunchKernel(
                    self.kernel,
                    *ns_grid_size,    #grid
                    *self.block_size, #block
                    sharedMemBytes=0,
                    stream=self.stream,
                    kernelParams=None,
                    extra=(  # pass kernel's arguments
                        ctypes.c_int(self.nx), ctypes.c_int(self.ny),
                        ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
                        ctypes.c_float(self.g),
                        ctypes.c_float(self.gamma),
                        ctypes.c_float(self.theta),
                        ctypes.c_int(substep),
                        ctypes.c_int(self.boundary_conditions),
                        self.u0[0].data, ctypes.c_int(u00_strides0),
                        self.u0[1].data, ctypes.c_int(u01_strides0),
                        self.u0[2].data, ctypes.c_int(u02_strides0),
                        self.u0[3].data, ctypes.c_int(u03_strides0),
                        self.u1[0].data, ctypes.c_int(u10_strides0),
                        self.u1[1].data, ctypes.c_int(u11_strides0),
                        self.u1[2].data, ctypes.c_int(u12_strides0),
                        self.u1[3].data, ctypes.c_int(u13_strides0),
                        self.cfl_data,
                        ctypes.c_int(0), ctypes.c_int(self.ny - self.u0[0].y_halo),
                        ctypes.c_int(self.nx), ctypes.c_int(self.ny),
                    )
                )
            )
            hip_check(hip.hipStreamSynchronize(self.stream))

            #print()
            #print("--I am at the NORTH:")
            #print()
            # SOUTH
            # (x0, y0) x (x1, y1)
            # (0, 0) x (nx, y_halo)
            """ Cuda
            self.kernel.prepared_async_call(ns_grid_size, self.block_size, self.stream,
                    self.nx, self.ny,
                    self.dx, self.dy, dt,
                    self.g,
                    self.gamma,
                    self.theta,
                    substep,
                    self.boundary_conditions,
                    self.u0[0].data.gpudata, self.u0[0].data.strides[0],
                    self.u0[1].data.gpudata, self.u0[1].data.strides[0],
                    self.u0[2].data.gpudata, self.u0[2].data.strides[0],
                    self.u0[3].data.gpudata, self.u0[3].data.strides[0],
                    self.u1[0].data.gpudata, self.u1[0].data.strides[0],
                    self.u1[1].data.gpudata, self.u1[1].data.strides[0],
                    self.u1[2].data.gpudata, self.u1[2].data.strides[0],
                    self.u1[3].data.gpudata, self.u1[3].data.strides[0],
                    self.cfl_data.gpudata,
                    0, 0,
                    self.nx, int(self.u0[0].y_halo))
            """
            hip_check(
                hip.hipModuleLaunchKernel(
                    self.kernel,
                    *ns_grid_size,    #grid
                    *self.block_size, #block
                    sharedMemBytes=0,
                    stream=self.stream,
                    kernelParams=None,
                    extra=(  # pass kernel's arguments
                        ctypes.c_int(self.nx), ctypes.c_int(self.ny),
                        ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
                        ctypes.c_float(self.g),
                        ctypes.c_float(self.gamma),
                        ctypes.c_float(self.theta),
                        ctypes.c_int(substep),
                        ctypes.c_int(self.boundary_conditions),
                        self.u0[0].data, ctypes.c_int(u00_strides0),
                        self.u0[1].data, ctypes.c_int(u01_strides0),
                        self.u0[2].data, ctypes.c_int(u02_strides0),
                        self.u0[3].data, ctypes.c_int(u03_strides0),
                        self.u1[0].data, ctypes.c_int(u10_strides0),
                        self.u1[1].data, ctypes.c_int(u11_strides0),
                        self.u1[2].data, ctypes.c_int(u12_strides0),
                        self.u1[3].data, ctypes.c_int(u13_strides0),
                        self.cfl_data,
                        ctypes.c_int(0), ctypes.c_int(0),
                        ctypes.c_int(self.nx), ctypes.c_int(self.u0[0].y_halo),
                    )
                )
            )
            hip_check(hip.hipStreamSynchronize(self.stream))

            we_grid_size = (1, self.grid_size[1], 1)
            # WEST
            # (x0, y0) x (x1, y1)
            # (0, 0) x (x_halo, ny)
            """ Cuda
            self.kernel.prepared_async_call(we_grid_size, self.block_size, self.stream,
                    self.nx, self.ny,
                    self.dx, self.dy, dt,
                    self.g,
                    self.gamma,
                    self.theta,
                    substep,
                    self.boundary_conditions,
                    self.u0[0].data.gpudata, self.u0[0].data.strides[0],
                    self.u0[1].data.gpudata, self.u0[1].data.strides[0],
                    self.u0[2].data.gpudata, self.u0[2].data.strides[0],
                    self.u0[3].data.gpudata, self.u0[3].data.strides[0],
                    self.u1[0].data.gpudata, self.u1[0].data.strides[0],
                    self.u1[1].data.gpudata, self.u1[1].data.strides[0],
                    self.u1[2].data.gpudata, self.u1[2].data.strides[0],
                    self.u1[3].data.gpudata, self.u1[3].data.strides[0],
                    self.cfl_data.gpudata,
                    0, 0,
                    int(self.u0[0].x_halo), self.ny)
            """
            hip_check(
                hip.hipModuleLaunchKernel(
                    self.kernel,
                    *we_grid_size,    #grid
                    *self.block_size, #block
                    sharedMemBytes=0,
                    stream=self.stream,
                    kernelParams=None,
                    extra=(  # pass kernel's arguments
                        ctypes.c_int(self.nx), ctypes.c_int(self.ny),
                        ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
                        ctypes.c_float(self.g),
                        ctypes.c_float(self.gamma),
                        ctypes.c_float(self.theta),
                        ctypes.c_int(substep),
                        ctypes.c_int(self.boundary_conditions),
                        self.u0[0].data, ctypes.c_int(u00_strides0),
                        self.u0[1].data, ctypes.c_int(u01_strides0),
                        self.u0[2].data, ctypes.c_int(u02_strides0),
                        self.u0[3].data, ctypes.c_int(u03_strides0),
                        self.u1[0].data, ctypes.c_int(u10_strides0),
                        self.u1[1].data, ctypes.c_int(u11_strides0),
                        self.u1[2].data, ctypes.c_int(u12_strides0),
                        self.u1[3].data, ctypes.c_int(u13_strides0),
                        self.cfl_data,
                        ctypes.c_int(0), ctypes.c_int(0),
                        ctypes.c_int(self.u0[0].x_halo), ctypes.c_int(self.ny),
                    )
                )
            )
            hip_check(hip.hipStreamSynchronize(self.stream))
            # EAST
            # (x0, y0) x (x1, y1)
            # (nx-x_halo, 0) x (nx, ny)
            """ Cuda
            self.kernel.prepared_async_call(we_grid_size, self.block_size, self.stream,
                    self.nx, self.ny,
                    self.dx, self.dy, dt,
                    self.g,
                    self.gamma,
                    self.theta,
                    substep,
                    self.boundary_conditions,
                    self.u0[0].data.gpudata, self.u0[0].data.strides[0],
                    self.u0[1].data.gpudata, self.u0[1].data.strides[0],
                    self.u0[2].data.gpudata, self.u0[2].data.strides[0],
                    self.u0[3].data.gpudata, self.u0[3].data.strides[0],
                    self.u1[0].data.gpudata, self.u1[0].data.strides[0],
                    self.u1[1].data.gpudata, self.u1[1].data.strides[0],
                    self.u1[2].data.gpudata, self.u1[2].data.strides[0],
                    self.u1[3].data.gpudata, self.u1[3].data.strides[0],
                    self.cfl_data.gpudata,
                    self.nx - int(self.u0[0].x_halo), 0,
                    self.nx, self.ny)
            """
            hip_check(
                hip.hipModuleLaunchKernel(
                    self.kernel,
                    *we_grid_size,    #grid
                    *self.block_size, #block
                    sharedMemBytes=0,
                    stream=self.stream,
                    kernelParams=None,
                    extra=(  # pass kernel's arguments
                        ctypes.c_int(self.nx), ctypes.c_int(self.ny),
                        ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
                        ctypes.c_float(self.g),
                        ctypes.c_float(self.gamma),
                        ctypes.c_float(self.theta),
                        ctypes.c_int(substep),
                        ctypes.c_int(self.boundary_conditions),
                        self.u0[0].data, ctypes.c_int(u00_strides0),
                        self.u0[1].data, ctypes.c_int(u01_strides0),
                        self.u0[2].data, ctypes.c_int(u02_strides0),
                        self.u0[3].data, ctypes.c_int(u03_strides0),
                        self.u1[0].data, ctypes.c_int(u10_strides0),
                        self.u1[1].data, ctypes.c_int(u11_strides0),
                        self.u1[2].data, ctypes.c_int(u12_strides0),
                        self.u1[3].data, ctypes.c_int(u13_strides0),
                        self.cfl_data,
                        ctypes.c_int(self.nx - self.u0[0].x_halo), ctypes.c_int(0),
                        ctypes.c_int(self.nx), ctypes.c_int(self.ny),
                    )
                )
            )
            hip_check(hip.hipDeviceSynchronize())
            #hip_check(hip.hipModuleUnload(self.module))
            #hip_check(hip.hipFree(self.cfl_data))

            #print("--External and not Internal: Launching Kernel is ok")
            return
        if internal and not external:

@@ -466,6 +546,7 @@ class EE2D_KP07_dimsplit (BaseSimulator):
            # INTERNAL DOMAIN
            # (x0, y0) x (x1, y1)
            # (x_halo, y_halo) x (nx - x_halo, ny - y_halo)
            """ Cuda
            self.kernel.prepared_async_call(self.grid_size, self.block_size, self.internal_stream,
                    self.nx, self.ny,
                    self.dx, self.dy, dt,

@@ -485,45 +566,40 @@ class EE2D_KP07_dimsplit (BaseSimulator):
                    self.cfl_data.gpudata,
                    int(self.u0[0].x_halo), int(self.u0[0].y_halo),
                    self.nx - int(self.u0[0].x_halo), self.ny - int(self.u0[0].y_halo))
            """
            hip_check(
                hip.hipModuleLaunchKernel(
                    self.kernel,
                    *self.grid_size,  #grid
                    *self.block_size, #block
                    sharedMemBytes=0,
                    stream=self.internal_stream,
                    kernelParams=None,
                    extra=(  # pass kernel's arguments
                        ctypes.c_int(self.nx), ctypes.c_int(self.ny),
                        ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
                        ctypes.c_float(self.g),
                        ctypes.c_float(self.gamma),
                        ctypes.c_float(self.theta),
                        ctypes.c_int(substep),
                        ctypes.c_int(self.boundary_conditions),
                        self.u0[0].data, ctypes.c_int(u00_strides0),
                        self.u0[1].data, ctypes.c_int(u01_strides0),
                        self.u0[2].data, ctypes.c_int(u02_strides0),
                        self.u0[3].data, ctypes.c_int(u03_strides0),
                        self.u1[0].data, ctypes.c_int(u10_strides0),
                        self.u1[1].data, ctypes.c_int(u11_strides0),
                        self.u1[2].data, ctypes.c_int(u12_strides0),
                        self.u1[3].data, ctypes.c_int(u13_strides0),
                        self.cfl_data,
                        ctypes.c_int(self.u0[0].x_halo), ctypes.c_int(self.u0[0].y_halo),
                        ctypes.c_int(self.nx - self.u0[0].x_halo), ctypes.c_int(self.ny - self.u0[0].y_halo),
                    )
                )
            )
            hip_check(hip.hipDeviceSynchronize())
            #hip_check(hip.hipModuleUnload(self.module))
            #hip_check(hip.hipFree(self.cfl_data))

            #print("--Internal and not External: Launching Kernel is ok")
            return
    def swapBuffers(self):

@@ -25,16 +25,24 @@ from GPUSimulators import Simulator, Common
from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
import numpy as np
import ctypes

#from pycuda import gpuarray
from hip import hip, hiprtc
from hip import hipblas
def hip_check(call_result):
    err = call_result[0]
    result = call_result[1:]
    if len(result) == 1:
        result = result[0]
    if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
        raise RuntimeError(str(err))
    elif (
        isinstance(err, hiprtc.hiprtcResult)
        and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
    ):
        raise RuntimeError(str(err))
    return result
"""
|
||||
Class that solves the SW equations
|
||||
@@ -53,19 +61,6 @@ class FORCE (Simulator.BaseSimulator):
|
||||
dt: Size of each timestep (90 s)
|
||||
g: Gravitational accelleration (9.81 m/s^2)
|
||||
"""
|
||||
    def __init__(self,
                 context,

@@ -87,25 +82,55 @@ class FORCE (Simulator.BaseSimulator):
                 block_width, block_height)
        self.g = np.float32(g)
        #Get cuda kernels (the old CUDA path is kept below for reference only)
        """
        module = context.get_module("cuda/SWE2D_FORCE.cu",
                        defines={
                            'BLOCK_WIDTH': self.block_size[0],
                            'BLOCK_HEIGHT': self.block_size[1]
                        },
                        compile_args={
                            'no_extern_c': True,
                            'options': ["--use_fast_math"],
                        },
                        jit_compile_args={})
        self.kernel = module.get_function("FORCEKernel")
        self.kernel.prepare("iiffffiPiPiPiPiPiPiP")
        """
        current_dir = os.path.dirname(os.path.abspath(__file__))
        # Relative path to the "cuda" directory
        cuda_dir = os.path.join(current_dir, 'cuda')

        #kernel source
        kernel_file_path = os.path.abspath(os.path.join(cuda_dir, 'SWE2D_FORCE.cu.hip'))
        with open(kernel_file_path, 'r') as file:
            kernel_source = file.read()
prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"FORCEKernel", 0, [], []))
|
||||
#headers
|
||||
#common.h
|
||||
header_file_path = os.path.abspath(os.path.join(cuda_dir, 'common.h'))
|
||||
with open(header_file_path, 'r') as file:
|
||||
header_common = file.read()
|
||||
|
||||
#SWECommon.h
|
||||
header_file_path = os.path.abspath(os.path.join(cuda_dir, 'SWECommon.h'))
|
||||
with open(header_file_path, 'r') as file:
|
||||
header_EulerCommon = file.read()
|
||||
|
||||
#hip.hiprtc.hiprtcCreateProgram(const char *src, const char *name, int numHeaders, headers, includeNames)
|
||||
|
||||
prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"FORCEKernel", 2, [header_common.encode(),header_SWECommon.encode()], [b"common.h", b"SWECommon.h"]))
|
||||
|
||||
        # hip_check has already raised on failure, so prog is valid here
        if prog is not None:
            print("--This is <SWE2D_FORCE.cu.hip>")
            print("--HIPRTC program created successfully")
            print()
        else:
            print("--Failed to create HIPRTC program")
            exit()

        props = hip.hipDeviceProp_t()
        hip_check(hip.hipGetDeviceProperties(props, 0))
@@ -113,20 +138,38 @@ class FORCE (Simulator.BaseSimulator):

        print(f"Compiling kernel .FORCEKernel. for {arch}")

        cflags = [b"--offload-arch="+arch, b"-O2",
                  b"-D BLOCK_WIDTH="+str(self.block_size[0]).encode(),
                  b"-D BLOCK_HEIGHT="+str(self.block_size[1]).encode()]
        err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
        # On failure, fetch and report the build log
        if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
            log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
            log = bytearray(log_size)
            hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
            raise RuntimeError(log.decode())
        print("--Compilation:", err)
        print("--The program is compiled successfully")
        code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
        code = bytearray(code_size)
        hip_check(hiprtc.hiprtcGetCode(prog, code))

        #Load the code as a module
        self.module = hip_check(hip.hipModuleLoadData(code))

        #Get the device kernel named "FORCEKernel"
        self.kernel = hip_check(hip.hipModuleGetFunction(self.module, b"FORCEKernel"))

        print()
        print("--The device kernel *FORCEKernel* was created successfully--")
        print("--kernel", self.kernel)
        print()
        #Create data by uploading to device
        self.u0 = Common.ArakawaA2D(self.stream,

@@ -138,65 +181,79 @@ class FORCE (Simulator.BaseSimulator):
                        1, 1,
                        [None, None, None])
        #self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
        dt_x = np.min(self.dx / (np.abs(hu0/h0) + np.sqrt(g*h0)))
        dt_y = np.min(self.dy / (np.abs(hv0/h0) + np.sqrt(g*h0)))
        self.dt = min(dt_x, dt_y)

        #in HIP, the "DeviceArray" object doesn't have a 'fill' attribute,
        #so fill a host buffer with dt and copy it to the device instead
        #self.cfl_data.fill(self.dt, stream=self.stream)
        grid_dim_x, grid_dim_y, grid_dim_z = self.grid_size

        data_h = np.zeros((grid_dim_x, grid_dim_y), dtype=np.float32)
        num_bytes = data_h.size * data_h.itemsize
        data_h.fill(self.dt)

        self.cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
            typestr="float32", shape=(grid_dim_x, grid_dim_y))

        hip_check(hip.hipMemcpyAsync(self.cfl_data, data_h, num_bytes,
                  hip.hipMemcpyKind.hipMemcpyHostToDevice, self.stream))
        #hip_check(hip.hipMemsetAsync(self.cfl_data, 0, num_bytes, self.stream))
    def substep(self, dt, step_number):
        #Cuda
        """
        self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
                self.nx, self.ny,
                self.dx, self.dy, dt,
                self.g,
                self.boundary_conditions,
                self.u0[0].data.gpudata, self.u0[0].data.strides[0],
                self.u0[1].data.gpudata, self.u0[1].data.strides[0],
                self.u0[2].data.gpudata, self.u0[2].data.strides[0],
                self.u1[0].data.gpudata, self.u1[0].data.strides[0],
                self.u1[1].data.gpudata, self.u1[1].data.strides[0],
                self.u1[2].data.gpudata, self.u1[2].data.strides[0],
                self.cfl_data.gpudata)
        self.u0, self.u1 = self.u1, self.u0
        """
        u00_strides0 = self.u0[0].data.shape[0]*np.float32().itemsize
        u01_strides0 = self.u0[1].data.shape[0]*np.float32().itemsize
        u02_strides0 = self.u0[2].data.shape[0]*np.float32().itemsize

        u10_strides0 = self.u1[0].data.shape[0]*np.float32().itemsize
        u11_strides0 = self.u1[1].data.shape[0]*np.float32().itemsize
        u12_strides0 = self.u1[2].data.shape[0]*np.float32().itemsize
        #launch kernel
        hip_check(
            hip.hipModuleLaunchKernel(
                self.kernel,
                *self.grid_size,  #grid
                *self.block_size, #block
                sharedMemBytes=0,
                stream=self.stream,
                kernelParams=None,
                extra=(  # pass kernel's arguments
                    ctypes.c_int(self.nx), ctypes.c_int(self.ny),
                    ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
                    ctypes.c_float(self.g),
                    ctypes.c_int(self.boundary_conditions),
                    self.u0[0].data, ctypes.c_int(u00_strides0),
                    self.u0[1].data, ctypes.c_int(u01_strides0),
                    self.u0[2].data, ctypes.c_int(u02_strides0),
                    self.u1[0].data, ctypes.c_int(u10_strides0),
                    self.u1[1].data, ctypes.c_int(u11_strides0),
                    self.u1[2].data, ctypes.c_int(u12_strides0),
                    self.cfl_data,
                )
            )
        )
        hip_check(hip.hipDeviceSynchronize())
        self.u0, self.u1 = self.u1, self.u0

        #Unloading the module or freeing cfl_data here would break later substeps:
        #hip_check(hip.hipModuleUnload(self.module))
        #hip_check(hip.hipFree(self.cfl_data))

        #print("--Launching Kernel .FORCEKernel. is ok")
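
        # The u0/u1 swap above is plain double buffering: the kernel reads the
        # current state from u0 and writes the next state into u1, and swapping
        # the Python references makes the freshly written buffer the input of
        # the next substep without any device copy. A toy sketch of the idea:
        #
        #   read_buf, write_buf = [1.0, 2.0], [0.0, 0.0]
        #   for step in range(3):
        #       for i, v in enumerate(read_buf):
        #           write_buf[i] = v + 1.0              # "kernel" writes next state
        #       read_buf, write_buf = write_buf, read_buf  # swap roles, no copy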
    def getOutput(self):
        return self.u0
@@ -1,7 +1,8 @@
# -*- coding: utf-8 -*-

"""
This python module implements the HLL flux
for the shallow water equations

Copyright (C) 2016 SINTEF ICT
@@ -27,10 +28,21 @@ import ctypes

#from pycuda import gpuarray
from hip import hip, hiprtc
from hip import hipblas
def hip_check(call_result):
    err = call_result[0]
    result = call_result[1:]
    if len(result) == 1:
        result = result[0]
    if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
        raise RuntimeError(str(err))
    elif (
        isinstance(err, hiprtc.hiprtcResult)
        and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
    ):
        raise RuntimeError(str(err))
    return result
"""
|
||||
Class that solves the SW equations using the Harten-Lax -van Leer approximate Riemann solver
|
||||
@@ -49,19 +61,6 @@ class HLL (Simulator.BaseSimulator):
|
||||
dt: Size of each timestep (90 s)
|
||||
g: Gravitational accelleration (9.81 m/s^2)
|
||||
"""
|
||||
    def __init__(self,
                 context,

@@ -80,28 +79,58 @@ class HLL (Simulator.BaseSimulator):
                 boundary_conditions,
                 cfl_scale,
                 1,
                 block_width, block_height)
        self.g = np.float32(g)
        #Get cuda kernels (the old CUDA path is kept below for reference only)
        """
        module = context.get_module("cuda/SWE2D_HLL.cu",
                        defines={
                            'BLOCK_WIDTH': self.block_size[0],
                            'BLOCK_HEIGHT': self.block_size[1]
                        },
                        compile_args={
                            'no_extern_c': True,
                            'options': ["--use_fast_math"],
                        },
                        jit_compile_args={})
        self.kernel = module.get_function("HLLKernel")
        self.kernel.prepare("iiffffiPiPiPiPiPiPiP")
        """
        current_dir = os.path.dirname(os.path.abspath(__file__))
        # Relative path to the "cuda" directory
        cuda_dir = os.path.join(current_dir, 'cuda')

        #kernel source
        kernel_file_path = os.path.abspath(os.path.join(cuda_dir, 'SWE2D_HLL.cu.hip'))
        with open(kernel_file_path, 'r') as file:
            kernel_source = file.read()
prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"HLLKernel", 0, [], []))
|
||||
#headers
|
||||
#common.h
|
||||
header_file_path = os.path.abspath(os.path.join(cuda_dir, 'common.h'))
|
||||
with open(header_file_path, 'r') as file:
|
||||
header_common = file.read()
|
||||
|
||||
#SWECommon.h
|
||||
header_file_path = os.path.abspath(os.path.join(cuda_dir, 'SWECommon.h'))
|
||||
with open(header_file_path, 'r') as file:
|
||||
header_EulerCommon = file.read()
|
||||
|
||||
#hip.hiprtc.hiprtcCreateProgram(const char *src, const char *name, int numHeaders, headers, includeNames)
|
||||
|
||||
prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"HLLKernel", 2, [header_common.encode(),header_SWECommon.encode()], [b"common.h", b"SWECommon.h"]))
|
||||
|
||||
        # hip_check has already raised on failure, so prog is valid here
        if prog is not None:
            print("--This is <SWE2D_HLL.cu.hip>")
            print("--HIPRTC program created successfully")
            print()
        else:
            print("--Failed to create HIPRTC program")
            exit()

        props = hip.hipDeviceProp_t()
        hip_check(hip.hipGetDeviceProperties(props, 0))
@@ -109,19 +138,38 @@ class HLL (Simulator.BaseSimulator):

        print(f"Compiling kernel .HLLKernel. for {arch}")

        cflags = [b"--offload-arch="+arch, b"-O2",
                  b"-D BLOCK_WIDTH="+str(self.block_size[0]).encode(),
                  b"-D BLOCK_HEIGHT="+str(self.block_size[1]).encode()]
        err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
        # On failure, fetch and report the build log
        if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
            log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
            log = bytearray(log_size)
            hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
            raise RuntimeError(log.decode())
        print("--Compilation:", err)
        print("--The program is compiled successfully")
        code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
        code = bytearray(code_size)
        hip_check(hiprtc.hiprtcGetCode(prog, code))

        #Load the code as a module
        self.module = hip_check(hip.hipModuleLoadData(code))

        #Get the device kernel named "HLLKernel"
        self.kernel = hip_check(hip.hipModuleGetFunction(self.module, b"HLLKernel"))

        print()
        print("--The device kernel *HLLKernel* was created successfully--")
        print("--kernel", self.kernel)
        print()
        #Create data by uploading to device
        self.u0 = Common.ArakawaA2D(self.stream,

@@ -133,63 +181,79 @@ class HLL (Simulator.BaseSimulator):
                        1, 1,
                        [None, None, None])
        #self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
        dt_x = np.min(self.dx / (np.abs(hu0/h0) + np.sqrt(g*h0)))
        dt_y = np.min(self.dy / (np.abs(hv0/h0) + np.sqrt(g*h0)))
        self.dt = min(dt_x, dt_y)

        #in HIP, the "DeviceArray" object doesn't have a 'fill' attribute,
        #so fill a host buffer with dt and copy it to the device instead
        #self.cfl_data.fill(self.dt, stream=self.stream)
        grid_dim_x, grid_dim_y, grid_dim_z = self.grid_size

        data_h = np.zeros((grid_dim_x, grid_dim_y), dtype=np.float32)
        num_bytes = data_h.size * data_h.itemsize
        data_h.fill(self.dt)

        self.cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
            typestr="float32", shape=(grid_dim_x, grid_dim_y))

        hip_check(hip.hipMemcpyAsync(self.cfl_data, data_h, num_bytes,
                  hip.hipMemcpyKind.hipMemcpyHostToDevice, self.stream))
        #hip_check(hip.hipMemsetAsync(self.cfl_data, 0, num_bytes, self.stream))
    def substep(self, dt, step_number):
        #Cuda
        """
        self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
                self.nx, self.ny,
                self.dx, self.dy, dt,
                self.g,
                self.boundary_conditions,
                self.u0[0].data.gpudata, self.u0[0].data.strides[0],
                self.u0[1].data.gpudata, self.u0[1].data.strides[0],
                self.u0[2].data.gpudata, self.u0[2].data.strides[0],
                self.u1[0].data.gpudata, self.u1[0].data.strides[0],
                self.u1[1].data.gpudata, self.u1[1].data.strides[0],
                self.u1[2].data.gpudata, self.u1[2].data.strides[0],
                self.cfl_data.gpudata)
        self.u0, self.u1 = self.u1, self.u0
        """
        u00_strides0 = self.u0[0].data.shape[0]*np.float32().itemsize
        u01_strides0 = self.u0[1].data.shape[0]*np.float32().itemsize
        u02_strides0 = self.u0[2].data.shape[0]*np.float32().itemsize

        u10_strides0 = self.u1[0].data.shape[0]*np.float32().itemsize
        u11_strides0 = self.u1[1].data.shape[0]*np.float32().itemsize
        u12_strides0 = self.u1[2].data.shape[0]*np.float32().itemsize
        #launch kernel
        hip_check(
            hip.hipModuleLaunchKernel(
                self.kernel,
                *self.grid_size,  #grid
                *self.block_size, #block
                sharedMemBytes=0,
                stream=self.stream,
                kernelParams=None,
                extra=(  # pass kernel's arguments
                    ctypes.c_int(self.nx), ctypes.c_int(self.ny),
                    ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
                    ctypes.c_float(self.g),
                    ctypes.c_int(self.boundary_conditions),
                    self.u0[0].data, ctypes.c_int(u00_strides0),
                    self.u0[1].data, ctypes.c_int(u01_strides0),
                    self.u0[2].data, ctypes.c_int(u02_strides0),
                    self.u1[0].data, ctypes.c_int(u10_strides0),
                    self.u1[1].data, ctypes.c_int(u11_strides0),
                    self.u1[2].data, ctypes.c_int(u12_strides0),
                    self.cfl_data,
                )
            )
        )
        hip_check(hip.hipDeviceSynchronize())

        self.u0, self.u1 = self.u1, self.u0

        #hip_check(hip.hipModuleUnload(self.module))
        #hip_check(hip.hipFree(self.cfl_data))

        #print("--Launching Kernel .HLLKernel. is ok")
    def getOutput(self):
        return self.u0
@@ -1,7 +1,8 @@
# -*- coding: utf-8 -*-

"""
This python module implements the 2nd order HLL flux
for the shallow water equations

Copyright (C) 2016 SINTEF ICT
@@ -27,15 +28,24 @@ import ctypes

#from pycuda import gpuarray
from hip import hip, hiprtc
from hip import hipblas
def hip_check(call_result):
    err = call_result[0]
    result = call_result[1:]
    if len(result) == 1:
        result = result[0]
    if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
        raise RuntimeError(str(err))
    elif (
        isinstance(err, hiprtc.hiprtcResult)
        and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
    ):
        raise RuntimeError(str(err))
    return result
"""
|
||||
Class that solves the SW equations using the Forward-Backward linear scheme
|
||||
Class that solves the SW equations
|
||||
"""
|
||||
class HLL2 (Simulator.BaseSimulator):
|
||||
|
||||
@@ -51,19 +61,6 @@ class HLL2 (Simulator.BaseSimulator):
        dt: Size of each timestep (90 s)
        g: Gravitational acceleration (9.81 m/s^2)
    """
    def __init__(self,
                 context,

@@ -83,29 +80,63 @@ class HLL2 (Simulator.BaseSimulator):
                 boundary_conditions,
                 cfl_scale,
                 2,
                 block_width, block_height)
        self.g = np.float32(g)
        self.theta = np.float32(theta)
        #Get cuda kernels (the old CUDA path is kept below for reference only)
        """
        module = context.get_module("cuda/SWE2D_HLL2.cu",
                        defines={
                            'BLOCK_WIDTH': self.block_size[0],
                            'BLOCK_HEIGHT': self.block_size[1]
                        },
                        compile_args={
                            'no_extern_c': True,
                            'options': ["--use_fast_math"],
                        },
                        jit_compile_args={})
        self.kernel = module.get_function("HLL2Kernel")
        self.kernel.prepare("iiffffiPiPiPiPiPiPiP")
        """
        current_dir = os.path.dirname(os.path.abspath(__file__))
        # Relative path to the "cuda" directory
        cuda_dir = os.path.join(current_dir, 'cuda')

        #kernel source
        kernel_file_path = os.path.abspath(os.path.join(cuda_dir, 'SWE2D_HLL2.cu.hip'))
        with open(kernel_file_path, 'r') as file:
            kernel_source = file.read()
prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"HLL2Kernel", 0, [], []))
|
||||
#headers
|
||||
#common.h
|
||||
header_file_path = os.path.abspath(os.path.join(cuda_dir, 'common.h'))
|
||||
with open(header_file_path, 'r') as file:
|
||||
header_common = file.read()
|
||||
|
||||
#SWECommon.h
|
||||
header_file_path = os.path.abspath(os.path.join(cuda_dir, 'SWECommon.h'))
|
||||
with open(header_file_path, 'r') as file:
|
||||
header_EulerCommon = file.read()
|
||||
|
||||
#limiters.h
|
||||
header_file_path = os.path.abspath(os.path.join(cuda_dir, 'limiters.h'))
|
||||
with open(header_file_path, 'r') as file:
|
||||
header_limiters = file.read()
|
||||
|
||||
#hip.hiprtc.hiprtcCreateProgram(const char *src, const char *name, int numHeaders, headers, includeNames)
|
||||
prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"HLL2Kernel", 3, [header_common.encode(),header_EulerCommon.encode(),header_limiters.encode()], [b"common.h",b"SWECommon.h",b"limiters.h"]))
|
||||
|
||||
        # hip_check has already raised on failure, so prog is valid here
        if prog is not None:
            print("--This is <SWE2D_HLL2.cu.hip>")
            print("--HIPRTC program created successfully")
            print()
        else:
            print("--Failed to create HIPRTC program")
            exit()

        props = hip.hipDeviceProp_t()
        hip_check(hip.hipGetDeviceProperties(props, 0))
@@ -113,19 +144,38 @@ class HLL2 (Simulator.BaseSimulator):

        print(f"Compiling kernel .HLL2Kernel. for {arch}")

        cflags = [b"--offload-arch="+arch, b"-O2",
                  b"-D BLOCK_WIDTH="+str(self.block_size[0]).encode(),
                  b"-D BLOCK_HEIGHT="+str(self.block_size[1]).encode()]
        err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
        # On failure, fetch and report the build log
        if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
            log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
            log = bytearray(log_size)
            hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
            raise RuntimeError(log.decode())
        print("--Compilation:", err)
        print("--The program is compiled successfully")
code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
|
||||
code = bytearray(code_size)
|
||||
hip_check(hiprtc.hiprtcGetCode(prog, code))
|
||||
module = hip_check(hip.hipModuleLoadData(code))
|
||||
|
||||
kernel = hip_check(hip.hipModuleGetFunction(module, b"HLL2Kernel"))
|
||||
#Load the code as a module
|
||||
self.module = hip_check(hip.hipModuleLoadData(code))
|
||||
|
||||
#Get the device kernel named named "FORCEKernel"
|
||||
self.kernel = hip_check(hip.hipModuleGetFunction(self.module, b"HLL2Kernel"))
|
||||
|
||||
print()
|
||||
print("--Get the device kernel *HLL2Kernel* is created successfully--")
|
||||
print("--kernel", self.kernel)
|
||||
print()
|
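The create/compile/load sequence above is repeated nearly verbatim for every solver in this compare view. A minimal reusable sketch of the pattern, assuming the same hip/hiprtc imports and hip_check helper (the function name build_kernel is ours, not the repo's):

    def build_kernel(kernel_source, name, arch, block_size):
        # name must be bytes, e.g. b"HLL2Kernel"
        prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), name, 0, [], []))
        cflags = [b"--offload-arch="+arch, b"-O2",
                  b"-D BLOCK_WIDTH=" + str(block_size[0]).encode(),
                  b"-D BLOCK_HEIGHT=" + str(block_size[1]).encode()]
        err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
        if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
            # surface the full compiler log instead of just the status code
            log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
            log = bytearray(log_size)
            hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
            raise RuntimeError(log.decode())
        code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
        code = bytearray(code_size)
        hip_check(hiprtc.hiprtcGetCode(prog, code))
        module = hip_check(hip.hipModuleLoadData(code))
        kernel = hip_check(hip.hipModuleGetFunction(module, name))
        return module, kernel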

#Create data by uploading to device
self.u0 = Common.ArakawaA2D(self.stream,
@@ -137,70 +187,87 @@ class HLL2 (Simulator.BaseSimulator):
        2, 2,
        [None, None, None])
#self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
data_h = np.empty(self.grid_size, dtype=np.float32)
num_bytes = data_h.size * data_h.itemsize
self.cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
        typestr="float32",shape=self.grid_size)

dt_x = np.min(self.dx / (np.abs(hu0/h0) + np.sqrt(g*h0)))
dt_y = np.min(self.dy / (np.abs(hv0/h0) + np.sqrt(g*h0)))
dt = min(dt_x, dt_y)
self.cfl_data.fill(dt, stream=self.stream)
#in HIP, the "DeviceArray" object doesn't have a 'fill' attribute
#self.cfl_data.fill(self.dt, stream=self.stream)
grid_dim_x, grid_dim_y, grid_dim_z = self.grid_size

data_h = np.zeros((grid_dim_x, grid_dim_y), dtype=np.float32)
num_bytes = data_h.size * data_h.itemsize
data_h.fill(self.dt)

self.cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
        typestr="float32",shape=(grid_dim_x, grid_dim_y))

hip_check(hip.hipMemcpyAsync(self.cfl_data,data_h,num_bytes,hip.hipMemcpyKind.hipMemcpyHostToDevice,self.stream))
#sets the memory region pointed to by x_d to zero asynchronously
#initiates the memset operation asynchronously
#hip_check(hip.hipMemsetAsync(self.cfl_data,0,num_bytes,self.stream))
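As the comment above notes, the HIP DeviceArray returned by hipMalloc().configure() has no fill() method, so the port fills a host array and copies it over. A condensed sketch of that pattern, assuming the dim3 grid size unpacks as in the code above:

    gx, gy, gz = self.grid_size
    data_h = np.full((gx, gy), self.dt, dtype=np.float32)  # host-side fill replaces DeviceArray.fill()
    num_bytes = data_h.size * data_h.itemsize
    cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
            typestr="float32", shape=(gx, gy))
    hip_check(hip.hipMemcpyAsync(cfl_data, data_h, num_bytes,
            hip.hipMemcpyKind.hipMemcpyHostToDevice, self.stream))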
def substep(self, dt, step_number):
    self.substepDimsplit(dt*0.5, step_number)

def substepDimsplit(self, dt, substep):
    # self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
    #         self.nx, self.ny,
    #         self.dx, self.dy, dt,
    #         self.g,
    #         self.theta,
    #         substep,
    #         self.boundary_conditions,
    #         self.u0[0].data.gpudata, self.u0[0].data.strides[0],
    #         self.u0[1].data.gpudata, self.u0[1].data.strides[0],
    #         self.u0[2].data.gpudata, self.u0[2].data.strides[0],
    #         self.u1[0].data.gpudata, self.u1[0].data.strides[0],
    #         self.u1[1].data.gpudata, self.u1[1].data.strides[0],
    #         self.u1[2].data.gpudata, self.u1[2].data.strides[0],
    #         self.cfl_data.gpudata)
    #Cuda
    """
    self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
            self.nx, self.ny,
            self.dx, self.dy, dt,
            self.g,
            self.theta,
            substep,
            self.boundary_conditions,
            self.u0[0].data.gpudata, self.u0[0].data.strides[0],
            self.u0[1].data.gpudata, self.u0[1].data.strides[0],
            self.u0[2].data.gpudata, self.u0[2].data.strides[0],
            self.u1[0].data.gpudata, self.u1[0].data.strides[0],
            self.u1[1].data.gpudata, self.u1[1].data.strides[0],
            self.u1[2].data.gpudata, self.u1[2].data.strides[0],
            self.cfl_data.gpudata)
    self.u0, self.u1 = self.u1, self.u0
    """

    u00_strides0 = self.u0[0].data.shape[0]*np.float32().itemsize
    u01_strides0 = self.u0[1].data.shape[0]*np.float32().itemsize
    u02_strides0 = self.u0[2].data.shape[0]*np.float32().itemsize

    u10_strides0 = self.u1[0].data.shape[0]*np.float32().itemsize
    u11_strides0 = self.u1[1].data.shape[0]*np.float32().itemsize
    u12_strides0 = self.u1[2].data.shape[0]*np.float32().itemsize

    #launch kernel
    hip_check(
        hip.hipModuleLaunchKernel(
            kernel,
            *self.grid_size,
            *self.block_size,
            sharedMemBytes=0,
            self.kernel,
            *self.grid_size, #grid
            *self.block_size, #block
            sharedMemBytes=0, #65536,
            stream=self.stream,
            kernelParams=None,
            extra=( # pass kernel's arguments
                ctypes.c_int(self.nx), ctypes.c_int(self.ny),
                ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(self.dt),
                ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
                ctypes.c_float(self.g),
                ctypes.c_float(self.theta),
                ctypes.c_int(substep),
                ctypes.c_int(self.boundary_conditions),
                ctypes.c_float(self.u0[0].data), ctypes.c_float(self.u0[0].data.strides[0]),
                ctypes.c_float(self.u0[1].data), ctypes.c_float(self.u0[1].data.strides[0]),
                ctypes.c_float(self.u0[2].data), ctypes.c_float(self.u0[2].data.strides[0]),
                ctypes.c_float(self.u1[0].data), ctypes.c_float(self.u1[0].data.strides[0]),
                ctypes.c_float(self.u1[1].data), ctypes.c_float(self.u1[1].data.strides[0]),
                ctypes.c_float(self.u1[2].data), ctypes.c_float(self.u1[2].data.strides[0]),
                self.cfl_data
                self.u0[0].data, ctypes.c_int(u00_strides0),
                self.u0[1].data, ctypes.c_int(u01_strides0),
                self.u0[2].data, ctypes.c_int(u02_strides0),
                self.u1[0].data, ctypes.c_int(u10_strides0),
                self.u1[1].data, ctypes.c_int(u11_strides0),
                self.u1[2].data, ctypes.c_int(u12_strides0),
                self.cfl_data,
            )
        )
    )

    hip_check(hip.hipDeviceSynchronize())
    self.u0, self.u1 = self.u1, self.u0

    hip_check(hip.hipModuleUnload(module))

    hip_check(hip.hipFree(cfl_data))

    print("--Launching Kernel .HLL2Kernel. is ok")
    #print("--Launching Kernel .HLL2Kernel. is ok")

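The ported launch passes scalar arguments wrapped in ctypes and device arrays by handle through the extra tuple of hipModuleLaunchKernel. A stripped-down sketch of the same calling convention (the variable names here are placeholders, not the repo's):

    hip_check(hip.hipModuleLaunchKernel(
            kernel,
            *grid_size,          # grid dimensions (x, y, z)
            *block_size,         # block dimensions (x, y, z)
            sharedMemBytes=0,
            stream=stream,
            kernelParams=None,
            extra=(              # one entry per kernel parameter, in declaration order
                ctypes.c_int(nx), ctypes.c_int(ny),
                ctypes.c_float(dt),
                device_array,               # DeviceArray handles go in as-is
                ctypes.c_int(pitch_bytes),  # row pitch in bytes, as computed above
            )))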
def getOutput(self):
    return self.u0
@@ -244,4 +311,3 @@ class HLL2 (Simulator.BaseSimulator):
    #max_dt = gpuarray.min(self.cfl_data, stream=self.stream).get();
    max_dt = self.min_hipblas(self.cfl_data.size, self.cfl_data, self.stream)
    return max_dt*0.5

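computeDt reduces the per-cell CFL field with the hipblas-backed helper min_hipblas, which is defined elsewhere in these files and not shown in this diff. A slow but simple host-side equivalent, useful for sanity-checking the reduction, is to copy the buffer back and reduce with NumPy (a sketch; the synchronous hipMemcpy signature is assumed to mirror the async call used above):

    def compute_dt_host(cfl_data, grid_shape):
        tmp = np.empty(grid_shape, dtype=np.float32)
        num_bytes = tmp.size * tmp.itemsize
        hip_check(hip.hipMemcpy(tmp, cfl_data, num_bytes,
                hip.hipMemcpyKind.hipMemcpyDeviceToHost))
        return 0.5 * float(np.min(tmp))   # same 0.5 safety factor as above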
@@ -29,19 +29,6 @@ from hip import hip, hiprtc

from GPUSimulators import Common, CudaContext


@magics_class
class MagicCudaContext(Magics):
    @line_magic
    @magic_arguments.magic_arguments()
    @magic_arguments.argument(
        'name', type=str, help='Name of context to create')
    @magic_arguments.argument(
        '--blocking', '-b', action="store_true", help='Enable blocking context')
    @magic_arguments.argument(
        '--no_cache', '-nc', action="store_true", help='Disable caching of kernels')
    @magic_arguments.argument(
        '--no_autotuning', '-na', action="store_true", help='Disable autotuning of kernels')
    def hip_check(call_result):
        err = call_result[0]
        result = call_result[1:]
@@ -56,6 +43,19 @@ class MagicCudaContext(Magics):
        raise RuntimeError(str(err))
        return result

@magics_class
class MagicCudaContext(Magics):
    @line_magic
    @magic_arguments.magic_arguments()
    @magic_arguments.argument(
        'name', type=str, help='Name of context to create')
    @magic_arguments.argument(
        '--blocking', '-b', action="store_true", help='Enable blocking context')
    @magic_arguments.argument(
        '--no_cache', '-nc', action="store_true", help='Disable caching of kernels')
    @magic_arguments.argument(
        '--no_autotuning', '-na', action="store_true", help='Disable autotuning of kernels')

    def cuda_context_handler(self, line):
        args = magic_arguments.parse_argstring(self.cuda_context_handler, line)
        self.logger = logging.getLogger(__name__)


@@ -1,12 +1,8 @@
# -*- coding: utf-8 -*-

"""
This python module implements the Kurganov-Petrova numerical scheme
for the shallow water equations, described in
A. Kurganov & Guergana Petrova
A Second-Order Well-Balanced Positivity Preserving Central-Upwind
Scheme for the Saint-Venant System Communications in Mathematical
Sciences, 5 (2007), 133-160.
This python module implements the FORCE flux
for the shallow water equations

Copyright (C) 2016 SINTEF ICT

@@ -32,8 +28,21 @@ import ctypes

#from pycuda import gpuarray
from hip import hip,hiprtc
from hip import hipblas


def hip_check(call_result):
    err = call_result[0]
    result = call_result[1:]
    if len(result) == 1:
        result = result[0]
    if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
        raise RuntimeError(str(err))
    elif (
        isinstance(err, hiprtc.hiprtcResult)
        and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
    ):
        raise RuntimeError(str(err))
    return result

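This module-level hip_check is the error-handling idiom used throughout the port: HIP Python calls return a tuple whose first element is a status code, and hip_check either raises on a bad status or unwraps the remaining payload. For example:

    d_ptr = hip_check(hip.hipMalloc(1024))  # (hipError_t, ptr) -> ptr
    hip_check(hip.hipFree(d_ptr))           # (hipError_t,) -> () after the check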
"""
Class that solves the SW equations using the Forward-Backward linear scheme
@@ -52,19 +61,6 @@ class KP07 (Simulator.BaseSimulator):
dt: Size of each timestep (90 s)
g: Gravitational acceleration (9.81 m/s^2)
"""
def hip_check(call_result):
    err = call_result[0]
    result = call_result[1:]
    if len(result) == 1:
        result = result[0]
    if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
        raise RuntimeError(str(err))
    elif (
        isinstance(err, hiprtc.hiprtcResult)
        and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
    ):
        raise RuntimeError(str(err))
    return result

def __init__(self,
        context,
@@ -85,30 +81,64 @@ class KP07 (Simulator.BaseSimulator):
        boundary_conditions,
        cfl_scale,
        order,
        block_width, block_height);
        block_width, block_height)
self.g = np.float32(g)
self.theta = np.float32(theta)
self.order = np.int32(order)

#Get kernels
# module = context.get_module("cuda/SWE2D_KP07.cu",
#         defines={
#             'BLOCK_WIDTH': self.block_size[0],
#             'BLOCK_HEIGHT': self.block_size[1]
#         },
#         compile_args={
#             'no_extern_c': True,
#             'options': ["--use_fast_math"],
#         },
#         jit_compile_args={})
# self.kernel = module.get_function("KP07Kernel")
# self.kernel.prepare("iifffffiiPiPiPiPiPiPiP")
#Get cuda kernels
"""
module = context.get_module("cuda/SWE2D_KP07.cu",
        defines={
            'BLOCK_WIDTH': self.block_size[0],
            'BLOCK_HEIGHT': self.block_size[1]
        },
        compile_args={
            'no_extern_c': True,
            'options': ["--use_fast_math"],
        },
        jit_compile_args={})
self.kernel = module.get_function("KP07Kernel")
self.kernel.prepare("iiffffiPiPiPiPiPiPiP")
"""

kernel_file_path = os.path.abspath(os.path.join('cuda', 'SWE2D_KP07.cu.hip'))
current_dir = os.path.dirname(os.path.abspath(__file__))
# Specify the relative path to the "cuda" directory
cuda_dir = os.path.join(current_dir, 'cuda')

#kernel source
kernel_file_path = os.path.abspath(os.path.join(cuda_dir, 'SWE2D_KP07.cu.hip'))
with open(kernel_file_path, 'r') as file:
    kernel_source = file.read()

prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"KP07Kernel", 0, [], []))
#headers
#common.h
header_file_path = os.path.abspath(os.path.join(cuda_dir, 'common.h'))
with open(header_file_path, 'r') as file:
    header_common = file.read()

#SWECommon.h
header_file_path = os.path.abspath(os.path.join(cuda_dir, 'SWECommon.h'))
with open(header_file_path, 'r') as file:
    header_EulerCommon = file.read()

#limiters.h
header_file_path = os.path.abspath(os.path.join(cuda_dir, 'limiters.h'))
with open(header_file_path, 'r') as file:
    header_limiters = file.read()

#hip.hiprtc.hiprtcCreateProgram(const char *src, const char *name, int numHeaders, headers, includeNames)
prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"KP07Kernel", 3, [header_common.encode(),header_EulerCommon.encode(),header_limiters.encode()], [b"common.h",b"SWECommon.h",b"limiters.h"]))

# Check if the program is created successfully
if prog is not None:
    print("--This is <SWE2D_KP07.cu.hip>")
    print("--HIPRTC program created successfully")
    print()
else:
    print("--Failed to create HIPRTC program")
    print("--I stop:", err)
    exit()

props = hip.hipDeviceProp_t()
hip_check(hip.hipGetDeviceProperties(props,0))
@@ -116,19 +146,38 @@ class KP07 (Simulator.BaseSimulator):

print(f"Compiling kernel .KP07Kernel. for {arch}")

cflags = [b"--offload-arch="+arch]
cflags = [b"--offload-arch="+arch, b"-O2", b"-D BLOCK_WIDTH="+ str(self.block_size[0]).encode(), b"-D BLOCK_HEIGHT=" + str(self.block_size[1]).encode()]

err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
# Check if the program is compiled successfully
if err is not None:
    print("--Compilation:", err)
    print("--The program is compiled successfully")
else:
    print("--Compilation:", err)
    print("--Failed to compile the program")
    print("--I stop:", err)

if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
    log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
    log = bytearray(log_size)
    hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
    raise RuntimeError(log.decode())

code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
code = bytearray(code_size)
hip_check(hiprtc.hiprtcGetCode(prog, code))
module = hip_check(hip.hipModuleLoadData(code))

kernel = hip_check(hip.hipModuleGetFunction(module, b"KP07Kernel"))
#Load the code as a module
self.module = hip_check(hip.hipModuleLoadData(code))

#Get the device kernel named "KP07Kernel"
self.kernel = hip_check(hip.hipModuleGetFunction(self.module, b"KP07Kernel"))

print()
print("--Get the device kernel *KP07Kernel* is created successfully--")
print("--kernel", self.kernel)
print()

#Create data by uploading to device
self.u0 = Common.ArakawaA2D(self.stream,
@@ -140,73 +189,87 @@ class KP07 (Simulator.BaseSimulator):
        2, 2,
        [None, None, None])
#self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
data_h = np.empty(self.grid_size, dtype=np.float32)
num_bytes = data_h.size * data_h.itemsize
self.cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
        typestr="float32",shape=self.grid_size)

dt_x = np.min(self.dx / (np.abs(hu0/h0) + np.sqrt(g*h0)))
dt_y = np.min(self.dy / (np.abs(hv0/h0) + np.sqrt(g*h0)))
dt = min(dt_x, dt_y)
self.cfl_data.fill(dt, stream=self.stream)
#in HIP, the "DeviceArray" object doesn't have a 'fill' attribute
#self.cfl_data.fill(self.dt, stream=self.stream)
grid_dim_x, grid_dim_y, grid_dim_z = self.grid_size

data_h = np.zeros((grid_dim_x, grid_dim_y), dtype=np.float32)
num_bytes = data_h.size * data_h.itemsize
data_h.fill(self.dt)

self.cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
        typestr="float32",shape=(grid_dim_x, grid_dim_y))

hip_check(hip.hipMemcpyAsync(self.cfl_data,data_h,num_bytes,hip.hipMemcpyKind.hipMemcpyHostToDevice,self.stream))
#sets the memory region pointed to by x_d to zero asynchronously
#initiates the memset operation asynchronously
#hip_check(hip.hipMemsetAsync(self.cfl_data,0,num_bytes,self.stream))

def substep(self, dt, step_number):
    self.substepRK(dt, step_number)


def substepRK(self, dt, substep):
    # self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
    #         self.nx, self.ny,
    #         self.dx, self.dy, dt,
    #         self.g,
    #         self.theta,
    #         Simulator.stepOrderToCodedInt(step=substep, order=self.order),
    #         self.boundary_conditions,
    #         self.u0[0].data.gpudata, self.u0[0].data.strides[0],
    #         self.u0[1].data.gpudata, self.u0[1].data.strides[0],
    #         self.u0[2].data.gpudata, self.u0[2].data.strides[0],
    #         self.u1[0].data.gpudata, self.u1[0].data.strides[0],
    #         self.u1[1].data.gpudata, self.u1[1].data.strides[0],
    #         self.u1[2].data.gpudata, self.u1[2].data.strides[0],
    #         self.cfl_data.gpudata)
    #Cuda
    """
    self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
            self.nx, self.ny,
            self.dx, self.dy, dt,
            self.g,
            self.theta,
            Simulator.stepOrderToCodedInt(step=substep, order=self.order),
            self.boundary_conditions,
            self.u0[0].data.gpudata, self.u0[0].data.strides[0],
            self.u0[1].data.gpudata, self.u0[1].data.strides[0],
            self.u0[2].data.gpudata, self.u0[2].data.strides[0],
            self.u1[0].data.gpudata, self.u1[0].data.strides[0],
            self.u1[1].data.gpudata, self.u1[1].data.strides[0],
            self.u1[2].data.gpudata, self.u1[2].data.strides[0],
            self.cfl_data.gpudata)
    self.u0, self.u1 = self.u1, self.u0
    """

    u00_strides0 = self.u0[0].data.shape[0]*np.float32().itemsize
    u01_strides0 = self.u0[1].data.shape[0]*np.float32().itemsize
    u02_strides0 = self.u0[2].data.shape[0]*np.float32().itemsize

    u10_strides0 = self.u1[0].data.shape[0]*np.float32().itemsize
    u11_strides0 = self.u1[1].data.shape[0]*np.float32().itemsize
    u12_strides0 = self.u1[2].data.shape[0]*np.float32().itemsize

    #launch kernel
    hip_check(
        hip.hipModuleLaunchKernel(
            kernel,
            *self.grid_size,
            *self.block_size,
            sharedMemBytes=0,
            self.kernel,
            *self.grid_size, #grid
            *self.block_size, #block
            sharedMemBytes=0, #65536,
            stream=self.stream,
            kernelParams=None,
            extra=( # pass kernel's arguments
                ctypes.c_int(self.nx), ctypes.c_int(self.ny),
                ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(self.dt),
                ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
                ctypes.c_float(self.g),
                ctypes.c_float(self.theta),
                Simulator.stepOrderToCodedInt(step=substep, order=self.order),
                ctypes.c_int(self.boundary_conditions),
                ctypes.c_float(self.u0[0].data), ctypes.c_float(self.u0[0].data.strides[0]),
                ctypes.c_float(self.u0[1].data), ctypes.c_float(self.u0[1].data.strides[0]),
                ctypes.c_float(self.u0[2].data), ctypes.c_float(self.u0[2].data.strides[0]),
                ctypes.c_float(self.u1[0].data), ctypes.c_float(self.u1[0].data.strides[0]),
                ctypes.c_float(self.u1[1].data), ctypes.c_float(self.u1[1].data.strides[0]),
                ctypes.c_float(self.u1[2].data), ctypes.c_float(self.u1[2].data.strides[0]),
                self.cfl_data
                self.u0[0].data, ctypes.c_int(u00_strides0),
                self.u0[1].data, ctypes.c_int(u01_strides0),
                self.u0[2].data, ctypes.c_int(u02_strides0),
                self.u1[0].data, ctypes.c_int(u10_strides0),
                self.u1[1].data, ctypes.c_int(u11_strides0),
                self.u1[2].data, ctypes.c_int(u12_strides0),
                self.cfl_data,
            )
        )
    )

    hip_check(hip.hipDeviceSynchronize())

    self.u0, self.u1 = self.u1, self.u0

    hip_check(hip.hipModuleUnload(module))

    hip_check(hip.hipFree(cfl_data))

    print("--Launching Kernel .KP07Kernel. is ok")
    #print("--Launching Kernel .KP07Kernel. is ok")

def getOutput(self):
    return self.u0
@@ -247,6 +310,6 @@ class KP07 (Simulator.BaseSimulator):
    return min_value

def computeDt(self):
    max_dt = self.min_hipblas(self.cfl_data.size, self.cfl_data, self.stream)
    #max_dt = gpuarray.min(self.cfl_data, stream=self.stream).get();
    max_dt = self.min_hipblas(self.cfl_data.size, self.cfl_data, self.stream)
    return max_dt*0.5**(self.order-1)

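Note the double-buffering in substepRK above: the kernel reads the current state from self.u0, writes the updated state into self.u1, and the two references are then swapped so that no device-side copy is needed:

    # read from u0, write to u1, then flip the roles for the next substep
    self.u0, self.u1 = self.u1, self.u0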
@@ -1,12 +1,8 @@
# -*- coding: utf-8 -*-

"""
This python module implements the Kurganov-Petrova numerical scheme
for the shallow water equations, described in
A. Kurganov & Guergana Petrova
A Second-Order Well-Balanced Positivity Preserving Central-Upwind
Scheme for the Saint-Venant System Communications in Mathematical
Sciences, 5 (2007), 133-160.
This python module implements the FORCE flux
for the shallow water equations

Copyright (C) 2016 SINTEF ICT

@@ -32,9 +28,21 @@ import ctypes

#from pycuda import gpuarray
from hip import hip,hiprtc
from hip import hipblas



def hip_check(call_result):
    err = call_result[0]
    result = call_result[1:]
    if len(result) == 1:
        result = result[0]
    if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
        raise RuntimeError(str(err))
    elif (
        isinstance(err, hiprtc.hiprtcResult)
        and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
    ):
        raise RuntimeError(str(err))
    return result

"""
Class that solves the SW equations using the dimensionally split KP07 scheme
@@ -54,20 +62,6 @@ class KP07_dimsplit(Simulator.BaseSimulator):
g: Gravitational acceleration (9.81 m/s^2)
"""

def hip_check(call_result):
    err = call_result[0]
    result = call_result[1:]
    if len(result) == 1:
        result = result[0]
    if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
        raise RuntimeError(str(err))
    elif (
        isinstance(err, hiprtc.hiprtcResult)
        and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
    ):
        raise RuntimeError(str(err))
    return result

def __init__(self,
        context,
        h0, hu0, hv0,
@@ -92,25 +86,59 @@ class KP07_dimsplit(Simulator.BaseSimulator):
self.g = np.float32(g)
self.theta = np.float32(theta)

#Get kernels
# module = context.get_module("cuda/SWE2D_KP07_dimsplit.cu",
#         defines={
#             'BLOCK_WIDTH': self.block_size[0],
#             'BLOCK_HEIGHT': self.block_size[1]
#         },
#         compile_args={
#             'no_extern_c': True,
#             'options': ["--use_fast_math"],
#         },
#         jit_compile_args={})
# self.kernel = module.get_function("KP07DimsplitKernel")
# self.kernel.prepare("iifffffiiPiPiPiPiPiPiP")
#Get cuda kernels
"""
module = context.get_module("cuda/SWE2D_KP07_dimsplit.cu",
        defines={
            'BLOCK_WIDTH': self.block_size[0],
            'BLOCK_HEIGHT': self.block_size[1]
        },
        compile_args={
            'no_extern_c': True,
            'options': ["--use_fast_math"],
        },
        jit_compile_args={})
self.kernel = module.get_function("KP07DimsplitKernel")
self.kernel.prepare("iiffffiPiPiPiPiPiPiP")
"""

kernel_file_path = os.path.abspath(os.path.join('cuda', 'SWE2D_KP07_dimsplit.cu.hip'))
current_dir = os.path.dirname(os.path.abspath(__file__))
# Specify the relative path to the "cuda" directory
cuda_dir = os.path.join(current_dir, 'cuda')

#kernel source
kernel_file_path = os.path.abspath(os.path.join(cuda_dir, 'SWE2D_KP07_dimsplit.cu.hip'))
with open(kernel_file_path, 'r') as file:
    kernel_source = file.read()

prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"KP07DimsplitKernel", 0, [], []))
#headers
#common.h
header_file_path = os.path.abspath(os.path.join(cuda_dir, 'common.h'))
with open(header_file_path, 'r') as file:
    header_common = file.read()

#SWECommon.h
header_file_path = os.path.abspath(os.path.join(cuda_dir, 'SWECommon.h'))
with open(header_file_path, 'r') as file:
    header_EulerCommon = file.read()

#limiters.h
header_file_path = os.path.abspath(os.path.join(cuda_dir, 'limiters.h'))
with open(header_file_path, 'r') as file:
    header_limiters = file.read()

#hip.hiprtc.hiprtcCreateProgram(const char *src, const char *name, int numHeaders, headers, includeNames)
prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"KP07DimsplitKernel", 3, [header_common.encode(),header_EulerCommon.encode(),header_limiters.encode()], [b"common.h",b"SWECommon.h",b"limiters.h"]))

# Check if the program is created successfully
if prog is not None:
    print("--This is <SWE2D_KP07_dimsplit.cu.hip>")
    print("--HIPRTC program created successfully")
    print()
else:
    print("--Failed to create HIPRTC program")
    print("--I stop:", err)
    exit()

props = hip.hipDeviceProp_t()
hip_check(hip.hipGetDeviceProperties(props,0))
@@ -118,19 +146,38 @@ class KP07_dimsplit(Simulator.BaseSimulator):

print(f"Compiling kernel .KP07DimsplitKernel. for {arch}")

cflags = [b"--offload-arch="+arch]
cflags = [b"--offload-arch="+arch, b"-O2", b"-D BLOCK_WIDTH="+ str(self.block_size[0]).encode(), b"-D BLOCK_HEIGHT=" + str(self.block_size[1]).encode()]

err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
# Check if the program is compiled successfully
if err is not None:
    print("--Compilation:", err)
    print("--The program is compiled successfully")
else:
    print("--Compilation:", err)
    print("--Failed to compile the program")
    print("--I stop:", err)

if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
    log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
    log = bytearray(log_size)
    hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
    raise RuntimeError(log.decode())

code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
code = bytearray(code_size)
hip_check(hiprtc.hiprtcGetCode(prog, code))
module = hip_check(hip.hipModuleLoadData(code))

kernel = hip_check(hip.hipModuleGetFunction(module, b"KP07DimsplitKernel"))
#Load the code as a module
self.module = hip_check(hip.hipModuleLoadData(code))

#Get the device kernel named "KP07DimsplitKernel"
self.kernel = hip_check(hip.hipModuleGetFunction(self.module, b"KP07DimsplitKernel"))

print()
print("--Get the device kernel *KP07DimsplitKernel* is created successfully--")
print("--kernel", self.kernel)
print()

#Create data by uploading to device
self.u0 = Common.ArakawaA2D(self.stream,
@@ -142,70 +189,87 @@ class KP07_dimsplit(Simulator.BaseSimulator):
        self.gc_x, self.gc_y,
        [None, None, None])
#self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
data_h = np.empty(self.grid_size, dtype=np.float32)
num_bytes = data_h.size * data_h.itemsize
self.cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
        typestr="float32",shape=self.grid_size)

dt_x = np.min(self.dx / (np.abs(hu0/h0) + np.sqrt(g*h0)))
dt_y = np.min(self.dy / (np.abs(hv0/h0) + np.sqrt(g*h0)))
dt = min(dt_x, dt_y)
self.cfl_data.fill(dt, stream=self.stream)
#in HIP, the "DeviceArray" object doesn't have a 'fill' attribute
#self.cfl_data.fill(self.dt, stream=self.stream)
grid_dim_x, grid_dim_y, grid_dim_z = self.grid_size

data_h = np.zeros((grid_dim_x, grid_dim_y), dtype=np.float32)
num_bytes = data_h.size * data_h.itemsize
data_h.fill(self.dt)

self.cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
        typestr="float32",shape=(grid_dim_x, grid_dim_y))

hip_check(hip.hipMemcpyAsync(self.cfl_data,data_h,num_bytes,hip.hipMemcpyKind.hipMemcpyHostToDevice,self.stream))
#sets the memory region pointed to by x_d to zero asynchronously
#initiates the memset operation asynchronously
#hip_check(hip.hipMemsetAsync(self.cfl_data,0,num_bytes,self.stream))

def substep(self, dt, step_number):
    self.substepDimsplit(dt*0.5, step_number)

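substep forwards dt*0.5 to substepDimsplit, so each call presumably advances one half-timestep sweep and two substeps combine into a full dt, in the spirit of Strang-type dimensional splitting. A schematic sketch of that driver logic (our reading of the code, not the repo's actual step()):

    def step(self, dt):
        # two dimensionally split half-steps make up one full timestep
        for substep in range(2):      # 0: x-then-y sweep, 1: y-then-x sweep
            self.substepDimsplit(0.5 * dt, substep)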
def substepDimsplit(self, dt, substep):
    # self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
    #         self.nx, self.ny,
    #         self.dx, self.dy, dt,
    #         self.g,
    #         self.theta,
    #         substep,
    #         self.boundary_conditions,
    #         self.u0[0].data.gpudata, self.u0[0].data.strides[0],
    #         self.u0[1].data.gpudata, self.u0[1].data.strides[0],
    #         self.u0[2].data.gpudata, self.u0[2].data.strides[0],
    #         self.u1[0].data.gpudata, self.u1[0].data.strides[0],
    #         self.u1[1].data.gpudata, self.u1[1].data.strides[0],
    #         self.u1[2].data.gpudata, self.u1[2].data.strides[0],
    #         self.cfl_data.gpudata)
    #Cuda
    """
    self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
            self.nx, self.ny,
            self.dx, self.dy, dt,
            self.g,
            self.theta,
            substep,
            self.boundary_conditions,
            self.u0[0].data.gpudata, self.u0[0].data.strides[0],
            self.u0[1].data.gpudata, self.u0[1].data.strides[0],
            self.u0[2].data.gpudata, self.u0[2].data.strides[0],
            self.u1[0].data.gpudata, self.u1[0].data.strides[0],
            self.u1[1].data.gpudata, self.u1[1].data.strides[0],
            self.u1[2].data.gpudata, self.u1[2].data.strides[0],
            self.cfl_data.gpudata)
    self.u0, self.u1 = self.u1, self.u0
    """

    u00_strides0 = self.u0[0].data.shape[0]*np.float32().itemsize
    u01_strides0 = self.u0[1].data.shape[0]*np.float32().itemsize
    u02_strides0 = self.u0[2].data.shape[0]*np.float32().itemsize

    u10_strides0 = self.u1[0].data.shape[0]*np.float32().itemsize
    u11_strides0 = self.u1[1].data.shape[0]*np.float32().itemsize
    u12_strides0 = self.u1[2].data.shape[0]*np.float32().itemsize

    #launch kernel
    hip_check(
        hip.hipModuleLaunchKernel(
            kernel,
            *self.grid_size,
            *self.block_size,
            sharedMemBytes=0,
            self.kernel,
            *self.grid_size, #grid
            *self.block_size, #block
            sharedMemBytes=0, #65536,
            stream=self.stream,
            kernelParams=None,
            extra=( # pass kernel's arguments
                ctypes.c_int(self.nx), ctypes.c_int(self.ny),
                ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(self.dt),
                ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
                ctypes.c_float(self.g),
                ctypes.c_float(self.theta),
                ctypes.c_int(substep)
                ctypes.c_int(substep),
                ctypes.c_int(self.boundary_conditions),
                ctypes.c_float(self.u0[0].data), ctypes.c_float(self.u0[0].data.strides[0]),
                ctypes.c_float(self.u0[1].data), ctypes.c_float(self.u0[1].data.strides[0]),
                ctypes.c_float(self.u0[2].data), ctypes.c_float(self.u0[2].data.strides[0]),
                ctypes.c_float(self.u1[0].data), ctypes.c_float(self.u1[0].data.strides[0]),
                ctypes.c_float(self.u1[1].data), ctypes.c_float(self.u1[1].data.strides[0]),
                ctypes.c_float(self.u1[2].data), ctypes.c_float(self.u1[2].data.strides[0]),
                self.cfl_data
                self.u0[0].data, ctypes.c_int(u00_strides0),
                self.u0[1].data, ctypes.c_int(u01_strides0),
                self.u0[2].data, ctypes.c_int(u02_strides0),
                self.u1[0].data, ctypes.c_int(u10_strides0),
                self.u1[1].data, ctypes.c_int(u11_strides0),
                self.u1[2].data, ctypes.c_int(u12_strides0),
                self.cfl_data,
            )
        )
    )

    hip_check(hip.hipDeviceSynchronize())

    self.u0, self.u1 = self.u1, self.u0
    hip_check(hip.hipModuleUnload(module))

    hip_check(hip.hipFree(cfl_data))

    print("--Launching Kernel .KP07DimsplitKernel. is ok")
    #print("--Launching Kernel .KP07DimsplitKernel. is ok")

def getOutput(self):
    return self.u0

@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-

"""
This python module implements the classical Lax-Friedrichs numerical
scheme for the shallow water equations
This python module implements the FORCE flux
for the shallow water equations

Copyright (C) 2016 SINTEF ICT

@@ -28,10 +28,21 @@ import ctypes

#from pycuda import gpuarray
from hip import hip,hiprtc
from hip import hipblas



def hip_check(call_result):
    err = call_result[0]
    result = call_result[1:]
    if len(result) == 1:
        result = result[0]
    if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
        raise RuntimeError(str(err))
    elif (
        isinstance(err, hiprtc.hiprtcResult)
        and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
    ):
        raise RuntimeError(str(err))
    return result

"""
Class that solves the SW equations using the Lax Friedrichs scheme
@@ -51,20 +62,6 @@ class LxF (Simulator.BaseSimulator):
g: Gravitational acceleration (9.81 m/s^2)
"""

def hip_check(call_result):
    err = call_result[0]
    result = call_result[1:]
    if len(result) == 1:
        result = result[0]
    if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
        raise RuntimeError(str(err))
    elif (
        isinstance(err, hiprtc.hiprtcResult)
        and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
    ):
        raise RuntimeError(str(err))
    return result

def __init__(self,
        context,
        h0, hu0, hv0,
@@ -82,28 +79,58 @@ class LxF (Simulator.BaseSimulator):
        boundary_conditions,
        cfl_scale,
        1,
        block_width, block_height);
        block_width, block_height)
self.g = np.float32(g)

# Get kernels
# module = context.get_module("cuda/SWE2D_LxF.cu",
#         defines={
#             'BLOCK_WIDTH': self.block_size[0],
#             'BLOCK_HEIGHT': self.block_size[1]
#         },
#         compile_args={
#             'no_extern_c': True,
#             'options': ["--use_fast_math"],
#         },
#         jit_compile_args={})
# self.kernel = module.get_function("LxFKernel")
# self.kernel.prepare("iiffffiPiPiPiPiPiPiP")
#Get cuda kernels
"""
module = context.get_module("cuda/SWE2D_LxF.cu",
        defines={
            'BLOCK_WIDTH': self.block_size[0],
            'BLOCK_HEIGHT': self.block_size[1]
        },
        compile_args={
            'no_extern_c': True,
            'options': ["--use_fast_math"],
        },
        jit_compile_args={})
self.kernel = module.get_function("LxFKernel")
self.kernel.prepare("iiffffiPiPiPiPiPiPiP")
"""

kernel_file_path = os.path.abspath(os.path.join('cuda', 'SWE2D_LxF.cu.hip'))
current_dir = os.path.dirname(os.path.abspath(__file__))
# Specify the relative path to the "cuda" directory
cuda_dir = os.path.join(current_dir, 'cuda')

#kernel source
kernel_file_path = os.path.abspath(os.path.join(cuda_dir, 'SWE2D_LxF.cu.hip'))
with open(kernel_file_path, 'r') as file:
    kernel_source = file.read()

prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"LxFKernel", 0, [], []))
#headers
#common.h
header_file_path = os.path.abspath(os.path.join(cuda_dir, 'common.h'))
with open(header_file_path, 'r') as file:
    header_common = file.read()

#SWECommon.h
header_file_path = os.path.abspath(os.path.join(cuda_dir, 'SWECommon.h'))
with open(header_file_path, 'r') as file:
    header_SWECommon = file.read()

#hip.hiprtc.hiprtcCreateProgram(const char *src, const char *name, int numHeaders, headers, includeNames)

prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"LxFKernel", 2, [header_common.encode(),header_SWECommon.encode()], [b"common.h", b"SWECommon.h"]))

# Check if the program is created successfully
if prog is not None:
    print("--This is <SWE2D_LxF.cu.hip>")
    print("--HIPRTC program created successfully")
    print()
else:
    print("--Failed to create HIPRTC program")
    print("--I stop:", err)
    exit()

props = hip.hipDeviceProp_t()
hip_check(hip.hipGetDeviceProperties(props,0))
@@ -111,19 +138,38 @@ class LxF (Simulator.BaseSimulator):

print(f"Compiling kernel .LxFKernel. for {arch}")

cflags = [b"--offload-arch="+arch]
cflags = [b"--offload-arch="+arch, b"-O2", b"-D BLOCK_WIDTH="+ str(self.block_size[0]).encode(), b"-D BLOCK_HEIGHT=" + str(self.block_size[1]).encode()]

err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
# Check if the program is compiled successfully
if err is not None:
    print("--Compilation:", err)
    print("--The program is compiled successfully")
else:
    print("--Compilation:", err)
    print("--Failed to compile the program")
    print("--I stop:", err)

if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
    log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
    log = bytearray(log_size)
    hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
    raise RuntimeError(log.decode())

code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
code = bytearray(code_size)
hip_check(hiprtc.hiprtcGetCode(prog, code))
module = hip_check(hip.hipModuleLoadData(code))

kernel = hip_check(hip.hipModuleGetFunction(module, b"LxFKernel"))
#Load the code as a module
self.module = hip_check(hip.hipModuleLoadData(code))

#Get the device kernel named "LxFKernel"
self.kernel = hip_check(hip.hipModuleGetFunction(self.module, b"LxFKernel"))

print()
print("--Get the device kernel *LxFKernel* is created successfully--")
print("--kernel", self.kernel)
print()

#Create data by uploading to device
self.u0 = Common.ArakawaA2D(self.stream,
@@ -135,64 +181,79 @@ class LxF (Simulator.BaseSimulator):
        1, 1,
        [None, None, None])
#self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
data_h = np.empty(self.grid_size, dtype=np.float32)
num_bytes = data_h.size * data_h.itemsize
self.cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
        typestr="float32",shape=self.grid_size)

dt_x = np.min(self.dx / (np.abs(hu0/h0) + np.sqrt(g*h0)))
dt_y = np.min(self.dy / (np.abs(hv0/h0) + np.sqrt(g*h0)))
dt = min(dt_x, dt_y)
self.cfl_data.fill(dt, stream=self.stream)
#in HIP, the "DeviceArray" object doesn't have a 'fill' attribute
#self.cfl_data.fill(self.dt, stream=self.stream)
grid_dim_x, grid_dim_y, grid_dim_z = self.grid_size

data_h = np.zeros((grid_dim_x, grid_dim_y), dtype=np.float32)
num_bytes = data_h.size * data_h.itemsize
data_h.fill(self.dt)

self.cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
        typestr="float32",shape=(grid_dim_x, grid_dim_y))

hip_check(hip.hipMemcpyAsync(self.cfl_data,data_h,num_bytes,hip.hipMemcpyKind.hipMemcpyHostToDevice,self.stream))
#sets the memory region pointed to by x_d to zero asynchronously
#initiates the memset operation asynchronously
#hip_check(hip.hipMemsetAsync(self.cfl_data,0,num_bytes,self.stream))

def substep(self, dt, step_number):
    # self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
    #         self.nx, self.ny,
    #         self.dx, self.dy, dt,
    #         self.g,
    #         self.boundary_conditions,
    #         self.u0[0].data.gpudata, self.u0[0].data.strides[0],
    #         self.u0[1].data.gpudata, self.u0[1].data.strides[0],
    #         self.u0[2].data.gpudata, self.u0[2].data.strides[0],
    #         self.u1[0].data.gpudata, self.u1[0].data.strides[0],
    #         self.u1[1].data.gpudata, self.u1[1].data.strides[0],
    #         self.u1[2].data.gpudata, self.u1[2].data.strides[0],
    #         self.cfl_data.gpudata)
    #Cuda
    """
    self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
            self.nx, self.ny,
            self.dx, self.dy, dt,
            self.g,
            self.boundary_conditions,
            self.u0[0].data.gpudata, self.u0[0].data.strides[0],
            self.u0[1].data.gpudata, self.u0[1].data.strides[0],
            self.u0[2].data.gpudata, self.u0[2].data.strides[0],
            self.u1[0].data.gpudata, self.u1[0].data.strides[0],
            self.u1[1].data.gpudata, self.u1[1].data.strides[0],
            self.u1[2].data.gpudata, self.u1[2].data.strides[0],
            self.cfl_data.gpudata)
    self.u0, self.u1 = self.u1, self.u0
    """
    u00_strides0 = self.u0[0].data.shape[0]*np.float32().itemsize
    u01_strides0 = self.u0[1].data.shape[0]*np.float32().itemsize
    u02_strides0 = self.u0[2].data.shape[0]*np.float32().itemsize

    u10_strides0 = self.u1[0].data.shape[0]*np.float32().itemsize
    u11_strides0 = self.u1[1].data.shape[0]*np.float32().itemsize
    u12_strides0 = self.u1[2].data.shape[0]*np.float32().itemsize

    #launch kernel
    hip_check(
        hip.hipModuleLaunchKernel(
            kernel,
            *self.grid_size,
            *self.block_size,
            sharedMemBytes=0,
            self.kernel,
            *self.grid_size, #grid
            *self.block_size, #block
            sharedMemBytes=0, #65536,
            stream=self.stream,
            kernelParams=None,
            extra=( # pass kernel's arguments
                ctypes.c_int(self.nx), ctypes.c_int(self.ny),
                ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(self.dt),
                ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
                ctypes.c_float(self.g),
                ctypes.c_int(self.boundary_conditions),
                ctypes.c_float(self.u0[0].data), ctypes.c_float(self.u0[0].data.strides[0]),
                ctypes.c_float(self.u0[1].data), ctypes.c_float(self.u0[1].data.strides[0]),
                ctypes.c_float(self.u0[2].data), ctypes.c_float(self.u0[2].data.strides[0]),
                ctypes.c_float(self.u1[0].data), ctypes.c_float(self.u1[0].data.strides[0]),
                ctypes.c_float(self.u1[1].data), ctypes.c_float(self.u1[1].data.strides[0]),
                ctypes.c_float(self.u1[2].data), ctypes.c_float(self.u1[2].data.strides[0]),
                self.cfl_data
                self.u0[0].data, ctypes.c_int(u00_strides0),
                self.u0[1].data, ctypes.c_int(u01_strides0),
                self.u0[2].data, ctypes.c_int(u02_strides0),
                self.u1[0].data, ctypes.c_int(u10_strides0),
                self.u1[1].data, ctypes.c_int(u11_strides0),
                self.u1[2].data, ctypes.c_int(u12_strides0),
                self.cfl_data,
            )
        )
    )

    hip_check(hip.hipDeviceSynchronize())

    self.u0, self.u1 = self.u1, self.u0

    hip_check(hip.hipModuleUnload(module))

    hip_check(hip.hipFree(cfl_data))

    print("--Launching Kernel .LxFKernel. is ok")
    #print("--Launching Kernel .LxFKernel. is ok")

def getOutput(self):
    return self.u0

@@ -30,6 +30,19 @@ import time
#import nvtx
from hip import hip, hiprtc

def hip_check(call_result):
    err = call_result[0]
    result = call_result[1:]
    if len(result) == 1:
        result = result[0]
    if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
        raise RuntimeError(str(err))
    elif (
        isinstance(err, hiprtc.hiprtcResult)
        and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
    ):
        raise RuntimeError(str(err))
    return result

class MPIGrid(object):
    """
@@ -206,19 +219,6 @@ class MPISimulator(Simulator.BaseSimulator):
    """
    Class which handles communication between simulators on different MPI nodes
    """
    def hip_check(call_result):
        err = call_result[0]
        result = call_result[1:]
        if len(result) == 1:
            result = result[0]
        if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
            raise RuntimeError(str(err))
        elif (
            isinstance(err, hiprtc.hiprtcResult)
            and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
        ):
            raise RuntimeError(str(err))
        return result

    def __init__(self, sim, grid):
        self.profiling_data_mpi = { 'start': {}, 'end': {} }
@@ -306,58 +306,73 @@ class MPISimulator(Simulator.BaseSimulator):
#Note that east and west also transfer ghost cells
#whilst north/south only transfer internal cells
#Reuses the width/height defined in the read-extents above
##self.in_e = cuda.pagelocked_empty((int(self.nvars), int(self.read_e[3]), int(self.read_e[2])), dtype=np.float32) #np.empty((self.nvars, self.read_e[3], self.read_e[2]), dtype=np.float32)
"""
self.in_e = cuda.pagelocked_empty((int(self.nvars), int(self.read_e[3]), int(self.read_e[2])), dtype=np.float32) #np.empty((self.nvars, self.read_e[3], self.read_e[2]), dtype=np.float32)
self.in_w = cuda.pagelocked_empty((int(self.nvars), int(self.read_w[3]), int(self.read_w[2])), dtype=np.float32) #np.empty((self.nvars, self.read_w[3], self.read_w[2]), dtype=np.float32)
self.in_n = cuda.pagelocked_empty((int(self.nvars), int(self.read_n[3]), int(self.read_n[2])), dtype=np.float32) #np.empty((self.nvars, self.read_n[3], self.read_n[2]), dtype=np.float32)
self.in_s = cuda.pagelocked_empty((int(self.nvars), int(self.read_s[3]), int(self.read_s[2])), dtype=np.float32) #np.empty((self.nvars, self.read_s[3], self.read_s[2]), dtype=np.float32)
"""

##self.in_w = cuda.pagelocked_empty((int(self.nvars), int(self.read_w[3]), int(self.read_w[2])), dtype=np.float32) #np.empty((self.nvars, self.read_w[3], self.read_w[2]), dtype=np.float32)
##self.in_n = cuda.pagelocked_empty((int(self.nvars), int(self.read_n[3]), int(self.read_n[2])), dtype=np.float32) #np.empty((self.nvars, self.read_n[3], self.read_n[2]), dtype=np.float32)
##self.in_s = cuda.pagelocked_empty((int(self.nvars), int(self.read_s[3]), int(self.read_s[2])), dtype=np.float32) #np.empty((self.nvars, self.read_s[3], self.read_s[2]), dtype=np.float32)

self.in_e = np.empty((int(self.nvars), int(self.read_e[3]), int(self.read_e[2])), dtype=np.float32)
#HIP
self.in_e = np.zeros((int(self.nvars), int(self.read_e[3]), int(self.read_e[2])), dtype=np.float32)
num_bytes_e = self.in_e.size * self.in_e.itemsize
#hipHostMalloc allocates pinned host memory which is mapped into the address space of all GPUs in the system, the memory can be accessed directly by the GPU device
#hipHostMallocDefault: memory is mapped and portable (default allocation)
#hipHostMallocPortable: memory is explicitly portable across different devices
self.in_e = hip_check(hip.hipHostMalloc(num_bytes_e,hip.hipHostMallocPortable))
#self.in_e = hip_check(hip.hipHostMalloc(num_bytes_e,hip.hipHostMallocPortable))
#hip_check(hip.hipHostGetDevicePointer(self.in_e, hip.hipHostMallocPortable))

self.in_w = np.empty((int(self.nvars), int(self.read_w[3]), int(self.read_w[2])), dtype=np.float32)
#print("--hip.hipGetDeviceFlags():", hip.hipGetDeviceFlags())

self.in_w = np.zeros((int(self.nvars), int(self.read_w[3]), int(self.read_w[2])), dtype=np.float32)
num_bytes_w = self.in_w.size * self.in_w.itemsize
self.in_w = hip_check(hip.hipHostMalloc(num_bytes_w,hip.hipHostMallocPortable))
#self.in_w = hip_check(hip.hipHostMalloc(num_bytes_w,hip.hipHostMallocPortable))
#hip_check(hip.hipHostGetDevicePointer(self.in_w, hip.hipHostMallocPortable))

self.in_n = np.empty((int(self.nvars), int(self.read_n[3]), int(self.read_n[2])), dtype=np.float32)
self.in_n = np.zeros((int(self.nvars), int(self.read_n[3]), int(self.read_n[2])), dtype=np.float32)
num_bytes_n = self.in_n.size * self.in_n.itemsize
self.in_n = hip_check(hip.hipHostMalloc(num_bytes_n,hip.hipHostMallocPortable))
#self.in_n = hip_check(hip.hipHostMalloc(num_bytes_n,hip.hipHostMallocPortable))
#hip_check(hip.hipHostGetDevicePointer(self.in_n, hip.hipHostMallocPortable))

self.in_s = np.empty((int(self.nvars), int(self.read_s[3]), int(self.read_s[2])), dtype=np.float32)
self.in_s = np.zeros((int(self.nvars), int(self.read_s[3]), int(self.read_s[2])), dtype=np.float32)
num_bytes_s = self.in_s.size * self.in_s.itemsize
self.in_s = hip_check(hip.hipHostMalloc(num_bytes_s,hip.hipHostMallocPortable))
#self.in_s = hip_check(hip.hipHostMalloc(num_bytes_s,hip.hipHostMallocPortable))
#hip_check(hip.hipHostGetDevicePointer(self.in_s, hip.hipHostMallocPortable))

#Allocate data for sending
#self.out_e = cuda.pagelocked_empty((int(self.nvars), int(self.read_e[3]), int(self.read_e[2])), dtype=np.float32) #np.empty_like(self.in_e)
#self.out_w = cuda.pagelocked_empty((int(self.nvars), int(self.read_w[3]), int(self.read_w[2])), dtype=np.float32) #np.empty_like(self.in_w)
#self.out_n = cuda.pagelocked_empty((int(self.nvars), int(self.read_n[3]), int(self.read_n[2])), dtype=np.float32) #np.empty_like(self.in_n)
#self.out_s = cuda.pagelocked_empty((int(self.nvars), int(self.read_s[3]), int(self.read_s[2])), dtype=np.float32) #np.empty_like(self.in_s)
"""
self.out_e = cuda.pagelocked_empty((int(self.nvars), int(self.read_e[3]), int(self.read_e[2])), dtype=np.float32) #np.empty_like(self.in_e)
self.out_w = cuda.pagelocked_empty((int(self.nvars), int(self.read_w[3]), int(self.read_w[2])), dtype=np.float32) #np.empty_like(self.in_w)
self.out_n = cuda.pagelocked_empty((int(self.nvars), int(self.read_n[3]), int(self.read_n[2])), dtype=np.float32) #np.empty_like(self.in_n)
self.out_s = cuda.pagelocked_empty((int(self.nvars), int(self.read_s[3]), int(self.read_s[2])), dtype=np.float32) #np.empty_like(self.in_s)
"""

self.out_e = np.empty((int(self.nvars), int(self.read_e[3]), int(self.read_e[2])), dtype=np.float32)
self.out_e = np.zeros((int(self.nvars), int(self.read_e[3]), int(self.read_e[2])), dtype=np.float32)
num_bytes_e = self.out_e.size * self.out_e.itemsize
self.out_e = hip_check(hip.hipHostMalloc(num_bytes_e,hip.hipHostMallocPortable))
#self.out_e = hip_check(hip.hipHostMalloc(num_bytes_e,hip.hipHostMallocPortable))
#hip_check(hip.hipHostGetDevicePointer(self.out_e, hip.hipHostMallocPortable))

self.out_w = np.empty((int(self.nvars), int(self.read_w[3]), int(self.read_w[2])), dtype=np.float32)
self.out_w = np.zeros((int(self.nvars), int(self.read_w[3]), int(self.read_w[2])), dtype=np.float32)
num_bytes_w = self.out_w.size * self.out_w.itemsize
self.out_w = hip_check(hip.hipHostMalloc(num_bytes_w,hip.hipHostMallocPortable))
#self.out_w = hip_check(hip.hipHostMalloc(num_bytes_w,hip.hipHostMallocPortable))
#hip_check(hip.hipHostGetDevicePointer(self.out_w, hip.hipHostMallocPortable))

self.out_n = np.empty((int(self.nvars), int(self.read_n[3]), int(self.read_n[2])), dtype=np.float32)
self.out_n = np.zeros((int(self.nvars), int(self.read_n[3]), int(self.read_n[2])), dtype=np.float32)
num_bytes_n = self.out_n.size * self.out_n.itemsize
self.out_n = hip_check(hip.hipHostMalloc(num_bytes_n,hip.hipHostMallocPortable))
#self.out_n = hip_check(hip.hipHostMalloc(num_bytes_n,hip.hipHostMallocPortable))
#hip_check(hip.hipHostGetDevicePointer(self.out_n, hip.hipHostMallocPortable))

self.out_s = np.empty((int(self.nvars), int(self.read_s[3]), int(self.read_s[2])), dtype=np.float32)
self.out_s = np.zeros((int(self.nvars), int(self.read_s[3]), int(self.read_s[2])), dtype=np.float32)
num_bytes_s = self.out_s.size * self.out_s.itemsize
self.out_s = hip_check(hip.hipHostMalloc(num_bytes_s,hip.hipHostMallocPortable))
#self.out_s = hip_check(hip.hipHostMalloc(num_bytes_s,hip.hipHostMallocPortable))
#hip_check(hip.hipHostGetDevicePointer(self.out_s, hip.hipHostMallocPortable))


self.logger.debug("Simulator rank {:d} initialized on {:s}".format(self.grid.comm.rank, MPI.Get_processor_name()))

self.full_exchange()
sim.context.synchronize()
#hip_check(hip.hipDeviceSynchronize())
#sim.context.synchronize()

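The halo receive/send buffers above are allocated with hipHostMalloc so that MPI works on pinned host memory which the GPU can also access directly, replacing pycuda's pagelocked_empty. The per-side pattern condenses to the following sketch (the extents are illustrative, not the repo's):

    nvars, height, width = 3, 2, 128    # illustrative halo extents
    num_bytes = nvars * height * width * np.float32().itemsize
    # pinned, portable host allocation in place of cuda.pagelocked_empty()
    in_e = hip_check(hip.hipHostMalloc(num_bytes, hip.hipHostMallocPortable))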
def substep(self, dt, step_number):


@@ -29,11 +29,6 @@ from hip import hip, hiprtc

import time

class SHMEMSimulator(Simulator.BaseSimulator):
    """
    Class which handles communication and synchronization between simulators in different
    contexts (presumably on different GPUs)
    """
    def hip_check(call_result):
        err = call_result[0]
        result = call_result[1:]
@@ -48,6 +43,12 @@ class SHMEMSimulator(Simulator.BaseSimulator):
        raise RuntimeError(str(err))
        return result

class SHMEMSimulator(Simulator.BaseSimulator):
    """
    Class which handles communication and synchronization between simulators in different
    contexts (presumably on different GPUs)
    """

    def __init__(self, sims, grid):
        self.logger = logging.getLogger(__name__)


@@ -29,11 +29,6 @@ from hip import hip, hiprtc

import time

class SHMEMGrid(object):
    """
    Class which represents an SHMEM grid of GPUs. Facilitates easy communication between
    neighboring subdomains in the grid. Contains one CUDA context per subdomain.
    """
    def hip_check(call_result):
        err = call_result[0]
        result = call_result[1:]
@@ -48,6 +43,12 @@ class SHMEMGrid(object):
        raise RuntimeError(str(err))
        return result

class SHMEMGrid(object):
    """
    Class which represents an SHMEM grid of GPUs. Facilitates easy communication between
    neighboring subdomains in the grid. Contains one CUDA context per subdomain.
    """

    def __init__(self, ngpus=None, ndims=2):
        self.logger = logging.getLogger(__name__)

@@ -22,8 +22,10 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.

#Import packages we need
import numpy as np
import math
import logging
from enum import IntEnum
from tqdm import tqdm

#import pycuda.compiler as cuda_compiler
#import pycuda.gpuarray
@@ -34,6 +36,20 @@ from hip import hip, hiprtc
from GPUSimulators import Common


def hip_check(call_result):
    err = call_result[0]
    result = call_result[1:]
    if len(result) == 1:
        result = result[0]
    if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
        raise RuntimeError(str(err))
    elif (
        isinstance(err, hiprtc.hiprtcResult)
        and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
    ):
        raise RuntimeError(str(err))
    return result

class BoundaryCondition(object):
    """
    Class for holding boundary conditions for global boundaries
@@ -102,15 +118,6 @@ class BoundaryCondition(object):

class BaseSimulator(object):

    def hip_check(call_result):
        err = call_result[0]
        result = call_result[1:]
        if len(result) == 1:
            result = result[0]
        if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
            raise RuntimeError(str(err))
        return result

    def __init__(self,
            context,
            nx, ny,
@@ -150,18 +157,23 @@ class BaseSimulator(object):
    self.num_substeps = num_substeps

    #Handle autotuning block size
    if (self.context.autotuner):
    if self.context.autotuner:
        peak_configuration = self.context.autotuner.get_peak_performance(self.__class__)
        block_width = int(peak_configuration["block_width"])
        block_height = int(peak_configuration["block_height"])
        self.logger.debug("Used autotuning to get block size [%d x %d]", block_width, block_height)

    #Compute kernel launch parameters
    """
    self.block_size = (block_width, block_height, 1)
    self.grid_size = (
        int(np.ceil(self.nx / float(self.block_size[0]))),
        int(np.ceil(self.ny / float(self.block_size[1])))
    )
    """
    self.block_size = hip.dim3(block_width, block_height)
    #self.grid_size = hip.dim3(math.ceil(self.nx/block_width),math.ceil(self.ny/block_height))
    self.grid_size = hip.dim3((self.nx + block_width - 1)//block_width, (self.ny + block_height - 1)//block_height)

||||
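Note that the new grid-size expression stacks two round-up idioms: `math.ceil` of a true division already rounds up, and adding `block_width - 1` before an integer division is the classic alternative. Combining both launches one extra block per axis whenever `nx % block_width != 1`; either idiom alone gives the intended count, as this check shows:

```
import math

nx, block_width = 1024, 16

grid_x_ceil = math.ceil(nx / block_width)            # 64, intended
grid_x_int  = (nx + block_width - 1) // block_width  # 64, intended
grid_x_both = math.ceil((nx + block_width - 1) / block_width)  # 65, one block too many
```

The extra blocks are harmless as long as the kernels bounds-check their thread indices, but they waste launch work.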
        #Create a CUDA stream
        #self.stream = cuda.Stream()
@@ -184,27 +196,28 @@ class BaseSimulator(object):
        Requires that the step() function is implemented in the subclasses
        """

        printer = Common.ProgressPrinter(t)
        # printer = Common.ProgressPrinter(t)

        t_start = self.simTime()
        t_end = t_start + t

        update_dt = True
        if (dt is not None):
        if dt is not None:
            update_dt = False
            self.dt = dt

        while(self.simTime() < t_end):
        for _ in tqdm(range(math.ceil((t_end - t_start) / self.dt)), desc="Simulation"):
            # Update dt every 100 timesteps and cross your fingers it works
            # for the next 100
            if (update_dt and (self.simSteps() % 100 == 0)):
            # TODO: this is probably broken now after fixing the "infinite" loop
            if update_dt and (self.simSteps() % 100 == 0):
                self.dt = self.computeDt()*self.cfl_scale

            # Compute timestep for "this" iteration (i.e., shorten last timestep)
            current_dt = np.float32(min(self.dt, t_end-self.simTime()))

            # Stop if end reached (should not happen)
            if (current_dt <= 0.0):
            if current_dt <= 0.0:
                self.logger.warning("Timestep size {:d} is less than or equal to zero!".format(self.simSteps()))
                break

@@ -212,14 +225,16 @@ class BaseSimulator(object):
            self.step(current_dt)

            #Print info
            print_string = printer.getPrintString(self.simTime() - t_start)
            if (print_string):
                self.logger.info("%s: %s", self, print_string)
                try:
                    self.check()
                except AssertionError as e:
                    e.args += ("Step={:d}, time={:f}".format(self.simSteps(), self.simTime()),)
                    raise
            # print_string = printer.getPrintString(self.simTime() - t_start)
            # if (print_string):
            #     self.logger.info("%s: %s", self, print_string)
            #     try:
            #         self.check()
            #     except AssertionError as e:
            #         e.args += ("Step={:d}, time={:f}".format(self.simSteps(), self.simTime()),)
            #         raise

        print("Done")


    def step(self, dt):
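As the TODO notes, driving the loop with a fixed `range` computed from the initial `dt` only approximates the end time once `dt` is re-estimated every 100 steps. The control flow of the rewritten `simulate()`, reduced to a standalone sketch (`compute_dt` and `step` are stand-ins for the simulator methods):

```
import math
import numpy as np

def simulate(t_total, dt=None, cfl_scale=0.9,
             compute_dt=lambda: 1e-3, step=lambda dt: None):
    """Sketch of the timestep loop: fixed dt if given,
    otherwise re-estimated from the CFL condition every 100 steps."""
    update_dt = dt is None
    if update_dt:
        dt = compute_dt() * cfl_scale
    t, steps = 0.0, 0
    for _ in range(math.ceil(t_total / dt)):
        if update_dt and steps % 100 == 0:
            dt = compute_dt() * cfl_scale
        # Shorten the final step so we land exactly on t_total
        current_dt = np.float32(min(dt, t_total - t))
        if current_dt <= 0.0:
            break
        step(current_dt)
        t += float(current_dt)
        steps += 1
    return t, steps

simulate(1.0)  # runs until t_total is reached with the stand-in compute_dt
```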
@@ -1,8 +1,8 @@
# -*- coding: utf-8 -*-

"""
This python module implements the Weighted average flux (WAF) described in
E. Toro, Shock-Capturing methods for free-surface shallow flows, 2001
This python module implements the FORCE flux
for the shallow water equations

Copyright (C) 2016 SINTEF ICT

@@ -28,8 +28,21 @@ import ctypes

#from pycuda import gpuarray
from hip import hip, hiprtc
from hip import hipblas


def hip_check(call_result):
    err = call_result[0]
    result = call_result[1:]
    if len(result) == 1:
        result = result[0]
    if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
        raise RuntimeError(str(err))
    elif (
        isinstance(err, hiprtc.hiprtcResult)
        and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
    ):
        raise RuntimeError(str(err))
    return result

"""
Class that solves the SW equations using the Forward-Backward linear scheme
@@ -49,20 +62,6 @@ class WAF (Simulator.BaseSimulator):
    g: Gravitational acceleration (9.81 m/s^2)
    """

    def hip_check(call_result):
        err = call_result[0]
        result = call_result[1:]
        if len(result) == 1:
            result = result[0]
        if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
            raise RuntimeError(str(err))
        elif (
            isinstance(err, hiprtc.hiprtcResult)
            and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS
        ):
            raise RuntimeError(str(err))
        return result

    def __init__(self,
                 context,
                 h0, hu0, hv0,
@@ -80,28 +79,58 @@ class WAF (Simulator.BaseSimulator):
                         boundary_conditions,
                         cfl_scale,
                         2,
                         block_width, block_height);
                         block_width, block_height)
        self.g = np.float32(g)

        #Get kernels
        # module = context.get_module("cuda/SWE2D_WAF.cu",
        #                             defines={
        #                                 'BLOCK_WIDTH': self.block_size[0],
        #                                 'BLOCK_HEIGHT': self.block_size[1]
        #                             },
        #                             compile_args={
        #                                 'no_extern_c': True,
        #                                 'options': ["--use_fast_math"],
        #                             },
        #                             jit_compile_args={})
        # self.kernel = module.get_function("WAFKernel")
        # self.kernel.prepare("iiffffiiPiPiPiPiPiPiP")
        #Get cuda kernels
        """
        module = context.get_module("cuda/SWE2D_WAF.cu",
                                    defines={
                                        'BLOCK_WIDTH': self.block_size[0],
                                        'BLOCK_HEIGHT': self.block_size[1]
                                    },
                                    compile_args={
                                        'no_extern_c': True,
                                        'options': ["--use_fast_math"],
                                    },
                                    jit_compile_args={})
        self.kernel = module.get_function("WAFKernel")
        self.kernel.prepare("iiffffiPiPiPiPiPiPiP")
        """

        kernel_file_path = os.path.abspath(os.path.join('cuda', 'SWE2D_WAF.cu.hip'))
        current_dir = os.path.dirname(os.path.abspath(__file__))
        # Specify the relative path to the "cuda" directory
        cuda_dir = os.path.join(current_dir, 'cuda')

        #kernel source
        kernel_file_path = os.path.abspath(os.path.join(cuda_dir, 'SWE2D_WAF.cu.hip'))
        with open(kernel_file_path, 'r') as file:
            kernel_source = file.read()

        prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"WAFKernel", 0, [], []))
        #headers
        #common.h
        header_file_path = os.path.abspath(os.path.join(cuda_dir, 'common.h'))
        with open(header_file_path, 'r') as file:
            header_common = file.read()

        #SWECommon.h
        header_file_path = os.path.abspath(os.path.join(cuda_dir, 'SWECommon.h'))
        with open(header_file_path, 'r') as file:
            header_SWECommon = file.read()

        #hip.hiprtc.hiprtcCreateProgram(const char *src, const char *name, int numHeaders, headers, includeNames)

        prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"WAFKernel", 2, [header_common.encode(), header_SWECommon.encode()], [b"common.h", b"SWECommon.h"]))

        # Check if the program is created successfully
        if prog is not None:
            print("--This is <SWE2D_WAF.cu.hip>")
            print("--HIPRTC program created successfully")
            print()
        else:
            print("--Failed to create HIPRTC program")
            print("--I stop:", err)
            exit()

        props = hip.hipDeviceProp_t()
        hip_check(hip.hipGetDeviceProperties(props, 0))
@@ -109,19 +138,38 @@ class WAF (Simulator.BaseSimulator):

        print(f"Compiling kernel .WAFKernel. for {arch}")

        cflags = [b"--offload-arch="+arch]
        cflags = [b"--offload-arch="+arch, b"-O2", b"-D BLOCK_WIDTH=" + str(self.block_size[0]).encode(), b"-D BLOCK_HEIGHT=" + str(self.block_size[1]).encode()]

        err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
        # Check if the program is compiled successfully
        if err is not None:
            print("--Compilation:", err)
            print("--The program is compiled successfully")
        else:
            print("--Compilation:", err)
            print("--Failed to compile the program")
            print("--I stop:", err)

        if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
            log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
            log = bytearray(log_size)
            hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
            raise RuntimeError(log.decode())

        code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
        code = bytearray(code_size)
        hip_check(hiprtc.hiprtcGetCode(prog, code))
        module = hip_check(hip.hipModuleLoadData(code))

        kernel = hip_check(hip.hipModuleGetFunction(module, b"WAFKernel"))
        #Load the code as a module
        self.module = hip_check(hip.hipModuleLoadData(code))

        #Get the device kernel named "WAFKernel"
        self.kernel = hip_check(hip.hipModuleGetFunction(self.module, b"WAFKernel"))

        print()
        print("--Get the device kernel *WAFKernel* is created successfully--")
        print("--kernel", self.kernel)
        print()
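The block above is the full HIPRTC round trip: create a program from source plus headers, compile for the device architecture, extract the code object, load it as a module, and look up the kernel. Condensed to its essentials (the kernel source here is a stub, and `props.gcnArchName` is assumed to be how the elided hunk obtains `arch`):

```
from hip import hip, hiprtc

# hip_check: the error-unwrapping helper defined above

source = b"""
extern "C" __global__ void WAFKernel() { }
"""

prog = hip_check(hiprtc.hiprtcCreateProgram(source, b"WAFKernel", 0, [], []))

props = hip.hipDeviceProp_t()
hip_check(hip.hipGetDeviceProperties(props, 0))
arch = props.gcnArchName  # e.g. b"gfx90a:..." on LUMI-G

cflags = [b"--offload-arch=" + arch]
err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
    # On failure, the compile log says why
    log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
    log = bytearray(log_size)
    hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
    raise RuntimeError(log.decode())

code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
code = bytearray(code_size)
hip_check(hiprtc.hiprtcGetCode(prog, code))
module = hip_check(hip.hipModuleLoadData(code))
kernel = hip_check(hip.hipModuleGetFunction(module, b"WAFKernel"))
```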
        #Create data by uploading to device
        self.u0 = Common.ArakawaA2D(self.stream,
@@ -133,69 +181,84 @@ class WAF (Simulator.BaseSimulator):
                                    2, 2,
                                    [None, None, None])
        #self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
        data_h = np.empty(self.grid_size, dtype=np.float32)
        num_bytes = data_h.size * data_h.itemsize
        self.cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
            typestr="float32", shape=self.grid_size)

        dt_x = np.min(self.dx / (np.abs(hu0/h0) + np.sqrt(g*h0)))
        dt_y = np.min(self.dy / (np.abs(hv0/h0) + np.sqrt(g*h0)))
        dt = min(dt_x, dt_y)
        self.cfl_data.fill(dt, stream=self.stream)
        #in HIP, the "DeviceArray" object doesn't have a 'fill' attribute
        #self.cfl_data.fill(self.dt, stream=self.stream)
        grid_dim_x, grid_dim_y, grid_dim_z = self.grid_size

        data_h = np.zeros((grid_dim_x, grid_dim_y), dtype=np.float32)
        num_bytes = data_h.size * data_h.itemsize
        data_h.fill(self.dt)

        self.cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
            typestr="float32", shape=(grid_dim_x, grid_dim_y))

        hip_check(hip.hipMemcpyAsync(self.cfl_data, data_h, num_bytes, hip.hipMemcpyKind.hipMemcpyHostToDevice, self.stream))
        #sets the memory region pointed to by x_d to zero asynchronously
        #initiates the memset operation asynchronously
        #hip_check(hip.hipMemsetAsync(self.cfl_data, 0, num_bytes, self.stream))

    def substep(self, dt, step_number):
        self.substepDimsplit(dt*0.5, step_number)

    def substepDimsplit(self, dt, substep):
        # self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
        #                                 self.nx, self.ny,
        #                                 self.dx, self.dy, dt,
        #                                 self.g,
        #                                 substep,
        #                                 self.boundary_conditions,
        #                                 self.u0[0].data.gpudata, self.u0[0].data.strides[0],
        #                                 self.u0[1].data.gpudata, self.u0[1].data.strides[0],
        #                                 self.u0[2].data.gpudata, self.u0[2].data.strides[0],
        #                                 self.u1[0].data.gpudata, self.u1[0].data.strides[0],
        #                                 self.u1[1].data.gpudata, self.u1[1].data.strides[0],
        #                                 self.u1[2].data.gpudata, self.u1[2].data.strides[0],
        #                                 self.cfl_data.gpudata)
        #Cuda
        """
        self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
                                        self.nx, self.ny,
                                        self.dx, self.dy, dt,
                                        self.g,
                                        substep,
                                        self.boundary_conditions,
                                        self.u0[0].data.gpudata, self.u0[0].data.strides[0],
                                        self.u0[1].data.gpudata, self.u0[1].data.strides[0],
                                        self.u0[2].data.gpudata, self.u0[2].data.strides[0],
                                        self.u1[0].data.gpudata, self.u1[0].data.strides[0],
                                        self.u1[1].data.gpudata, self.u1[1].data.strides[0],
                                        self.u1[2].data.gpudata, self.u1[2].data.strides[0],
                                        self.cfl_data.gpudata)
        self.u0, self.u1 = self.u1, self.u0
        """
        u00_strides0 = self.u0[0].data.shape[0]*np.float32().itemsize
        u01_strides0 = self.u0[1].data.shape[0]*np.float32().itemsize
        u02_strides0 = self.u0[2].data.shape[0]*np.float32().itemsize

        u10_strides0 = self.u1[0].data.shape[0]*np.float32().itemsize
        u11_strides0 = self.u1[1].data.shape[0]*np.float32().itemsize
        u12_strides0 = self.u1[2].data.shape[0]*np.float32().itemsize

        #launch kernel
        hip_check(
            hip.hipModuleLaunchKernel(
                kernel,
                *self.grid_size,
                *self.block_size,
                sharedMemBytes=0,
                self.kernel,
                *self.grid_size,   #grid
                *self.block_size,  #block
                sharedMemBytes=0,  #65536,
                stream=self.stream,
                kernelParams=None,
                extra=(  # pass kernel's arguments
                    ctypes.c_int(self.nx), ctypes.c_int(self.ny),
                    ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(self.dt),
                    ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
                    ctypes.c_float(self.g),
                    ctypes.c_int(substep),
                    ctypes.c_int(self.boundary_conditions),
                    ctypes.c_float(self.u0[0].data), ctypes.c_float(self.u0[0].data.strides[0]),
                    ctypes.c_float(self.u0[1].data), ctypes.c_float(self.u0[1].data.strides[0]),
                    ctypes.c_float(self.u0[2].data), ctypes.c_float(self.u0[2].data.strides[0]),
                    ctypes.c_float(self.u1[0].data), ctypes.c_float(self.u1[0].data.strides[0]),
                    ctypes.c_float(self.u1[1].data), ctypes.c_float(self.u1[1].data.strides[0]),
                    ctypes.c_float(self.u1[2].data), ctypes.c_float(self.u1[2].data.strides[0]),
                    self.cfl_data
                    self.u0[0].data, ctypes.c_int(u00_strides0),
                    self.u0[1].data, ctypes.c_int(u01_strides0),
                    self.u0[2].data, ctypes.c_int(u02_strides0),
                    self.u1[0].data, ctypes.c_int(u10_strides0),
                    self.u1[1].data, ctypes.c_int(u11_strides0),
                    self.u1[2].data, ctypes.c_int(u12_strides0),
                    self.cfl_data,
                )
            )
        )

        hip_check(hip.hipDeviceSynchronize())

        self.u0, self.u1 = self.u1, self.u0

        hip_check(hip.hipModuleUnload(module))

        hip_check(hip.hipFree(cfl_data))

        print("--Launching Kernel .WAFKernel. is ok")
        #print("--Launching Kernel .WAFKernel. is ok")

    def getOutput(self):
        return self.u0
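The launch above passes kernel arguments through `extra=`, and the diff's key correction is visible in the argument pack: device pointers go in raw (the old code wrapped them, and the strides, in `ctypes.c_float`), while every scalar must be wrapped in the ctypes type matching the kernel signature. A reduced sketch of the pattern for a hypothetical two-argument kernel (`kernel`, `data_d`, `n`, and the launch dims are placeholders):

```
import ctypes
from hip import hip

# hip_check, kernel, and data_d as produced by the HIPRTC sketch above;
# assume the device code is: extern "C" __global__ void scale(float* data, int n)
n = 1024
grid = hip.dim3(4, 1)
block = hip.dim3(256, 1)

hip_check(
    hip.hipModuleLaunchKernel(
        kernel,
        *grid,             # gridDimX, gridDimY, gridDimZ
        *block,            # blockDimX, blockDimY, blockDimZ
        sharedMemBytes=0,
        stream=None,       # None = default stream
        kernelParams=None,
        extra=(
            data_d,            # device pointer: passed unwrapped
            ctypes.c_int(n),   # scalar: wrapped to match the kernel signature
        ),
    )
)
hip_check(hip.hipDeviceSynchronize())
```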
Binary file not shown.
Binary file not shown.
Binary file not shown.
@@ -25,7 +25,6 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
#include "EulerCommon.h"
#include "limiters.h"


__device__
void computeFluxF(float Q[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
                  float Qx[4][BLOCK_HEIGHT+4][BLOCK_WIDTH+4],
@@ -24,6 +24,8 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.

#pragma once

#include <stddef.h>
#include <float.h>

/**
 * Float3 operators
@@ -86,9 +88,6 @@ __device__ float desingularize(float x_, float eps_) {




/**
 * Returns the step stored in the leftmost 16 bits
 * of the 32 bit step-order integer
@@ -497,14 +496,18 @@ __device__ void memset(float Q[vars][shmem_height][shmem_width], float value) {


template <unsigned int threads>
__device__ void reduce_max(float* data, unsigned int n) {
//__device__ void reduce_max(float* data, unsigned int n) {
__device__ float reduce_max(float* data, unsigned int n) {
    __shared__ float sdata[threads];
    unsigned int tid = threadIdx.x;

    //Reduce to "threads" elements
    sdata[tid] = FLT_MIN;
    for (unsigned int i=tid; i<n; i += threads) {
        sdata[tid] = max(sdata[tid], dt_ctx.L[i]);

        //sdata[tid] = max(sdata[tid], dt_ctx.L[i]);
        sdata[tid] = max(sdata[tid], data[i]);

    }
    __syncthreads();
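The fixed `reduce_max` first folds the `n` input values into `threads` shared-memory slots with a strided pass (using the `data[i]` indexing the diff corrects), then tree-reduces those slots. A host-side numpy model of the strided phase, handy for checking the kernel against (sizes are illustrative):

```
import numpy as np

def strided_max(data: np.ndarray, threads: int) -> np.ndarray:
    """Model of the first phase of reduce_max<threads>: each 'thread' tid
    folds elements tid, tid+threads, tid+2*threads, ... into one slot."""
    sdata = np.full(threads, np.finfo(np.float32).tiny, dtype=np.float32)  # FLT_MIN
    for tid in range(threads):
        for i in range(tid, data.size, threads):
            sdata[tid] = max(sdata[tid], data[i])
    return sdata  # the kernel then tree-reduces these 'threads' values

vals = np.random.rand(1000).astype(np.float32)
assert strided_max(vals, 64).max() == vals.max()
```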
@@ -4,10 +4,11 @@
#SBATCH --time=00:10:00
#SBATCH --partition=dev-g
#SBATCH --nodes=1
#SBATCH --ntasks-per-node=2
#SBATCH --gpus=2
#SBATCH --gpus-per-node=2
#SBATCH --ntasks-per-node=8
#SBATCH --gpus=8
#SBATCH --gpus-per-node=8
#SBATCH -o %x-%j.out
#SBATCH --exclusive
#

N=$SLURM_JOB_NUM_NODES
@@ -18,17 +19,21 @@ Mydir=/project/project_4650000xx
Myapplication=${Mydir}/FiniteVolumeGPU_hip/mpiTesting.py

#modules
ml LUMI/23.03 partition/G
ml LUMI/24.03 partition/G
ml lumi-container-wrapper
ml cray-python/3.9.13.1
ml rocm/5.2.3
ml cray-python/3.11.7
ml rocm/6.2.2

ml craype-accel-amd-gfx90a
ml cray-mpich/8.1.27

#Enable GPU-aware MPI
export MPICH_GPU_SUPPORT_ENABLED=1
ml cray-mpich/8.1.29

export PATH="/project/project_4650000xx/FiniteVolumeGPU_hip/MyCondaEnv/bin:$PATH"

srun python ${Myapplication} -nx 1024 -ny 1024 --profile
#missing library
export LD_LIBRARY_PATH=/opt/cray/pe/mpich/8.1.29/ofi/cray/17.0/lib-abi-mpich:$LD_LIBRARY_PATH

#Binding mask
bind_mask="0x${fe}000000000000,0x${fe}00000000000000,0x${fe}0000,0x${fe}000000,0x${fe},0x${fe}00,0x${fe}00000000,0x${fe}0000000000"

srun --cpu-bind=mask_cpu:$bind_mask \
    python ${Myapplication} -nx 1024 -ny 1024 --profile
README.md (53 changed lines)
@@ -1,41 +1,31 @@
# FiniteVolumeGPU

This is a HIP version of the [FiniteVolume code](https://github.com/babrodtk/FiniteVolumeGPU) (work in progress). It is a Python software package that implements several finite volume discretizations on Cartesian grids for the shallow water equations and the Euler equations.

## Setup
A good place to start exploring this codebase is the notebooks. Complete the following steps to run the notebooks:

1. Install conda (see e.g. Miniconda or Anaconda)
2. Change directory to the repository root and run the following commands
3. conda env create -f conda_environment.yml
4. conda activate ShallowWaterGPU
5. jupyter notebook

Make sure you are running the correct kernel ("conda:ShallowWaterGPU"). If not, change kernel using the "Kernel"-menu in the notebook.

If you do not need to run notebooks you may use the conda environment found in conda_environment_hpc.yml

## Troubleshooting
Have a look at the conda documentation and https://towardsdatascience.com/how-to-set-up-anaconda-and-jupyter-notebook-the-right-way-de3b7623ea4a
This is a HIP version of the [FiniteVolume code](https://github.com/babrodtk/FiniteVolumeGPU). It is a Python software package that implements several finite volume discretizations on Cartesian grids for the shallow water equations and the Euler equations.

## Setup on LUMI-G
Here is a step-by-step guide to installing the packages on LUMI-G.

### Step 0: load modules
### Step 1: Install rocm-5.4.6 with EasyBuild
```
ml LUMI/23.03
ml lumi-container-wrapper
ml cray-python/3.9.13.1
export EBU_USER_PREFIX=/project/project_xxxxxx/EasyBuild
ml LUMI/24.03 partition/G
ml EasyBuild-user
export PYTHONIOENCODING=utf-8
eb rocm-5.4.6.eb -r
```

### Step 1: run conda-container
### Step 2: run conda-container
Installation via conda can be done as:
```
ml LUMI/24.03 partition/G
ml lumi-container-wrapper/0.3.3-cray-python-3.11.7
```
```
conda-containerize new --prefix MyCondaEnv conda_environment_lumi.yml
```
where the file `conda_environment_lumi.yml` contains the packages to be installed.

### Step 2: Set the env. variable to search for binaries
### Step 3: Set the env. variable to search for binaries
```
export the bin path: export PATH="$PWD/MyCondaEnv/bin:$PATH"
```
@@ -43,3 +33,20 @@ export the bin path: export PATH="$PWD/MyCondaEnv/bin:$PATH"
```
cotainr build my_container.sif --system=lumi-g --conda-env=conda_environment_lumi.yml
```

### Error when running MPI
```
MPI startup(): PMI server not found. Please set I_MPI_PMI_LIBRARY variable if it is not a singleton case.
```
This can be resolved by exporting this:
```
export I_MPI_PMI_LIBRARY=/opt/cray/pe/mpich/8.1.27/ofi/cray/14.0/lib/libmpi.so
```
### Install hip-python
```
python -m pip install -i https://test.pypi.org/simple/ hip-python==5.4.3.470.16
```

The testing was done with this specific version `hip-python==5.4.3.470.16`.
@@ -5,16 +5,17 @@ channels:
  - conda-forge

dependencies:
  - python=3.9
  - python=3.11.7
  - pip
  - numpy
  - mpi4py
  - six
  - pytools
  - netcdf4
  - scipy
  - tqdm
  - pip:
    - hip-python
    - hip-python-as-cuda
    - hip-python==6.2.0.499.16
    - -i https://test.pypi.org/simple/
@@ -28,17 +28,19 @@ import logging
import os

#GPU-aware MPI
"""
from os import environ
if environ.get("MPICH_GPU_SUPPORT_ENABLED", False):
    from ctypes import CDLL, RTLD_GLOBAL
    CDLL(f"{environ.get('CRAY_MPICH_ROOTDIR')}/gtl/lib/libmpi_gtl_hsa.so", mode=RTLD_GLOBAL)
"""
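The commented-out block preloads Cray MPICH's GTL library so device pointers can be handed directly to MPI; crucially, it must run before `mpi4py` is imported. The same guard as live code (the library path comes from the Cray environment and is taken verbatim from the snippet above):

```
import os
from ctypes import CDLL, RTLD_GLOBAL

# Must run before mpi4py is imported, so MPI initializes with GTL loaded
if os.environ.get("MPICH_GPU_SUPPORT_ENABLED", False):
    CDLL(
        f"{os.environ.get('CRAY_MPICH_ROOTDIR')}/gtl/lib/libmpi_gtl_hsa.so",
        mode=RTLD_GLOBAL,
    )

from mpi4py import MPI  # noqa: E402
```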
# MPI
from mpi4py import MPI

# CUDA
#import pycuda.driver as cuda
from hip import hip
from hip import hip, hiprtc

# Simulator engine etc
from GPUSimulators import MPISimulator, Common, CudaContext
@@ -68,7 +70,7 @@ def hip_check(call_result):

args = parser.parse_args()

if(args.profile):
if args.profile:
    profiling_data = {}
    # profiling: total run time
    t_total_start = time.time()
@@ -77,6 +79,8 @@ if(args.profile):

# Get MPI COMM to use
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()

####
@@ -84,7 +88,7 @@ comm = MPI.COMM_WORLD
####
log_level_console = 20
log_level_file = 10
log_filename = 'mpi_' + str(comm.rank) + '.log'
log_filename = 'mpi_' + str(rank) + '.log'
logger = logging.getLogger('GPUSimulators')
logger.setLevel(min(log_level_console, log_level_file))

@@ -108,8 +112,17 @@ logger.info("File logger using level %s to %s",
# Initialize MPI grid etc
####
logger.info("Creating MPI grid")
grid = MPISimulator.MPIGrid(MPI.COMM_WORLD)
grid = MPISimulator.MPIGrid(comm)

"""
job_id = int(os.environ["SLURM_JOB_ID"])
allocated_nodes = int(os.environ["SLURM_JOB_NUM_NODES"])
allocated_gpus = int(os.environ["ROCR_VISIBLE_DEVICES"].count(",") + 1)

print("job_id:", job_id)
print("allocated_nodes", allocated_nodes)
print("allocated_gpus", allocated_gpus)
"""

####
# Initialize CUDA
@@ -123,7 +136,6 @@ cuda_device = local_rank % num_cuda_devices
logger.info("Process %s using CUDA device %s", str(local_rank), str(cuda_device))
cuda_context = CudaContext.CudaContext(device=cuda_device, autotuning=False)
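Device selection maps the node-local rank onto the visible GPUs with a modulo, so ranks beyond the device count share GPUs round-robin. A standalone sketch of that mapping (the shared-memory split is one way to obtain `local_rank`; the script's own computation sits outside this hunk):

```
from mpi4py import MPI
from hip import hip

# hip_check: the error-unwrapping helper defined earlier in this script

comm = MPI.COMM_WORLD
local_comm = comm.Split_type(MPI.COMM_TYPE_SHARED)   # ranks on the same node
local_rank = local_comm.Get_rank()

num_devices = hip_check(hip.hipGetDeviceCount())
device = local_rank % num_devices                    # round-robin over GPUs
hip_check(hip.hipSetDevice(device))
print(f"rank {comm.rank}: local rank {local_rank} -> HIP device {device}")
```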
####
# Set initial conditions
####
@@ -142,7 +154,7 @@ gamma = 1.4
#save_times = np.linspace(0, 0.000099, 11)
#save_times = np.linspace(0, 0.000099, 2)
save_times = np.linspace(0, 0.0000999, 2)
outfile = "mpi_out_" + str(MPI.COMM_WORLD.rank) + ".nc"
outfile = "mpi_out_" + str(rank) + ".nc"
save_var_names = ['rho', 'rho_u', 'rho_v', 'E']

arguments = IC.genKelvinHelmholtz(nx, ny, gamma, grid=grid)
@@ -150,7 +162,7 @@ arguments['context'] = cuda_context
arguments['theta'] = 1.2
arguments['grid'] = grid

if(args.profile):
if args.profile:
    t_init_end = time.time()
    t_init = t_init_end - t_init_start
    profiling_data["t_init"] = t_init
@@ -168,28 +180,28 @@ def genSim(grid, **kwargs):
    return sim


outfile, sim_runner_profiling_data, sim_profiling_data = Common.runSimulation(
(outfile, sim_runner_profiling_data, sim_profiling_data) = Common.runSimulation(
    genSim, arguments, outfile, save_times, save_var_names, dt)

if(args.profile):
if args.profile:
    t_total_end = time.time()
    t_total = t_total_end - t_total_start
    profiling_data["t_total"] = t_total
    print("Total run time on rank " + str(MPI.COMM_WORLD.rank) + " is " + str(t_total) + " s")
    print("Total run time on rank " + str(rank) + " is " + str(t_total) + " s")

# write profiling to json file
if(args.profile and MPI.COMM_WORLD.rank == 0):
if args.profile and rank == 0:
    job_id = ""
    if "SLURM_JOB_ID" in os.environ:
        job_id = int(os.environ["SLURM_JOB_ID"])
        allocated_nodes = int(os.environ["SLURM_JOB_NUM_NODES"])
        allocated_gpus = int(os.environ["HIP_VISIBLE_DEVICES"].count(",") + 1)
        allocated_gpus = int(os.environ["ROCR_VISIBLE_DEVICES"].count(",") + 1)
        # allocated_gpus = int(os.environ["CUDA_VISIBLE_DEVICES"].count(",") + 1)
        profiling_file = "MPI_jobid_" + \
            str(job_id) + "_" + str(allocated_nodes) + "_nodes_and_" + str(allocated_gpus) + "_GPUs_profiling.json"
        profiling_data["outfile"] = outfile
    else:
        profiling_file = "MPI_" + str(MPI.COMM_WORLD.size) + "_procs_and_" + str(num_cuda_devices) + "_GPUs_profiling.json"
        profiling_file = "MPI_" + str(size) + "_procs_and_" + str(num_cuda_devices) + "_GPUs_profiling.json"

    for stage in sim_runner_profiling_data["start"].keys():
        profiling_data[stage] = sim_runner_profiling_data["end"][stage] - sim_runner_profiling_data["start"][stage]
@@ -204,7 +216,7 @@ if(args.profile and MPI.COMM_WORLD.rank == 0):

    profiling_data["slurm_job_id"] = job_id
    profiling_data["n_cuda_devices"] = str(num_cuda_devices)
    profiling_data["n_processes"] = str(MPI.COMM_WORLD.size)
    profiling_data["n_processes"] = str(size)
    profiling_data["git_hash"] = Common.getGitHash()
    profiling_data["git_status"] = Common.getGitStatus()
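The hunk ends before the actual serialization; writing `profiling_data` out is assumed to be a plain `json.dump` to the filename assembled above. A sketch with illustrative values:

```
import json

profiling_data = {"t_init": 1.2, "t_total": 45.6}    # illustrative values
profiling_file = "MPI_jobid_1234567_1_nodes_and_8_GPUs_profiling.json"

with open(profiling_file, "w") as out:
    json.dump(profiling_data, out, indent=4)
```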