Mirror of https://github.com/smyalygames/FiniteVolumeGPU_HIP.git, synced 2025-05-18 06:24:11 +02:00

re-write HLL2.py with hip-python

This commit is contained in:
parent 6a11b0e3ab
commit 866e54c43f
@@ -24,8 +24,8 @@ from GPUSimulators import Simulator, Common
 from GPUSimulators.Simulator import BaseSimulator, BoundaryCondition
 import numpy as np
 
-from pycuda import gpuarray
-
+#from pycuda import gpuarray
+from hip import hip, hiprtc, hipblas  # hipblas is needed by min_hipblas below
+import ctypes  # needed to pack kernel arguments in substepDimsplit
+import os      # needed to locate the .cu.hip kernel source
 
 
 
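All of the new HIP calls below are wrapped in a hip_check helper that this diff never defines. A minimal sketch of such a helper, following the (err, result) tuple convention of hip-python's bindings:

import ctypes
from hip import hip, hiprtc

def hip_check(call_result):
    # hip-python calls return a tuple whose first element is the status
    # code; unwrap it, raise on failure, and return the remaining values
    err = call_result[0]
    result = call_result[1:]
    if len(result) == 1:
        result = result[0]
    if isinstance(err, hip.hipError_t) and err != hip.hipError_t.hipSuccess:
        raise RuntimeError(f"HIP error: {err}")
    elif isinstance(err, hiprtc.hiprtcResult) and err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
        raise RuntimeError(f"HIPRTC error: {err}")
    return result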
@@ -73,18 +73,44 @@ class HLL2 (Simulator.BaseSimulator):
         self.theta = np.float32(theta)
 
         #Get kernels
-        module = context.get_module("cuda/SWE2D_HLL2.cu",
-                                    defines={
-                                        'BLOCK_WIDTH': self.block_size[0],
-                                        'BLOCK_HEIGHT': self.block_size[1]
-                                    },
-                                    compile_args={
-                                        'no_extern_c': True,
-                                        'options': ["--use_fast_math"],
-                                    },
-                                    jit_compile_args={})
-        self.kernel = module.get_function("HLL2Kernel")
-        self.kernel.prepare("iifffffiiPiPiPiPiPiPiP")
+        # module = context.get_module("cuda/SWE2D_HLL2.cu",
+        #                             defines={
+        #                                 'BLOCK_WIDTH': self.block_size[0],
+        #                                 'BLOCK_HEIGHT': self.block_size[1]
+        #                             },
+        #                             compile_args={
+        #                                 'no_extern_c': True,
+        #                                 'options': ["--use_fast_math"],
+        #                             },
+        #                             jit_compile_args={})
+        # self.kernel = module.get_function("HLL2Kernel")
+        # self.kernel.prepare("iifffffiiPiPiPiPiPiPiP")
+
+        kernel_file_path = os.path.abspath(os.path.join('cuda', 'SWE2D_HLL2.cu.hip'))
+        with open(kernel_file_path, 'r') as file:
+            kernel_source = file.read()
+
+        prog = hip_check(hiprtc.hiprtcCreateProgram(kernel_source.encode(), b"HLL2Kernel", 0, [], []))
+
+        props = hip.hipDeviceProp_t()
+        hip_check(hip.hipGetDeviceProperties(props, 0))
+        arch = props.gcnArchName
+
+        print(f"Compiling kernel 'HLL2Kernel' for {arch}")
+
+        cflags = [b"--offload-arch=" + arch]
+        err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
+        if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
+            log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
+            log = bytearray(log_size)
+            hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
+            raise RuntimeError(log.decode())
+        code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
+        code = bytearray(code_size)
+        hip_check(hiprtc.hiprtcGetCode(prog, code))
+        # stored on self so that substepDimsplit can launch the kernel later
+        self.module = hip_check(hip.hipModuleLoadData(code))
+        self.kernel = hip_check(hip.hipModuleGetFunction(self.module, b"HLL2Kernel"))
 
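The hiprtc sequence above (create program, query the device arch, compile, dump the log on failure, load the code object) will recur in every simulator this repo ports. A hedged refactoring sketch; the helper name compile_hip_kernel is hypothetical:

def compile_hip_kernel(source_path, kernel_name):
    # read the HIP source and compile it with hiprtc for the current device,
    # returning (module, kernel); kernel_name must be bytes, e.g. b"HLL2Kernel"
    with open(source_path, 'r') as f:
        source = f.read()
    prog = hip_check(hiprtc.hiprtcCreateProgram(source.encode(), kernel_name, 0, [], []))
    props = hip.hipDeviceProp_t()
    hip_check(hip.hipGetDeviceProperties(props, 0))
    cflags = [b"--offload-arch=" + props.gcnArchName]
    err, = hiprtc.hiprtcCompileProgram(prog, len(cflags), cflags)
    if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
        log_size = hip_check(hiprtc.hiprtcGetProgramLogSize(prog))
        log = bytearray(log_size)
        hip_check(hiprtc.hiprtcGetProgramLog(prog, log))
        raise RuntimeError(log.decode())
    code_size = hip_check(hiprtc.hiprtcGetCodeSize(prog))
    code = bytearray(code_size)
    hip_check(hiprtc.hiprtcGetCode(prog, code))
    module = hip_check(hip.hipModuleLoadData(code))
    kernel = hip_check(hip.hipModuleGetFunction(module, kernel_name))
    return module, kernel

With this, the inline block above would reduce to self.module, self.kernel = compile_hip_kernel(kernel_file_path, b"HLL2Kernel"), and the same helper would serve the other SWE schemes being ported.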
         #Create data by uploading to device
         self.u0 = Common.ArakawaA2D(self.stream,
@@ -95,7 +121,12 @@ class HLL2 (Simulator.BaseSimulator):
                         nx, ny,
                         2, 2,
                         [None, None, None])
-        self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
+        #self.cfl_data = gpuarray.GPUArray(self.grid_size, dtype=np.float32)
+        data_h = np.empty(self.grid_size, dtype=np.float32)
+        num_bytes = data_h.size * data_h.itemsize
+        self.cfl_data = hip_check(hip.hipMalloc(num_bytes)).configure(
+            typestr="float32", shape=self.grid_size)
 
         dt_x = np.min(self.dx / (np.abs(hu0/h0) + np.sqrt(g*h0)))
         dt_y = np.min(self.dy / (np.abs(hv0/h0) + np.sqrt(g*h0)))
         dt = min(dt_x, dt_y)
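Unlike the pycuda GPUArray it replaces, the hipMalloc buffer above starts uninitialized; the pycuda version presumably fills cfl_data with this dt just below the hunk. A hedged HIP equivalent, reusing data_h, num_bytes and dt from the hunk above:

# fill the freshly allocated cfl_data buffer with the initial dt
# (data_h, num_bytes, dt and self.cfl_data come from the hunk above)
data_h.fill(dt)
hip_check(hip.hipMemcpyAsync(self.cfl_data, data_h, num_bytes,
                             hip.hipMemcpyKind.hipMemcpyHostToDevice,
                             self.stream))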
@@ -105,22 +136,56 @@ class HLL2 (Simulator.BaseSimulator):
         self.substepDimsplit(dt*0.5, step_number)
 
     def substepDimsplit(self, dt, substep):
-        self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
-                self.nx, self.ny,
-                self.dx, self.dy, dt,
-                self.g,
-                self.theta,
-                substep,
-                self.boundary_conditions,
-                self.u0[0].data.gpudata, self.u0[0].data.strides[0],
-                self.u0[1].data.gpudata, self.u0[1].data.strides[0],
-                self.u0[2].data.gpudata, self.u0[2].data.strides[0],
-                self.u1[0].data.gpudata, self.u1[0].data.strides[0],
-                self.u1[1].data.gpudata, self.u1[1].data.strides[0],
-                self.u1[2].data.gpudata, self.u1[2].data.strides[0],
-                self.cfl_data.gpudata)
+        # self.kernel.prepared_async_call(self.grid_size, self.block_size, self.stream,
+        #         self.nx, self.ny,
+        #         self.dx, self.dy, dt,
+        #         self.g,
+        #         self.theta,
+        #         substep,
+        #         self.boundary_conditions,
+        #         self.u0[0].data.gpudata, self.u0[0].data.strides[0],
+        #         self.u0[1].data.gpudata, self.u0[1].data.strides[0],
+        #         self.u0[2].data.gpudata, self.u0[2].data.strides[0],
+        #         self.u1[0].data.gpudata, self.u1[0].data.strides[0],
+        #         self.u1[1].data.gpudata, self.u1[1].data.strides[0],
+        #         self.u1[2].data.gpudata, self.u1[2].data.strides[0],
+        #         self.cfl_data.gpudata)
+
+        #launch kernel
+        hip_check(
+            hip.hipModuleLaunchKernel(
+                self.kernel,
+                *self.grid_size,
+                *self.block_size,
+                sharedMemBytes=0,
+                stream=self.stream,
+                kernelParams=None,
+                extra=(  # kernel arguments, packed to match "iifffffiiPiPiPiPiPiPiP"
+                    ctypes.c_int(self.nx), ctypes.c_int(self.ny),
+                    ctypes.c_float(self.dx), ctypes.c_float(self.dy), ctypes.c_float(dt),
+                    ctypes.c_float(self.g),
+                    ctypes.c_float(self.theta),
+                    ctypes.c_int(substep),
+                    ctypes.c_int(self.boundary_conditions),
+                    # device pointers are passed as-is; strides are packed as ints
+                    self.u0[0].data.gpudata, ctypes.c_int(self.u0[0].data.strides[0]),
+                    self.u0[1].data.gpudata, ctypes.c_int(self.u0[1].data.strides[0]),
+                    self.u0[2].data.gpudata, ctypes.c_int(self.u0[2].data.strides[0]),
+                    self.u1[0].data.gpudata, ctypes.c_int(self.u1[0].data.strides[0]),
+                    self.u1[1].data.gpudata, ctypes.c_int(self.u1[1].data.strides[0]),
+                    self.u1[2].data.gpudata, ctypes.c_int(self.u1[2].data.strides[0]),
+                    self.cfl_data
+                )
+            )
+        )
         self.u0, self.u1 = self.u1, self.u0
 
+        hip_check(hip.hipDeviceSynchronize())
+        # unloading self.module or freeing self.cfl_data here would break the
+        # next substep and computeDt(), so that teardown is left out:
+        #hip_check(hip.hipModuleUnload(self.module))
+        #hip_check(hip.hipFree(self.cfl_data))
+
+        print("--Launching Kernel 'HLL2Kernel' is ok")
 
     def getOutput(self):
         return self.u0
 
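The extra tuple is the error-prone part of this port: each 'i' entry in the old signature string must be wrapped as ctypes.c_int, each 'f' entry as ctypes.c_float, and each 'P' entry takes a device pointer directly. A self-contained toy example of the same launch pattern (the axpb kernel and all names are illustrative; hip_check is the sketch from above):

import ctypes
import numpy as np
from hip import hip, hiprtc

src = b"""
extern "C" __global__ void axpb(int n, float a, float b, float* x) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = a * x[i] + b;
}
"""

# compile with hiprtc for the current device (same sequence as in __init__)
prog = hip_check(hiprtc.hiprtcCreateProgram(src, b"axpb", 0, [], []))
props = hip.hipDeviceProp_t()
hip_check(hip.hipGetDeviceProperties(props, 0))
err, = hiprtc.hiprtcCompileProgram(prog, 1, [b"--offload-arch=" + props.gcnArchName])
if err != hiprtc.hiprtcResult.HIPRTC_SUCCESS:
    raise RuntimeError("hiprtc compilation failed")
code = bytearray(hip_check(hiprtc.hiprtcGetCodeSize(prog)))
hip_check(hiprtc.hiprtcGetCode(prog, code))
module = hip_check(hip.hipModuleLoadData(code))
kernel = hip_check(hip.hipModuleGetFunction(module, b"axpb"))

n = 256
x_h = np.ones(n, dtype=np.float32)
x_d = hip_check(hip.hipMalloc(x_h.nbytes))
hip_check(hip.hipMemcpy(x_d, x_h, x_h.nbytes, hip.hipMemcpyKind.hipMemcpyHostToDevice))

# ints as c_int, floats as c_float, device pointers passed as-is
hip_check(hip.hipModuleLaunchKernel(kernel, n // 64, 1, 1, 64, 1, 1,
                                    sharedMemBytes=0, stream=None, kernelParams=None,
                                    extra=(ctypes.c_int(n), ctypes.c_float(2.0),
                                           ctypes.c_float(1.0), x_d)))

hip_check(hip.hipMemcpy(x_h, x_d, x_h.nbytes, hip.hipMemcpyKind.hipMemcpyDeviceToHost))
assert np.allclose(x_h, 3.0)  # 2*1 + 1
hip_check(hip.hipFree(x_d))
hip_check(hip.hipModuleUnload(module))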
@@ -128,6 +193,39 @@ class HLL2 (Simulator.BaseSimulator):
         self.u0.check()
         self.u1.check()
 
+    # computing min with hipblas: the output is an index
+    def min_hipblas(self, num_elements, cfl_data, stream):
+        num_bytes = num_elements * np.dtype(np.float32).itemsize
+        num_bytes_i = np.dtype(np.int32).itemsize
+        indx_d = hip_check(hip.hipMalloc(num_bytes_i))
+        indx_h = np.zeros(1, dtype=np.int32)
+        x_temp = np.zeros(num_elements, dtype=np.float32)
+
+        #print("--size.data:", cfl_data.size)
+        handle = hip_check(hipblas.hipblasCreate())
+
+        #hip_check(hipblas.hipblasGetStream(handle, stream))
+        # "incx" [int] specifies the increment for the elements of x; incx must be > 0
+        hip_check(hipblas.hipblasIsamin(handle, num_elements, cfl_data, 1, indx_d))
+
+        # destruction of handle
+        hip_check(hipblas.hipblasDestroy(handle))
+
+        # copy result (stored in indx_d) back to the host (store in indx_h)
+        hip_check(hip.hipMemcpyAsync(indx_h, indx_d, num_bytes_i, hip.hipMemcpyKind.hipMemcpyDeviceToHost, stream))
+        hip_check(hip.hipMemcpyAsync(x_temp, cfl_data, num_bytes, hip.hipMemcpyKind.hipMemcpyDeviceToHost, stream))
+        #hip_check(hip.hipMemsetAsync(cfl_data, 0, num_bytes, self.stream))
+        hip_check(hip.hipStreamSynchronize(stream))
+
+        # hipblasIsamin returns a 1-based (Fortran-style) index
+        min_value = x_temp.flatten()[indx_h[0]-1]
+
+        # clean up the temporary index buffer; freeing cfl_data or destroying
+        # the stream here would break later calls that reuse them
+        hip_check(hip.hipFree(indx_d))
+        return min_value
 
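hipblasIsamin follows the BLAS convention of returning a 1-based index, hence the indx_h[0]-1 above. A small sanity check against numpy; a sketch assuming hipblas's default host pointer mode, so the index can be written straight into a host array:

import numpy as np
from hip import hip, hipblas

vals_h = np.array([3.0, 0.5, 2.0], dtype=np.float32)
vals_d = hip_check(hip.hipMalloc(vals_h.nbytes))
hip_check(hip.hipMemcpy(vals_d, vals_h, vals_h.nbytes, hip.hipMemcpyKind.hipMemcpyHostToDevice))

indx_h = np.zeros(1, dtype=np.int32)
handle = hip_check(hipblas.hipblasCreate())
hip_check(hipblas.hipblasIsamin(handle, vals_h.size, vals_d, 1, indx_h))
hip_check(hipblas.hipblasDestroy(handle))

assert indx_h[0] - 1 == np.argmin(vals_h)  # 1-based -> 0-based
hip_check(hip.hipFree(vals_d))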
     def computeDt(self):
-        max_dt = gpuarray.min(self.cfl_data, stream=self.stream).get();
+        #max_dt = gpuarray.min(self.cfl_data, stream=self.stream).get();
+        max_dt = self.min_hipblas(self.cfl_data.size, self.cfl_data, self.stream)
         return max_dt*0.5