Changed from print to logging

This commit is contained in:
André R. Brodtkorb 2018-08-13 16:04:46 +02:00
parent 9592a09d36
commit 8bda93e565
5 changed files with 594 additions and 357 deletions

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@ -39,8 +39,9 @@ import pycuda.driver as cuda
Class which keeps track of time spent for a section of code
"""
class Timer(object):
def __init__(self, tag):
def __init__(self, tag, log_level=logging.DEBUG):
self.tag = tag
self.log_level = log_level
self.logger = logging.getLogger(__name__)
def __enter__(self):
@ -51,7 +52,7 @@ class Timer(object):
self.end = time.time()
self.secs = self.end - self.start
self.msecs = self.secs * 1000 # millisecs
self.logger.info("%s: %f ms", self.tag, self.msecs)
self.logger.log(self.log_level, "%s: %f ms", self.tag, self.msecs)
@ -276,12 +277,14 @@ class CUDAArray2D:
Uploads initial data to the CUDA device
"""
def __init__(self, stream, nx, ny, halo_x, halo_y, data):
self.logger = logging.getLogger(__name__)
self.nx = nx
self.ny = ny
self.nx_halo = nx + 2*halo_x
self.ny_halo = ny + 2*halo_y
self.logger.debug("Allocating [%dx%d] buffer", self.nx, self.ny)
#Make sure data is in proper format
assert np.issubdtype(data.dtype, np.float32), "Wrong datatype: %s" % str(data.dtype)
assert not np.isfortran(data), "Wrong datatype (Fortran, expected C)"
@ -293,17 +296,26 @@ class CUDAArray2D:
self.bytes_per_float = data.itemsize
assert(self.bytes_per_float == 4)
self.pitch = np.int32((self.nx_halo)*self.bytes_per_float)
self.logger.debug("Buffer <%s> [%dx%d]: Allocated ", int(self.data.gpudata), self.nx, self.ny)
def __del__(self, *args):
# Destructor: log and release the GPU allocation backing this array.
# NOTE(review): runs when the garbage collector drops the wrapper; assumes
# self.data is still a valid pycuda gpuarray at that point — confirm.
self.logger.debug("Buffer <%s> [%dx%d]: Releasing ", int(self.data.gpudata), self.nx, self.ny)
self.data.gpudata.free()
# Drop the reference so later accidental use fails fast instead of touching freed memory
self.data = None
"""
Enables downloading data from the CUDA device to Python
"""
def download(self, stream, asynch=False):
    """
    Download the buffer contents from the GPU to host memory.

    stream: CUDA stream the copy is enqueued on.
    asynch: if True, enqueue an asynchronous copy and return without
            waiting for completion; if False, block until the copy is done.
            (Renamed from `async`, which became a reserved keyword in
            Python 3.7 and made this definition a SyntaxError.)

    Returns the host-side data (numpy array from pycuda's get/get_async).
    """
    #Copy data from device to host
    if (asynch):
        self.logger.debug("Buffer <%s> [%dx%d]: Downloading async ", int(self.data.gpudata), self.nx, self.ny)
        host_data = self.data.get_async(stream=stream)
        return host_data
    else:
        self.logger.debug("Buffer <%s> [%dx%d]: Downloading synchronously", int(self.data.gpudata), self.nx, self.ny)
        # pagelocked=True causes crashes on Windows (at least), so use a pageable copy
        host_data = self.data.get(stream=stream)
        return host_data

View File

@ -48,7 +48,6 @@ class KP07 (Simulator.BaseSimulator):
dy: Grid cell spacing along y-axis (20 000 m)
dt: Size of each timestep (90 s)
g: Gravitational acceleration (9.81 m/s^2)
r: Bottom friction coefficient (2.4e-3 m/s)
"""
def __init__(self, \
context, \
@ -56,7 +55,7 @@ class KP07 (Simulator.BaseSimulator):
nx, ny, \
dx, dy, dt, \
g, \
theta=1.3, r=0.0, \
theta=1.3, \
block_width=16, block_height=16):
# Call super constructor
@ -69,11 +68,10 @@ class KP07 (Simulator.BaseSimulator):
block_width, block_height);
self.theta = np.float32(theta)
self.r = np.float32(r)
#Get kernels
self.kernel = context.get_prepared_kernel("KP07_kernel.cu", "KP07Kernel", \
"iiffffffiPiPiPiPiPiPi", \
"iifffffiPiPiPiPiPiPi", \
BLOCK_WIDTH=block_width, \
BLOCK_HEIGHT=block_height)
@ -89,7 +87,6 @@ class KP07 (Simulator.BaseSimulator):
self.dx, self.dy, dt, \
self.g, \
self.theta, \
self.r, \
np.int32(substep), \
self.data.h0.data.gpudata, self.data.h0.pitch, \
self.data.hu0.data.gpudata, self.data.hu0.pitch, \

View File

@ -22,6 +22,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
#Import packages we need
import numpy as np
import logging
import pycuda.compiler as cuda_compiler
import pycuda.gpuarray
@ -53,6 +54,9 @@ class BaseSimulator:
dx, dy, dt, \
g, \
block_width, block_height):
#Get logger
self.logger = logging.getLogger(__name__ + "." + self.__class__.__name__)
#Create a CUDA stream
self.stream = cuda.Stream()
@ -93,6 +97,7 @@ class BaseSimulator:
Requires that the stepEuler functionality is implemented in the subclasses
"""
def simulateEuler(self, t_end):
with Common.Timer(self.__class__.__name__ + ".simulateEuler") as t:
# Compute number of timesteps to perform
n = int(t_end / self.dt + 1)
@ -107,6 +112,7 @@ class BaseSimulator:
# Step with forward Euler
self.stepEuler(local_dt)
self.logger.info("%s simulated %f seconds to %f with %d steps in %f seconds", self.__class__.__name__, t_end, self.t, n, t.secs)
return self.t, n
"""
@ -114,7 +120,7 @@ class BaseSimulator:
Requires that the stepRK functionality is implemented in the subclasses
"""
def simulateRK(self, t_end, order):
with Common.Timer(self.__class__.__name__ + ".simulateRK") as t:
# Compute number of timesteps to perform
n = int(t_end / self.dt + 1)
@ -129,6 +135,7 @@ class BaseSimulator:
# Perform all the Runge-Kutta substeps
self.stepRK(local_dt, order)
self.logger.info("%s simulated %f seconds to %f with %d steps in %f seconds", self.__class__.__name__, t_end, self.t, n, t.secs)
return self.t, n
"""
@ -136,7 +143,7 @@ class BaseSimulator:
Requires that the stepDimsplitX and stepDimsplitY functionality is implemented in the subclasses
"""
def simulateDimsplit(self, t_end):
with Common.Timer(self.__class__.__name__ + ".simulateDimsplit") as t:
# Compute number of timesteps to perform
n = int(t_end / (2.0*self.dt) + 1)
@ -152,7 +159,7 @@ class BaseSimulator:
self.stepDimsplitXY(local_dt)
self.stepDimsplitYX(local_dt)
self.logger.info("%s simulated %f seconds to %f with %d steps in %f seconds", self.__class__.__name__, t_end, self.t, 2*n, t.secs)
return self.t, 2*n