mirror of https://github.com/smyalygames/FiniteVolumeGPU.git
synced 2025-05-18 14:34:13 +02:00
Refactoring / cleanup
parent f9f0f20df8
commit b266567d09
@@ -104,6 +104,8 @@ class MagicLogger(Magics):
     @line_magic
     @magic_arguments.magic_arguments()
+    @magic_arguments.argument(
+        'name', type=str, help='Name of context to create')
     @magic_arguments.argument(
         '--out', '-o', type=str, default='output.log', help='The filename to store the log to')
     @magic_arguments.argument(
@@ -146,6 +148,7 @@ class MagicLogger(Magics):
         logger.addHandler(fh)

         logger.info("Python version %s", sys.version)
+        self.shell.user_ns[args.name] = logger



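The two hunks above add a positional 'name' argument to the logging line magic and store the configured logger in the notebook's user namespace under that name. A minimal sketch of the intended notebook usage; the magic name %setup_logging and the cell contents are assumptions for illustration, only the 'name' argument, the --out option, and the user_ns assignment come from the diff:

    %setup_logging my_logger --out simulation.log

    # After the magic has run, args.name ('my_logger') is bound in self.shell.user_ns,
    # so the logger is reachable as an ordinary notebook variable:
    my_logger.info("logging to simulation.log")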
@@ -25,168 +25,29 @@ from GPUSimulators import Simulator
 import numpy as np
 from mpi4py import MPI


-class MPISimulator(Simulator.BaseSimulator):
-    def __init__(self, sim, comm):
+class MPIGrid(object):
+    """
+    Class which represents an MPI grid of nodes. Facilitates easy communication between
+    neighboring nodes
+    """
+    def __init__(self, comm, ndims=2):
         self.logger = logging.getLogger(__name__)

-        autotuner = sim.context.autotuner
-        sim.context.autotuner = None;
-        super().__init__(sim.context,
-            sim.nx, sim.ny,
-            sim.dx, sim.dy,
-            sim.cfl_scale,
-            sim.num_substeps,
-            sim.block_size[0], sim.block_size[1])
-        sim.context.autotuner = autotuner
-
-        self.sim = sim
+        assert ndims == 2, "Unsupported number of dimensions. Must be two at the moment"
+        assert comm.size >= 1, "Must have at least one node"
+
+        self.grid = MPIGrid.getGrid(comm.size, ndims)
         self.comm = comm
-        self.rank = comm.rank

-        #Get global dimensions
-        self.grid = MPISimulator.getFactors(self.comm.size, 2)
-
-        #Get neighbor node ids
-        self.east = self.getEast()
-        self.west = self.getWest()
-        self.north = self.getNorth()
-        self.south = self.getSouth()
-
-        #Get local dimensions
-        self.gc_x = int(self.sim.u0[0].x_halo)
-        self.gc_y = int(self.sim.u0[0].y_halo)
-        self.nx = int(self.sim.nx)
-        self.ny = int(self.sim.ny)
-        self.nvars = len(self.sim.u0.gpu_variables)
-
-        #Allocate data for receiving
-        #Note that east and west also transfer ghost cells
-        #whilst north/south only transfer internal cells
-        self.in_e = np.empty((self.nvars, self.ny + 2*self.gc_y, self.gc_x), dtype=np.float32)
-        self.in_w = np.empty((self.nvars, self.ny + 2*self.gc_y, self.gc_x), dtype=np.float32)
-        self.in_n = np.empty((self.nvars, self.gc_y, self.nx), dtype=np.float32)
-        self.in_s = np.empty((self.nvars, self.gc_y, self.nx), dtype=np.float32)
-
-        #Allocate data for sending
-        self.out_e = np.empty_like(self.in_e)
-        self.out_w = np.empty_like(self.in_w)
-        self.out_n = np.empty_like(self.in_n)
-        self.out_s = np.empty_like(self.in_s)
-
-        #Set regions for ghost cells to read from
-        self.read_e = np.array([ self.nx, 0, self.gc_x, self.ny + 2*self.gc_y])
-        self.read_w = np.array([self.gc_x, 0, self.gc_x, self.ny + 2*self.gc_y])
-        self.read_n = np.array([self.gc_x, self.ny, self.nx, self.gc_y])
-        self.read_s = np.array([self.gc_x, self.gc_y, self.nx, self.gc_y])
-
-        #Set regions for ghost cells to write to
-        self.write_e = self.read_e + np.array([self.gc_x, 0, 0, 0])
-        self.write_w = self.read_w - np.array([self.gc_x, 0, 0, 0])
-        self.write_n = self.read_n + np.array([0, self.gc_y, 0, 0])
-        self.write_s = self.read_s - np.array([0, self.gc_y, 0, 0])
-
-        self.logger.debug("Simlator rank {:d} created ".format(self.rank))
-
-
-    def substep(self, dt, step_number):
-        self.exchange()
-        self.sim.substep(dt, step_number)
-
-    def download(self):
-        return self.sim.download()
-
-    def synchronize(self):
-        self.sim.synchronize()
-
-    def check(self):
-        return self.sim.check()
-
-    def computeDt(self):
-        local_dt = np.array([np.float32(self.sim.computeDt())]);
-        global_dt = np.empty(1, dtype=np.float32)
-        self.comm.Allreduce(local_dt, global_dt, op=MPI.MIN)
-        self.logger.debug("Local dt: {:f}, global dt: {:f}".format(local_dt[0], global_dt[0]))
-        return global_dt[0]
-
-    def exchange(self):
-        #Shorthands for dimensions
-        gc_x = self.gc_x
-        gc_y = self.gc_y
-        nx = self.nx
-        ny = self.ny
-
-        ####
-        # First transfer internal cells north-south
-        ####
-
-        #Download from the GPU
-        for k in range(self.nvars):
-            self.sim.u0[k].download(self.sim.stream, cpu_data=self.out_n[k,:,:], async=True, extent=self.read_n)
-            self.sim.u0[k].download(self.sim.stream, cpu_data=self.out_s[k,:,:], async=True, extent=self.read_s)
-        self.sim.stream.synchronize()
-
-        #Send to north/south neighbours
-        comm_send = []
-        comm_send += [self.comm.Isend(self.out_n, dest=self.north, tag=4*self.nt + 0)]
-        comm_send += [self.comm.Isend(self.out_s, dest=self.south, tag=4*self.nt + 1)]
-
-        #Receive from north/south neighbors
-        comm_recv = []
-        comm_recv += [self.comm.Irecv(self.in_s, source=self.south, tag=4*self.nt + 0)]
-        comm_recv += [self.comm.Irecv(self.in_n, source=self.north, tag=4*self.nt + 1)]
-
-        #Wait for incoming transfers to complete
-        for comm in comm_recv:
-            comm.wait()
-
-        #Upload to the GPU
-        for k in range(self.nvars):
-            self.sim.u0[k].upload(self.sim.stream, self.in_n[k,:,:], extent=self.write_n)
-            self.sim.u0[k].upload(self.sim.stream, self.in_s[k,:,:], extent=self.write_s)
-
-        #Wait for sending to complete
-        for comm in comm_send:
-            comm.wait()
-
-        ####
-        # Then transfer east-west including ghost cells that have been filled in by north-south transfer above
-        # Fixme: This can be optimized by overlapping the GPU transfer with the pervious MPI transfer if the corners
-        # har handled on the CPU
-        ####
-
-        #Download from the GPU
-        for k in range(self.nvars):
-            self.sim.u0[k].download(self.sim.stream, cpu_data=self.out_e[k,:,:], async=True, extent=self.read_e)
-            self.sim.u0[k].download(self.sim.stream, cpu_data=self.out_w[k,:,:], async=True, extent=self.read_w)
-        self.sim.stream.synchronize()
-
-        #Send to east/west neighbours
-        comm_send = []
-        comm_send += [self.comm.Isend(self.out_e, dest=self.east, tag=4*self.nt + 2)]
-        comm_send += [self.comm.Isend(self.out_w, dest=self.west, tag=4*self.nt + 3)]
-
-        #Receive from east/west neighbors
-        comm_recv = []
-        comm_recv += [self.comm.Irecv(self.in_w, source=self.west, tag=4*self.nt + 2)]
-        comm_recv += [self.comm.Irecv(self.in_e, source=self.east, tag=4*self.nt + 3)]
-
-        #Wait for incoming transfers to complete
-        for comm in comm_recv:
-            comm.wait()
-
-        #Upload to the GPU
-        for k in range(self.nvars):
-            self.sim.u0[k].upload(self.sim.stream, self.in_e[k,:,:], extent=self.write_e)
-            self.sim.u0[k].upload(self.sim.stream, self.in_w[k,:,:], extent=self.write_w)
-
-        #Wait for sending to complete
-        for comm in comm_send:
-            comm.wait()
-
-
-    def getCoordinate(self, rank):
+        self.logger.debug("Created MPI grid: {:}. Rank {:d} has coordinate {:}".format(
+            self.grid, self.comm.rank, self.getCoordinate()))
+
+    def getCoordinate(self, rank=None):
+        if (rank is None):
+            rank = self.comm.rank
         i = (rank % self.grid[0])
         j = (rank // self.grid[0])
         return i, j
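This hunk replaces the old all-in-one MPISimulator with a new MPIGrid class that only does grid bookkeeping: getCoordinate now defaults to the calling rank and maps ranks to (i, j) coordinates row-major. A standalone sketch of that mapping for an assumed 3x2 grid (illustration only, not repository code):

    # Row-major rank <-> coordinate mapping, as in MPIGrid.getCoordinate/getRank.
    # grid = [3, 2] (3 columns, 2 rows) is an assumed example layout.
    grid = [3, 2]

    def coordinate(rank):
        return rank % grid[0], rank // grid[0]   # (column, row)

    def rank_of(i, j):
        return j * grid[0] + i

    # Ranks 0..5 map to (0,0) (1,0) (2,0) (0,1) (1,1) (2,1), and rank_of inverts it:
    assert all(rank_of(*coordinate(r)) == r for r in range(6))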
@@ -195,28 +56,32 @@ class MPISimulator(Simulator.BaseSimulator):
         return j*self.grid[0] + i

     def getEast(self):
-        i, j = self.getCoordinate(self.rank)
+        i, j = self.getCoordinate(self.comm.rank)
         i = (i+1) % self.grid[0]
         return self.getRank(i, j)

     def getWest(self):
-        i, j = self.getCoordinate(self.rank)
+        i, j = self.getCoordinate(self.comm.rank)
         i = (i+self.grid[0]-1) % self.grid[0]
         return self.getRank(i, j)

     def getNorth(self):
-        i, j = self.getCoordinate(self.rank)
+        i, j = self.getCoordinate(self.comm.rank)
         j = (j+1) % self.grid[1]
         return self.getRank(i, j)

     def getSouth(self):
-        i, j = self.getCoordinate(self.rank)
+        i, j = self.getCoordinate(self.comm.rank)
         j = (j+self.grid[1]-1) % self.grid[1]
         return self.getRank(i, j)

-    def getFactors(number, num_factors):
+    def getGrid(num_nodes, num_dims):
+        assert(isinstance(num_nodes, int))
+        assert(isinstance(num_dims, int))
+
         # Adapted from https://stackoverflow.com/questions/28057307/factoring-a-number-into-roughly-equal-factors
         # Original code by https://stackoverflow.com/users/3928385/ishamael
+        # Factorizes a number into n roughly equal factors

         #Dictionary to remember already computed permutations
         memo = {}
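The neighbour lookups above wrap around periodically via modulo arithmetic, and getGrid (renamed from getFactors) factorizes the node count into roughly equal factors, so six ranks would typically be laid out as a 3x2 grid. A standalone sketch of the east-neighbour wrap on that assumed layout (illustration only, not repository code):

    # Periodic east neighbour on an assumed 3x2 grid, mirroring MPIGrid.getEast.
    grid = [3, 2]

    def east_of(rank):
        i, j = rank % grid[0], rank // grid[0]
        i = (i + 1) % grid[0]          # wrap past the last column
        return j * grid[0] + i

    assert east_of(0) == 1
    assert east_of(2) == 0   # last column of row 0 wraps back to rank 0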
@@ -253,22 +118,237 @@ class MPISimulator(Simulator.BaseSimulator):
             memo[(n, left)] = (best, bestTuple)
             return memo[(n, left)]

-        assert(isinstance(number, int))
-        assert(isinstance(num_factors, int))
-
-        factors = dp(number, num_factors)[1]
+        grid = dp(num_nodes, num_dims)[1]

-        if (len(factors) < num_factors):
+        if (len(grid) < num_dims):
             #Split problematic 4
-            if (4 in factors):
-                factors.remove(4)
-                factors.append(2)
-                factors.append(2)
+            if (4 in grid):
+                grid.remove(4)
+                grid.append(2)
+                grid.append(2)

-            #Pad with ones to guarantee num_factors
-            factors = factors + [1]*(num_factors - len(factors))
+            #Pad with ones to guarantee num_dims
+            grid = grid + [1]*(num_dims - len(grid))

         #Sort in descending order
-        factors = np.flip(np.sort(factors))
-
-        return factors
+        grid = np.flip(np.sort(grid))
+
+        return grid
+
+
+    def getExtent(self, width, height, rank):
+        """
+        Function which returns the extent of node with rank
+        rank in the grid
+        """
+        i, j = self.getCoordinate(rank)
+        x0 = i * width
+        y0 = j * height
+        x1 = x0+width
+        y1 = y0+height
+        return [x0, x1, y0, y1]
+
+
+    def gatherData(self, data, rank=0):
+        """
+        Function which gathers the data onto node with rank
+        rank
+        """
+        #Get shape of data
+        ny, nx = data.shape
+
+        #Create list of buffers to return
+        retval = []
+
+        #If we are the target node, recieve from others
+        #otherwise send to target
+        if (self.comm.rank == rank):
+            mpi_requests = []
+            retval = []
+
+            #Loop over all nodes
+            for k in range(0, self.comm.size):
+                #If k equal target node, add our own data
+                #Otherwise receive it from node k
+                if (k == rank):
+                    retval += [data]
+                else:
+                    buffer = np.empty((ny, nx), dtype=np.float32)
+                    retval += [buffer]
+                    mpi_requests += [self.comm.Irecv(buffer, source=k, tag=k)]
+
+            #Wait for transfers to complete
+            for mpi_request in mpi_requests:
+                mpi_request.wait()
+        else:
+            mpi_request = self.comm.Isend(data, dest=rank, tag=self.comm.rank)
+            mpi_request.wait()
+
+        return retval
+
+
+
+class MPISimulator(Simulator.BaseSimulator):
+    """
+    Class which handles communication between simulators on different MPI nodes
+    """
+    def __init__(self, sim, grid):
+        self.logger = logging.getLogger(__name__)
+
+        autotuner = sim.context.autotuner
+        sim.context.autotuner = None;
+        super().__init__(sim.context,
+            sim.nx, sim.ny,
+            sim.dx, sim.dy,
+            sim.cfl_scale,
+            sim.num_substeps,
+            sim.block_size[0], sim.block_size[1])
+        sim.context.autotuner = autotuner
+
+        self.sim = sim
+        self.grid = grid
+
+        #Get neighbor node ids
+        self.east = grid.getEast()
+        self.west = grid.getWest()
+        self.north = grid.getNorth()
+        self.south = grid.getSouth()
+
+        #Get number of variables
+        self.nvars = len(self.sim.u0.gpu_variables)
+
+        #Shorthands for computing extents and sizes
+        gc_x = int(self.sim.u0[0].x_halo)
+        gc_y = int(self.sim.u0[0].y_halo)
+        nx = int(self.sim.nx)
+        ny = int(self.sim.ny)
+
+        #Set regions for ghost cells to read from
+        #These have the format [x0, y0, width, height]
+        self.read_e = np.array([ nx, 0, gc_x, ny + 2*gc_y])
+        self.read_w = np.array([gc_x, 0, gc_x, ny + 2*gc_y])
+        self.read_n = np.array([gc_x, ny, nx, gc_y])
+        self.read_s = np.array([gc_x, gc_y, nx, gc_y])
+
+        #Set regions for ghost cells to write to
+        self.write_e = self.read_e + np.array([gc_x, 0, 0, 0])
+        self.write_w = self.read_w - np.array([gc_x, 0, 0, 0])
+        self.write_n = self.read_n + np.array([0, gc_y, 0, 0])
+        self.write_s = self.read_s - np.array([0, gc_y, 0, 0])
+
+        #Allocate data for receiving
+        #Note that east and west also transfer ghost cells
+        #whilst north/south only transfer internal cells
+        #Reuses the width/height defined in the read-extets above
+        self.in_e = np.empty((self.nvars, self.read_e[3], self.read_e[2]), dtype=np.float32)
+        self.in_w = np.empty((self.nvars, self.read_w[3], self.read_w[2]), dtype=np.float32)
+        self.in_n = np.empty((self.nvars, self.read_n[3], self.read_n[2]), dtype=np.float32)
+        self.in_s = np.empty((self.nvars, self.read_s[3], self.read_s[2]), dtype=np.float32)
+
+        #Allocate data for sending
+        self.out_e = np.empty_like(self.in_e)
+        self.out_w = np.empty_like(self.in_w)
+        self.out_n = np.empty_like(self.in_n)
+        self.out_s = np.empty_like(self.in_s)
+
+        self.logger.debug("Simlator rank {:d} initialized ".format(self.grid.comm.rank))
+
+
+    def substep(self, dt, step_number):
+        self.exchange()
+        self.sim.substep(dt, step_number)
+
+    def download(self):
+        return self.sim.download()
+
+    def synchronize(self):
+        self.sim.synchronize()
+
+    def check(self):
+        return self.sim.check()
+
+    def computeDt(self):
+        local_dt = np.array([np.float32(self.sim.computeDt())]);
+        global_dt = np.empty(1, dtype=np.float32)
+        self.grid.comm.Allreduce(local_dt, global_dt, op=MPI.MIN)
+        self.logger.debug("Local dt: {:f}, global dt: {:f}".format(local_dt[0], global_dt[0]))
+        return global_dt[0]
+
+    def exchange(self):
+        ####
+        # FIXME: This function can be optimized using persitent communications.
+        # Also by overlapping some of the communications north/south and east/west of GPU and intra-node
+        # communications
+        ####
+
+        ####
+        # First transfer internal cells north-south
+        ####
+
+        #Download from the GPU
+        for k in range(self.nvars):
+            self.sim.u0[k].download(self.sim.stream, cpu_data=self.out_n[k,:,:], async=True, extent=self.read_n)
+            self.sim.u0[k].download(self.sim.stream, cpu_data=self.out_s[k,:,:], async=True, extent=self.read_s)
+        self.sim.stream.synchronize()
+
+        #Send to north/south neighbours
+        comm_send = []
+        comm_send += [self.grid.comm.Isend(self.out_n, dest=self.north, tag=4*self.nt + 0)]
+        comm_send += [self.grid.comm.Isend(self.out_s, dest=self.south, tag=4*self.nt + 1)]
+
+        #Receive from north/south neighbors
+        comm_recv = []
+        comm_recv += [self.grid.comm.Irecv(self.in_s, source=self.south, tag=4*self.nt + 0)]
+        comm_recv += [self.grid.comm.Irecv(self.in_n, source=self.north, tag=4*self.nt + 1)]
+
+        #Wait for incoming transfers to complete
+        for comm in comm_recv:
+            comm.wait()
+
+        #Upload to the GPU
+        for k in range(self.nvars):
+            self.sim.u0[k].upload(self.sim.stream, self.in_n[k,:,:], extent=self.write_n)
+            self.sim.u0[k].upload(self.sim.stream, self.in_s[k,:,:], extent=self.write_s)
+
+        #Wait for sending to complete
+        for comm in comm_send:
+            comm.wait()
+
+        ####
+        # Then transfer east-west including ghost cells that have been filled in by north-south transfer above
+        ####
+
+        #Download from the GPU
+        for k in range(self.nvars):
+            self.sim.u0[k].download(self.sim.stream, cpu_data=self.out_e[k,:,:], async=True, extent=self.read_e)
+            self.sim.u0[k].download(self.sim.stream, cpu_data=self.out_w[k,:,:], async=True, extent=self.read_w)
+        self.sim.stream.synchronize()

+        #Send to east/west neighbours
+        comm_send = []
+        comm_send += [self.grid.comm.Isend(self.out_e, dest=self.east, tag=4*self.nt + 2)]
+        comm_send += [self.grid.comm.Isend(self.out_w, dest=self.west, tag=4*self.nt + 3)]
+
+        #Receive from east/west neighbors
+        comm_recv = []
+        comm_recv += [self.grid.comm.Irecv(self.in_w, source=self.west, tag=4*self.nt + 2)]
+        comm_recv += [self.grid.comm.Irecv(self.in_e, source=self.east, tag=4*self.nt + 3)]
+
+        #Wait for incoming transfers to complete
+        for comm in comm_recv:
+            comm.wait()
+
+        #Upload to the GPU
+        for k in range(self.nvars):
+            self.sim.u0[k].upload(self.sim.stream, self.in_e[k,:,:], extent=self.write_e)
+            self.sim.u0[k].upload(self.sim.stream, self.in_w[k,:,:], extent=self.write_w)
+
+        #Wait for sending to complete
+        for comm in comm_send:
+            comm.wait()
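After this commit the grid bookkeeping (MPIGrid) is separated from the halo exchange and time stepping (MPISimulator), and the simulator takes a grid object instead of a raw communicator. A minimal wiring sketch under assumptions: the module path GPUSimulators.MPISimulator and the helper make_local_simulator() are hypothetical, only the MPIGrid/MPISimulator interfaces come from the diff above:

    from mpi4py import MPI
    from GPUSimulators import MPISimulator

    # make_local_simulator() is a hypothetical stand-in for constructing a
    # single-GPU simulator; that construction is not part of this diff.
    local_sim = make_local_simulator()

    grid = MPISimulator.MPIGrid(MPI.COMM_WORLD)       # lays comm.size ranks out as a 2D grid
    sim = MPISimulator.MPISimulator(local_sim, grid)  # wraps the local simulator

    dt = sim.computeDt()   # minimum dt across all ranks (MPI.MIN Allreduce)
    sim.substep(dt, 0)     # halo exchange followed by one local substep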