Example bad devito MPI operator
import numpy as np
import cloudpickle
from mpi4py import MPI

from devito import Function, TimeFunction, Grid, Eq, Operator, configuration

configuration["mpi"] = True
configuration["openmp"] = True
def build_operator():
    print(f"{MPI.COMM_WORLD.rank} building operator")
    toy_grid = Grid(shape=(100, 100), comm=MPI.COMM_SELF)
    u = TimeFunction(name='u', grid=toy_grid, space_order=4, time_order=1)
    # The 0.000001 coefficient needs to be small enough to keep this stable.
    # Really it depends on the grid sizes etc. - here it's just hard-coded for
    # dt=1 and nx, ny = 100.
    eqn = Eq(u.forward, u + 0.000001 * u.laplace)
    op = Operator(eqn, name="HeatFwd")
    print(f"{MPI.COMM_WORLD.rank} returning operator")
    return op
def get_operator(comm):
    # If we're given a communicator we build the operator on MPI rank 0 and the
    # other ranks get it from a broadcast. Otherwise we build it on all ranks.
    op = None
    if comm is None or comm.rank == 0:
        op = build_operator()
    if comm is not None:
        # The default pickle used by MPI broadcast isn't cloudpickle
        # and so can't pickle everything we want - so we manually
        # pickle and unpickle. But we only need to do the pickle on
        # rank 0.
        if comm.rank == 0:
            op_pickle = cloudpickle.dumps(op)
        else:
            op_pickle = None
        op_pickle = comm.bcast(op_pickle, root=0)
        # Rank 0 already has a valid operator,
        # so let's not waste time unpickling there.
        if comm.rank != 0:
            op = cloudpickle.loads(op_pickle)
        del op_pickle
    assert op is not None
    return op
print(f"{MPI.COMM_WORLD.rank}: Getting op") | |
#op = get_operator(MPI.COMM_WORLD) | |
op = get_operator(None) | |
print(f"{MPI.COMM_WORLD.rank}: Got op = {type(op)}") | |
def setup_u(g: Grid) -> TimeFunction:
    u = TimeFunction(name="u", grid=g, space_order=4, time_order=1)
    x_, y_ = np.ix_(np.linspace(-1, 1, g.shape[0]), np.linspace(-1, 1, g.shape[1]))
    dx = x_ - 0.5
    dy = y_
    # Initialise both time levels with a disc of 1s centred at (0.5, 0)
    # in normalised [-1, 1] coordinates.
    u.data_with_halo[:] = 0
    u.data[0, :, :] = 1.0 * ((dx*dx + dy*dy) < 0.125)
    u.data[1, :, :] = 1.0 * ((dx*dx + dy*dy) < 0.125)
    return u
def run_op(op, description, u, outfile):
    print(f"{MPI.COMM_WORLD.rank} : running {description}")
    print(f"{MPI.COMM_WORLD.rank} : {description} before ||u|| = {np.sum(u.data[u.local_indices] * u.data[u.local_indices])}")
    op.apply(u=u, time_M=15000)
    print(f"{MPI.COMM_WORLD.rank} : {description} after ||u|| = {np.sum(u.data[u.local_indices] * u.data[u.local_indices])}")
    d = np.zeros(u.shape)
    d[:, :, :] = u.data[u.local_indices]
    np.save(outfile, d)
def run_local(op):
    g = Grid(shape=(100, 100), comm=MPI.COMM_SELF)
    u = setup_u(g)
    run_op(op, "local only", u, f"local_rank_{MPI.COMM_WORLD.rank}")

def run_dist(op):
    g = Grid(shape=(100, 100), comm=MPI.COMM_WORLD)
    u = setup_u(g)
    run_op(op, "distributed", u, f"dist_rank_{MPI.COMM_WORLD.rank}")
def run_dist_patched(op):
    g = Grid(shape=(100, 100), comm=MPI.COMM_WORLD)
    u = setup_u(g)
    # Swap the MPI communicator and neighbourhood objects that the operator
    # captured at build time (from the COMM_SELF grid) for the ones belonging
    # to this distributed grid.
    m = {}
    m['comm'] = g.distributor._obj_comm
    m['nb'] = g.distributor._obj_neighborhood
    op.objects = tuple(m.get(o.name, o) for o in op.objects)
    run_op(op, "distributed patched", u, f"dist_patched_rank_{MPI.COMM_WORLD.rank}")
run_local(op)
run_dist(op)
run_dist_patched(op)
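# How one might run this (hypothetical file name and rank count - not part of
# the original gist):
#   mpirun -n 2 python bad_devito_mpi_operator.py
# Each rank writes local_rank_<r>.npy, dist_rank_<r>.npy and
# dist_patched_rank_<r>.npy. The "local only" run uses the operator exactly as
# built (COMM_SELF grid), the "distributed" run applies that same operator to a
# COMM_WORLD grid - the "bad" case this gist demonstrates - and the
# "distributed patched" run first swaps in the distributed grid's MPI objects.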