view distributedTest.jl @ 148:95a3ba70bccb parallel_test

Added some clarifying comments on the state of distributedTest.jl
author Vidar Stiernström <vidar.stiernstrom@it.uu.se>
date Tue, 26 Feb 2019 10:02:20 +0100
parents aa18b7bf4926
children 11b6646918d4

#NOTE: The following code "works" in that the resulting error is small. Much more
# work needs to be done to figure out how Julia's distributed parallelism works,
# especially w.r.t. data movement.

using Distributed # provides @everywhere, workers(), nworkers(), myid() and @spawnat
#TODO: `@everywhere using` here, or just `@everywhere include`?
@everywhere using DistributedArrays
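# A possible direction for the TODO above (a sketch, not part of the original; the file
# name "sbp.jl" is a placeholder): `@everywhere using` loads an installed package on
# every process, while `@everywhere include` evaluates a source file on every process,
# which is likely what the sbp module used below requires.
# @everywhere include("sbp.jl")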

# TODO: Currently uses integer division to calculate the local grid size.
# Should the case mod.(size, nworkers_per_dim) != 0 be handled in some way,
# or should we keep the assertions?
@everywhere function create_partitioned_grid(size::NTuple{Dim, Int}, limit_lower::NTuple{Dim, T}, limit_upper::NTuple{Dim, T}, nworkers_per_dim::NTuple{Dim, Int}) where Dim where T
  @assert all(mod.(size, nworkers_per_dim) .== 0) # grid must divide evenly among workers in every dimension
  @assert prod(nworkers_per_dim) == nworkers()
  # Translate the current worker id to a cartesian index, based on nworkers_per_dim
  ci = CartesianIndices(nworkers_per_dim);
  id = Tuple(ci[myid()-1])

  # Compute the size of each grid partition using elementwise integer division
  size_partition = div.(size, nworkers_per_dim)
  # Compute domain size for each partition
  domain_size = limit_upper.-limit_lower
  domain_partition_size = domain_size./nworkers_per_dim

  # Compute the lower and upper limit for each grid partition, then construct the grid
  ll_partition = limit_lower .+ domain_partition_size.*(id.-1)
  lu_partition = limit_lower .+ domain_partition_size.*id
  grid = sbp.Grid.EquidistantGrid(size_partition, ll_partition, lu_partition)
  return grid
end
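
# A minimal illustration (not part of the original) of the worker-id to Cartesian-index
# mapping used above: with nworkers_per_dim = (2, 2), the workers (processes 2..5) have
# linear ids 1..4, which CartesianIndices lays out column-major.
# ci = CartesianIndices((2, 2))
# Tuple(ci[1])   # (1, 1) -- partition owned by worker/process 2
# Tuple(ci[4])   # (2, 2) -- partition owned by worker/process 5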

# Create grid
#TODO: Should these be declared globally?
gridsize = (10000, 10000);
limit_lower = (0., 0.)
limit_upper = (2pi, 3pi/2)
nworkers_per_dim = (Int(nworkers()/2),Int(nworkers()/2))
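# A more general alternative (a sketch, not part of the original): partition along the
# first dimension only, which satisfies prod(nworkers_per_dim) == nworkers() for any
# number of workers, provided the grid size divides evenly.
# nworkers_per_dim = (nworkers(), 1)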

# TODO: The current choice nworkers_per_dim = (nworkers()/2, nworkers()/2) only satisfies
# prod(nworkers_per_dim) == nworkers() when there are exactly four worker processes.
grids_partitioned = [@spawnat p create_partitioned_grid(gridsize, limit_lower, limit_upper, nworkers_per_dim) for p in workers()]

# Create Laplace operator
# TODO: If we don't have fetch here, then the error is large. Does this indicate that we need to
# move data, or simply that the future is not yet computed once this statement is reached?
Laplace_partitioned = [@spawnat p sbp.Laplace(fetch(grids_partitioned[p-1]), 1.0, sbp.readOperator("d2_4th.txt","h_4th.txt")) for p in workers()]
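
# A possible way to ensure remote work has finished without collecting futures
# (a sketch, not part of the original): @sync waits for all lexically enclosed
# @spawnat calls to complete before continuing.
# @sync for p in workers()
#     @spawnat p do_remote_work(p)   # do_remote_work is a hypothetical placeholder
# end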

# Create initial value grid function v and solution grid function u
#TODO: Should init be declared globally?
init(x,y) = sin(x) + sin(y)
v = dzeros(gridsize) # Distributed arrays
u = dzeros(gridsize) # Distributed arrays
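# Indexing a DArray with the symbol :L, as below, returns the part of the array stored
# locally on the calling worker (equivalent to DistributedArrays.localpart).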
# fetch is broadcast over the futures so that each remote assignment has completed;
# calling fetch on the array itself would return immediately without waiting.
fetch.([@spawnat p v[:L] = sbp.Grid.evalOn(fetch(grids_partitioned[p-1]), init) for p in workers()]) # TODO: Don't want to fetch here

# Apply Laplace
fetch.([@spawnat p sbp.apply_tiled!(fetch(Laplace_partitioned[p-1]), u[:L], v[:L]) for p in workers()]) # TODO: Don't want to fetch here

#TODO: Here we need to make sure that the data is ready.
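# Since v(x,y) = sin(x) + sin(y), the continuous Laplacian of v is -v, so u + v should
# be close to zero if the distributed application of the operator worked correctly.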
@show maximum(abs.(u + v))