changeset 149:11b6646918d4 parallel_test

Rewrote the test using the @distributed macro instead of @spawnat calls. This seems to improve the results.
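
Roughly, the pattern change (do_work is a stand-in for the per-worker grid
setup and operator application done in the diff below):

  # before: one explicit future per worker, joined manually with fetch
  futures = [@spawnat p do_work(p) for p in workers()]
  fetch.(futures)

  # after: @distributed splits the loop iterations across the workers,
  # and @sync blocks until all of them have completed
  @sync @distributed for p in workers()
    do_work(p)
  end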
author Vidar Stiernström <vidar.stiernstrom@it.uu.se>
date Tue, 26 Feb 2019 11:09:30 +0100
parents 95a3ba70bccb
children 4dc19757cada
files distributedTest.jl
diffstat 1 files changed, 17 insertions(+), 27 deletions(-)
--- a/distributedTest.jl	Tue Feb 26 10:02:20 2019 +0100
+++ b/distributedTest.jl	Tue Feb 26 11:09:30 2019 +0100
@@ -1,7 +1,3 @@
-#NOTE: The following code "works" in that the resulting error is small. Much more
-# work needs to be done in order to figure out how Julia's distributed parallelism
-# works, especially w.r.t. data movement.
-
 #TODO: @everywhere using here, or just @everywhere include?
 @everywhere using DistributedArrays
 
@@ -29,31 +25,25 @@
   return grid
 end
 
-# Create grid
-#TODO: Should these be declared globally?
-gridsize = (10000, 10000);
-limit_lower = (0., 0.)
-limit_upper = (2pi, 3pi/2)
-nworkers_per_dim = (Int(nworkers()/2),Int(nworkers()/2))
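+# Time the operator application on the worker that executes it, and return
+# nothing so that the result array is not serialized back to the caller.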
+@everywhere function timed_apply(op, u, v)
+  @time sbp.apply_tiled!(op, u, v)
+  return nothing
+end
 
-# TODO: Currently only works with same number of processes in each direction and for
-# an even number of processes
-grids_partitioned = [@spawnat p create_partitioned_grid(gridsize, limit_lower , limit_upper, nworkers_per_dim) for p in workers()]
-
-# Create Laplace operator
-# TODO: If we don't have a fetch here, then the error is large. Does this indicate that we need to
-# move data, or simply that the future is not yet computed once this statement is reached?
-Laplace_partitioned  = [@spawnat p sbp.Laplace(fetch(grids_partitioned[p-1]), 1.0, sbp.readOperator("d2_4th.txt","h_4th.txt")) for p in workers()]
-
-# Create initial value grid function v and solution grid function u
-#TODO: Should init be declared globally?
-init(x,y) = sin(x) + sin(y)
+gridsize = (10000, 10000); # Global grid size
 v = dzeros(gridsize) # Distributed array
 u = dzeros(gridsize) # Distributed array
-fetch([@spawnat p v[:L] = sbp.Grid.evalOn(fetch(grids_partitioned[p-1]), init) for p in workers()])  #TODO: Don't want to fetch here
 
-# Apply Laplace
-fetch([@spawnat p sbp.apply_tiled!(fetch(Laplace_partitioned[p-1]),u[:L], v[:L]) for p in workers()]) #TODO: Don't want to fetch here
-
-#TODO: Here we need to make sure that the data is ready.
+@sync @distributed for p in workers()
+  # Should these be declared globally or locally?
+  limit_lower = (0., 0.)
+  limit_upper = (2pi, 3pi/2)
+  nworkers_per_dim = (Int(nworkers()/2), Int(nworkers()/2))
+  init(x,y) = sin(x) + sin(y)
+  grid = create_partitioned_grid(gridsize, limit_lower, limit_upper, nworkers_per_dim)
+  @inbounds v[:L] = sbp.Grid.evalOn(grid, init)
+  op = sbp.readOperator("d2_4th.txt","h_4th.txt")
+  Δ = sbp.Laplace(grid, 1.0, op)
+  @inbounds timed_apply(Δ, u[:L], v[:L])
+end
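+# Assuming apply_tiled!(op, u, v) stores the operator applied to v in u:
+# Δ(sin(x) + sin(y)) = -(sin(x) + sin(y)), so u should approximate -v and the
+# maximum of abs.(u + v) below measures the error.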
 @show maximum(abs.(u + v))
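
Usage note: with nworkers_per_dim = (nworkers()/2, nworkers()/2) the natural
configuration is four workers in a 2x2 process grid, e.g.

  julia -p 4 distributedTest.jl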