changeset 148:95a3ba70bccb parallel_test

Added some clarifying comments on the state of distributedTest.jl
author Vidar Stiernström <vidar.stiernstrom@it.uu.se>
date Tue, 26 Feb 2019 10:02:20 +0100
parents aa18b7bf4926
children 11b6646918d4
files distributedTest.jl
diffstat 1 files changed, 18 insertions(+), 9 deletions(-)
--- a/distributedTest.jl	Mon Feb 25 17:21:07 2019 +0100
+++ b/distributedTest.jl	Tue Feb 26 10:02:20 2019 +0100
@@ -1,4 +1,8 @@
-@everywhere using Distributed
+#NOTE: The following code "works" in that the resulting error is small. Much more
+# work needs to be done in order to figure out how Julia's distributed parallelism
+# works, especially w.r.t. data movement.
+
+#TODO: @everywhere using here, or just @everywhere include?
 @everywhere using DistributedArrays
 
 # TODO: Currently uses integer division to calculate the local grid size.
@@ -26,25 +30,30 @@
 end
 
 # Create grid
+#TODO: Should these be declared globally?
 gridsize = (10000, 10000);
 limit_lower = (0., 0.)
 limit_upper = (2pi, 3pi/2)
+nworkers_per_dim = (Int(nworkers()/2),Int(nworkers()/2))
+
 # TODO: Currently only works with same number of processes in each direction and for
 # an even number of processes
-nworkers_per_dim = (Int(nworkers()/2),Int(nworkers()/2))
 grids_partitioned = [@spawnat p create_partitioned_grid(gridsize, limit_lower , limit_upper, nworkers_per_dim) for p in workers()]
 
 # Create Laplace operator
-@everywhere op = sbp.readOperator("d2_4th.txt","h_4th.txt")
-Laplace_partitioned  = [@spawnat p sbp.Laplace(fetch(grids_partitioned[p-1]), 1.0, op) for p in workers()]
+# TODO: If we don't have a fetch here, then the error is large. Does this indicate that we need to move data,
+# or simply that the future has not yet been computed once this statement is reached?
+Laplace_partitioned  = [@spawnat p sbp.Laplace(fetch(grids_partitioned[p-1]), 1.0, sbp.readOperator("d2_4th.txt","h_4th.txt")) for p in workers()]
 
 # Create initial value grid function v and solution grid function u
-@everywhere init(x,y) = sin(x) + sin(y)
-v = dzeros(gridsize)
-u = dzeros(gridsize)
-@async([@spawnat p v[:L] = sbp.Grid.evalOn(fetch(grids_partitioned[p-1]), init) for p in workers()]) #TODO: Correct use of async?
+#TODO: Should init be declared globally?
+init(x,y) = sin(x) + sin(y)
+v = dzeros(gridsize) # Distributed array
+u = dzeros(gridsize) # Distributed array
+fetch([@spawnat p v[:L] = sbp.Grid.evalOn(fetch(grids_partitioned[p-1]), init) for p in workers()])  #TODO: Don't want to fetch here
 
 # Apply Laplace
-@async([@spawnat p sbp.apply_tiled!(fetch(Laplace_partitioned[p-1]),u[:L], v[:L]) for p in workers()]) #TODO: Correct use of async?
+fetch([@spawnat p sbp.apply_tiled!(fetch(Laplace_partitioned[p-1]),u[:L], v[:L]) for p in workers()]) #TODO: Don't want to fetch here
 
+#TODO: Here we need to make sure that the data is ready before reading the result (see the @sync sketch after the diff).
 @show maximum(abs.(u + v))
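
The TODOs above about fetch and @async all come down to the same question: how to make sure the remote writes to v[:L] and u[:L] have finished before the error is computed, without pulling array data back to process 1. Below is only a rough sketch of one possibility using @sync, not part of the changeset; v, u, grids_partitioned, Laplace_partitioned, init and sbp are assumed to be defined exactly as in the diff above. Note also that fetch applied to a plain Vector of Futures just returns the vector (the generic fallback is fetch(x) = x), so the fetch([...]) calls above most likely do not wait for the spawned tasks; fetch.(futures) or @sync does.

@sync for p in workers()
    # @spawnat returns a Future; @sync blocks until every lexically enclosed
    # @spawnat in this loop has completed, so v's local parts are filled in
    # before execution continues on process 1.
    @spawnat p v[:L] = sbp.Grid.evalOn(fetch(grids_partitioned[p-1]), init)
end

@sync for p in workers()
    @spawnat p sbp.apply_tiled!(fetch(Laplace_partitioned[p-1]), u[:L], v[:L])
end

# Both loops have been synchronized, so u and v are ready here.
@show maximum(abs.(u + v))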