changeset 146:21b188f38358 parallel_test

Started branch for testing out parallel computing
author Vidar Stiernström <vidar.stiernstrom@it.uu.se>
date Mon, 25 Feb 2019 16:36:15 +0100
parents 18b3c63673b3
children aa18b7bf4926
files distributedTest.jl
diffstat 1 files changed, 56 insertions(+), 0 deletions(-) [+]
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/distributedTest.jl	Mon Feb 25 16:36:15 2019 +0100
@@ -0,0 +1,56 @@
+# Distributed must be available on the master for @everywhere; make
+# DistributedArrays available on all processes.
+using Distributed
+@everywhere using DistributedArrays
+
+# TODO: Currently uses integer division to compute the local grid size.
+# Should non-divisible sizes, i.e. mod.(size, nworkers_per_dim) != 0, be
+# handled (see the sketch after this function), or should we keep the assertions?
+@everywhere function create_partitioned_grid(size::NTuple{Dim, Int}, limit_lower::NTuple{Dim, T}, limit_upper::NTuple{Dim, T}, nworkers_per_dim::NTuple{Dim, Int}) where {Dim, T}
+  # Require the grid to divide evenly over the workers (valid for any Dim)
+  @assert all(mod.(size, nworkers_per_dim) .== 0)
+  @assert prod(nworkers_per_dim) == nworkers()
+  # Translate the current worker id to a cartesian index, based on nworkers_per_dim
+  ci = CartesianIndices(nworkers_per_dim)
+  id = Tuple(ci[myid()-1])
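+  # Example: with nworkers_per_dim = (2,2), workers 2-5 (the master has id 1)
+  # map to id = (1,1), (2,1), (1,2) and (2,2) respectively (column-major order).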
+
+  # Compute the size of each partitioned grid
+  size_partition = size .÷ nworkers_per_dim # integer division; exactness guaranteed by the assertion above
+  # Compute domain size for each partition
+  domain_size = limit_upper.-limit_lower
+  domain_partition_size = domain_size./nworkers_per_dim
+
+  # Compute the lower and upper limit for each grid partition, then construct the grid
+  ll_partition = limit_lower .+ domain_partition_size.*(id.-1)
+  lu_partition = limit_lower .+ domain_partition_size.*id
+  grid = sbp.Grid.EquidistantGrid(size_partition, ll_partition, lu_partition)
+  return grid
+end
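+
+# A minimal sketch of how the non-divisible case in the TODO above could be
+# handled (not used here; r and the adjusted limits are illustrative only):
+# give the first r[d] = mod(size[d], nworkers_per_dim[d]) workers along each
+# dimension one extra grid point,
+#   r = mod.(size, nworkers_per_dim)
+#   size_partition = size .÷ nworkers_per_dim .+ ntuple(d -> id[d] <= r[d] ? 1 : 0, Dim)
+# in which case ll_partition/lu_partition must be accumulated from the actual
+# partition sizes instead of a uniform domain_partition_size.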
+
+# Create grid
+gridsize = (10000, 10000)
+limit_lower = (0., 0.)
+limit_upper = (2pi, 3pi/2)
+# TODO: The layout below, together with the reshape to (2,2) at the end of the
+# script, assumes exactly 4 workers arranged in a 2x2 process grid. Generalize
+# to other worker counts and layouts.
+nworkers_per_dim = (Int(nworkers()/2), Int(nworkers()/2))
+# Each @spawnat returns a Future; these are indexed by p-1 below since worker
+# ids start at 2.
+grids_partitioned = [@spawnat p create_partitioned_grid(gridsize, limit_lower, limit_upper, nworkers_per_dim) for p in workers()]
+
+# Create Laplace operator
+@everywhere op = sbp.readOperator("d2_4th.txt","h_4th.txt")
+Laplace_partitioned = [@spawnat p sbp.Laplace(fetch(grids_partitioned[p-1]), 1.0, op) for p in workers()]
+
+# Create initial value grid function v and solution grid function u
+# TODO: u and v could be distributed arrays from the start, with the local
+# parts of the distributed arrays passed to the functions (see the sketch below).
+@everywhere init(x,y) = sin(x) + sin(y)
+v = [@spawnat p sbp.Grid.evalOn(fetch(grids_partitioned[p-1]), init) for p in workers()]
+u = [@spawnat p zero(fetch(v[p-1])) for p in workers()]
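+
+# A sketch of the TODO above, assuming DistributedArrays' DArray(init, dims,
+# procs, dist) constructor and hypothetical maps x(i), y(j) from global grid
+# indices to coordinates:
+#   v_global = DArray(gridsize, workers(), [2, 2]) do I
+#       [init(x(i), y(j)) for i in I[1], j in I[2]]
+#   end
+# Each worker would then apply the operator to localpart(v_global) directly.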
+
+# Apply Laplace
+# Broadcast fetch over the futures so all workers finish before gathering results
+fetch.([@spawnat p sbp.apply_tiled!(fetch(Laplace_partitioned[p-1]), fetch(u[p-1]), fetch(v[p-1])) for p in workers()])
+
+# Arrange the per-worker futures according to the 2x2 process grid and
+# construct global solutions as distributed arrays
+u = reshape(u, (2, 2))
+u_global = DArray(u)
+v = reshape(v, (2, 2))
+v_global = DArray(v)
+@show maximum(abs.(u_global + v_global))
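+# Since the continuous Laplacian gives Δ(sin(x) + sin(y)) = -(sin(x) + sin(y)),
+# u should approximate -v, so the maximum printed above measures the
+# discretization error and should approach zero under grid refinement.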