Mercurial > repos > public > sbplib_julia
comparison distributedTest.jl @ 146:21b188f38358 parallel_test
Started branch for testing out parallel computing
author | Vidar Stiernström <vidar.stiernstrom@it.uu.se> |
---|---|
date | Mon, 25 Feb 2019 16:36:15 +0100 |
parents | |
children | aa18b7bf4926 |
comparison
equal
deleted
inserted
replaced
141:18b3c63673b3 | 146:21b188f38358 |
---|---|
1 @everywhere using Distributed | |
2 @everywhere using DistributedArrays | |
3 | |
# TODO: Currently uses integer division to calculate the local grid size.
# Should we make sure this is handled in some way if mod(sz./nworkers()) != 0
# or keep assertions?

# Build the equidistant sub-grid owned by the current worker.
#
# Arguments:
# - `size`            : global grid size per dimension.
# - `limit_lower`     : global lower domain limits per dimension.
# - `limit_upper`     : global upper domain limits per dimension.
# - `nworkers_per_dim`: process layout; must multiply to `nworkers()` and
#                       divide `size` evenly in every dimension.
#
# Returns the local `sbp.Grid.EquidistantGrid` for this worker's partition.
@everywhere function create_partitioned_grid(size::NTuple{Dim, Int}, limit_lower::NTuple{Dim, T}, limit_upper::NTuple{Dim, T}, nworkers_per_dim::NTuple{Dim, Int}) where Dim where T
    # Validate inputs explicitly (@assert may be compiled out and the
    # original `== (0,0)` comparison silently failed for Dim != 2).
    all(mod.(size, nworkers_per_dim) .== 0) ||
        throw(ArgumentError("grid size $size is not divisible by process layout $nworkers_per_dim"))
    prod(nworkers_per_dim) == nworkers() ||
        throw(ArgumentError("prod(nworkers_per_dim) = $(prod(nworkers_per_dim)) must equal nworkers() = $(nworkers())"))

    # Translate the current worker id to a cartesian index in the process
    # layout. Assumes worker ids are contiguous 2..nworkers()+1 — TODO confirm
    # this holds for the cluster manager in use.
    ci = CartesianIndices(nworkers_per_dim)
    id = Tuple(ci[myid() - 1])

    # Local grid size: exact integer division (replaces the float `./`
    # followed by map(x->Int(x), ...) round-trip).
    size_partition = div.(size, nworkers_per_dim)

    # Physical extent of one partition in each dimension.
    domain_size = limit_upper .- limit_lower
    domain_partition_size = domain_size ./ nworkers_per_dim

    # Lower/upper limits of this worker's partition, then build the grid.
    ll_partition = limit_lower .+ domain_partition_size .* (id .- 1)
    lu_partition = limit_lower .+ domain_partition_size .* id
    return sbp.Grid.EquidistantGrid(size_partition, ll_partition, lu_partition)
end
27 | |
# Create grid
gridsize = (10000, 10000);
limit_lower = (0., 0.)
limit_upper = (2pi, 3pi/2)
# TODO: Currently only works with same number of processes in each direction and for
# an even number of processes
# NOTE(review): create_partitioned_grid requires prod(nworkers_per_dim) == nworkers(),
# so (nw/2, nw/2) is only consistent when nworkers() == 4 — confirm intent.
# Fail with a clear message instead of the InexactError that Int(nworkers()/2)
# produced for odd worker counts; behavior is identical for even counts.
iseven(nworkers()) || throw(ArgumentError("requires an even number of workers, got $(nworkers())"))
nworkers_per_dim = (nworkers() ÷ 2, nworkers() ÷ 2)
# Spawn grid construction on every worker; each future holds that worker's local grid.
grids_partitioned = [@spawnat p create_partitioned_grid(gridsize, limit_lower, limit_upper, nworkers_per_dim) for p in workers()]
36 | |
# Create Laplace operator
@everywhere op = sbp.readOperator("d2_4th.txt","h_4th.txt")
# Index the futures by position in workers() rather than by `p-1`, which
# silently assumed worker ids are contiguous starting at 2 (the mapping is
# identical in that common case).
Laplace_partitioned = [@spawnat p sbp.Laplace(fetch(grids_partitioned[i]), 1.0, op) for (i, p) in enumerate(workers())]
40 | |
# Create initial value grid function v and solution grid function u
# TODO: u and v could be a distributed arrays from the start.
# Then pass local parts of the distributed arrays to the functions
@everywhere init(x,y) = sin(x) + sin(y)
# Index futures by position in workers() instead of `p-1` (which assumed
# contiguous worker ids starting at 2; identical mapping in that case).
v = [@spawnat p sbp.Grid.evalOn(fetch(grids_partitioned[i]), init) for (i, p) in enumerate(workers())]
u = [@spawnat p zero(fetch(v[i])) for (i, p) in enumerate(workers())]
47 | |
# Apply Laplace on every worker, writing into u.
# NOTE: `fetch` applied to an Array is the identity — the original
# `fetch([...])` returned the array of futures WITHOUT waiting on them.
# Broadcasting `fetch.` blocks on (and rethrows errors from) each future.
fetch.([@spawnat p sbp.apply_tiled!(fetch(Laplace_partitioned[i]), fetch(u[i]), fetch(v[i])) for (i, p) in enumerate(workers())])
50 | |
# Construct global vector and store in distributed array.
# Use the process layout for the reshape instead of the hard-coded (2,2);
# identical for the 4-worker case the script was written for, and stays
# consistent with nworkers_per_dim otherwise.
u = reshape(u, nworkers_per_dim)
u_global = DArray(u)
v = reshape(v, nworkers_per_dim)
v_global = DArray(v)
# Sanity check: since v = sin(x)+sin(y) and u = Laplace(v) ≈ -v, the sum
# should be small — presumably that is what is being verified here. TODO confirm.
@show maximum(abs.(u_global + v_global))