sbplib_julia: comparison of distributedTest.jl @ 148:95a3ba70bccb (branch parallel_test)
Added some clarifying comments on the state of distributedTest.jl
| field | value |
|---|---|
| author | Vidar Stiernström <vidar.stiernstrom@it.uu.se> |
| date | Tue, 26 Feb 2019 10:02:20 +0100 |
| parents | aa18b7bf4926 |
| children | 11b6646918d4 |
| 147:aa18b7bf4926 | 148:95a3ba70bccb |
|---|---|
| 1 @everywhere using Distributed | 1 #NOTE: The following code "works" in that the resulting error is small. Much more |
| | 2 # work needs to be done in order to figure out how Julia's distributed parallelism works, |
| | 3 # especially w.r.t. data movement. |
| | 4 |
| | 5 #TODO: @everywhere using here, or just @everywhere include? |
| 2 @everywhere using DistributedArrays | 6 @everywhere using DistributedArrays |
| 3 | 7 |
| 4 # TODO: Currently uses integer division to calculate the local grid size. | 8 # TODO: Currently uses integer division to calculate the local grid size. |
| 5 # Should we make sure this is handled in some way if mod(sz./nworkers()) != 0 | 9 # Should we make sure this is handled in some way if mod(sz./nworkers()) != 0 |
| 6 # or keep assertions? | 10 # or keep assertions? |
| ... | ... |
| 24 grid = sbp.Grid.EquidistantGrid(size_partition, ll_partition, lu_partition) | 28 grid = sbp.Grid.EquidistantGrid(size_partition, ll_partition, lu_partition) |
| 25 return grid | 29 return grid |
| 26 end | 30 end |
| 27 | 31 |
| 28 # Create grid | 32 # Create grid |
| | 33 #TODO: Should these be declared globally? |
| 29 gridsize = (10000, 10000); | 34 gridsize = (10000, 10000); |
| 30 limit_lower = (0., 0.) | 35 limit_lower = (0., 0.) |
| 31 limit_upper = (2pi, 3pi/2) | 36 limit_upper = (2pi, 3pi/2) |
| | 37 nworkers_per_dim = (Int(nworkers()/2), Int(nworkers()/2)) |
| | 38 |
| 32 # TODO: Currently only works with the same number of processes in each direction and for | 39 # TODO: Currently only works with the same number of processes in each direction and for |
| 33 # an even number of processes | 40 # an even number of processes |
| 34 nworkers_per_dim = (Int(nworkers()/2), Int(nworkers()/2)) | |
| 35 grids_partitioned = [@spawnat p create_partitioned_grid(gridsize, limit_lower, limit_upper, nworkers_per_dim) for p in workers()] | 41 grids_partitioned = [@spawnat p create_partitioned_grid(gridsize, limit_lower, limit_upper, nworkers_per_dim) for p in workers()] |
| 36 | 42 |
| 37 # Create Laplace operator | 43 # Create Laplace operator |
| 38 @everywhere op = sbp.readOperator("d2_4th.txt","h_4th.txt") | 44 # TODO: If we don't have fetch here, then the error is large. Does this indicate that we need to move data, or simply that |
| 39 Laplace_partitioned = [@spawnat p sbp.Laplace(fetch(grids_partitioned[p-1]), 1.0, op) for p in workers()] | 45 # the future is not yet computed once this statement is reached? |
| | 46 Laplace_partitioned = [@spawnat p sbp.Laplace(fetch(grids_partitioned[p-1]), 1.0, sbp.readOperator("d2_4th.txt","h_4th.txt")) for p in workers()] |
| 40 | 47 |
| 41 # Create initial value grid function v and solution grid function u | 48 # Create initial value grid function v and solution grid function u |
| 42 @everywhere init(x,y) = sin(x) + sin(y) | 49 #TODO: Should init be declared globally? |
| 43 v = dzeros(gridsize) | 50 init(x,y) = sin(x) + sin(y) |
| 44 u = dzeros(gridsize) | 51 v = dzeros(gridsize) # Distributed arrays |
| 45 @async([@spawnat p v[:L] = sbp.Grid.evalOn(fetch(grids_partitioned[p-1]), init) for p in workers()]) #TODO: Correct use of async? | 52 u = dzeros(gridsize) # Distributed arrays |
| | 53 fetch([@spawnat p v[:L] = sbp.Grid.evalOn(fetch(grids_partitioned[p-1]), init) for p in workers()]) #TODO: Don't want to fetch here |
| 46 | 54 |
| 47 # Apply Laplace | 55 # Apply Laplace |
| 48 @async([@spawnat p sbp.apply_tiled!(fetch(Laplace_partitioned[p-1]), u[:L], v[:L]) for p in workers()]) #TODO: Correct use of async? | 56 fetch([@spawnat p sbp.apply_tiled!(fetch(Laplace_partitioned[p-1]), u[:L], v[:L]) for p in workers()]) #TODO: Don't want to fetch here |
| 49 | 57 |
| | 58 #TODO: Here we need to make sure that the data is ready. |
| 50 @show maximum(abs.(u + v)) | 59 @show maximum(abs.(u + v)) |
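The TODO at new lines 8–10 is left open in this revision. A standard way to drop the divisibility requirement is to let the local sizes differ by at most one point. The sketch below is illustrative only; `local_sizes` is a hypothetical name, not an sbplib function:

```julia
# Hypothetical helper (not part of sbplib): split n grid points over k workers
# so the local sizes differ by at most one, lifting the implicit requirement
# that nworkers() divides the grid size evenly.
function local_sizes(n::Integer, k::Integer)
    q, r = divrem(n, k)
    return [q + (i <= r) for i in 1:k]  # the first r workers get q + 1 points
end

local_sizes(10, 4)  # => [3, 3, 2, 2]; sum(local_sizes(n, k)) == n for any n, k >= 1
```

Applied per dimension, worker (i, j) would own `local_sizes(gridsize[1], nworkers_per_dim[1])[i]` by `local_sizes(gridsize[2], nworkers_per_dim[2])[j]` points, and the current assertions could become a sanity check rather than a restriction.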

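The synchronization TODOs (new lines 44–45, 53, 56 and 58) come down to two separable facts about `Distributed`. First, `fetch` applied to anything that is not a `Future`, `RemoteChannel`, or `Task` simply returns its argument, so `fetch([...])` over a vector of futures returns immediately without waiting; blocking requires a `wait`/`fetch` per future, or an enclosing `@sync`. Second, a `fetch` written inside `@spawnat p` executes on worker p, so if `grids_partitioned[p-1]` is the future whose value was computed on worker p itself (true whenever `workers()` is contiguous from id 2), that fetch moves no data between workers and only waits for the grid to exist — which points to the second alternative in the line 44–45 question. A minimal sketch of both points, assuming 4 workers and a placeholder 8×8 grid (not code from the revision):

```julia
# Minimal sketch: waiting for remote work on a DArray without copying array
# data back to the master process.
using Distributed
addprocs(4)
@everywhere using DistributedArrays

v = dzeros((8, 8))

# @spawnat returns a Future immediately; the remote work may still be running.
futures = [@spawnat p (v[:L] .= myid()) for p in workers()]

# fetch(futures) would return the vector of Futures without waiting.
# wait() blocks until each remote call completes but copies nothing back.
foreach(wait, futures)

# Equivalent and tidier: @sync blocks until every @spawnat in its body is
# done, which is what the "make sure that the data is ready" TODO asks for.
@sync for p in workers()
    @spawnat p (v[:L] .= 2 .* v[:L])
end
@show maximum(v)  # safe: all remote writes have completed

# Data movement: a fetch inside a remote closure runs on that worker, so the
# value travels from wherever the Future's result lives to the fetching worker.
f = @spawnat workers()[1] rand(3, 3)     # 3x3 result stored on the first worker
g = @spawnat workers()[2] sum(fetch(f))  # second worker pulls the matrix over
@show fetch(g)                           # only a single Float64 returns here
```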