changeset 244:a827568fc251 boundary_conditions
Fix NormalDerivative and add tests
| author   | Jonatan Werpers <jonatan@werpers.com> |
|----------|---------------------------------------|
| date     | Wed, 26 Jun 2019 21:22:36 +0200       |
| parents  | 01a67d1b8b5d                          |
| children | d9e262cb2e8d ed29ee13e92e             |
| files    | DiffOps/src/laplace.jl DiffOps/test/runtests.jl |
| diffstat | 2 files changed, 91 insertions(+), 14 deletions(-) |
--- a/DiffOps/src/laplace.jl Wed Jun 26 21:19:00 2019 +0200
+++ b/DiffOps/src/laplace.jl Wed Jun 26 21:22:36 2019 +0200
@@ -10,29 +10,28 @@
 end
 export NormalDerivative
 
+# TODO: This is obviouly strange. Is domain_size just discarded? Is there a way to avoid storing grid in BoundaryValue?
+# Can we give special treatment to TensorMappings that go to a higher dim?
+LazyTensors.range_size(e::NormalDerivative{T}, domain_size::NTuple{1,Integer}) where T = size(e.grid)
+LazyTensors.domain_size(e::NormalDerivative{T}, range_size::NTuple{2,Integer}) where T = (range_size[3-dim(e.bId)],)
+
 # Not correct abstraction level
 # TODO: Not type stable D:<
 function LazyTensors.apply(d::NormalDerivative, v::AbstractArray, I::NTuple{2,Int})
     i = I[dim(d.bId)]
     j = I[3-dim(d.bId)]
-    N_i = d.grid.size[dim(d.bId)]
-
-    r = getregion(i, closureSize(d.op), N_i)
+    N_i = size(d.grid)[dim(d.bId)]
 
-    if r != region(d.bId)
-        return 0
-    end
-
-    if r == Lower
+    if region(d.bId) == Lower
         # Note, closures are indexed by offset. Fix this D:<
         return d.grid.inverse_spacing[dim(d.bId)]*d.op.dClosure[i-1]*v[j]
-    elseif r == Upper
-        return d.grid.inverse_spacing[dim(d.bId)]*d.op.dClosure[N_i-j]*v[j]
+    elseif region(d.bId) == Upper
+        return -d.grid.inverse_spacing[dim(d.bId)]*d.op.dClosure[N_i-i]*v[j]
     end
 end
 
 function LazyTensors.apply_transpose(d::NormalDerivative, v::AbstractArray, I::NTuple{1,Int})
-    u = selectdim(v,3-dim(d.bId),I)
+    u = selectdim(v,3-dim(d.bId),I[1])
     return apply_d(d.op, d.grid.inverse_spacing[dim(d.bId)], u, region(d.bId))
 end
 
@@ -52,7 +51,7 @@
 # TODO: This is obviouly strange. Is domain_size just discarded? Is there a way to avoid storing grid in BoundaryValue?
 # Can we give special treatment to TensorMappings that go to a higher dim?
 LazyTensors.range_size(e::BoundaryValue{T}, domain_size::NTuple{1,Integer}) where T = size(e.grid)
-LazyTensors.domain_size(e::BoundaryValue{T}, range_size::NTuple{2,Integer}) where T = (range_size[3-dim(e.bId)],);
+LazyTensors.domain_size(e::BoundaryValue{T}, range_size::NTuple{2,Integer}) where T = (range_size[3-dim(e.bId)],)
 
 function LazyTensors.apply(e::BoundaryValue, v::AbstractArray, I::NTuple{2,Int})
     i = I[dim(e.bId)]
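The substantive change in the Upper branch above is that the boundary-derivative closure is now applied mirrored (index N_i-i instead of N_i-j) and with a negated sign: reflecting a one-sided stencil to the other end of the domain flips the sign of the derivative it approximates. A minimal, self-contained sketch of that mirror-and-negate rule, using plain arrays and an assumed third-order one-sided stencil rather than the sbplib operator types:

    # Sketch only: `d` plays the role of an SBP boundary-derivative closure.
    # The stencil values are an assumed 3rd-order forward difference,
    # not values read from d2_4th.txt.
    d = [-11/6, 3, -3/2, 1/3]
    N = 21
    h = 1/(N - 1)
    x = range(0, stop=1, length=N)
    v = x.^2                          # v'(x) = 2x; the stencil is exact for quadratics

    dv_lower =  sum(d[k]*v[k]     for k in eachindex(d))/h   # ≈ v'(0) = 0
    dv_upper = -sum(d[k]*v[N+1-k] for k in eachindex(d))/h   # mirrored and negated: ≈ v'(1) = 2

    @assert isapprox(dv_lower, 0.0, atol=1e-12)
    @assert isapprox(dv_upper, 2.0, atol=1e-12)

The same mirroring appears in the new tests below, where the expected upper-boundary weights are filled as d_x_u[i] = -op.dClosure[length(d_x_u)-i].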
--- a/DiffOps/test/runtests.jl Wed Jun 26 21:19:00 2019 +0200
+++ b/DiffOps/test/runtests.jl Wed Jun 26 21:22:36 2019 +0200
@@ -5,8 +5,6 @@
 using RegionIndices
 using LazyTensors
 
-@test_broken false
-
 @testset "BoundaryValue" begin
     op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt")
     g = EquidistantGrid((4,5), (0.0, 0.0), (1.0,1.0))
@@ -56,9 +54,89 @@
     G_n = zeros(Float64, (4,5))
     G_n[:,5] = g_x
 
+    @test size(e_w*g_y) == (4,5)
+    @test size(e_e*g_y) == (4,5)
+    @test size(e_s*g_x) == (4,5)
+    @test size(e_n*g_x) == (4,5)
+
     @test collect(e_w*g_y) == G_w
     @test collect(e_e*g_y) == G_e
     @test collect(e_s*g_x) == G_s
     @test collect(e_n*g_x) == G_n
+end
+@testset "NormalDerivative" begin
+    op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt")
+    g = EquidistantGrid((5,6), (0.0, 0.0), (4.0,5.0))
+
+    d_w = NormalDerivative(op, g, CartesianBoundary{1,Lower}())
+    d_e = NormalDerivative(op, g, CartesianBoundary{1,Upper}())
+    d_s = NormalDerivative(op, g, CartesianBoundary{2,Lower}())
+    d_n = NormalDerivative(op, g, CartesianBoundary{2,Upper}())
+
+
+    v = evalOn(g, (x,y)-> x^2 + (y-1)^2 + x*y)
+    v∂x = evalOn(g, (x,y)-> 2*x + y)
+    v∂y = evalOn(g, (x,y)-> 2*(y-1) + x)
+
+    @test d_w isa TensorMapping{T,2,1} where T
+    @test d_w' isa TensorMapping{T,1,2} where T
+
+    @test domain_size(d_w, (3,2)) == (2,)
+    @test domain_size(d_e, (3,2)) == (2,)
+    @test domain_size(d_s, (3,2)) == (3,)
+    @test domain_size(d_n, (3,2)) == (3,)
+
+    @test size(d_w'*v) == (6,)
+    @test size(d_e'*v) == (6,)
+    @test size(d_s'*v) == (5,)
+    @test size(d_n'*v) == (5,)
+
+    @test collect(d_w'*v) ≈ v∂x[1,:]
+    @test collect(d_e'*v) ≈ v∂x[5,:]
+    @test collect(d_s'*v) ≈ v∂y[:,1]
+    @test collect(d_n'*v) ≈ v∂y[:,6]
+
+
+    d_x_l = zeros(Float64, 5)
+    d_x_u = zeros(Float64, 5)
+    for i ∈ eachindex(d_x_l)
+        d_x_l[i] = op.dClosure[i-1]
+        d_x_u[i] = -op.dClosure[length(d_x_u)-i]
+    end
+
+    d_y_l = zeros(Float64, 6)
+    d_y_u = zeros(Float64, 6)
+    for i ∈ eachindex(d_y_l)
+        d_y_l[i] = op.dClosure[i-1]
+        d_y_u[i] = -op.dClosure[length(d_y_u)-i]
+    end
+
+    function ❓(x,y)
+        G = zeros(Float64, length(x), length(y))
+        for I ∈ CartesianIndices(G)
+            G[I] = x[I[1]]*y[I[2]]
+        end
+
+        return G
+    end
+
+    g_x = [1,2,3,4.0,5]
+    g_y = [5,4,3,2,1.0,11]
+
+    G_w = ❓(d_x_l, g_y)
+    G_e = ❓(d_x_u, g_y)
+    G_s = ❓(g_x, d_y_l)
+    G_n = ❓(g_x, d_y_u)
+
+
+    @test size(d_w*g_y) == (5,6)
+    @test size(d_e*g_y) == (5,6)
+    @test size(d_s*g_x) == (5,6)
+    @test size(d_n*g_x) == (5,6)
+
+    @test collect(d_w*g_y) ≈ G_w
+    @test collect(d_e*g_y) ≈ G_e
+    @test collect(d_s*g_x) ≈ G_s
+    @test collect(d_n*g_x) ≈ G_n
 end
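For reference, the unnamed ❓ helper in the new testset builds the expected boundary results as outer products: applying a boundary operator to 1D boundary data gives a rank-one grid function, with the (mirrored, negated where appropriate) closure weights along one axis and the data along the other. A standalone sketch of that construction; the names outer_loop, x and y are made up for illustration and are not part of the test suite:

    using Test
    using LinearAlgebra   # provides adjoint (') for vectors

    # The ❓ helper fills G[i,j] = x[i]*y[j]; that is just the outer product x*y'.
    outer_loop(x, y) = [x[i]*y[j] for i in eachindex(x), j in eachindex(y)]

    x = [1.0, 2.0, 3.0]
    y = [4.0, 5.0]
    @test outer_loop(x, y) == x*y'    # both give a length(x) × length(y) matrix

With g_y = [5,4,3,2,1.0,11], g_x = [1,2,3,4.0,5] and the closure vectors d_x_l, d_x_u, d_y_l, d_y_u, the matrices G_w, G_e, G_s and G_n above are exactly such rank-one products.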