sbplib_julia: changeset 345:2fcc960836c6
Merge branch refactor/combine_to_one_package.
author:   Jonatan Werpers <jonatan@werpers.com>
date:     Sat, 26 Sep 2020 15:22:13 +0200
parents:  e12c9a866513 (current diff), f781d6da7d3d
children: 76aa1486124a
diffstat: 78 files changed, 2295 insertions(+), 2401 deletions(-)
--- a/DiffOps/Manifest.toml Fri Sep 25 14:12:10 2020 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,67 +0,0 @@ -# This file is machine-generated - editing it directly is not advised - -[[Base64]] -uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" - -[[Distributed]] -deps = ["Random", "Serialization", "Sockets"] -uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b" - -[[Grids]] -deps = ["RegionIndices"] -path = "../Grids" -uuid = "960fdf28-97ed-11e9-2a74-bd90bf2fab5a" -version = "0.1.0" - -[[InteractiveUtils]] -deps = ["Markdown"] -uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240" - -[[LazyTensors]] -deps = ["RegionIndices"] -path = "../LazyTensors" -uuid = "62fbed2c-918d-11e9-279b-eb3a325b37d3" -version = "0.1.0" - -[[Logging]] -uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" - -[[Markdown]] -deps = ["Base64"] -uuid = "d6f4376e-aef5-505a-96c1-9c027394607a" - -[[OffsetArrays]] -git-tree-sha1 = "1af2f79c7eaac3e019a0de41ef63335ff26a0a57" -uuid = "6fe1bfb0-de20-5000-8ca7-80f57d26f881" -version = "0.11.1" - -[[Random]] -deps = ["Serialization"] -uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" - -[[RegionIndices]] -path = "../RegionIndices" -uuid = "5d527584-97f1-11e9-084c-4540c7ecf219" -version = "0.1.0" - -[[SbpOperators]] -deps = ["RegionIndices"] -path = "../SbpOperators" -uuid = "204357d8-97fd-11e9-05e7-010897a14cd0" -version = "0.1.0" - -[[Serialization]] -uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b" - -[[Sockets]] -uuid = "6462fe0b-24de-5631-8697-dd941f90decc" - -[[Test]] -deps = ["Distributed", "InteractiveUtils", "Logging", "Random"] -uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40" - -[[TiledIteration]] -deps = ["OffsetArrays", "Test"] -git-tree-sha1 = "58f6f07d3b54a363ec283a8f5fc9fb4ecebde656" -uuid = "06e1c1a7-607b-532d-9fad-de7d9aa2abac" -version = "0.2.3"
--- a/DiffOps/Project.toml Fri Sep 25 14:12:10 2020 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,17 +0,0 @@ -name = "DiffOps" -uuid = "39474f48-97ec-11e9-01fc-6ddcbe5918df" -authors = ["Jonatan Werpers <jonatan.werpers@it.uu.se>"] -version = "0.1.0" - -[deps] -Grids = "960fdf28-97ed-11e9-2a74-bd90bf2fab5a" -LazyTensors = "62fbed2c-918d-11e9-279b-eb3a325b37d3" -RegionIndices = "5d527584-97f1-11e9-084c-4540c7ecf219" -SbpOperators = "204357d8-97fd-11e9-05e7-010897a14cd0" -TiledIteration = "06e1c1a7-607b-532d-9fad-de7d9aa2abac" - -[extras] -Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" - -[targets] -test = ["Test"]
--- a/DiffOps/src/DiffOps.jl Fri Sep 25 14:12:10 2020 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,100 +0,0 @@ -module DiffOps - -using RegionIndices -using SbpOperators -using Grids -using LazyTensors - -""" - DiffOp - -Supertype of differential operator discretisations. -The action of the DiffOp is defined in the method - apply(D::DiffOp, v::AbstractVector, I...) -""" -abstract type DiffOp end - -function apply end - -function matrixRepresentation(D::DiffOp) - error("not implemented") -end - -abstract type DiffOpCartesian{Dim} <: DiffOp end - -# DiffOp must have a grid of dimension Dim!!! -function apply!(D::DiffOpCartesian{Dim}, u::AbstractArray{T,Dim}, v::AbstractArray{T,Dim}) where {T,Dim} - for I ∈ eachindex(D.grid) - u[I] = apply(D, v, I) - end - - return nothing -end -export apply! - -function apply_region!(D::DiffOpCartesian{2}, u::AbstractArray{T,2}, v::AbstractArray{T,2}) where T - apply_region!(D, u, v, Lower, Lower) - apply_region!(D, u, v, Lower, Interior) - apply_region!(D, u, v, Lower, Upper) - apply_region!(D, u, v, Interior, Lower) - apply_region!(D, u, v, Interior, Interior) - apply_region!(D, u, v, Interior, Upper) - apply_region!(D, u, v, Upper, Lower) - apply_region!(D, u, v, Upper, Interior) - apply_region!(D, u, v, Upper, Upper) - return nothing -end - -# Maybe this should be split according to b3fbef345810 after all?! Seems like it makes performance more predictable -function apply_region!(D::DiffOpCartesian{2}, u::AbstractArray{T,2}, v::AbstractArray{T,2}, r1::Type{<:Region}, r2::Type{<:Region}) where T - for I ∈ regionindices(D.grid.size, closuresize(D.op), (r1,r2)) - @inbounds indextuple = (Index{r1}(I[1]), Index{r2}(I[2])) - @inbounds u[I] = apply(D, v, indextuple) - end - return nothing -end -export apply_region! - -function apply_tiled!(D::DiffOpCartesian{2}, u::AbstractArray{T,2}, v::AbstractArray{T,2}) where T - apply_region_tiled!(D, u, v, Lower, Lower) - apply_region_tiled!(D, u, v, Lower, Interior) - apply_region_tiled!(D, u, v, Lower, Upper) - apply_region_tiled!(D, u, v, Interior, Lower) - apply_region_tiled!(D, u, v, Interior, Interior) - apply_region_tiled!(D, u, v, Interior, Upper) - apply_region_tiled!(D, u, v, Upper, Lower) - apply_region_tiled!(D, u, v, Upper, Interior) - apply_region_tiled!(D, u, v, Upper, Upper) - return nothing -end - -using TiledIteration -function apply_region_tiled!(D::DiffOpCartesian{2}, u::AbstractArray{T,2}, v::AbstractArray{T,2}, r1::Type{<:Region}, r2::Type{<:Region}) where T - ri = regionindices(D.grid.size, closuresize(D.op), (r1,r2)) - # TODO: Pass Tilesize to function - for tileaxs ∈ TileIterator(axes(ri), padded_tilesize(T, (5,5), 2)) - for j ∈ tileaxs[2], i ∈ tileaxs[1] - I = ri[i,j] - u[I] = apply(D, v, (Index{r1}(I[1]), Index{r2}(I[2]))) - end - end - return nothing -end -export apply_region_tiled! - -function apply(D::DiffOp, v::AbstractVector)::AbstractVector - u = zeros(eltype(v), size(v)) - apply!(D,v,u) - return u -end - -export apply - -""" -A BoundaryCondition should implement the method - sat(::DiffOp, v::AbstractArray, data::AbstractArray, ...) -""" -abstract type BoundaryCondition end - - -end # module
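The nine explicit calls in `apply_region!` and `apply_tiled!` above enumerate every combination of boundary and interior regions of a 2D grid. A compact equivalent, sketched against the pre-merge `DiffOps` and `RegionIndices` packages (the helper name `apply_all_regions!` is illustrative, not part of the changeset):

```julia
using DiffOps, RegionIndices

# Same effect as the nine apply_region! calls above: visit every combination of
# (Lower, Interior, Upper) regions along the two coordinate directions.
function apply_all_regions!(D, u, v)
    for r1 in (Lower, Interior, Upper), r2 in (Lower, Interior, Upper)
        apply_region!(D, u, v, r1, r2)
    end
    return nothing
end
```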
--- a/DiffOps/test/runtests.jl Fri Sep 25 14:12:10 2020 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,269 +0,0 @@ -using Test -using DiffOps -using Grids -using SbpOperators -using RegionIndices -using LazyTensors - -@testset "Laplace2D" begin - op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") - Lx = 3.5 - Ly = 7.2 - g = EquidistantGrid((42,41), (0.0, 0.0), (Lx,Ly)) - L = Laplace(g, 1., op) - H = quadrature(L) - - f0(x::Float64,y::Float64) = 2. - f1(x::Float64,y::Float64) = x+y - f2(x::Float64,y::Float64) = 1/2*x^2 + 1/2*y^2 - f3(x::Float64,y::Float64) = 1/6*x^3 + 1/6*y^3 - f4(x::Float64,y::Float64) = 1/24*x^4 + 1/24*y^4 - f5(x::Float64,y::Float64) = sin(x) + cos(y) - f5ₓₓ(x::Float64,y::Float64) = -f5(x,y) - - v0 = evalOn(g,f0) - v1 = evalOn(g,f1) - v2 = evalOn(g,f2) - v3 = evalOn(g,f3) - v4 = evalOn(g,f4) - v5 = evalOn(g,f5) - v5ₓₓ = evalOn(g,f5ₓₓ) - - @test L isa TensorOperator{T,2} where T - @test L' isa TensorMapping{T,2,2} where T - - # TODO: Should perhaps set tolerance level for isapporx instead? - # Are these tolerance levels resonable or should tests be constructed - # differently? - equalitytol = 0.5*1e-10 - accuracytol = 0.5*1e-3 - # 4th order interior stencil, 2nd order boundary stencil, - # implies that L*v should be exact for v - monomial up to order 3. - # Exact differentiation is measured point-wise. For other grid functions - # the error is measured in the H-norm. - @test all(abs.(collect(L*v0)) .<= equalitytol) - @test all(abs.(collect(L*v1)) .<= equalitytol) - @test all(collect(L*v2) .≈ v0) # Seems to be more accurate - @test all(abs.((collect(L*v3) - v1)) .<= equalitytol) - e4 = collect(L*v4) - v2 - e5 = collect(L*v5) - v5ₓₓ - @test sum(collect(H*e4.^2)) <= accuracytol - @test sum(collect(H*e5.^2)) <= accuracytol -end - -@testset "Quadrature" begin - op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") - Lx = 2.3 - Ly = 5.2 - g = EquidistantGrid((77,66), (0.0, 0.0), (Lx,Ly)) - H = Quadrature(op,g) - v = ones(Float64, size(g)) - - @test H isa TensorOperator{T,2} where T - @test H' isa TensorMapping{T,2,2} where T - @test sum(collect(H*v)) ≈ (Lx*Ly) - @test collect(H*v) == collect(H'*v) -end - -@testset "InverseQuadrature" begin - op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") - Lx = 7.3 - Ly = 8.2 - g = EquidistantGrid((77,66), (0.0, 0.0), (Lx,Ly)) - H = Quadrature(op,g) - Hinv = InverseQuadrature(op,g) - v = evalOn(g, (x,y)-> x^2 + (y-1)^2 + x*y) - - @test Hinv isa TensorOperator{T,2} where T - @test Hinv' isa TensorMapping{T,2,2} where T - @test collect(Hinv*H*v) ≈ v - @test collect(Hinv*v) == collect(Hinv'*v) -end - -@testset "BoundaryValue" begin - op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") - g = EquidistantGrid((4,5), (0.0, 0.0), (1.0,1.0)) - - e_w = BoundaryValue(op, g, CartesianBoundary{1,Lower}()) - e_e = BoundaryValue(op, g, CartesianBoundary{1,Upper}()) - e_s = BoundaryValue(op, g, CartesianBoundary{2,Lower}()) - e_n = BoundaryValue(op, g, CartesianBoundary{2,Upper}()) - - v = zeros(Float64, 4, 5) - v[:,5] = [1, 2, 3,4] - v[:,4] = [1, 2, 3,4] - v[:,3] = [4, 5, 6, 7] - v[:,2] = [7, 8, 9, 10] - v[:,1] = [10, 11, 12, 13] - - @test e_w isa TensorMapping{T,2,1} where T - @test e_w' isa TensorMapping{T,1,2} where T - - @test domain_size(e_w, (3,2)) == (2,) - @test domain_size(e_e, (3,2)) == (2,) - @test domain_size(e_s, (3,2)) == (3,) - @test domain_size(e_n, (3,2)) == (3,) - - @test size(e_w'*v) == (5,) - 
@test size(e_e'*v) == (5,) - @test size(e_s'*v) == (4,) - @test size(e_n'*v) == (4,) - - @test collect(e_w'*v) == [10,7,4,1.0,1] - @test collect(e_e'*v) == [13,10,7,4,4.0] - @test collect(e_s'*v) == [10,11,12,13.0] - @test collect(e_n'*v) == [1,2,3,4.0] - - g_x = [1,2,3,4.0] - g_y = [5,4,3,2,1.0] - - G_w = zeros(Float64, (4,5)) - G_w[1,:] = g_y - - G_e = zeros(Float64, (4,5)) - G_e[4,:] = g_y - - G_s = zeros(Float64, (4,5)) - G_s[:,1] = g_x - - G_n = zeros(Float64, (4,5)) - G_n[:,5] = g_x - - @test size(e_w*g_y) == (UnknownDim,5) - @test size(e_e*g_y) == (UnknownDim,5) - @test size(e_s*g_x) == (4,UnknownDim) - @test size(e_n*g_x) == (4,UnknownDim) - - # These tests should be moved to where they are possible (i.e we know what the grid should be) - @test_broken collect(e_w*g_y) == G_w - @test_broken collect(e_e*g_y) == G_e - @test_broken collect(e_s*g_x) == G_s - @test_broken collect(e_n*g_x) == G_n -end - -@testset "NormalDerivative" begin - op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") - g = EquidistantGrid((5,6), (0.0, 0.0), (4.0,5.0)) - - d_w = NormalDerivative(op, g, CartesianBoundary{1,Lower}()) - d_e = NormalDerivative(op, g, CartesianBoundary{1,Upper}()) - d_s = NormalDerivative(op, g, CartesianBoundary{2,Lower}()) - d_n = NormalDerivative(op, g, CartesianBoundary{2,Upper}()) - - - v = evalOn(g, (x,y)-> x^2 + (y-1)^2 + x*y) - v∂x = evalOn(g, (x,y)-> 2*x + y) - v∂y = evalOn(g, (x,y)-> 2*(y-1) + x) - - @test d_w isa TensorMapping{T,2,1} where T - @test d_w' isa TensorMapping{T,1,2} where T - - @test domain_size(d_w, (3,2)) == (2,) - @test domain_size(d_e, (3,2)) == (2,) - @test domain_size(d_s, (3,2)) == (3,) - @test domain_size(d_n, (3,2)) == (3,) - - @test size(d_w'*v) == (6,) - @test size(d_e'*v) == (6,) - @test size(d_s'*v) == (5,) - @test size(d_n'*v) == (5,) - - @test collect(d_w'*v) ≈ v∂x[1,:] - @test collect(d_e'*v) ≈ v∂x[5,:] - @test collect(d_s'*v) ≈ v∂y[:,1] - @test collect(d_n'*v) ≈ v∂y[:,6] - - - d_x_l = zeros(Float64, 5) - d_x_u = zeros(Float64, 5) - for i ∈ eachindex(d_x_l) - d_x_l[i] = op.dClosure[i-1] - d_x_u[i] = -op.dClosure[length(d_x_u)-i] - end - - d_y_l = zeros(Float64, 6) - d_y_u = zeros(Float64, 6) - for i ∈ eachindex(d_y_l) - d_y_l[i] = op.dClosure[i-1] - d_y_u[i] = -op.dClosure[length(d_y_u)-i] - end - - function prod_matrix(x,y) - G = zeros(Float64, length(x), length(y)) - for I ∈ CartesianIndices(G) - G[I] = x[I[1]]*y[I[2]] - end - - return G - end - - g_x = [1,2,3,4.0,5] - g_y = [5,4,3,2,1.0,11] - - G_w = prod_matrix(d_x_l, g_y) - G_e = prod_matrix(d_x_u, g_y) - G_s = prod_matrix(g_x, d_y_l) - G_n = prod_matrix(g_x, d_y_u) - - - @test size(d_w*g_y) == (UnknownDim,6) - @test size(d_e*g_y) == (UnknownDim,6) - @test size(d_s*g_x) == (5,UnknownDim) - @test size(d_n*g_x) == (5,UnknownDim) - - # These tests should be moved to where they are possible (i.e we know what the grid should be) - @test_broken collect(d_w*g_y) ≈ G_w - @test_broken collect(d_e*g_y) ≈ G_e - @test_broken collect(d_s*g_x) ≈ G_s - @test_broken collect(d_n*g_x) ≈ G_n -end - -@testset "BoundaryQuadrature" begin - op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") - g = EquidistantGrid((10,11), (0.0, 0.0), (1.0,1.0)) - - H_w = BoundaryQuadrature(op, g, CartesianBoundary{1,Lower}()) - H_e = BoundaryQuadrature(op, g, CartesianBoundary{1,Upper}()) - H_s = BoundaryQuadrature(op, g, CartesianBoundary{2,Lower}()) - H_n = BoundaryQuadrature(op, g, CartesianBoundary{2,Upper}()) - - v = evalOn(g, (x,y)-> x^2 + (y-1)^2 + x*y) - - 
function get_quadrature(N) - qc = op.quadratureClosure - q = (qc..., ones(N-2*closuresize(op))..., reverse(qc)...) - @assert length(q) == N - return q - end - - v_w = v[1,:] - v_e = v[10,:] - v_s = v[:,1] - v_n = v[:,11] - - q_x = spacing(g)[1].*get_quadrature(10) - q_y = spacing(g)[2].*get_quadrature(11) - - @test H_w isa TensorOperator{T,1} where T - - @test domain_size(H_w, (3,)) == (3,) - @test domain_size(H_n, (3,)) == (3,) - - @test range_size(H_w, (3,)) == (3,) - @test range_size(H_n, (3,)) == (3,) - - @test size(H_w*v_w) == (11,) - @test size(H_e*v_e) == (11,) - @test size(H_s*v_s) == (10,) - @test size(H_n*v_n) == (10,) - - @test collect(H_w*v_w) ≈ q_y.*v_w - @test collect(H_e*v_e) ≈ q_y.*v_e - @test collect(H_s*v_s) ≈ q_x.*v_s - @test collect(H_n*v_n) ≈ q_x.*v_n - - @test collect(H_w'*v_w) == collect(H_w'*v_w) - @test collect(H_e'*v_e) == collect(H_e'*v_e) - @test collect(H_s'*v_s) == collect(H_s'*v_s) - @test collect(H_n'*v_n) == collect(H_n'*v_n) -end
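In the Laplace2D testset above, the operator is exact (to rounding) on monomials up to degree three, while for smooth functions the error is measured in the quadrature-induced norm. As a sketch of the identity behind `sum(collect(H*e.^2)) <= accuracytol` for the diagonal quadrature H used here:

```latex
\|e\|_H^2 \;=\; e^{\mathrm T} H\, e \;\approx\; \int_\Omega e(x,y)^2 \,\mathrm{d}x\,\mathrm{d}y
```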
--- a/Grids/Manifest.toml Fri Sep 25 14:12:10 2020 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,6 +0,0 @@
-# This file is machine-generated - editing it directly is not advised
-
-[[RegionIndices]]
-path = "../RegionIndices"
-uuid = "5d527584-97f1-11e9-084c-4540c7ecf219"
-version = "0.1.0"
--- a/Grids/Project.toml Fri Sep 25 14:12:10 2020 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,13 +0,0 @@
-name = "Grids"
-uuid = "960fdf28-97ed-11e9-2a74-bd90bf2fab5a"
-authors = ["Jonatan Werpers <jonatan.werpers@it.uu.se>"]
-version = "0.1.0"
-
-[deps]
-RegionIndices = "5d527584-97f1-11e9-084c-4540c7ecf219"
-
-[extras]
-Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
-
-[targets]
-test = ["Test"]
--- a/Grids/src/AbstractGrid.jl Fri Sep 25 14:12:10 2020 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,24 +0,0 @@ -""" - AbstractGrid - -Should implement - dimension(grid::AbstractGrid) - points(grid::AbstractGrid) - -""" -abstract type AbstractGrid end - -function dimension end -function points end -export dimension, points - -""" - evalOn(g::AbstractGrid, f::Function) - -Evaluate function f on the grid g -""" -function evalOn(g::AbstractGrid, f::Function) - F(x) = f(x...) - return F.(points(g)) -end -export evalOn
--- a/Grids/src/EquidistantGrid.jl Fri Sep 25 14:12:10 2020 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,73 +0,0 @@ -# EquidistantGrid is a grid with equidistant grid spacing per coordinat -# direction. The domain is defined through the two points P1 = x̄₁, P2 = x̄₂ -# by the exterior product of the vectors obtained by projecting (x̄₂-x̄₁) onto -# the coordinate directions. E.g for a 2D grid with x̄₁=(-1,0) and x̄₂=(1,2) -# the domain is defined as (-1,1)x(0,2). - -export EquidistantGrid - -struct EquidistantGrid{Dim,T<:Real} <: AbstractGrid - size::NTuple{Dim, Int} # First coordinate direction stored first - limit_lower::NTuple{Dim, T} - limit_upper::NTuple{Dim, T} - inverse_spacing::NTuple{Dim, T} # Reciprocal of grid spacing - - # General constructor - function EquidistantGrid(size::NTuple{Dim, Int}, limit_lower::NTuple{Dim, T}, limit_upper::NTuple{Dim, T}) where Dim where T - @assert all(size.>0) - @assert all(limit_upper.-limit_lower .!= 0) - inverse_spacing = (size.-1)./ abs.(limit_upper.-limit_lower) - return new{Dim,T}(size, limit_lower, limit_upper, inverse_spacing) - end -end - -function EquidistantGrid(size::Int, limit_lower::T, limit_upper::T) where T - return EquidistantGrid((size,),(limit_lower,),(limit_upper,)) -end - -function Base.eachindex(grid::EquidistantGrid) - CartesianIndices(grid.size) -end - -Base.size(g::EquidistantGrid) = g.size - -# Returns the number of dimensions of an EquidistantGrid. -# -# @Input: grid - an EquidistantGrid -# @Return: dimension - The dimension of the grid -function dimension(grid::EquidistantGrid) - return length(grid.size) -end - -# Returns the reciprocal of the spacing of the grid -# -function inverse_spacing(grid::EquidistantGrid) - return grid.inverse_spacing -end -export inverse_spacing - -# Returns the reciprocal of the spacing of the grid -# -# TODO: Evaluate if divisions affect performance -function spacing(grid::EquidistantGrid) - return 1.0./grid.inverse_spacing -end -export spacing - -# Computes the points of an EquidistantGrid as an array of tuples with -# the same dimension as the grid. -# -# @Input: grid - an EquidistantGrid -# @Return: points - the points of the grid. -function points(grid::EquidistantGrid) - # TODO: Make this return an abstract array? - indices = Tuple.(CartesianIndices(grid.size)) - h = spacing(grid) - return broadcast(I -> grid.limit_lower .+ (I.-1).*h, indices) -end - -function pointsalongdim(grid::EquidistantGrid, dim::Integer) - @assert dim<=dimension(grid) - @assert dim>0 - points = collect(range(grid.limit_lower[dim],stop=grid.limit_upper[dim],length=grid.size[dim])) -end
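A usage sketch of the grid API deleted here as part of folding the standalone `Grids` package into the single repository package; the values follow directly from the definitions above:

```julia
using Grids   # the pre-merge standalone package

g = EquidistantGrid((4, 5), (0.0, 0.0), (1.0, 2.0))  # 4×5 grid on (0,1)×(0,2)

dimension(g)        # 2
inverse_spacing(g)  # (3.0, 2.0) == (size .- 1) ./ (limit_upper .- limit_lower)
spacing(g)          # (0.333..., 0.5)
points(g)[1, 1]     # (0.0, 0.0); grid points are returned as tuples

v = evalOn(g, (x, y) -> x + y)  # evaluate a function on every grid point
size(v)             # (4, 5)
```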
--- a/Grids/src/Grids.jl Fri Sep 25 14:12:10 2020 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,17 +0,0 @@ -module Grids - -using RegionIndices - -export BoundaryIdentifier, CartesianBoundary - -abstract type BoundaryIdentifier end -struct CartesianBoundary{Dim, R<:Region} <: BoundaryIdentifier end -dim(::CartesianBoundary{Dim, R}) where {Dim, R} = Dim -region(::CartesianBoundary{Dim, R}) where {Dim, R} = R - -export dim, region - -include("AbstractGrid.jl") -include("EquidistantGrid.jl") - -end # module
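`CartesianBoundary` tags a boundary by coordinate direction and side; a short sketch using the definitions above:

```julia
using Grids
using RegionIndices

west = CartesianBoundary{1, Lower}()   # lower boundary along the first direction
dim(west)     # 1
region(west)  # Lower
```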
--- a/Grids/test/runtests.jl Fri Sep 25 14:12:10 2020 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,8 +0,0 @@ -using Grids -using Test - -@testset "EquidistantGrid" begin - @test EquidistantGrid(4,0,1) isa EquidistantGrid - @test dimension(EquidistantGrid(4,0,1)) == 1 - @test EquidistantGrid(4,0,1) == EquidistantGrid((4,),(0,),(1,)) -end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/LICENSE Sat Sep 26 15:22:13 2020 +0200 @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2020 Jonatan Werpers <jonatan@werpers.com> and contributors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE.
--- a/LazyTensors/Manifest.toml Fri Sep 25 14:12:10 2020 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,6 +0,0 @@
-# This file is machine-generated - editing it directly is not advised
-
-[[RegionIndices]]
-path = "../RegionIndices"
-uuid = "5d527584-97f1-11e9-084c-4540c7ecf219"
-version = "0.1.0"
--- a/LazyTensors/Project.toml Fri Sep 25 14:12:10 2020 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,13 +0,0 @@
-name = "LazyTensors"
-uuid = "62fbed2c-918d-11e9-279b-eb3a325b37d3"
-authors = ["Jonatan Werpers <jonatan.werpers@it.uu.se>"]
-version = "0.1.0"
-
-[deps]
-RegionIndices = "5d527584-97f1-11e9-084c-4540c7ecf219"
-
-[extras]
-Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
-
-[targets]
-test = ["Test"]
--- a/LazyTensors/src/LazyTensors.jl Fri Sep 25 14:12:10 2020 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,7 +0,0 @@
-module LazyTensors
-using RegionIndices
-include("tensor_mapping.jl")
-include("lazy_array.jl")
-include("lazy_tensor_operations.jl")
-
-end # module
--- a/LazyTensors/src/lazy_array.jl Fri Sep 25 14:12:10 2020 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,93 +0,0 @@ -""" - LazyArray{T,D} <: AbstractArray{T,D} - -Array which is calcualted lazily when indexing. - -A subtype of `LazyArray` will use lazy version of `+`, `-`, `*`, `/`. -""" -abstract type LazyArray{T,D} <: AbstractArray{T,D} end -export LazyArray - -struct LazyConstantArray{T,D} <: LazyArray{T,D} - val::T - size::NTuple{D,Int} -end - -Base.size(lca::LazyConstantArray) = lca.size -Base.getindex(lca::LazyConstantArray{T,D}, I::Vararg{Int,D}) where {T,D} = lca.val - -""" - LazyElementwiseOperation{T,D,Op} <: LazyArray{T,D} -Struct allowing for lazy evaluation of elementwise operations on AbstractArrays. - -A LazyElementwiseOperation contains two arrays together with an operation. -The operations are carried out when the LazyElementwiseOperation is indexed. -""" -struct LazyElementwiseOperation{T,D,Op} <: LazyArray{T,D} - a::AbstractArray{T,D} - b::AbstractArray{T,D} - - function LazyElementwiseOperation{T,D,Op}(a::AbstractArray{T,D},b::AbstractArray{T,D}) where {T,D,Op} - @boundscheck if size(a) != size(b) - throw(DimensionMismatch("dimensions must match")) - end - return new{T,D,Op}(a,b) - end - - LazyElementwiseOperation{T,D,Op}(a::AbstractArray{T,D},b::T) where {T,D,Op} = new{T,D,Op}(a, LazyConstantArray(b, size(a))) - LazyElementwiseOperation{T,D,Op}(a::T,b::AbstractArray{T,D}) where {T,D,Op} = new{T,D,Op}(LazyConstantArray(a, size(b)), b) -end -# TODO: Move Op to be the first parameter? Compare to Binary operations - -Base.size(v::LazyElementwiseOperation) = size(v.a) - -evaluate(leo::LazyElementwiseOperation{T,D,:+}, I::Vararg{Int,D}) where {T,D} = leo.a[I...] + leo.b[I...] -evaluate(leo::LazyElementwiseOperation{T,D,:-}, I::Vararg{Int,D}) where {T,D} = leo.a[I...] - leo.b[I...] -evaluate(leo::LazyElementwiseOperation{T,D,:*}, I::Vararg{Int,D}) where {T,D} = leo.a[I...] * leo.b[I...] -evaluate(leo::LazyElementwiseOperation{T,D,:/}, I::Vararg{Int,D}) where {T,D} = leo.a[I...] / leo.b[I...] - -# TODO: Make sure boundschecking is done properly and that the lenght of the vectors are equal -# NOTE: Boundschecking in getindex functions now assumes that the size of the -# vectors in the LazyElementwiseOperation are the same size. If we remove the -# size assertion in the constructor we might have to handle -# boundschecking differently. -Base.@propagate_inbounds @inline function Base.getindex(leo::LazyElementwiseOperation{T,D}, I::Vararg{Int,D}) where {T,D} - @boundscheck if !checkbounds(Bool, leo.a, I...) - throw(BoundsError([leo], I...)) - end - return evaluate(leo, I...) -end - -# Define lazy operations for AbstractArrays. Operations constructs a LazyElementwiseOperation which -# can later be indexed into. 
Lazy operations are denoted by the usual operator followed by a tilde -Base.@propagate_inbounds +̃(a::AbstractArray{T,D}, b::AbstractArray{T,D}) where {T,D} = LazyElementwiseOperation{T,D,:+}(a,b) -Base.@propagate_inbounds -̃(a::AbstractArray{T,D}, b::AbstractArray{T,D}) where {T,D} = LazyElementwiseOperation{T,D,:-}(a,b) -Base.@propagate_inbounds *̃(a::AbstractArray{T,D}, b::AbstractArray{T,D}) where {T,D} = LazyElementwiseOperation{T,D,:*}(a,b) -Base.@propagate_inbounds /̃(a::AbstractArray{T,D}, b::AbstractArray{T,D}) where {T,D} = LazyElementwiseOperation{T,D,:/}(a,b) - -Base.@propagate_inbounds +̃(a::AbstractArray{T,D}, b::T) where {T,D} = LazyElementwiseOperation{T,D,:+}(a,b) -Base.@propagate_inbounds -̃(a::AbstractArray{T,D}, b::T) where {T,D} = LazyElementwiseOperation{T,D,:-}(a,b) -Base.@propagate_inbounds *̃(a::AbstractArray{T,D}, b::T) where {T,D} = LazyElementwiseOperation{T,D,:*}(a,b) -Base.@propagate_inbounds /̃(a::AbstractArray{T,D}, b::T) where {T,D} = LazyElementwiseOperation{T,D,:/}(a,b) - -Base.@propagate_inbounds +̃(a::T, b::AbstractArray{T,D}) where {T,D} = LazyElementwiseOperation{T,D,:+}(a,b) -Base.@propagate_inbounds -̃(a::T, b::AbstractArray{T,D}) where {T,D} = LazyElementwiseOperation{T,D,:-}(a,b) -Base.@propagate_inbounds *̃(a::T, b::AbstractArray{T,D}) where {T,D} = LazyElementwiseOperation{T,D,:*}(a,b) -Base.@propagate_inbounds /̃(a::T, b::AbstractArray{T,D}) where {T,D} = LazyElementwiseOperation{T,D,:/}(a,b) - - - -# NOTE: Är det knas att vi har till exempel * istället för .* ?? -# Oklart om det ens går att lösa.. -Base.@propagate_inbounds Base.:+(a::LazyArray{T,D}, b::LazyArray{T,D}) where {T,D} = a +̃ b -Base.@propagate_inbounds Base.:+(a::LazyArray{T,D}, b::AbstractArray{T,D}) where {T,D} = a +̃ b -Base.@propagate_inbounds Base.:+(a::AbstractArray{T,D}, b::LazyArray{T,D}) where {T,D} = a +̃ b - -Base.@propagate_inbounds Base.:-(a::LazyArray{T,D}, b::LazyArray{T,D}) where {T,D} = a -̃ b -Base.@propagate_inbounds Base.:-(a::LazyArray{T,D}, b::AbstractArray{T,D}) where {T,D} = a -̃ b -Base.@propagate_inbounds Base.:-(a::AbstractArray{T,D}, b::LazyArray{T,D}) where {T,D} = a -̃ b - -# Element wise operation for `*` and `\` are not overloaded due to conflicts with the behavior -# of regular `*` and `/` for AbstractArrays. Use tilde versions instead. - -export +̃, -̃, *̃, /̃
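A sketch of the lazy element-wise arithmetic defined above, assuming the pre-merge `LazyTensors` package:

```julia
using LazyTensors

a = [1.0, 2.0, 3.0]
b = [4.0, 5.0, 6.0]

c = a +̃ b       # builds a LazyElementwiseOperation; nothing is computed yet
c[2]            # 7.0, evaluated only at the requested index
d = a *̃ 2.0     # scalars are wrapped in an internal LazyConstantArray
collect(d)      # [2.0, 4.0, 6.0]
```

Note that for plain `Array`s the ordinary `+` and `-` keep their eager meaning; the lazy versions are used only when at least one operand is a `LazyArray`, and `*`/`/` are available exclusively through the tilde forms.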
--- a/LazyTensors/src/lazy_tensor_operations.jl Fri Sep 25 14:12:10 2020 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,101 +0,0 @@ -""" - LazyTensorMappingApplication{T,R,D} <: LazyArray{T,R} - -Struct for lazy application of a TensorMapping. Created using `*`. - -Allows the result of a `TensorMapping` applied to a vector to be treated as an `AbstractArray`. -With a mapping `m` and a vector `v` the LazyTensorMappingApplication object can be created by `m*v`. -The actual result will be calcualted when indexing into `m*v`. -""" -struct LazyTensorMappingApplication{T,R,D} <: LazyArray{T,R} - t::TensorMapping{T,R,D} - o::AbstractArray{T,D} -end -export LazyTensorMappingApplication - -Base.:*(tm::TensorMapping{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = LazyTensorMappingApplication(tm,o) -Base.getindex(ta::LazyTensorMappingApplication{T,R,D}, I::Vararg{Index,R}) where {T,R,D} = apply(ta.t, ta.o, I...) -Base.getindex(ta::LazyTensorMappingApplication{T,R,D}, I::Vararg{Int,R}) where {T,R,D} = apply(ta.t, ta.o, Index{Unknown}.(I)...) -Base.size(ta::LazyTensorMappingApplication{T,R,D}) where {T,R,D} = range_size(ta.t,size(ta.o)) -# TODO: What else is needed to implement the AbstractArray interface? - -# # We need the associativity to be a→b→c = a→(b→c), which is the case for '→' -Base.:*(a::TensorMapping{T,R,D}, b::TensorMapping{T,D,K}, args::Union{TensorMapping{T}, AbstractArray{T}}...) where {T,R,D,K} = foldr(*,(a,b,args...)) -# # Should we overload some other infix binary opesrator? -# →(tm::TensorMapping{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = LazyTensorMappingApplication(tm,o) -# TODO: We need to be really careful about good error messages. -# For example what happens if you try to multiply LazyTensorMappingApplication with a TensorMapping(wrong order)? - -""" - LazyTensorMappingTranspose{T,R,D} <: TensorMapping{T,D,R} - -Struct for lazy transpose of a TensorMapping. - -If a mapping implements the the `apply_transpose` method this allows working with -the transpose of mapping `m` by using `m'`. `m'` will work as a regular TensorMapping lazily calling -the appropriate methods of `m`. -""" -struct LazyTensorMappingTranspose{T,R,D} <: TensorMapping{T,D,R} - tm::TensorMapping{T,R,D} -end -export LazyTensorMappingTranspose - -# # TBD: Should this be implemented on a type by type basis or through a trait to provide earlier errors? -Base.adjoint(tm::TensorMapping) = LazyTensorMappingTranspose(tm) -Base.adjoint(tmt::LazyTensorMappingTranspose) = tmt.tm - -apply(tmt::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{T,R}, I::Vararg{Index,D}) where {T,R,D} = apply_transpose(tmt.tm, v, I...) -apply_transpose(tmt::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{T,D}, I::Vararg{Index,R}) where {T,R,D} = apply(tmt.tm, v, I...) - -range_size(tmt::LazyTensorMappingTranspose{T,R,D}, d_size::NTuple{R,Integer}) where {T,R,D} = domain_size(tmt.tm, d_size) -domain_size(tmt::LazyTensorMappingTranspose{T,R,D}, r_size::NTuple{D,Integer}) where {T,R,D} = range_size(tmt.tm, r_size) - - - - -struct LazyTensorMappingBinaryOperation{Op,T,R,D,T1<:TensorMapping{T,R,D},T2<:TensorMapping{T,R,D}} <: TensorMapping{T,D,R} - tm1::T1 - tm2::T2 - - @inline function LazyTensorMappingBinaryOperation{Op,T,R,D}(tm1::T1,tm2::T2) where {Op,T,R,D, T1<:TensorMapping{T,R,D},T2<:TensorMapping{T,R,D}} - return new{Op,T,R,D,T1,T2}(tm1,tm2) - end -end - -apply(tmBinOp::LazyTensorMappingBinaryOperation{:+,T,R,D}, v::AbstractArray{T,D}, I::Vararg{Index,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) 
+ apply(tmBinOp.tm2, v, I...) -apply(tmBinOp::LazyTensorMappingBinaryOperation{:-,T,R,D}, v::AbstractArray{T,D}, I::Vararg{Index,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) - apply(tmBinOp.tm2, v, I...) - -range_size(tmBinOp::LazyTensorMappingBinaryOperation{Op,T,R,D}, domain_size::NTuple{D,Integer}) where {Op,T,R,D} = range_size(tmBinOp.tm1, domain_size) -domain_size(tmBinOp::LazyTensorMappingBinaryOperation{Op,T,R,D}, range_size::NTuple{R,Integer}) where {Op,T,R,D} = domain_size(tmBinOp.tm2, range_size) - -Base.:+(tm1::TensorMapping{T,R,D}, tm2::TensorMapping{T,R,D}) where {T,R,D} = LazyTensorMappingBinaryOperation{:+,T,R,D}(tm1,tm2) -Base.:-(tm1::TensorMapping{T,R,D}, tm2::TensorMapping{T,R,D}) where {T,R,D} = LazyTensorMappingBinaryOperation{:-,T,R,D}(tm1,tm2) - - -# TODO: Write tests and documentation for LazyTensorMappingComposition -# struct LazyTensorMappingComposition{T,R,K,D} <: TensorMapping{T,R,D} -# t1::TensorMapping{T,R,K} -# t2::TensorMapping{T,K,D} -# end - -# Base.:∘(s::TensorMapping{T,R,K}, t::TensorMapping{T,K,D}) where {T,R,K,D} = LazyTensorMappingComposition(s,t) - -# function range_size(tm::LazyTensorMappingComposition{T,R,K,D}, domain_size::NTuple{D,Integer}) where {T,R,K,D} -# range_size(tm.t1, domain_size(tm.t2, domain_size)) -# end - -# function domain_size(tm::LazyTensorMappingComposition{T,R,K,D}, range_size::NTuple{R,Integer}) where {T,R,K,D} -# domain_size(tm.t1, domain_size(tm.t2, range_size)) -# end - -# function apply(c::LazyTensorMappingComposition{T,R,K,D}, v::AbstractArray{T,D}, I::NTuple{R,Int}) where {T,R,K,D} -# apply(c.t1, LazyTensorMappingApplication(c.t2,v), I...) -# end - -# function apply_transpose(c::LazyTensorMappingComposition{T,R,K,D}, v::AbstractArray{T,D}, I::NTuple{D,Int}) where {T,R,K,D} -# apply_transpose(c.t2, LazyTensorMappingApplication(c.t1',v), I...) -# end - -# # Have i gone too crazy with the type parameters? Maybe they aren't all needed? - -# export →
--- a/LazyTensors/src/tensor_mapping.jl Fri Sep 25 14:12:10 2020 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,83 +0,0 @@ -""" - TensorMapping{T,R,D} - -Describes a mapping of a D dimension tensor to an R dimension tensor. -The action of the mapping is implemented through the method - - apply(t::TensorMapping{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {R,D,T} - -The size of range tensor should be dependent on the size of the domain tensor -and the type should implement the methods - - range_size(::TensorMapping{T,R,D}, domain_size::NTuple{D,Integer}) where {T,R,D} - domain_size(::TensorMapping{T,R,D}, range_size::NTuple{R,Integer}) where {T,R,D} - -to allow querying for one or the other. - -Optionally the action of the transpose may be defined through - apply_transpose(t::TensorMapping{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {R,D,T} -""" -abstract type TensorMapping{T,R,D} end -export TensorMapping - -""" - apply(t::TensorMapping{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {R,D,T} - -Return the result of the mapping for a given index. -""" -function apply end -export apply - -""" - apply_transpose(t::TensorMapping{T,R,D}, v::AbstractArray{T,R}, I::Vararg) where {R,D,T} - -Return the result of the transposed mapping for a given index. -""" -function apply_transpose end -export apply_transpose - -""" -Return the dimension of the range space of a given mapping -""" -range_dim(::TensorMapping{T,R,D}) where {T,R,D} = R - -""" -Return the dimension of the domain space of a given mapping -""" -domain_dim(::TensorMapping{T,R,D}) where {T,R,D} = D - -export range_dim, domain_dim - -""" - range_size(M::TensorMapping, domain_size) - -Return the resulting range size for the mapping applied to a given domain_size -""" -function range_size end - -""" - domain_size(M::TensorMapping, range_size) - -Return the resulting domain size for the mapping applied to a given range_size -""" -function domain_size end - -""" - Dummy type for representing dimensions of tensormappings when domain_size is unknown -""" -struct UnknownDim end -export range_size, domain_size, TensorMappingDim, UnknownDim - -# TODO: Think about boundschecking! - - -""" - TensorOperator{T,D} - -A `TensorMapping{T,D,D}` where the range and domain tensor have the same number of -dimensions and the same size. -""" -abstract type TensorOperator{T,D} <: TensorMapping{T,D,D} end -export TensorOperator -domain_size(::TensorOperator{T,D}, range_size::NTuple{D,Integer}) where {T,D} = range_size -range_size(::TensorOperator{T,D}, domain_size::NTuple{D,Integer}) where {T,D} = domain_size
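The smallest useful implementation of the interface documented above is a scaling operator, mirroring the `ScalingOperator` defined in the test file below. A hedged sketch (the name `Scaling` is illustrative):

```julia
using LazyTensors
using RegionIndices

# A TensorOperator only needs `apply`; range_size/domain_size fall back to the
# identity definitions given above.
struct Scaling{T,D} <: TensorOperator{T,D}
    λ::T
end
LazyTensors.apply(s::Scaling{T,D}, v, I::Vararg{Index,D}) where {T,D} = s.λ * v[I...]

s = Scaling{Float64,1}(2.0)
v = [1.0, 2.0, 3.0]
u = s * v        # lazy: a LazyTensorMappingApplication (see lazy_tensor_operations.jl above)
collect(u)       # [2.0, 4.0, 6.0]
```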
--- a/LazyTensors/test/runtests.jl Fri Sep 25 14:12:10 2020 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,191 +0,0 @@ -using Test -using LazyTensors -using RegionIndices - -@testset "Generic Mapping methods" begin - struct DummyMapping{T,R,D} <: TensorMapping{T,R,D} end - LazyTensors.apply(m::DummyMapping{T,R,D}, v, i::NTuple{R,Index{<:Region}}) where {T,R,D} = :apply - @test range_dim(DummyMapping{Int,2,3}()) == 2 - @test domain_dim(DummyMapping{Int,2,3}()) == 3 - @test apply(DummyMapping{Int,2,3}(), zeros(Int, (0,0,0)),(Index{Unknown}(0),Index{Unknown}(0))) == :apply -end - -@testset "Generic Operator methods" begin - struct DummyOperator{T,D} <: TensorOperator{T,D} end - @test range_size(DummyOperator{Int,2}(), (3,5)) == (3,5) - @test domain_size(DummyOperator{Float64, 3}(), (3,3,1)) == (3,3,1) -end - -@testset "Mapping transpose" begin - struct DummyMapping{T,R,D} <: TensorMapping{T,R,D} end - - LazyTensors.apply(m::DummyMapping{T,R,D}, v, I::Vararg{Index{<:Region},R}) where {T,R,D} = :apply - LazyTensors.apply_transpose(m::DummyMapping{T,R,D}, v, I::Vararg{Index{<:Region},D}) where {T,R,D} = :apply_transpose - - LazyTensors.range_size(m::DummyMapping{T,R,D}, domain_size::NTuple{D,Integer}) where {T,R,D} = :range_size - LazyTensors.domain_size(m::DummyMapping{T,R,D}, range_size::NTuple{R,Integer}) where {T,R,D} = :domain_size - - m = DummyMapping{Float64,2,3}() - I = Index{Unknown}(0) - @test m' isa TensorMapping{Float64, 3,2} - @test m'' == m - @test apply(m',zeros(Float64,(0,0)), I, I, I) == :apply_transpose - @test apply(m'',zeros(Float64,(0,0,0)), I, I) == :apply - @test apply_transpose(m', zeros(Float64,(0,0,0)), I, I) == :apply - - @test range_size(m', (0,0)) == :domain_size - @test domain_size(m', (0,0,0)) == :range_size -end - -@testset "TensorApplication" begin - struct DummyMapping{T,R,D} <: TensorMapping{T,R,D} end - - LazyTensors.apply(m::DummyMapping{T,R,D}, v, i::Vararg{Index{<:Region},R}) where {T,R,D} = (:apply,v,i) - LazyTensors.range_size(m::DummyMapping{T,R,D}, domain_size::NTuple{D,Integer}) where {T,R,D} = 2 .* domain_size - LazyTensors.domain_size(m::DummyMapping{T,R,D}, range_size::NTuple{R,Integer}) where {T,R,D} = range_size.÷2 - - - m = DummyMapping{Int, 1, 1}() - v = [0,1,2] - @test m*v isa AbstractVector{Int} - @test size(m*v) == 2 .*size(v) - @test (m*v)[Index{Upper}(0)] == (:apply,v,(Index{Upper}(0),)) - @test (m*v)[0] == (:apply,v,(Index{Unknown}(0),)) - @test m*m*v isa AbstractVector{Int} - @test (m*m*v)[Index{Upper}(1)] == (:apply,m*v,(Index{Upper}(1),)) - @test (m*m*v)[1] == (:apply,m*v,(Index{Unknown}(1),)) - @test (m*m*v)[Index{Interior}(3)] == (:apply,m*v,(Index{Interior}(3),)) - @test (m*m*v)[3] == (:apply,m*v,(Index{Unknown}(3),)) - @test (m*m*v)[Index{Lower}(6)] == (:apply,m*v,(Index{Lower}(6),)) - @test (m*m*v)[6] == (:apply,m*v,(Index{Unknown}(6),)) - @test_broken BoundsError == (m*m*v)[0] - @test_broken BoundsError == (m*m*v)[7] - - m = DummyMapping{Int, 2, 1}() - @test_throws MethodError m*ones(Int,2,2) - @test_throws MethodError m*m*v - - m = DummyMapping{Float64, 2, 2}() - v = ones(3,3) - I = (Index{Lower}(1),Index{Interior}(2)); - @test size(m*v) == 2 .*size(v) - @test (m*v)[I] == (:apply,v,I) - - struct ScalingOperator{T,D} <: TensorOperator{T,D} - λ::T - end - - LazyTensors.apply(m::ScalingOperator{T,D}, v, I::Vararg{Index,D}) where {T,D} = m.λ*v[I] - - m = ScalingOperator{Int,1}(2) - v = [1,2,3] - @test m*v isa AbstractVector - @test m*v == [2,4,6] - - m = ScalingOperator{Int,2}(2) - v = [[1 2];[3 4]] - @test m*v == [[2 4];[6 
8]] - I = (Index{Upper}(2),Index{Lower}(1)) - @test (m*v)[I] == 6 -end - -@testset "TensorMapping binary operations" begin - struct ScalarMapping{T,R,D} <: TensorMapping{T,R,D} - λ::T - end - - LazyTensors.apply(m::ScalarMapping{T,R,D}, v, I::Vararg{Index{<:Region}}) where {T,R,D} = m.λ*v[I...] - LazyTensors.range_size(m::ScalarMapping, domain_size) = domain_size - LazyTensors.domain_size(m::ScalarMapping, range_sizes) = range_sizes - - A = ScalarMapping{Float64,1,1}(2.0) - B = ScalarMapping{Float64,1,1}(3.0) - - v = [1.1,1.2,1.3] - for i ∈ eachindex(v) - @test ((A+B)*v)[i] == 2*v[i] + 3*v[i] - end - - for i ∈ eachindex(v) - @test ((A-B)*v)[i] == 2*v[i] - 3*v[i] - end - - @test range_size(A+B, (3,)) == range_size(A, (3,)) == range_size(B,(3,)) - @test domain_size(A+B, (3,)) == domain_size(A, (3,)) == domain_size(B,(3,)) -end - -@testset "LazyArray" begin - @testset "LazyConstantArray" begin - @test LazyTensors.LazyConstantArray(3,(3,2)) isa LazyArray{Int,2} - - lca = LazyTensors.LazyConstantArray(3.0,(3,2)) - @test eltype(lca) == Float64 - @test ndims(lca) == 2 - @test size(lca) == (3,2) - @test lca[2] == 3.0 - end - struct DummyArray{T,D, T1<:AbstractArray{T,D}} <: LazyArray{T,D} - data::T1 - end - Base.size(v::DummyArray) = size(v.data) - Base.getindex(v::DummyArray{T,D}, I::Vararg{Int,D}) where {T,D} = v.data[I...] - - # Test lazy operations - v1 = [1, 2.3, 4] - v2 = [1., 2, 3] - s = 3.4 - r_add_v = v1 .+ v2 - r_sub_v = v1 .- v2 - r_times_v = v1 .* v2 - r_div_v = v1 ./ v2 - r_add_s = v1 .+ s - r_sub_s = v1 .- s - r_times_s = v1 .* s - r_div_s = v1 ./ s - @test isa(v1 +̃ v2, LazyArray) - @test isa(v1 -̃ v2, LazyArray) - @test isa(v1 *̃ v2, LazyArray) - @test isa(v1 /̃ v2, LazyArray) - @test isa(v1 +̃ s, LazyArray) - @test isa(v1 -̃ s, LazyArray) - @test isa(v1 *̃ s, LazyArray) - @test isa(v1 /̃ s, LazyArray) - @test isa(s +̃ v1, LazyArray) - @test isa(s -̃ v1, LazyArray) - @test isa(s *̃ v1, LazyArray) - @test isa(s /̃ v1, LazyArray) - for i ∈ eachindex(v1) - @test (v1 +̃ v2)[i] == r_add_v[i] - @test (v1 -̃ v2)[i] == r_sub_v[i] - @test (v1 *̃ v2)[i] == r_times_v[i] - @test (v1 /̃ v2)[i] == r_div_v[i] - @test (v1 +̃ s)[i] == r_add_s[i] - @test (v1 -̃ s)[i] == r_sub_s[i] - @test (v1 *̃ s)[i] == r_times_s[i] - @test (v1 /̃ s)[i] == r_div_s[i] - @test (s +̃ v1)[i] == r_add_s[i] - @test (s -̃ v1)[i] == -r_sub_s[i] - @test (s *̃ v1)[i] == r_times_s[i] - @test (s /̃ v1)[i] == 1/r_div_s[i] - end - @test_throws BoundsError (v1 +̃ v2)[4] - v2 = [1., 2, 3, 4] - # Test that size of arrays is asserted when not specified inbounds - @test_throws DimensionMismatch v1 +̃ v2 - - # Test operations on LazyArray - v1 = DummyArray([1, 2.3, 4]) - v2 = [1., 2, 3] - @test isa(v1 + v2, LazyArray) - @test isa(v2 + v1, LazyArray) - @test isa(v1 - v2, LazyArray) - @test isa(v2 - v1, LazyArray) - for i ∈ eachindex(v2) - @test (v1 + v2)[i] == (v2 + v1)[i] == r_add_v[i] - @test (v1 - v2)[i] == -(v2 - v1)[i] == r_sub_v[i] - end - @test_throws BoundsError (v1 + v2)[4] - v2 = [1., 2, 3, 4] - # Test that size of arrays is asserted when not specified inbounds - @test_throws DimensionMismatch v1 + v2 -end
--- a/Manifest.toml Fri Sep 25 14:12:10 2020 +0200 +++ b/Manifest.toml Sat Sep 26 15:22:13 2020 +0200 @@ -1,73 +1,12 @@ # This file is machine-generated - editing it directly is not advised -[[Base64]] -uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" - -[[DiffOps]] -deps = ["Grids", "LazyTensors", "RegionIndices", "SbpOperators", "TiledIteration"] -path = "DiffOps" -uuid = "39474f48-97ec-11e9-01fc-6ddcbe5918df" -version = "0.1.0" - -[[Distributed]] -deps = ["Random", "Serialization", "Sockets"] -uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b" - -[[Grids]] -deps = ["RegionIndices"] -path = "Grids" -uuid = "960fdf28-97ed-11e9-2a74-bd90bf2fab5a" -version = "0.1.0" - -[[InteractiveUtils]] -deps = ["Markdown"] -uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240" - -[[LazyTensors]] -deps = ["RegionIndices"] -path = "LazyTensors" -uuid = "62fbed2c-918d-11e9-279b-eb3a325b37d3" -version = "0.1.0" - -[[Logging]] -uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" - -[[Markdown]] -deps = ["Base64"] -uuid = "d6f4376e-aef5-505a-96c1-9c027394607a" - [[OffsetArrays]] -git-tree-sha1 = "87d0a91efe29352d5caaa271ae3927083c096e33" +git-tree-sha1 = "9011c7c98769c451f83869a4d66461e2f23bc80b" uuid = "6fe1bfb0-de20-5000-8ca7-80f57d26f881" -version = "0.11.4" - -[[Random]] -deps = ["Serialization"] -uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" - -[[RegionIndices]] -path = "RegionIndices" -uuid = "5d527584-97f1-11e9-084c-4540c7ecf219" -version = "0.1.0" - -[[SbpOperators]] -deps = ["LazyTensors", "RegionIndices"] -path = "SbpOperators" -uuid = "204357d8-97fd-11e9-05e7-010897a14cd0" -version = "0.1.0" - -[[Serialization]] -uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b" - -[[Sockets]] -uuid = "6462fe0b-24de-5631-8697-dd941f90decc" - -[[Test]] -deps = ["Distributed", "InteractiveUtils", "Logging", "Random"] -uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40" +version = "1.2.1" [[TiledIteration]] -deps = ["OffsetArrays", "Test"] -git-tree-sha1 = "58f6f07d3b54a363ec283a8f5fc9fb4ecebde656" +deps = ["OffsetArrays"] +git-tree-sha1 = "98693daea9bb49aba71eaad6b168b152d2310358" uuid = "06e1c1a7-607b-532d-9fad-de7d9aa2abac" -version = "0.2.3" +version = "0.2.4"
--- a/Project.toml Fri Sep 25 14:12:10 2020 +0200
+++ b/Project.toml Sat Sep 26 15:22:13 2020 +0200
@@ -1,6 +1,10 @@
+name = "Sbplib"
+uuid = "5a373a26-915f-4769-bcab-bf03835de17b"
+authors = ["Jonatan Werpers <jonatan@werpers.com> and contributors"]
+version = "0.1.0"
+
 [deps]
-DiffOps = "39474f48-97ec-11e9-01fc-6ddcbe5918df"
-Grids = "960fdf28-97ed-11e9-2a74-bd90bf2fab5a"
-LazyTensors = "62fbed2c-918d-11e9-279b-eb3a325b37d3"
-RegionIndices = "5d527584-97f1-11e9-084c-4540c7ecf219"
-SbpOperators = "204357d8-97fd-11e9-05e7-010897a14cd0"
+TiledIteration = "06e1c1a7-607b-532d-9fad-de7d9aa2abac"
+
+[compat]
+julia = "1.5"
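After this merge the repository is a single Julia package named `Sbplib`, with `TiledIteration` as its only registered dependency. A sketch of the standard Pkg workflow one would now use (not something specified by this changeset; `using Sbplib` assumes the top-level module matches the new package name):

```julia
using Pkg
Pkg.activate(".")   # activate the Sbplib project at the repository root
Pkg.instantiate()   # resolves TiledIteration per the committed Manifest.toml
using Sbplib        # assumption: a src/Sbplib.jl module matching the package name
```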
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/README.md Sat Sep 26 15:22:13 2020 +0200
@@ -0,0 +1,1 @@
+# Sbplib
--- a/RegionIndices/Project.toml Fri Sep 25 14:12:10 2020 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,10 +0,0 @@
-name = "RegionIndices"
-uuid = "5d527584-97f1-11e9-084c-4540c7ecf219"
-authors = ["Jonatan Werpers <jonatan.werpers@it.uu.se>"]
-version = "0.1.0"
-
-[extras]
-Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
-
-[targets]
-test = ["Test"]
--- a/RegionIndices/src/RegionIndices.jl Fri Sep 25 14:12:10 2020 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,85 +0,0 @@ -module RegionIndices - -abstract type Region end -struct Interior <: Region end -struct Lower <: Region end -struct Upper <: Region end -struct Unknown <: Region end - -export Region, Interior, Lower, Upper, Unknown - -struct Index{R<:Region, T<:Integer} - i::T - - Index{R,T}(i::T) where {R<:Region,T<:Integer} = new{R,T}(i) - Index{R}(i::T) where {R<:Region,T<:Integer} = new{R,T}(i) - Index(i::T, ::Type{R}) where {R<:Region,T<:Integer} = Index{R,T}(i) - Index(t::Tuple{T, DataType}) where {R<:Region,T<:Integer} = Index{t[2],T}(t[1]) # TBD: This is not very specific in what types are allowed in t[2]. Can this be fixed? -end - -export Index - -# Index(R::Type{<:Region}) = Index{R} - -## Vill kunna skriva -## IndexTupleType(Int, (Lower, Interior)) -Index(R::Type{<:Region}, T::Type{<:Integer}) = Index{R,T} -IndexTupleType(T::Type{<:Integer},R::NTuple{N, DataType} where N) = Tuple{Index.(R, T)...} - -Base.convert(::Type{T}, i::Index{R,T} where R) where T = i.i -Base.convert(::Type{CartesianIndex}, I::NTuple{N,Index} where N) = CartesianIndex(convert.(Int, I)) - -Base.Int(I::Index) = I.i -Base.to_index(I::Index) = Int(I) #How to get this to work for all cases?? -Base.getindex(A::AbstractArray{T,N}, I::NTuple{N,Index}) where {T,N} = A[I...] #Is this ok?? - -function Index(i::Integer, boundary_width::Integer, dim_size::Integer) - return Index{getregion(i,boundary_width,dim_size)}(i) -end - -IndexTuple(t::Vararg{Tuple{T, DataType}}) where T<:Integer = Index.(t) -export IndexTuple - -# TODO: Use the values of the region structs, e.g. Lower(), for the region parameter instead of the types. -# For example the following works: -# (Lower(),Upper()) isa NTuple{2, Region} -> true -# typeof((Lower(),Upper())) -> Tuple{Lower,Upper} -function regionindices(gridsize::NTuple{Dim,Integer}, closuresize::Integer, region::NTuple{Dim,DataType}) where Dim - return regionindices(gridsize, ntuple(x->closuresize,Dim), region) -end - -function regionindices(gridsize::NTuple{Dim,Integer}, closuresize::NTuple{Dim,Integer}, region::NTuple{Dim,DataType}) where Dim - regions = map(getrange,gridsize,closuresize,region) - return CartesianIndices(regions) -end - -export regionindices - -function getregion(i::Integer, boundary_width::Integer, dim_size::Integer) - if 0 < i <= boundary_width - return Lower - elseif boundary_width < i <= dim_size-boundary_width - return Interior - elseif dim_size-boundary_width < i <= dim_size - return Upper - else - error("Bounds error") # TODO: Make this more standard - end -end - -export getregion - -function getrange(gridsize::Integer, closuresize::Integer, region::DataType) - if region == Lower - r = 1:closuresize - elseif region == Interior - r = (closuresize+1):(gridsize - closuresize) - elseif region == Upper - r = (gridsize - closuresize + 1):gridsize - else - error("Unspecified region") - end - return r -end - -end # module
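A usage sketch of the region classification utilities defined above:

```julia
using RegionIndices

N  = 10   # points along one dimension
cs = 3    # closure (boundary stencil) width

getregion(2, cs, N)    # Lower
getregion(5, cs, N)    # Interior
getregion(9, cs, N)    # Upper

i = Index(5, cs, N)    # classified at construction: Index{Interior,Int}(5)
Int(i)                 # 5

# All indices in the (Lower, Interior) corner region of a 10×10 grid:
regionindices((10, 10), cs, (Lower, Interior))   # CartesianIndices((1:3, 4:7))
```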
--- a/RegionIndices/test/runtests.jl Fri Sep 25 14:12:10 2020 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,4 +0,0 @@
-using RegionIndices
-using Test
-
-@test_broken false
--- a/SbpOperators/Manifest.toml Fri Sep 25 14:12:10 2020 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,12 +0,0 @@
-# This file is machine-generated - editing it directly is not advised
-
-[[LazyTensors]]
-deps = ["RegionIndices"]
-path = "../LazyTensors"
-uuid = "62fbed2c-918d-11e9-279b-eb3a325b37d3"
-version = "0.1.0"
-
-[[RegionIndices]]
-path = "../RegionIndices"
-uuid = "5d527584-97f1-11e9-084c-4540c7ecf219"
-version = "0.1.0"
--- a/SbpOperators/Project.toml Fri Sep 25 14:12:10 2020 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,15 +0,0 @@
-name = "SbpOperators"
-uuid = "204357d8-97fd-11e9-05e7-010897a14cd0"
-authors = ["Jonatan Werpers <jonatan.werpers@it.uu.se>"]
-version = "0.1.0"
-
-[deps]
-LazyTensors = "62fbed2c-918d-11e9-279b-eb3a325b37d3"
-RegionIndices = "5d527584-97f1-11e9-084c-4540c7ecf219"
-
-[extras]
-Grids = "960fdf28-97ed-11e9-2a74-bd90bf2fab5a"
-Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
-
-[targets]
-test = ["Test", "Grids"]
--- a/SbpOperators/operators/d2_2nd.txt Fri Sep 25 14:12:10 2020 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,13 +0,0 @@
-# D2 order 2
-
-boundary_stencils
-1 -2 1
-
-inner_stencil
-1 -2 1
-
-e
-1
-
-d1
--3/2 2 -1/2
--- a/SbpOperators/operators/d2_4th.txt Fri Sep 25 14:12:10 2020 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,16 +0,0 @@ -# D2 order 4 - -boundary_stencils - 2 -5 4 -1 0 0 - 1 -2 1 0 0 0 - -4/43 59/43 -110/43 59/43 -4/43 0 - -1/49 0 59/49 -118/49 64/49 -4/49 - -inner_stencil --1/12 4/3 -5/2 4/3 -1/12 - -e -1 - -d1 --11/6 3 -3/2 1/3 \ No newline at end of file
--- a/SbpOperators/operators/h_2nd.txt Fri Sep 25 14:12:10 2020 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,7 +0,0 @@
-# H order 2
-
-closure
-1/2
-
-inner_stencil
-1
--- a/SbpOperators/operators/h_4th.txt Fri Sep 25 14:12:10 2020 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,7 +0,0 @@
-# H order 4
-
-closure
-17/48 59/48 43/48 49/48
-
-inner_stencil
-1
\ No newline at end of file
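The stencil files above are parsed by `readOperator` (from `readoperator.jl`, also removed in this changeset) into a `D2` operator; the DiffOps tests earlier in this diff use it like this (a sketch against the pre-merge `SbpOperators` package):

```julia
using SbpOperators

op = readOperator(sbp_operators_path()*"d2_4th.txt",
                  sbp_operators_path()*"h_4th.txt")

closuresize(op)        # 4: four boundary closure rows / quadrature weights
op.innerStencil        # the five-point interior stencil -1/12 4/3 -5/2 4/3 -1/12
op.quadratureClosure   # the four weights 17/48 59/48 43/48 49/48 from h_4th.txt
```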
--- a/SbpOperators/src/BoundaryValue.jl Fri Sep 25 14:12:10 2020 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,127 +0,0 @@ -""" - BoundaryValue{T,N,M,K} <: TensorMapping{T,2,1} - -Implements the boundary operator `e` as a TensorMapping -""" -struct BoundaryValue{T,N,M,K} <: TensorMapping{T,2,1} - eClosure::Stencil{T,M} - bId::CartesianBoundary -end -export BoundaryValue - -# TODO: This is obviouly strange. Is domain_size just discarded? Is there a way to avoid storing grid in BoundaryValue? -# Can we give special treatment to TensorMappings that go to a higher dim? -function LazyTensors.range_size(e::BoundaryValue{T}, domain_size::NTuple{1,Integer}) where T - if dim(e.bId) == 1 - return (UnknownDim, domain_size[1]) - elseif dim(e.bId) == 2 - return (domain_size[1], UnknownDim) - end -end -LazyTensors.domain_size(e::BoundaryValue{T}, range_size::NTuple{2,Integer}) where T = (range_size[3-dim(e.bId)],) -# TODO: Make a nicer solution for 3-dim(e.bId) - -# TODO: Make this independent of dimension -function LazyTensors.apply(e::BoundaryValue{T}, v::AbstractArray{T}, I::NTuple{2,Index}) where T - i = I[dim(e.bId)] - j = I[3-dim(e.bId)] - N_i = size(e.grid)[dim(e.bId)] - return apply_boundary_value(e.op, v[j], i, N_i, region(e.bId)) -end - -function LazyTensors.apply_transpose(e::BoundaryValue{T}, v::AbstractArray{T}, I::NTuple{1,Index}) where T - u = selectdim(v,3-dim(e.bId),Int(I[1])) - return apply_boundary_value_transpose(e.op, u, region(e.bId)) -end - -function apply_boundary_value_transpose(op::ConstantStencilOperator, v::AbstractVector, ::Type{Lower}) - @boundscheck if length(v) < closuresize(op) - throw(BoundsError()) - end - apply_stencil(op.eClosure,v,1) -end - -function apply_boundary_value_transpose(op::ConstantStencilOperator, v::AbstractVector, ::Type{Upper}) - @boundscheck if length(v) < closuresize(op) - throw(BoundsError()) - end - apply_stencil_backwards(op.eClosure,v,length(v)) -end -export apply_boundary_value_transpose - -function apply_boundary_value(op::ConstantStencilOperator, v::Number, i::Index, N::Integer, ::Type{Lower}) - @boundscheck if !(0<length(Int(i)) <= N) - throw(BoundsError()) - end - op.eClosure[Int(i)-1]*v -end - -function apply_boundary_value(op::ConstantStencilOperator, v::Number, i::Index, N::Integer, ::Type{Upper}) - @boundscheck if !(0<length(Int(i)) <= N) - throw(BoundsError()) - end - op.eClosure[N-Int(i)]*v -end -export apply_boundary_value - - -""" - BoundaryValue{T,N,M,K} <: TensorMapping{T,2,1} - -Implements the boundary operator `e` as a TensorMapping -""" -struct BoundaryValue{D,T,M,R} <: TensorMapping{T,D,1} - e:BoundaryOperator{T,M,R} - bId::CartesianBoundary -end - -function LazyTensors.apply_transpose(bv::BoundaryValue{T,M,Lower}, v::AbstractVector{T}, i::Index) where T - u = selectdim(v,3-dim(bv.bId),Int(I[1])) - return apply_transpose(bv.e, u, I) -end - - -""" - BoundaryOperator{T,N,R} <: TensorMapping{T,1,1} - -Implements the boundary operator `e` as a TensorMapping -""" -export BoundaryOperator -struct BoundaryOperator{T,M,R<:Region} <: TensorMapping{T,1,1} - closure::Stencil{T,M} -end - -function LazyTensors.range_size(e::BoundaryOperator, domain_size::NTuple{1,Integer}) - return UnknownDim -end - -LazyTensors.domain_size(e::BoundaryOperator{T}, range_size::NTuple{1,Integer}) where T = range_size - -function LazyTensors.apply_transpose(e::BoundaryOperator{T,M,Lower}, v::AbstractVector{T}, i::Index{Lower}) where T - @boundscheck if length(v) < closuresize(e) #TODO: Use domain_size here? 
- throw(BoundsError()) - end - apply_stencil(e.closure,v,Int(i)) -end - -function LazyTensors.apply_transpose(e::BoundaryOperator{T,M,Upper}}, v::AbstractVector{T}, i::Index{Upper}) where T - @boundscheck if length(v) < closuresize(e) #TODO: Use domain_size here? - throw(BoundsError()) - end - apply_stencil_backwards(e.closure,v,Int(i)) -end - -function LazyTensors.apply_transpose(e::BoundaryOperator{T}, v::AbstractVector{T}, i::Index) where T - @boundscheck if length(v) < closuresize(e) #TODO: Use domain_size here? - throw(BoundsError()) - end - return eltype(v)(0) -end - -#TODO: Implement apply in a meaningful way. Should it return a vector or a single value (perferable?) Should fit into the -function LazyTensors.apply(e::BoundaryOperator, v::AbstractVector, i::Index) - @boundscheck if !(0<length(Int(i)) <= length(v)) - throw(BoundsError()) - end - return e.closure[Int(i)].*v -end
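In terms of plain array slicing, the transposed boundary operator simply restricts a grid function to one boundary, matching the expected values in the `BoundaryValue` testset earlier in this diff. A sketch for a 4×5 grid function:

```julia
v = reshape(1.0:20.0, 4, 5)

# What e_w'*v, e_e'*v, e_s'*v and e_n'*v evaluate to for the boundary restriction:
v[1, :]    # west boundary  (direction 1, Lower): length 5
v[4, :]    # east boundary  (direction 1, Upper): length 5
v[:, 1]    # south boundary (direction 2, Lower): length 4
v[:, 5]    # north boundary (direction 2, Upper): length 4
```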
--- a/SbpOperators/src/SbpOperators.jl Fri Sep 25 14:12:10 2020 +0200
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,16 +0,0 @@
-module SbpOperators
-
-using RegionIndices
-using LazyTensors
-
-include("stencil.jl")
-include("constantstenciloperator.jl")
-include("d2.jl")
-include("readoperator.jl")
-include("laplace/secondderivative.jl")
-include("laplace/laplace.jl")
-include("quadrature/diagonal_inner_product.jl")
-include("quadrature/quadrature.jl")
-include("quadrature/inverse_diagonal_inner_product.jl")
-include("quadrature/inverse_quadrature.jl")
-end # module
--- a/SbpOperators/src/constantstenciloperator.jl Fri Sep 25 14:12:10 2020 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,79 +0,0 @@ -abstract type ConstantStencilOperator end - -# Apply for different regions Lower/Interior/Upper or Unknown region -@inline function apply_2nd_derivative(op::ConstantStencilOperator, h_inv::Real, v::AbstractVector, i::Index{Lower}) - return @inbounds h_inv*h_inv*apply_stencil(op.closureStencils[Int(i)], v, Int(i)) -end - -@inline function apply_2nd_derivative(op::ConstantStencilOperator, h_inv::Real, v::AbstractVector, i::Index{Interior}) - return @inbounds h_inv*h_inv*apply_stencil(op.innerStencil, v, Int(i)) -end - -@inline function apply_2nd_derivative(op::ConstantStencilOperator, h_inv::Real, v::AbstractVector, i::Index{Upper}) - N = length(v) - return @inbounds h_inv*h_inv*Int(op.parity)*apply_stencil_backwards(op.closureStencils[N-Int(i)+1], v, Int(i)) -end - -@inline function apply_2nd_derivative(op::ConstantStencilOperator, h_inv::Real, v::AbstractVector, index::Index{Unknown}) - N = length(v) - r = getregion(Int(index), closuresize(op), N) - i = Index(Int(index), r) - return apply_2nd_derivative(op, h_inv, v, i) -end -export apply_2nd_derivative - -apply_quadrature(op::ConstantStencilOperator, h::Real, v::T, i::Index{Lower}, N::Integer) where T = v*h*op.quadratureClosure[Int(i)] -apply_quadrature(op::ConstantStencilOperator, h::Real, v::T, i::Index{Upper}, N::Integer) where T = v*h*op.quadratureClosure[N-Int(i)+1] -apply_quadrature(op::ConstantStencilOperator, h::Real, v::T, i::Index{Interior}, N::Integer) where T = v*h - -function apply_quadrature(op::ConstantStencilOperator, h::Real, v::T, index::Index{Unknown}, N::Integer) where T - r = getregion(Int(index), closuresize(op), N) - i = Index(Int(index), r) - return apply_quadrature(op, h, v, i, N) -end -export apply_quadrature - -# TODO: Evaluate if divisions affect performance -apply_inverse_quadrature(op::ConstantStencilOperator, h_inv::Real, v::T, i::Index{Lower}, N::Integer) where T = h_inv*v/op.quadratureClosure[Int(i)] -apply_inverse_quadrature(op::ConstantStencilOperator, h_inv::Real, v::T, i::Index{Upper}, N::Integer) where T = h_inv*v/op.quadratureClosure[N-Int(i)+1] -apply_inverse_quadrature(op::ConstantStencilOperator, h_inv::Real, v::T, i::Index{Interior}, N::Integer) where T = v*h_inv - -function apply_inverse_quadrature(op::ConstantStencilOperator, h_inv::Real, v::T, index::Index{Unknown}, N::Integer) where T - r = getregion(Int(index), closuresize(op), N) - i = Index(Int(index), r) - return apply_inverse_quadrature(op, h_inv, v, i, N) -end - -export apply_inverse_quadrature - -function apply_normal_derivative_transpose(op::ConstantStencilOperator, h_inv::Real, v::AbstractVector, ::Type{Lower}) - @boundscheck if length(v) < closuresize(op) - throw(BoundsError()) - end - h_inv*apply_stencil(op.dClosure,v,1) -end - -function apply_normal_derivative_transpose(op::ConstantStencilOperator, h_inv::Real, v::AbstractVector, ::Type{Upper}) - @boundscheck if length(v) < closuresize(op) - throw(BoundsError()) - end - -h_inv*apply_stencil_backwards(op.dClosure,v,length(v)) -end - -export apply_normal_derivative_transpose - -function apply_normal_derivative(op::ConstantStencilOperator, h_inv::Real, v::Number, i::Index, N::Integer, ::Type{Lower}) - @boundscheck if !(0<length(Int(i)) <= N) - throw(BoundsError()) - end - h_inv*op.dClosure[Int(i)-1]*v -end - -function apply_normal_derivative(op::ConstantStencilOperator, h_inv::Real, v::Number, i::Index, N::Integer, ::Type{Upper}) - 
@boundscheck if !(0<length(Int(i)) <= N) - throw(BoundsError()) - end - -h_inv*op.dClosure[N-Int(i)]*v -end - -export apply_normal_derivative
--- a/SbpOperators/src/d2.jl Fri Sep 25 14:12:10 2020 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,19 +0,0 @@ -export D2, closuresize, readOperator - -@enum Parity begin - odd = -1 - even = 1 -end - -struct D2{T,N,M,K} <: ConstantStencilOperator - quadratureClosure::NTuple{M,T} - innerStencil::Stencil{T,N} - closureStencils::NTuple{M,Stencil{T,K}} - eClosure::Stencil{T,M} - dClosure::Stencil{T,M} - parity::Parity -end - -function closuresize(D::D2)::Int - return length(D.quadratureClosure) -end
--- a/SbpOperators/src/laplace/laplace.jl Fri Sep 25 14:12:10 2020 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,138 +0,0 @@ -export Laplace -""" - Laplace{Dim,T<:Real,N,M,K} <: TensorOperator{T,Dim} - -Implements the Laplace operator `L` in Dim dimensions as a tensor operator -The multi-dimensional tensor operator consists of a tuple of 1D SecondDerivative -tensor operators. -""" -#export quadrature, inverse_quadrature, boundary_quadrature, boundary_value, normal_derivative -struct Laplace{Dim,T,N,M,K} <: TensorOperator{T,Dim} - D2::NTuple{Dim,SecondDerivative{T,N,M,K}} - #TODO: Write a good constructor -end - -LazyTensors.domain_size(L::Laplace{Dim}, range_size::NTuple{Dim,Integer}) where {Dim} = range_size - -function LazyTensors.apply(L::Laplace{Dim,T}, v::AbstractArray{T,Dim}, I::Vararg{Index,Dim}) where {T,Dim} - error("not implemented") -end - -# u = L*v -function LazyTensors.apply(L::Laplace{1,T}, v::AbstractVector{T}, I::Index) where T - @inbounds u = LazyTensors.apply(L.D2[1],v,I) - return u -end - -function LazyTensors.apply(L::Laplace{2,T}, v::AbstractArray{T,2}, I::Index, J::Index) where T - # 2nd x-derivative - @inbounds vx = view(v, :, Int(J)) - @inbounds uᵢ = LazyTensors.apply(L.D2[1], vx , I) - - # 2nd y-derivative - @inbounds vy = view(v, Int(I), :) - @inbounds uᵢ += LazyTensors.apply(L.D2[2], vy , J) - - return uᵢ -end - -LazyTensors.apply_transpose(L::Laplace{Dim,T}, v::AbstractArray{T,Dim}, I::Vararg{Index,Dim}) where {T,Dim} = LazyTensors.apply(L, v, I...) - -# quadrature(L::Laplace) = Quadrature(L.op, L.grid) -# inverse_quadrature(L::Laplace) = InverseQuadrature(L.op, L.grid) -# boundary_value(L::Laplace, bId::CartesianBoundary) = BoundaryValue(L.op, L.grid, bId) -# normal_derivative(L::Laplace, bId::CartesianBoundary) = NormalDerivative(L.op, L.grid, bId) -# boundary_quadrature(L::Laplace, bId::CartesianBoundary) = BoundaryQuadrature(L.op, L.grid, bId) -# export NormalDerivative -# """ -# NormalDerivative{T,N,M,K} <: TensorMapping{T,2,1} -# -# Implements the boundary operator `d` as a TensorMapping -# """ -# struct NormalDerivative{T,N,M,K} <: TensorMapping{T,2,1} -# op::D2{T,N,M,K} -# grid::EquidistantGrid{2} -# bId::CartesianBoundary -# end -# -# # TODO: This is obviouly strange. Is domain_size just discarded? Is there a way to avoid storing grid in BoundaryValue? -# # Can we give special treatment to TensorMappings that go to a higher dim? 
-# function LazyTensors.range_size(e::NormalDerivative, domain_size::NTuple{1,Integer}) -# if dim(e.bId) == 1 -# return (UnknownDim, domain_size[1]) -# elseif dim(e.bId) == 2 -# return (domain_size[1], UnknownDim) -# end -# end -# LazyTensors.domain_size(e::NormalDerivative, range_size::NTuple{2,Integer}) = (range_size[3-dim(e.bId)],) -# -# # TODO: Not type stable D:< -# # TODO: Make this independent of dimension -# function LazyTensors.apply(d::NormalDerivative{T}, v::AbstractArray{T}, I::NTuple{2,Index}) where T -# i = I[dim(d.bId)] -# j = I[3-dim(d.bId)] -# N_i = size(d.grid)[dim(d.bId)] -# h_inv = inverse_spacing(d.grid)[dim(d.bId)] -# return apply_normal_derivative(d.op, h_inv, v[j], i, N_i, region(d.bId)) -# end -# -# function LazyTensors.apply_transpose(d::NormalDerivative{T}, v::AbstractArray{T}, I::NTuple{1,Index}) where T -# u = selectdim(v,3-dim(d.bId),Int(I[1])) -# return apply_normal_derivative_transpose(d.op, inverse_spacing(d.grid)[dim(d.bId)], u, region(d.bId)) -# end -# -# """ -# BoundaryQuadrature{T,N,M,K} <: TensorOperator{T,1} -# -# Implements the boundary operator `q` as a TensorOperator -# """ -# export BoundaryQuadrature -# struct BoundaryQuadrature{T,N,M,K} <: TensorOperator{T,1} -# op::D2{T,N,M,K} -# grid::EquidistantGrid{2} -# bId::CartesianBoundary -# end -# -# -# # TODO: Make this independent of dimension -# function LazyTensors.apply(q::BoundaryQuadrature{T}, v::AbstractArray{T,1}, I::NTuple{1,Index}) where T -# h = spacing(q.grid)[3-dim(q.bId)] -# N = size(v) -# return apply_quadrature(q.op, h, v[I[1]], I[1], N[1]) -# end -# -# LazyTensors.apply_transpose(q::BoundaryQuadrature{T}, v::AbstractArray{T,1}, I::NTuple{1,Index}) where T = LazyTensors.apply(q,v,I) -# -# -# -# -# struct Neumann{Bid<:BoundaryIdentifier} <: BoundaryCondition end -# -# function sat(L::Laplace{2,T}, bc::Neumann{Bid}, v::AbstractArray{T,2}, g::AbstractVector{T}, I::CartesianIndex{2}) where {T,Bid} -# e = boundary_value(L, Bid()) -# d = normal_derivative(L, Bid()) -# Hᵧ = boundary_quadrature(L, Bid()) -# H⁻¹ = inverse_quadrature(L) -# return (-H⁻¹*e*Hᵧ*(d'*v - g))[I] -# end -# -# struct Dirichlet{Bid<:BoundaryIdentifier} <: BoundaryCondition -# tau::Float64 -# end -# -# function sat(L::Laplace{2,T}, bc::Dirichlet{Bid}, v::AbstractArray{T,2}, g::AbstractVector{T}, i::CartesianIndex{2}) where {T,Bid} -# e = boundary_value(L, Bid()) -# d = normal_derivative(L, Bid()) -# Hᵧ = boundary_quadrature(L, Bid()) -# H⁻¹ = inverse_quadrature(L) -# return (-H⁻¹*(tau/h*e + d)*Hᵧ*(e'*v - g))[I] -# # Need to handle scalar multiplication and addition of TensorMapping -# end - -# function apply(s::MyWaveEq{D}, v::AbstractArray{T,D}, i::CartesianIndex{D}) where D - # return apply(s.L, v, i) + -# sat(s.L, Dirichlet{CartesianBoundary{1,Lower}}(s.tau), v, s.g_w, i) + -# sat(s.L, Dirichlet{CartesianBoundary{1,Upper}}(s.tau), v, s.g_e, i) + -# sat(s.L, Dirichlet{CartesianBoundary{2,Lower}}(s.tau), v, s.g_s, i) + -# sat(s.L, Dirichlet{CartesianBoundary{2,Upper}}(s.tau), v, s.g_n, i) -# end
--- a/SbpOperators/src/laplace/secondderivative.jl Fri Sep 25 14:12:10 2020 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,45 +0,0 @@ -""" - SecondDerivative{T<:Real,N,M,K} <: TensorOperator{T,1} -Implements the Laplace tensor operator `L` with constant grid spacing and coefficients -in 1D dimension -""" - -struct SecondDerivative{T,N,M,K} <: TensorOperator{T,1} - h_inv::T # The grid spacing could be included in the stencil already. Preferable? - innerStencil::Stencil{T,N} - closureStencils::NTuple{M,Stencil{T,K}} - parity::Parity - #TODO: Write a nice constructor -end -export SecondDerivative - -LazyTensors.domain_size(D2::SecondDerivative, range_size::NTuple{1,Integer}) = range_size - -#TODO: The 1D tensor mappings should not have to dispatch on 1D tuples if we write LazyTensor.apply for vararg right?!?! -# Currently have to index the Tuple{Index} in each method in order to call the stencil methods which is ugly. -# I thought I::Vararg{Index,R} fell back to just Index for R = 1 - -# Apply for different regions Lower/Interior/Upper or Unknown region -function LazyTensors.apply(D2::SecondDerivative{T}, v::AbstractVector{T}, I::Index{Lower}) where T - return @inbounds D2.h_inv*D2.h_inv*apply_stencil(D2.closureStencils[Int(I)], v, Int(I)) -end - -function LazyTensors.apply(D2::SecondDerivative{T}, v::AbstractVector{T}, I::Index{Interior}) where T - return @inbounds D2.h_inv*D2.h_inv*apply_stencil(D2.innerStencil, v, Int(I)) -end - -function LazyTensors.apply(D2::SecondDerivative{T}, v::AbstractVector{T}, I::Index{Upper}) where T - N = length(v) # TODO: Use domain_size here instead? N = domain_size(D2,size(v)) - return @inbounds D2.h_inv*D2.h_inv*Int(D2.parity)*apply_stencil_backwards(D2.closureStencils[N-Int(I)+1], v, Int(I)) -end - -function LazyTensors.apply(D2::SecondDerivative{T}, v::AbstractVector{T}, index::Index{Unknown}) where T - N = length(v) # TODO: Use domain_size here instead? - r = getregion(Int(index), closuresize(D2), N) - I = Index(Int(index), r) - return LazyTensors.apply(D2, v, I) -end - -LazyTensors.apply_transpose(D2::SecondDerivative{T}, v::AbstractVector{T}, I::Index) where {T} = LazyTensors.apply(D2, v, I) - -closuresize(D2::SecondDerivative{T,N,M,K}) where {T<:Real,N,M,K} = M
--- a/SbpOperators/src/quadrature/diagonal_inner_product.jl Fri Sep 25 14:12:10 2020 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,41 +0,0 @@ -export DiagonalInnerProduct, closuresize -""" - DiagonalInnerProduct{Dim,T<:Real,N,M,K} <: TensorMapping{T,Dim,Dim} - -Implements the diagnoal norm operator `H` of Dim dimension as a TensorMapping -""" -struct DiagonalInnerProduct{T,M} <: TensorOperator{T,1} - h::T # The grid spacing could be included in the stencil already. Preferable? - closure::NTuple{M,T} - #TODO: Write a nice constructor -end - -LazyTensors.domain_size(H::DiagonalInnerProduct, range_size::NTuple{1,Integer}) = range_size - -function LazyTensors.apply(H::DiagonalInnerProduct{T}, v::AbstractVector{T}, I::Index) where T - return @inbounds apply(H, v, I) -end - -function LazyTensors.apply(H::DiagonalInnerProduct{T}, v::AbstractVector{T}, I::Index{Lower}) where T - return @inbounds H.h*H.closure[Int(I)]*v[Int(I)] -end - -function LazyTensors.apply(H::DiagonalInnerProduct{T},v::AbstractVector{T}, I::Index{Upper}) where T - N = length(v); - return @inbounds H.h*H.closure[N-Int(I)+1]v[Int(I)] -end - -function LazyTensors.apply(H::DiagonalInnerProduct{T}, v::AbstractVector{T}, I::Index{Interior}) where T - return @inbounds H.h*v[Int(I)] -end - -function LazyTensors.apply(H::DiagonalInnerProduct{T}, v::AbstractVector{T}, index::Index{Unknown}) where T - N = length(v); - r = getregion(Int(index), closuresize(H), N) - i = Index(Int(index), r) - return LazyTensors.apply(H, v, i) -end - -LazyTensors.apply_transpose(H::DiagonalInnerProduct{T}, v::AbstractVector{T}, I::Index) where T = LazyTensors.apply(H,v,I) - -closuresize(H::DiagonalInnerProduct{T,M}) where {T,M} = M
--- a/SbpOperators/src/quadrature/inverse_diagonal_inner_product.jl Fri Sep 25 14:12:10 2020 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,36 +0,0 @@ -export InverseDiagonalInnerProduct, closuresize -""" - InverseDiagonalInnerProduct{Dim,T<:Real,M} <: TensorOperator{T,1} - -Implements the inverse diagonal inner product operator `Hi` of as a 1D TensorOperator -""" -struct InverseDiagonalInnerProduct{T<:Real,M} <: TensorOperator{T,1} - h_inv::T # The reciprocl grid spacing could be included in the stencil already. Preferable? - closure::NTuple{M,T} - #TODO: Write a nice constructor -end - -function LazyTensors.apply(Hi::InverseDiagonalInnerProduct{T}, v::AbstractVector{T}, I::Index{Lower}) where T - return @inbounds Hi.h_inv*Hi.closure[Int(I)]*v[Int(I)] -end - -function LazyTensors.apply(Hi::InverseDiagonalInnerProduct{T}, v::AbstractVector{T}, I::Index{Upper}) where T - N = length(v); - return @inbounds Hi.h_inv*Hi.closure[N-Int(I)+1]v[Int(I)] -end - -function LazyTensors.apply(Hi::InverseDiagonalInnerProduct{T}, v::AbstractVector{T}, I::Index{Interior}) where T - return @inbounds Hi.h_inv*v[Int(I)] -end - -function LazyTensors.apply(Hi::InverseDiagonalInnerProduct, v::AbstractVector{T}, index::Index{Unknown}) where T - N = length(v); - r = getregion(Int(index), closuresize(Hi), N) - i = Index(Int(index), r) - return LazyTensors.apply(Hi, v, i) -end - -LazyTensors.apply_transpose(Hi::InverseDiagonalInnerProduct{T}, v::AbstractVector{T}, I::Index) where T = LazyTensors.apply(Hi,v,I) - - -closuresize(Hi::InverseDiagonalInnerProduct{T,M}) where {T,M} = M
--- a/SbpOperators/src/quadrature/inverse_quadrature.jl Fri Sep 25 14:12:10 2020 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,34 +0,0 @@ -export InverseQuadrature -""" - InverseQuadrature{Dim,T<:Real,M,K} <: TensorMapping{T,Dim,Dim} - -Implements the inverse quadrature operator `Qi` of Dim dimension as a TensorOperator -The multi-dimensional tensor operator consists of a tuple of 1D InverseDiagonalInnerProduct -tensor operators. -""" -struct InverseQuadrature{Dim,T<:Real,M} <: TensorOperator{T,Dim} - Hi::NTuple{Dim,InverseDiagonalInnerProduct{T,M}} -end - -LazyTensors.domain_size(Qi::InverseQuadrature{Dim}, range_size::NTuple{Dim,Integer}) where Dim = range_size - -function LazyTensors.apply(Qi::InverseQuadrature{Dim,T}, v::AbstractArray{T,Dim}, I::Vararg{Index,Dim}) where {T,Dim} - error("not implemented") -end - -@inline function LazyTensors.apply(Qi::InverseQuadrature{1,T}, v::AbstractVector{T}, I::Index) where T - @inbounds q = apply(Qi.Hi[1], v , I) - return q -end - -@inline function LazyTensors.apply(Qi::InverseQuadrature{2,T}, v::AbstractArray{T,2}, I::Index, J::Index) where T - # InverseQuadrature in x direction - @inbounds vx = view(v, :, Int(J)) - @inbounds qx_inv = apply(Qi.Hi[1], vx , I) - # InverseQuadrature in y-direction - @inbounds vy = view(v, Int(I), :) - @inbounds qy_inv = apply(Qi.Hi[2], vy, J) - return qx_inv*qy_inv -end - -LazyTensors.apply_transpose(Qi::InverseQuadrature{Dim,T}, v::AbstractArray{T,Dim}, I::Vararg{Index,Dim}) where {Dim,T} = LazyTensors.apply(Qi,v,I...)
--- a/SbpOperators/src/quadrature/quadrature.jl Fri Sep 25 14:12:10 2020 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,34 +0,0 @@ -export Quadrature -""" - Quadrature{Dim,T<:Real,N,M,K} <: TensorMapping{T,Dim,Dim} - -Implements the quadrature operator `Q` of Dim dimension as a TensorMapping -The multi-dimensional tensor operator consists of a tuple of 1D DiagonalInnerProduct H -tensor operators. -""" -struct Quadrature{Dim,T<:Real,M} <: TensorOperator{T,Dim} - H::NTuple{Dim,DiagonalInnerProduct{T,M}} -end - -LazyTensors.domain_size(Q::Quadrature{Dim}, range_size::NTuple{Dim,Integer}) where {Dim} = range_size - -function LazyTensors.apply(Q::Quadrature{Dim,T}, v::AbstractArray{T,Dim}, I::Vararg{Index,Dim}) where {T,Dim} - error("not implemented") -end - -function LazyTensors.apply(Q::Quadrature{1,T}, v::AbstractVector{T}, I::Index) where T - @inbounds q = apply(Q.H[1], v , I) - return q -end - -function LazyTensors.apply(Q::Quadrature{2,T}, v::AbstractArray{T,2}, I::Index, J::Index) where T - # Quadrature in x direction - @inbounds vx = view(v, :, Int(J)) - @inbounds qx = apply(Q.H[1], vx , I) - # Quadrature in y-direction - @inbounds vy = view(v, Int(I), :) - @inbounds qy = apply(Q.H[2], vy, J) - return qx*qy -end - -LazyTensors.apply_transpose(Q::Quadrature{Dim,T}, v::AbstractArray{T,Dim}, I::Vararg{Index,Dim}) where {Dim,T} = LazyTensors.apply(Q,v,I...)
--- a/SbpOperators/src/readoperator.jl Fri Sep 25 14:12:10 2020 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,80 +0,0 @@ -function readOperator(D2fn, Hfn) - d = readSectionedFile(D2fn) - h = readSectionedFile(Hfn) - - # Create inner stencil - innerStencilWeights = stringToTuple(Float64, d["inner_stencil"][1]) - width = length(innerStencilWeights) - r = (-div(width,2), div(width,2)) - - innerStencil = Stencil(r, innerStencilWeights) - - # Create boundary stencils - boundarySize = length(d["boundary_stencils"]) - closureStencils = Vector{typeof(innerStencil)}() # TBD: is the the right way to get the correct type? - - for i ∈ 1:boundarySize - stencilWeights = stringToTuple(Float64, d["boundary_stencils"][i]) - width = length(stencilWeights) - r = (1-i,width-i) - closureStencils = (closureStencils..., Stencil(r, stencilWeights)) - end - - quadratureClosure = pad_tuple(stringToTuple(Float64, h["closure"][1]), boundarySize) - eClosure = Stencil((0,boundarySize-1), pad_tuple(stringToTuple(Float64, d["e"][1]), boundarySize)) - dClosure = Stencil((0,boundarySize-1), pad_tuple(stringToTuple(Float64, d["d1"][1]), boundarySize)) - - d2 = D2( - quadratureClosure, - innerStencil, - closureStencils, - eClosure, - dClosure, - even - ) - - return d2 -end - -function readSectionedFile(filename)::Dict{String, Vector{String}} - f = open(filename) - sections = Dict{String, Vector{String}}() - currentKey = "" - - for ln ∈ eachline(f) - if ln == "" || ln[1] == '#' # Skip comments and empty lines - continue - end - - if isletter(ln[1]) # Found start of new section - if ~haskey(sections, ln) - sections[ln] = Vector{String}() - end - currentKey = ln - continue - end - - push!(sections[currentKey], ln) - end - - return sections -end - -function stringToTuple(T::DataType, s::String) - return Tuple(stringToVector(T,s)) -end - -function stringToVector(T::DataType, s::String) - return T.(eval.(Meta.parse.(split(s)))) -end - -function pad_tuple(t::NTuple{N, T}, n::Integer) where {N,T} - if N >= n - return t - else - return pad_tuple((t..., zero(T)), n) - end -end - -sbp_operators_path() = (@__DIR__) * "/../operators/" -export sbp_operators_path
--- a/SbpOperators/src/stencil.jl Fri Sep 25 14:12:10 2020 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,38 +0,0 @@ -struct Stencil{T<:Real,N} - range::Tuple{Int,Int} - weights::NTuple{N,T} - - function Stencil(range::Tuple{Int,Int},weights::NTuple{N,T}) where {T <: Real, N} - @assert range[2]-range[1]+1 == N - new{T,N}(range,weights) - end -end - -function flip(s::Stencil) - range = (-s.range[2], -s.range[1]) - return Stencil(range, reverse(s.weights)) -end - -# Provides index into the Stencil based on offset for the root element -@inline function Base.getindex(s::Stencil, i::Int) - @boundscheck if i < s.range[1] || s.range[2] < i - return eltype(s.weights)(0) - end - return s.weights[1 + i - s.range[1]] -end - -Base.@propagate_inbounds @inline function apply_stencil(s::Stencil{T,N}, v::AbstractVector, i::Int) where {T,N} - w = s.weights[1]*v[i + s.range[1]] - @simd for k ∈ 2:N - w += s.weights[k]*v[i + s.range[1] + k-1] - end - return w -end - -Base.@propagate_inbounds @inline function apply_stencil_backwards(s::Stencil{T,N}, v::AbstractVector, i::Int) where {T,N} - w = s.weights[N]*v[i - s.range[2]] - @simd for k ∈ N-1:-1:1 - w += s.weights[k]*v[i - s.range[1] - k + 1] - end - return w -end
--- a/SbpOperators/test/runtests.jl Fri Sep 25 14:12:10 2020 +0200 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,382 +0,0 @@ -using Test -using SbpOperators -using Grids -using RegionIndices -using LazyTensors - -# @testset "apply_quadrature" begin -# op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") -# h = 0.5 -# -# @test apply_quadrature(op, h, 1.0, 10, 100) == h -# -# N = 10 -# qc = op.quadratureClosure -# q = h.*(qc..., ones(N-2*closuresize(op))..., reverse(qc)...) -# @assert length(q) == N -# -# for i ∈ 1:N -# @test apply_quadrature(op, h, 1.0, i, N) == q[i] -# end -# -# v = [2.,3.,2.,4.,5.,4.,3.,4.,5.,4.5] -# for i ∈ 1:N -# @test apply_quadrature(op, h, v[i], i, N) == q[i]*v[i] -# end -# end - -@testset "SecondDerivative" begin - op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") - L = 3.5 - g = EquidistantGrid((101,), (0.0,), (L,)) - h_inv = inverse_spacing(g) - h = 1/h_inv[1]; - Dₓₓ = SecondDerivative(h_inv[1],op.innerStencil,op.closureStencils,op.parity) - - f0(x::Float64) = 1. - f1(x::Float64) = x - f2(x::Float64) = 1/2*x^2 - f3(x::Float64) = 1/6*x^3 - f4(x::Float64) = 1/24*x^4 - f5(x::Float64) = sin(x) - f5ₓₓ(x::Float64) = -f5(x) - - v0 = evalOn(g,f0) - v1 = evalOn(g,f1) - v2 = evalOn(g,f2) - v3 = evalOn(g,f3) - v4 = evalOn(g,f4) - v5 = evalOn(g,f5) - - @test Dₓₓ isa TensorOperator{T,1} where T - @test Dₓₓ' isa TensorMapping{T,1,1} where T - - # TODO: Should perhaps set tolerance level for isapporx instead? - # Are these tolerance levels resonable or should tests be constructed - # differently? - equalitytol = 0.5*1e-10 - accuracytol = 0.5*1e-3 - # 4th order interior stencil, 2nd order boundary stencil, - # implies that L*v should be exact for v - monomial up to order 3. - # Exact differentiation is measured point-wise. For other grid functions - # the error is measured in the l2-norm. - @test all(abs.(collect(Dₓₓ*v0)) .<= equalitytol) - @test all(abs.(collect(Dₓₓ*v1)) .<= equalitytol) - @test all(abs.((collect(Dₓₓ*v2) - v0)) .<= equalitytol) - @test all(abs.((collect(Dₓₓ*v3) - v1)) .<= equalitytol) - e4 = collect(Dₓₓ*v4) - v2 - e5 = collect(Dₓₓ*v5) + v5 - @test sqrt(h*sum(collect(e4.^2))) <= accuracytol - @test sqrt(h*sum(collect(e5.^2))) <= accuracytol -end - -@testset "Laplace2D" begin - op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") - Lx = 1.5 - Ly = 3.2 - g = EquidistantGrid((102,131), (0.0, 0.0), (Lx,Ly)) - - h_inv = inverse_spacing(g) - h = spacing(g) - D_xx = SecondDerivative(h_inv[1],op.innerStencil,op.closureStencils,op.parity) - D_yy = SecondDerivative(h_inv[2],op.innerStencil,op.closureStencils,op.parity) - L = Laplace((D_xx,D_yy)) - - f0(x::Float64,y::Float64) = 2. - f1(x::Float64,y::Float64) = x+y - f2(x::Float64,y::Float64) = 1/2*x^2 + 1/2*y^2 - f3(x::Float64,y::Float64) = 1/6*x^3 + 1/6*y^3 - f4(x::Float64,y::Float64) = 1/24*x^4 + 1/24*y^4 - f5(x::Float64,y::Float64) = sin(x) + cos(y) - f5ₓₓ(x::Float64,y::Float64) = -f5(x,y) - - v0 = evalOn(g,f0) - v1 = evalOn(g,f1) - v2 = evalOn(g,f2) - v3 = evalOn(g,f3) - v4 = evalOn(g,f4) - v5 = evalOn(g,f5) - v5ₓₓ = evalOn(g,f5ₓₓ) - - @test L isa TensorOperator{T,2} where T - @test L' isa TensorMapping{T,2,2} where T - - # TODO: Should perhaps set tolerance level for isapporx instead? - # Are these tolerance levels resonable or should tests be constructed - # differently? 
- equalitytol = 0.5*1e-10 - accuracytol = 0.5*1e-3 - # 4th order interior stencil, 2nd order boundary stencil, - # implies that L*v should be exact for v - monomial up to order 3. - # Exact differentiation is measured point-wise. For other grid functions - # the error is measured in the H-norm. - @test all(abs.(collect(L*v0)) .<= equalitytol) - @test all(abs.(collect(L*v1)) .<= equalitytol) - @test all(collect(L*v2) .≈ v0) # Seems to be more accurate - @test all(abs.((collect(L*v3) - v1)) .<= equalitytol) - e4 = collect(L*v4) - v2 - e5 = collect(L*v5) - v5ₓₓ - @test sqrt(prod(h)*sum(collect(e4.^2))) <= accuracytol - @test sqrt(prod(h)*sum(collect(e5.^2))) <= accuracytol -end - -@testset "DiagonalInnerProduct" begin - op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") - L = 2.3 - g = EquidistantGrid((77,), (0.0,), (L,)) - h = spacing(g) - H = DiagonalInnerProduct(h[1],op.quadratureClosure) - v = ones(Float64, size(g)) - - @test H isa TensorOperator{T,1} where T - @test H' isa TensorMapping{T,1,1} where T - @test sum(collect(H*v)) ≈ L - @test collect(H*v) == collect(H'*v) -end - -@testset "Quadrature" begin - op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") - Lx = 2.3 - Ly = 5.2 - g = EquidistantGrid((77,66), (0.0, 0.0), (Lx,Ly)) - - h = spacing(g) - Hx = DiagonalInnerProduct(h[1],op.quadratureClosure); - Hy = DiagonalInnerProduct(h[2],op.quadratureClosure); - Q = Quadrature((Hx,Hy)) - - v = ones(Float64, size(g)) - - @test Q isa TensorOperator{T,2} where T - @test Q' isa TensorMapping{T,2,2} where T - @test sum(collect(Q*v)) ≈ (Lx*Ly) - @test collect(Q*v) == collect(Q'*v) -end - -@testset "InverseDiagonalInnerProduct" begin - op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") - L = 2.3 - g = EquidistantGrid((77,), (0.0,), (L,)) - h = spacing(g) - H = DiagonalInnerProduct(h[1],op.quadratureClosure) - - h_i = inverse_spacing(g) - Hi = InverseDiagonalInnerProduct(h_i[1],1 ./ op.quadratureClosure) - v = evalOn(g, x->sin(x)) - - @test Hi isa TensorOperator{T,1} where T - @test Hi' isa TensorMapping{T,1,1} where T - @test collect(Hi*H*v) ≈ v - @test collect(Hi*v) == collect(Hi'*v) -end - -@testset "InverseQuadrature" begin - op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") - Lx = 7.3 - Ly = 8.2 - g = EquidistantGrid((77,66), (0.0, 0.0), (Lx,Ly)) - - h = spacing(g) - Hx = DiagonalInnerProduct(h[1], op.quadratureClosure); - Hy = DiagonalInnerProduct(h[2], op.quadratureClosure); - Q = Quadrature((Hx,Hy)) - - hi = inverse_spacing(g) - Hix = InverseDiagonalInnerProduct(hi[1], 1 ./ op.quadratureClosure); - Hiy = InverseDiagonalInnerProduct(hi[2], 1 ./ op.quadratureClosure); - Qinv = InverseQuadrature((Hix,Hiy)) - v = evalOn(g, (x,y)-> x^2 + (y-1)^2 + x*y) - - @test Qinv isa TensorOperator{T,2} where T - @test Qinv' isa TensorMapping{T,2,2} where T - @test collect(Qinv*Q*v) ≈ v - @test collect(Qinv*v) == collect(Qinv'*v) -end -# -# @testset "BoundaryValue" begin -# op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") -# g = EquidistantGrid((4,5), (0.0, 0.0), (1.0,1.0)) -# -# e_w = BoundaryValue(op, g, CartesianBoundary{1,Lower}()) -# e_e = BoundaryValue(op, g, CartesianBoundary{1,Upper}()) -# e_s = BoundaryValue(op, g, CartesianBoundary{2,Lower}()) -# e_n = BoundaryValue(op, g, CartesianBoundary{2,Upper}()) -# -# v = zeros(Float64, 4, 5) -# v[:,5] = [1, 2, 3,4] -# v[:,4] = [1, 2, 3,4] -# v[:,3] = [4, 5, 6, 7] -# v[:,2] = [7, 8, 
9, 10] -# v[:,1] = [10, 11, 12, 13] -# -# @test e_w isa TensorMapping{T,2,1} where T -# @test e_w' isa TensorMapping{T,1,2} where T -# -# @test domain_size(e_w, (3,2)) == (2,) -# @test domain_size(e_e, (3,2)) == (2,) -# @test domain_size(e_s, (3,2)) == (3,) -# @test domain_size(e_n, (3,2)) == (3,) -# -# @test size(e_w'*v) == (5,) -# @test size(e_e'*v) == (5,) -# @test size(e_s'*v) == (4,) -# @test size(e_n'*v) == (4,) -# -# @test collect(e_w'*v) == [10,7,4,1.0,1] -# @test collect(e_e'*v) == [13,10,7,4,4.0] -# @test collect(e_s'*v) == [10,11,12,13.0] -# @test collect(e_n'*v) == [1,2,3,4.0] -# -# g_x = [1,2,3,4.0] -# g_y = [5,4,3,2,1.0] -# -# G_w = zeros(Float64, (4,5)) -# G_w[1,:] = g_y -# -# G_e = zeros(Float64, (4,5)) -# G_e[4,:] = g_y -# -# G_s = zeros(Float64, (4,5)) -# G_s[:,1] = g_x -# -# G_n = zeros(Float64, (4,5)) -# G_n[:,5] = g_x -# -# @test size(e_w*g_y) == (UnknownDim,5) -# @test size(e_e*g_y) == (UnknownDim,5) -# @test size(e_s*g_x) == (4,UnknownDim) -# @test size(e_n*g_x) == (4,UnknownDim) -# -# # These tests should be moved to where they are possible (i.e we know what the grid should be) -# @test_broken collect(e_w*g_y) == G_w -# @test_broken collect(e_e*g_y) == G_e -# @test_broken collect(e_s*g_x) == G_s -# @test_broken collect(e_n*g_x) == G_n -# end -# -# @testset "NormalDerivative" begin -# op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") -# g = EquidistantGrid((5,6), (0.0, 0.0), (4.0,5.0)) -# -# d_w = NormalDerivative(op, g, CartesianBoundary{1,Lower}()) -# d_e = NormalDerivative(op, g, CartesianBoundary{1,Upper}()) -# d_s = NormalDerivative(op, g, CartesianBoundary{2,Lower}()) -# d_n = NormalDerivative(op, g, CartesianBoundary{2,Upper}()) -# -# -# v = evalOn(g, (x,y)-> x^2 + (y-1)^2 + x*y) -# v∂x = evalOn(g, (x,y)-> 2*x + y) -# v∂y = evalOn(g, (x,y)-> 2*(y-1) + x) -# -# @test d_w isa TensorMapping{T,2,1} where T -# @test d_w' isa TensorMapping{T,1,2} where T -# -# @test domain_size(d_w, (3,2)) == (2,) -# @test domain_size(d_e, (3,2)) == (2,) -# @test domain_size(d_s, (3,2)) == (3,) -# @test domain_size(d_n, (3,2)) == (3,) -# -# @test size(d_w'*v) == (6,) -# @test size(d_e'*v) == (6,) -# @test size(d_s'*v) == (5,) -# @test size(d_n'*v) == (5,) -# -# @test collect(d_w'*v) ≈ v∂x[1,:] -# @test collect(d_e'*v) ≈ v∂x[5,:] -# @test collect(d_s'*v) ≈ v∂y[:,1] -# @test collect(d_n'*v) ≈ v∂y[:,6] -# -# -# d_x_l = zeros(Float64, 5) -# d_x_u = zeros(Float64, 5) -# for i ∈ eachindex(d_x_l) -# d_x_l[i] = op.dClosure[i-1] -# d_x_u[i] = -op.dClosure[length(d_x_u)-i] -# end -# -# d_y_l = zeros(Float64, 6) -# d_y_u = zeros(Float64, 6) -# for i ∈ eachindex(d_y_l) -# d_y_l[i] = op.dClosure[i-1] -# d_y_u[i] = -op.dClosure[length(d_y_u)-i] -# end -# -# function prod_matrix(x,y) -# G = zeros(Float64, length(x), length(y)) -# for I ∈ CartesianIndices(G) -# G[I] = x[I[1]]*y[I[2]] -# end -# -# return G -# end -# -# g_x = [1,2,3,4.0,5] -# g_y = [5,4,3,2,1.0,11] -# -# G_w = prod_matrix(d_x_l, g_y) -# G_e = prod_matrix(d_x_u, g_y) -# G_s = prod_matrix(g_x, d_y_l) -# G_n = prod_matrix(g_x, d_y_u) -# -# -# @test size(d_w*g_y) == (UnknownDim,6) -# @test size(d_e*g_y) == (UnknownDim,6) -# @test size(d_s*g_x) == (5,UnknownDim) -# @test size(d_n*g_x) == (5,UnknownDim) -# -# # These tests should be moved to where they are possible (i.e we know what the grid should be) -# @test_broken collect(d_w*g_y) ≈ G_w -# @test_broken collect(d_e*g_y) ≈ G_e -# @test_broken collect(d_s*g_x) ≈ G_s -# @test_broken collect(d_n*g_x) ≈ G_n -# end -# -# @testset "BoundaryQuadrature" begin 
-# op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") -# g = EquidistantGrid((10,11), (0.0, 0.0), (1.0,1.0)) -# -# H_w = BoundaryQuadrature(op, g, CartesianBoundary{1,Lower}()) -# H_e = BoundaryQuadrature(op, g, CartesianBoundary{1,Upper}()) -# H_s = BoundaryQuadrature(op, g, CartesianBoundary{2,Lower}()) -# H_n = BoundaryQuadrature(op, g, CartesianBoundary{2,Upper}()) -# -# v = evalOn(g, (x,y)-> x^2 + (y-1)^2 + x*y) -# -# function get_quadrature(N) -# qc = op.quadratureClosure -# q = (qc..., ones(N-2*closuresize(op))..., reverse(qc)...) -# @assert length(q) == N -# return q -# end -# -# v_w = v[1,:] -# v_e = v[10,:] -# v_s = v[:,1] -# v_n = v[:,11] -# -# q_x = spacing(g)[1].*get_quadrature(10) -# q_y = spacing(g)[2].*get_quadrature(11) -# -# @test H_w isa TensorOperator{T,1} where T -# -# @test domain_size(H_w, (3,)) == (3,) -# @test domain_size(H_n, (3,)) == (3,) -# -# @test range_size(H_w, (3,)) == (3,) -# @test range_size(H_n, (3,)) == (3,) -# -# @test size(H_w*v_w) == (11,) -# @test size(H_e*v_e) == (11,) -# @test size(H_s*v_s) == (10,) -# @test size(H_n*v_n) == (10,) -# -# @test collect(H_w*v_w) ≈ q_y.*v_w -# @test collect(H_e*v_e) ≈ q_y.*v_e -# @test collect(H_s*v_s) ≈ q_x.*v_s -# @test collect(H_n*v_n) ≈ q_x.*v_n -# -# @test collect(H_w'*v_w) == collect(H_w'*v_w) -# @test collect(H_e'*v_e) == collect(H_e'*v_e) -# @test collect(H_s'*v_s) == collect(H_s'*v_s) -# @test collect(H_n'*v_n) == collect(H_n'*v_n) -# end
--- a/sbp.jl Fri Sep 25 14:12:10 2020 +0200 +++ b/sbp.jl Sat Sep 26 15:22:13 2020 +0200 @@ -1,9 +1,9 @@ module sbp -using Grids -using RegionIndices -using SbpOperators -using DiffOps +using Sbplib.Grids +using Sbplib.RegionIndices +using Sbplib.SbpOperators +using Sbplib.DiffOps include("TimeStepper.jl") end # module
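For downstream scripts, the practical effect of this merge is that the former standalone packages become submodules of a single package. A minimal loading sketch, assuming the top-level module is named `Sbplib` as the rewritten `using` statements above indicate (the top-level module file itself is not part of this hunk):

# Before the merge each component was its own package:
#   using Grids, SbpOperators
# After the merge everything lives in one package with submodules:
using Sbplib
using Sbplib.Grids
using Sbplib.SbpOperators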
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/DiffOps/DiffOps.jl Sat Sep 26 15:22:13 2020 +0200 @@ -0,0 +1,102 @@ +module DiffOps + +using Sbplib.RegionIndices +using Sbplib.SbpOperators +using Sbplib.Grids +using Sbplib.LazyTensors + +""" + DiffOp + +Supertype of differential operator discretisations. +The action of the DiffOp is defined in the method + apply(D::DiffOp, v::AbstractVector, I...) +""" +abstract type DiffOp end + +function apply end + +function matrixRepresentation(D::DiffOp) + error("not implemented") +end + +abstract type DiffOpCartesian{Dim} <: DiffOp end + +# DiffOp must have a grid of dimension Dim!!! +function apply!(D::DiffOpCartesian{Dim}, u::AbstractArray{T,Dim}, v::AbstractArray{T,Dim}) where {T,Dim} + for I ∈ eachindex(D.grid) + u[I] = apply(D, v, I) + end + + return nothing +end +export apply! + +function apply_region!(D::DiffOpCartesian{2}, u::AbstractArray{T,2}, v::AbstractArray{T,2}) where T + apply_region!(D, u, v, Lower, Lower) + apply_region!(D, u, v, Lower, Interior) + apply_region!(D, u, v, Lower, Upper) + apply_region!(D, u, v, Interior, Lower) + apply_region!(D, u, v, Interior, Interior) + apply_region!(D, u, v, Interior, Upper) + apply_region!(D, u, v, Upper, Lower) + apply_region!(D, u, v, Upper, Interior) + apply_region!(D, u, v, Upper, Upper) + return nothing +end + +# Maybe this should be split according to b3fbef345810 after all?! Seems like it makes performance more predictable +function apply_region!(D::DiffOpCartesian{2}, u::AbstractArray{T,2}, v::AbstractArray{T,2}, r1::Type{<:Region}, r2::Type{<:Region}) where T + for I ∈ regionindices(D.grid.size, closuresize(D.op), (r1,r2)) + @inbounds indextuple = (Index{r1}(I[1]), Index{r2}(I[2])) + @inbounds u[I] = apply(D, v, indextuple) + end + return nothing +end +export apply_region! + +function apply_tiled!(D::DiffOpCartesian{2}, u::AbstractArray{T,2}, v::AbstractArray{T,2}) where T + apply_region_tiled!(D, u, v, Lower, Lower) + apply_region_tiled!(D, u, v, Lower, Interior) + apply_region_tiled!(D, u, v, Lower, Upper) + apply_region_tiled!(D, u, v, Interior, Lower) + apply_region_tiled!(D, u, v, Interior, Interior) + apply_region_tiled!(D, u, v, Interior, Upper) + apply_region_tiled!(D, u, v, Upper, Lower) + apply_region_tiled!(D, u, v, Upper, Interior) + apply_region_tiled!(D, u, v, Upper, Upper) + return nothing +end + +using TiledIteration +function apply_region_tiled!(D::DiffOpCartesian{2}, u::AbstractArray{T,2}, v::AbstractArray{T,2}, r1::Type{<:Region}, r2::Type{<:Region}) where T + ri = regionindices(D.grid.size, closuresize(D.op), (r1,r2)) + # TODO: Pass Tilesize to function + for tileaxs ∈ TileIterator(axes(ri), padded_tilesize(T, (5,5), 2)) + for j ∈ tileaxs[2], i ∈ tileaxs[1] + I = ri[i,j] + u[I] = apply(D, v, (Index{r1}(I[1]), Index{r2}(I[2]))) + end + end + return nothing +end +export apply_region_tiled! + +function apply(D::DiffOp, v::AbstractVector)::AbstractVector + u = zeros(eltype(v), size(v)) + apply!(D,v,u) + return u +end + +# TODO: This conflicts with LazyTensors. Shouldn't DiffOps be LazyTensorOperators and use that apply? +# export apply + + +""" +A BoundaryCondition should implement the method + sat(::DiffOp, v::AbstractArray, data::AbstractArray, ...) +""" +abstract type BoundaryCondition end + + +end # module
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/Grids/AbstractGrid.jl Sat Sep 26 15:22:13 2020 +0200 @@ -0,0 +1,24 @@ +""" + AbstractGrid + +Should implement + dimension(grid::AbstractGrid) + points(grid::AbstractGrid) + +""" +abstract type AbstractGrid end + +function dimension end +function points end +export dimension, points + +""" + evalOn(g::AbstractGrid, f::Function) + +Evaluate function f on the grid g +""" +function evalOn(g::AbstractGrid, f::Function) + F(x) = f(x...) + return F.(points(g)) +end +export evalOn
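As a rough sketch of how this interface is meant to be used, the following hypothetical grid type (not part of the changeset) implements `dimension` and `points` and thereby gets `evalOn` for free. `AbstractGrid` is not exported, so it is accessed by a qualified name here; the package is assumed to load as `Sbplib`.

using Sbplib.Grids: AbstractGrid, evalOn
import Sbplib.Grids: dimension, points

# Hypothetical 1D grid on [0,1] with n equally spaced points
struct UnitIntervalGrid <: AbstractGrid
    n::Int
end
dimension(g::UnitIntervalGrid) = 1
points(g::UnitIntervalGrid) = [((i-1)/(g.n-1),) for i in 1:g.n]  # 1-tuples, as evalOn expects

g = UnitIntervalGrid(5)
evalOn(g, x -> x^2)  # [0.0, 0.0625, 0.25, 0.5625, 1.0]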
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Grids/EquidistantGrid.jl	Sat Sep 26 15:22:13 2020 +0200
@@ -0,0 +1,73 @@
+# EquidistantGrid is a grid with equidistant grid spacing per coordinate
+# direction. The domain is defined through the two points P1 = x̄₁, P2 = x̄₂
+# by the exterior product of the vectors obtained by projecting (x̄₂-x̄₁) onto
+# the coordinate directions. E.g. for a 2D grid with x̄₁=(-1,0) and x̄₂=(1,2)
+# the domain is defined as (-1,1)x(0,2).
+
+export EquidistantGrid
+
+struct EquidistantGrid{Dim,T<:Real} <: AbstractGrid
+    size::NTuple{Dim, Int} # First coordinate direction stored first
+    limit_lower::NTuple{Dim, T}
+    limit_upper::NTuple{Dim, T}
+    inverse_spacing::NTuple{Dim, T} # Reciprocal of grid spacing
+
+    # General constructor
+    function EquidistantGrid(size::NTuple{Dim, Int}, limit_lower::NTuple{Dim, T}, limit_upper::NTuple{Dim, T}) where Dim where T
+        @assert all(size.>0)
+        @assert all(limit_upper.-limit_lower .!= 0)
+        inverse_spacing = (size.-1)./ abs.(limit_upper.-limit_lower)
+        return new{Dim,T}(size, limit_lower, limit_upper, inverse_spacing)
+    end
+end
+
+function EquidistantGrid(size::Int, limit_lower::T, limit_upper::T) where T
+    return EquidistantGrid((size,),(limit_lower,),(limit_upper,))
+end
+
+function Base.eachindex(grid::EquidistantGrid)
+    CartesianIndices(grid.size)
+end
+
+Base.size(g::EquidistantGrid) = g.size
+
+# Returns the number of dimensions of an EquidistantGrid.
+#
+# @Input: grid - an EquidistantGrid
+# @Return: dimension - The dimension of the grid
+function dimension(grid::EquidistantGrid)
+    return length(grid.size)
+end
+
+# Returns the reciprocal of the spacing of the grid
+#
+function inverse_spacing(grid::EquidistantGrid)
+    return grid.inverse_spacing
+end
+export inverse_spacing
+
+# Returns the spacing of the grid
+#
+# TODO: Evaluate if divisions affect performance
+function spacing(grid::EquidistantGrid)
+    return 1.0./grid.inverse_spacing
+end
+export spacing
+
+# Computes the points of an EquidistantGrid as an array of tuples with
+# the same dimension as the grid.
+#
+# @Input: grid - an EquidistantGrid
+# @Return: points - the points of the grid.
+function points(grid::EquidistantGrid)
+    # TODO: Make this return an abstract array?
+    indices = Tuple.(CartesianIndices(grid.size))
+    h = spacing(grid)
+    return broadcast(I -> grid.limit_lower .+ (I.-1).*h, indices)
+end
+
+function pointsalongdim(grid::EquidistantGrid, dim::Integer)
+    @assert dim<=dimension(grid)
+    @assert dim>0
+    points = collect(range(grid.limit_lower[dim],stop=grid.limit_upper[dim],length=grid.size[dim]))
+end
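A small usage sketch (assuming the package loads as `Sbplib`); the values in the comments follow directly from the definitions above.

using Sbplib.Grids

# 3-by-4 grid on the rectangle (0,1) x (0,2)
g = EquidistantGrid((3,4), (0.0, 0.0), (1.0, 2.0))

size(g)             # (3, 4)
inverse_spacing(g)  # (2.0, 1.5) = (size .- 1) ./ (limit_upper .- limit_lower)
spacing(g)          # (0.5, 0.6666...)

points(g)                   # 3x4 array of coordinate tuples (x, y)
evalOn(g, (x, y) -> x + y)  # grid function sampled at every point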
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/Grids/Grids.jl Sat Sep 26 15:22:13 2020 +0200 @@ -0,0 +1,17 @@ +module Grids + +using Sbplib.RegionIndices + +export BoundaryIdentifier, CartesianBoundary + +abstract type BoundaryIdentifier end +struct CartesianBoundary{Dim, R<:Region} <: BoundaryIdentifier end +dim(::CartesianBoundary{Dim, R}) where {Dim, R} = Dim +region(::CartesianBoundary{Dim, R}) where {Dim, R} = R + +export dim, region + +include("AbstractGrid.jl") +include("EquidistantGrid.jl") + +end # module
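`CartesianBoundary` simply tags one side of a grid with a coordinate direction and a region; a brief sketch (again assuming the package loads as `Sbplib`):

using Sbplib.Grids
using Sbplib.RegionIndices

west = CartesianBoundary{1,Lower}()   # lower end of the first coordinate direction
north = CartesianBoundary{2,Upper}()  # upper end of the second coordinate direction

dim(west), region(west)    # (1, Lower)
dim(north), region(north)  # (2, Upper)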
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/LazyTensors/LazyTensors.jl Sat Sep 26 15:22:13 2020 +0200 @@ -0,0 +1,7 @@ +module LazyTensors +using Sbplib.RegionIndices +include("tensor_mapping.jl") +include("lazy_array.jl") +include("lazy_tensor_operations.jl") + +end # module
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/LazyTensors/lazy_array.jl	Sat Sep 26 15:22:13 2020 +0200
@@ -0,0 +1,93 @@
+"""
+    LazyArray{T,D} <: AbstractArray{T,D}
+
+Array which is calculated lazily when indexed.
+
+A subtype of `LazyArray` will use lazy versions of `+`, `-`, `*`, `/`.
+"""
+abstract type LazyArray{T,D} <: AbstractArray{T,D} end
+export LazyArray
+
+struct LazyConstantArray{T,D} <: LazyArray{T,D}
+    val::T
+    size::NTuple{D,Int}
+end
+
+Base.size(lca::LazyConstantArray) = lca.size
+Base.getindex(lca::LazyConstantArray{T,D}, I::Vararg{Int,D}) where {T,D} = lca.val
+
+"""
+    LazyElementwiseOperation{T,D,Op} <: LazyArray{T,D}
+Struct allowing for lazy evaluation of elementwise operations on AbstractArrays.
+
+A LazyElementwiseOperation contains two arrays together with an operation.
+The operations are carried out when the LazyElementwiseOperation is indexed.
+"""
+struct LazyElementwiseOperation{T,D,Op} <: LazyArray{T,D}
+    a::AbstractArray{T,D}
+    b::AbstractArray{T,D}
+
+    function LazyElementwiseOperation{T,D,Op}(a::AbstractArray{T,D},b::AbstractArray{T,D}) where {T,D,Op}
+        @boundscheck if size(a) != size(b)
+            throw(DimensionMismatch("dimensions must match"))
+        end
+        return new{T,D,Op}(a,b)
+    end
+
+    LazyElementwiseOperation{T,D,Op}(a::AbstractArray{T,D},b::T) where {T,D,Op} = new{T,D,Op}(a, LazyConstantArray(b, size(a)))
+    LazyElementwiseOperation{T,D,Op}(a::T,b::AbstractArray{T,D}) where {T,D,Op} = new{T,D,Op}(LazyConstantArray(a, size(b)), b)
+end
+# TODO: Move Op to be the first parameter? Compare to Binary operations
+
+Base.size(v::LazyElementwiseOperation) = size(v.a)
+
+evaluate(leo::LazyElementwiseOperation{T,D,:+}, I::Vararg{Int,D}) where {T,D} = leo.a[I...] + leo.b[I...]
+evaluate(leo::LazyElementwiseOperation{T,D,:-}, I::Vararg{Int,D}) where {T,D} = leo.a[I...] - leo.b[I...]
+evaluate(leo::LazyElementwiseOperation{T,D,:*}, I::Vararg{Int,D}) where {T,D} = leo.a[I...] * leo.b[I...]
+evaluate(leo::LazyElementwiseOperation{T,D,:/}, I::Vararg{Int,D}) where {T,D} = leo.a[I...] / leo.b[I...]
+
+# TODO: Make sure boundschecking is done properly and that the lengths of the vectors are equal
+# NOTE: Boundschecking in getindex functions now assumes that the size of the
+# vectors in the LazyElementwiseOperation are the same size. If we remove the
+# size assertion in the constructor we might have to handle
+# boundschecking differently.
+Base.@propagate_inbounds @inline function Base.getindex(leo::LazyElementwiseOperation{T,D}, I::Vararg{Int,D}) where {T,D}
+    @boundscheck if !checkbounds(Bool, leo.a, I...)
+        throw(BoundsError([leo], I...))
+    end
+    return evaluate(leo, I...)
+end
+
+# Define lazy operations for AbstractArrays. An operation constructs a LazyElementwiseOperation, which
+# can later be indexed into. Lazy operations are denoted by the usual operator followed by a tilde.
+Base.@propagate_inbounds +̃(a::AbstractArray{T,D}, b::AbstractArray{T,D}) where {T,D} = LazyElementwiseOperation{T,D,:+}(a,b)
+Base.@propagate_inbounds -̃(a::AbstractArray{T,D}, b::AbstractArray{T,D}) where {T,D} = LazyElementwiseOperation{T,D,:-}(a,b)
+Base.@propagate_inbounds *̃(a::AbstractArray{T,D}, b::AbstractArray{T,D}) where {T,D} = LazyElementwiseOperation{T,D,:*}(a,b)
+Base.@propagate_inbounds /̃(a::AbstractArray{T,D}, b::AbstractArray{T,D}) where {T,D} = LazyElementwiseOperation{T,D,:/}(a,b)
+
+Base.@propagate_inbounds +̃(a::AbstractArray{T,D}, b::T) where {T,D} = LazyElementwiseOperation{T,D,:+}(a,b)
+Base.@propagate_inbounds -̃(a::AbstractArray{T,D}, b::T) where {T,D} = LazyElementwiseOperation{T,D,:-}(a,b)
+Base.@propagate_inbounds *̃(a::AbstractArray{T,D}, b::T) where {T,D} = LazyElementwiseOperation{T,D,:*}(a,b)
+Base.@propagate_inbounds /̃(a::AbstractArray{T,D}, b::T) where {T,D} = LazyElementwiseOperation{T,D,:/}(a,b)
+
+Base.@propagate_inbounds +̃(a::T, b::AbstractArray{T,D}) where {T,D} = LazyElementwiseOperation{T,D,:+}(a,b)
+Base.@propagate_inbounds -̃(a::T, b::AbstractArray{T,D}) where {T,D} = LazyElementwiseOperation{T,D,:-}(a,b)
+Base.@propagate_inbounds *̃(a::T, b::AbstractArray{T,D}) where {T,D} = LazyElementwiseOperation{T,D,:*}(a,b)
+Base.@propagate_inbounds /̃(a::T, b::AbstractArray{T,D}) where {T,D} = LazyElementwiseOperation{T,D,:/}(a,b)
+
+
+
+# NOTE: Is it awkward that we have, for example, * instead of .*?
+# Unclear whether this can even be solved.
+Base.@propagate_inbounds Base.:+(a::LazyArray{T,D}, b::LazyArray{T,D}) where {T,D} = a +̃ b
+Base.@propagate_inbounds Base.:+(a::LazyArray{T,D}, b::AbstractArray{T,D}) where {T,D} = a +̃ b
+Base.@propagate_inbounds Base.:+(a::AbstractArray{T,D}, b::LazyArray{T,D}) where {T,D} = a +̃ b
+
+Base.@propagate_inbounds Base.:-(a::LazyArray{T,D}, b::LazyArray{T,D}) where {T,D} = a -̃ b
Base.@propagate_inbounds Base.:-(a::LazyArray{T,D}, b::AbstractArray{T,D}) where {T,D} = a -̃ b
+Base.@propagate_inbounds Base.:-(a::AbstractArray{T,D}, b::LazyArray{T,D}) where {T,D} = a -̃ b
+
+# Elementwise operations for `*` and `/` are not overloaded due to conflicts with the behavior
+# of regular `*` and `/` for AbstractArrays. Use the tilde versions instead.
+
+export +̃, -̃, *̃, /̃
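A short sketch of the lazy elementwise operations defined above (assuming the package loads as `Sbplib`): a tilde operator builds a `LazyElementwiseOperation`, and nothing is computed until the result is indexed.

using Sbplib.LazyTensors

u = [1.0, 2.0, 3.0]
v = [4.0, 5.0, 6.0]

w = u +̃ v        # LazyElementwiseOperation, no work done yet
w[2]             # 7.0, evaluated on demand
collect(w)       # [5.0, 7.0, 9.0]

collect(2.0 *̃ v) # [8.0, 10.0, 12.0]; the scalar is wrapped in a LazyConstantArray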
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/LazyTensors/lazy_tensor_operations.jl	Sat Sep 26 15:22:13 2020 +0200
@@ -0,0 +1,101 @@
+"""
+    LazyTensorMappingApplication{T,R,D} <: LazyArray{T,R}
+
+Struct for lazy application of a TensorMapping. Created using `*`.
+
+Allows the result of a `TensorMapping` applied to a vector to be treated as an `AbstractArray`.
+With a mapping `m` and a vector `v` the LazyTensorMappingApplication object can be created by `m*v`.
+The actual result will be calculated when indexing into `m*v`.
+"""
+struct LazyTensorMappingApplication{T,R,D} <: LazyArray{T,R}
+    t::TensorMapping{T,R,D}
+    o::AbstractArray{T,D}
+end
+export LazyTensorMappingApplication
+
+Base.:*(tm::TensorMapping{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = LazyTensorMappingApplication(tm,o)
+Base.getindex(ta::LazyTensorMappingApplication{T,R,D}, I::Vararg{Index,R}) where {T,R,D} = apply(ta.t, ta.o, I...)
+Base.getindex(ta::LazyTensorMappingApplication{T,R,D}, I::Vararg{Int,R}) where {T,R,D} = apply(ta.t, ta.o, Index{Unknown}.(I)...)
+Base.size(ta::LazyTensorMappingApplication{T,R,D}) where {T,R,D} = range_size(ta.t,size(ta.o))
+# TODO: What else is needed to implement the AbstractArray interface?
+
+# # We need the associativity to be a→b→c = a→(b→c), which is the case for '→'
+Base.:*(a::TensorMapping{T,R,D}, b::TensorMapping{T,D,K}, args::Union{TensorMapping{T}, AbstractArray{T}}...) where {T,R,D,K} = foldr(*,(a,b,args...))
+# # Should we overload some other infix binary operator?
+# →(tm::TensorMapping{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = LazyTensorMappingApplication(tm,o)
+# TODO: We need to be really careful about good error messages.
+# For example what happens if you try to multiply LazyTensorMappingApplication with a TensorMapping (wrong order)?
+
+"""
+    LazyTensorMappingTranspose{T,R,D} <: TensorMapping{T,D,R}
+
+Struct for lazy transpose of a TensorMapping.
+
+If a mapping implements the `apply_transpose` method, this allows working with
+the transpose of mapping `m` by using `m'`. `m'` will work as a regular TensorMapping lazily calling
+the appropriate methods of `m`.
+"""
+struct LazyTensorMappingTranspose{T,R,D} <: TensorMapping{T,D,R}
+    tm::TensorMapping{T,R,D}
+end
+export LazyTensorMappingTranspose
+
+# # TBD: Should this be implemented on a type by type basis or through a trait to provide earlier errors?
+Base.adjoint(tm::TensorMapping) = LazyTensorMappingTranspose(tm)
+Base.adjoint(tmt::LazyTensorMappingTranspose) = tmt.tm
+
+apply(tmt::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{T,R}, I::Vararg{Index,D}) where {T,R,D} = apply_transpose(tmt.tm, v, I...)
+apply_transpose(tmt::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{T,D}, I::Vararg{Index,R}) where {T,R,D} = apply(tmt.tm, v, I...)
+
+range_size(tmt::LazyTensorMappingTranspose{T,R,D}, d_size::NTuple{R,Integer}) where {T,R,D} = domain_size(tmt.tm, d_size)
+domain_size(tmt::LazyTensorMappingTranspose{T,R,D}, r_size::NTuple{D,Integer}) where {T,R,D} = range_size(tmt.tm, r_size)
+
+
+
+
+struct LazyTensorMappingBinaryOperation{Op,T,R,D,T1<:TensorMapping{T,R,D},T2<:TensorMapping{T,R,D}} <: TensorMapping{T,D,R}
+    tm1::T1
+    tm2::T2
+
+    @inline function LazyTensorMappingBinaryOperation{Op,T,R,D}(tm1::T1,tm2::T2) where {Op,T,R,D, T1<:TensorMapping{T,R,D},T2<:TensorMapping{T,R,D}}
+        return new{Op,T,R,D,T1,T2}(tm1,tm2)
+    end
+end
+
+apply(tmBinOp::LazyTensorMappingBinaryOperation{:+,T,R,D}, v::AbstractArray{T,D}, I::Vararg{Index,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) + apply(tmBinOp.tm2, v, I...)
+apply(tmBinOp::LazyTensorMappingBinaryOperation{:-,T,R,D}, v::AbstractArray{T,D}, I::Vararg{Index,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) - apply(tmBinOp.tm2, v, I...)
+
+range_size(tmBinOp::LazyTensorMappingBinaryOperation{Op,T,R,D}, domain_size::NTuple{D,Integer}) where {Op,T,R,D} = range_size(tmBinOp.tm1, domain_size)
+domain_size(tmBinOp::LazyTensorMappingBinaryOperation{Op,T,R,D}, range_size::NTuple{R,Integer}) where {Op,T,R,D} = domain_size(tmBinOp.tm2, range_size)
+
+Base.:+(tm1::TensorMapping{T,R,D}, tm2::TensorMapping{T,R,D}) where {T,R,D} = LazyTensorMappingBinaryOperation{:+,T,R,D}(tm1,tm2)
+Base.:-(tm1::TensorMapping{T,R,D}, tm2::TensorMapping{T,R,D}) where {T,R,D} = LazyTensorMappingBinaryOperation{:-,T,R,D}(tm1,tm2)
+
+
+# TODO: Write tests and documentation for LazyTensorMappingComposition
+# struct LazyTensorMappingComposition{T,R,K,D} <: TensorMapping{T,R,D}
+#     t1::TensorMapping{T,R,K}
+#     t2::TensorMapping{T,K,D}
+# end
+
+# Base.:∘(s::TensorMapping{T,R,K}, t::TensorMapping{T,K,D}) where {T,R,K,D} = LazyTensorMappingComposition(s,t)
+
+# function range_size(tm::LazyTensorMappingComposition{T,R,K,D}, domain_size::NTuple{D,Integer}) where {T,R,K,D}
+#     range_size(tm.t1, domain_size(tm.t2, domain_size))
+# end
+
+# function domain_size(tm::LazyTensorMappingComposition{T,R,K,D}, range_size::NTuple{R,Integer}) where {T,R,K,D}
+#     domain_size(tm.t1, domain_size(tm.t2, range_size))
+# end
+
+# function apply(c::LazyTensorMappingComposition{T,R,K,D}, v::AbstractArray{T,D}, I::NTuple{R,Int}) where {T,R,K,D}
+#     apply(c.t1, LazyTensorMappingApplication(c.t2,v), I...)
+# end
+
+# function apply_transpose(c::LazyTensorMappingComposition{T,R,K,D}, v::AbstractArray{T,D}, I::NTuple{D,Int}) where {T,R,K,D}
+#     apply_transpose(c.t2, LazyTensorMappingApplication(c.t1',v), I...)
+# end
+
+# # Have I gone too crazy with the type parameters? Maybe they aren't all needed?
+
+# export →
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/LazyTensors/tensor_mapping.jl Sat Sep 26 15:22:13 2020 +0200 @@ -0,0 +1,83 @@ +""" + TensorMapping{T,R,D} + +Describes a mapping of a D dimension tensor to an R dimension tensor. +The action of the mapping is implemented through the method + + apply(t::TensorMapping{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {R,D,T} + +The size of range tensor should be dependent on the size of the domain tensor +and the type should implement the methods + + range_size(::TensorMapping{T,R,D}, domain_size::NTuple{D,Integer}) where {T,R,D} + domain_size(::TensorMapping{T,R,D}, range_size::NTuple{R,Integer}) where {T,R,D} + +to allow querying for one or the other. + +Optionally the action of the transpose may be defined through + apply_transpose(t::TensorMapping{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {R,D,T} +""" +abstract type TensorMapping{T,R,D} end +export TensorMapping + +""" + apply(t::TensorMapping{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {R,D,T} + +Return the result of the mapping for a given index. +""" +function apply end +export apply + +""" + apply_transpose(t::TensorMapping{T,R,D}, v::AbstractArray{T,R}, I::Vararg) where {R,D,T} + +Return the result of the transposed mapping for a given index. +""" +function apply_transpose end +export apply_transpose + +""" +Return the dimension of the range space of a given mapping +""" +range_dim(::TensorMapping{T,R,D}) where {T,R,D} = R + +""" +Return the dimension of the domain space of a given mapping +""" +domain_dim(::TensorMapping{T,R,D}) where {T,R,D} = D + +export range_dim, domain_dim + +""" + range_size(M::TensorMapping, domain_size) + +Return the resulting range size for the mapping applied to a given domain_size +""" +function range_size end + +""" + domain_size(M::TensorMapping, range_size) + +Return the resulting domain size for the mapping applied to a given range_size +""" +function domain_size end + +""" + Dummy type for representing dimensions of tensormappings when domain_size is unknown +""" +struct UnknownDim end +export range_size, domain_size, TensorMappingDim, UnknownDim + +# TODO: Think about boundschecking! + + +""" + TensorOperator{T,D} + +A `TensorMapping{T,D,D}` where the range and domain tensor have the same number of +dimensions and the same size. +""" +abstract type TensorOperator{T,D} <: TensorMapping{T,D,D} end +export TensorOperator +domain_size(::TensorOperator{T,D}, range_size::NTuple{D,Integer}) where {T,D} = range_size +range_size(::TensorOperator{T,D}, domain_size::NTuple{D,Integer}) where {T,D} = domain_size
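To make the interface concrete, here is a hypothetical toy mapping (not part of the changeset) that scales a 1D grid function by a constant. `TensorOperator{T,1}` already supplies `range_size`/`domain_size`, so only `apply` (and optionally `apply_transpose`) needs to be implemented; the lazy machinery from lazy_tensor_operations.jl then provides `*`, `'` and operator addition. The package is assumed to load as `Sbplib`.

using Sbplib.LazyTensors
using Sbplib.RegionIndices
import Sbplib.LazyTensors: apply, apply_transpose

# Hypothetical operator: multiply every value of a 1D grid function by λ.
struct ScalingOperator{T} <: TensorOperator{T,1}
    λ::T
end

# One method covering all index regions is enough for this toy example;
# Int(i) recovers the raw index from the region-tagged Index.
apply(s::ScalingOperator{T}, v::AbstractVector{T}, i::Index) where T = s.λ*v[Int(i)]
apply_transpose(s::ScalingOperator{T}, v::AbstractVector{T}, i::Index) where T = apply(s, v, i)

v = [1.0, 2.0, 3.0]
S = ScalingOperator(2.0)

w = S*v              # LazyTensorMappingApplication; nothing computed yet
w[1]                 # 2.0
collect(S'*v)        # [2.0, 4.0, 6.0], via apply_transpose
collect((S + S)*v)   # [4.0, 8.0, 12.0], a LazyTensorMappingBinaryOperation
collect(S*v + v)     # [3.0, 6.0, 9.0], mixes with the LazyArray arithmetic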
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/RegionIndices/RegionIndices.jl	Sat Sep 26 15:22:13 2020 +0200
@@ -0,0 +1,85 @@
+module RegionIndices
+
+abstract type Region end
+struct Interior <: Region end
+struct Lower <: Region end
+struct Upper <: Region end
+struct Unknown <: Region end
+
+export Region, Interior, Lower, Upper, Unknown
+
+struct Index{R<:Region, T<:Integer}
+    i::T
+
+    Index{R,T}(i::T) where {R<:Region,T<:Integer} = new{R,T}(i)
+    Index{R}(i::T) where {R<:Region,T<:Integer} = new{R,T}(i)
+    Index(i::T, ::Type{R}) where {R<:Region,T<:Integer} = Index{R,T}(i)
+    Index(t::Tuple{T, DataType}) where {R<:Region,T<:Integer} = Index{t[2],T}(t[1]) # TBD: This is not very specific in what types are allowed in t[2]. Can this be fixed?
+end
+
+export Index
+
+# Index(R::Type{<:Region}) = Index{R}
+
+## Want to be able to write
+## IndexTupleType(Int, (Lower, Interior))
+Index(R::Type{<:Region}, T::Type{<:Integer}) = Index{R,T}
+IndexTupleType(T::Type{<:Integer},R::NTuple{N, DataType} where N) = Tuple{Index.(R, T)...}
+
+Base.convert(::Type{T}, i::Index{R,T} where R) where T = i.i
+Base.convert(::Type{CartesianIndex}, I::NTuple{N,Index} where N) = CartesianIndex(convert.(Int, I))
+
+Base.Int(I::Index) = I.i
+Base.to_index(I::Index) = Int(I) # How to get this to work for all cases??
+Base.getindex(A::AbstractArray{T,N}, I::NTuple{N,Index}) where {T,N} = A[I...] # Is this ok??
+
+function Index(i::Integer, boundary_width::Integer, dim_size::Integer)
+    return Index{getregion(i,boundary_width,dim_size)}(i)
+end
+
+IndexTuple(t::Vararg{Tuple{T, DataType}}) where T<:Integer = Index.(t)
+export IndexTuple
+
+# TODO: Use the values of the region structs, e.g. Lower(), for the region parameter instead of the types.
+# For example the following works:
+# (Lower(),Upper()) isa NTuple{2, Region} -> true
+# typeof((Lower(),Upper())) -> Tuple{Lower,Upper}
+function regionindices(gridsize::NTuple{Dim,Integer}, closuresize::Integer, region::NTuple{Dim,DataType}) where Dim
+    return regionindices(gridsize, ntuple(x->closuresize,Dim), region)
+end
+
+function regionindices(gridsize::NTuple{Dim,Integer}, closuresize::NTuple{Dim,Integer}, region::NTuple{Dim,DataType}) where Dim
+    regions = map(getrange,gridsize,closuresize,region)
+    return CartesianIndices(regions)
+end
+
+export regionindices
+
+function getregion(i::Integer, boundary_width::Integer, dim_size::Integer)
+    if 0 < i <= boundary_width
+        return Lower
+    elseif boundary_width < i <= dim_size-boundary_width
+        return Interior
+    elseif dim_size-boundary_width < i <= dim_size
+        return Upper
+    else
+        error("Bounds error") # TODO: Make this more standard
+    end
+end
+
+export getregion
+
+function getrange(gridsize::Integer, closuresize::Integer, region::DataType)
+    if region == Lower
+        r = 1:closuresize
+    elseif region == Interior
+        r = (closuresize+1):(gridsize - closuresize)
+    elseif region == Upper
+        r = (gridsize - closuresize + 1):gridsize
+    else
+        error("Unspecified region")
+    end
+    return r
+end
+
+end # module
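A brief sketch of how the region machinery is used on its own (assuming the package loads as `Sbplib`):

using Sbplib.RegionIndices

# Classify indices of a 1D grid of size 10 with closure width 2
getregion(1, 2, 10)   # Lower
getregion(5, 2, 10)   # Interior
getregion(10, 2, 10)  # Upper

# An Index carries its region in the type; Int recovers the raw index
i = Index(7, Interior)  # an Index{Interior, Int}
Int(i)                  # 7

# Cartesian indices of the (Lower, Interior) block of an 8x8 grid
regionindices((8,8), 2, (Lower, Interior))  # CartesianIndices((1:2, 3:6))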
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/SbpOperators/BoundaryValue.jl	Sat Sep 26 15:22:13 2020 +0200
@@ -0,0 +1,127 @@
+"""
+    BoundaryValue{T,N,M,K} <: TensorMapping{T,2,1}
+
+Implements the boundary operator `e` as a TensorMapping
+"""
+struct BoundaryValue{T,N,M,K} <: TensorMapping{T,2,1}
+    eClosure::Stencil{T,M}
+    bId::CartesianBoundary
+end
+export BoundaryValue
+
+# TODO: This is obviously strange. Is domain_size just discarded? Is there a way to avoid storing grid in BoundaryValue?
+# Can we give special treatment to TensorMappings that go to a higher dim?
+function LazyTensors.range_size(e::BoundaryValue{T}, domain_size::NTuple{1,Integer}) where T
+    if dim(e.bId) == 1
+        return (UnknownDim, domain_size[1])
+    elseif dim(e.bId) == 2
+        return (domain_size[1], UnknownDim)
+    end
+end
+LazyTensors.domain_size(e::BoundaryValue{T}, range_size::NTuple{2,Integer}) where T = (range_size[3-dim(e.bId)],)
+# TODO: Make a nicer solution for 3-dim(e.bId)
+
+# TODO: Make this independent of dimension
+function LazyTensors.apply(e::BoundaryValue{T}, v::AbstractArray{T}, I::NTuple{2,Index}) where T
+    i = I[dim(e.bId)]
+    j = I[3-dim(e.bId)]
+    N_i = size(e.grid)[dim(e.bId)]
+    return apply_boundary_value(e.op, v[j], i, N_i, region(e.bId))
+end
+
+function LazyTensors.apply_transpose(e::BoundaryValue{T}, v::AbstractArray{T}, I::NTuple{1,Index}) where T
+    u = selectdim(v,3-dim(e.bId),Int(I[1]))
+    return apply_boundary_value_transpose(e.op, u, region(e.bId))
+end
+
+function apply_boundary_value_transpose(op::ConstantStencilOperator, v::AbstractVector, ::Type{Lower})
+    @boundscheck if length(v) < closuresize(op)
+        throw(BoundsError())
+    end
+    apply_stencil(op.eClosure,v,1)
+end
+
+function apply_boundary_value_transpose(op::ConstantStencilOperator, v::AbstractVector, ::Type{Upper})
+    @boundscheck if length(v) < closuresize(op)
+        throw(BoundsError())
+    end
+    apply_stencil_backwards(op.eClosure,v,length(v))
+end
+export apply_boundary_value_transpose
+
+function apply_boundary_value(op::ConstantStencilOperator, v::Number, i::Index, N::Integer, ::Type{Lower})
+    @boundscheck if !(0 < Int(i) <= N)
+        throw(BoundsError())
+    end
+    op.eClosure[Int(i)-1]*v
+end
+
+function apply_boundary_value(op::ConstantStencilOperator, v::Number, i::Index, N::Integer, ::Type{Upper})
+    @boundscheck if !(0 < Int(i) <= N)
+        throw(BoundsError())
+    end
+    op.eClosure[N-Int(i)]*v
+end
+export apply_boundary_value
+
+
+"""
+    BoundaryValue{D,T,M,R} <: TensorMapping{T,D,1}
+
+Implements the boundary operator `e` as a TensorMapping
+"""
+struct BoundaryValue{D,T,M,R} <: TensorMapping{T,D,1}
+    e::BoundaryOperator{T,M,R}
+    bId::CartesianBoundary
+end
+
+function LazyTensors.apply_transpose(bv::BoundaryValue{D,T,M,Lower}, v::AbstractVector{T}, i::Index) where {D,T,M}
+    u = selectdim(v,3-dim(bv.bId),Int(i))
+    return apply_transpose(bv.e, u, i)
+end
+
+
+"""
+    BoundaryOperator{T,M,R} <: TensorMapping{T,1,1}
+
+Implements the boundary operator `e` as a TensorMapping
+"""
+export BoundaryOperator
+struct BoundaryOperator{T,M,R<:Region} <: TensorMapping{T,1,1}
+    closure::Stencil{T,M}
+end
+
+function LazyTensors.range_size(e::BoundaryOperator, domain_size::NTuple{1,Integer})
+    return UnknownDim
+end
+
+LazyTensors.domain_size(e::BoundaryOperator{T}, range_size::NTuple{1,Integer}) where T = range_size
+
+function LazyTensors.apply_transpose(e::BoundaryOperator{T,M,Lower}, v::AbstractVector{T}, i::Index{Lower}) where {T,M}
+    @boundscheck if length(v) < closuresize(e) #TODO: Use domain_size here?
+        throw(BoundsError())
+    end
+    apply_stencil(e.closure,v,Int(i))
+end
+
+function LazyTensors.apply_transpose(e::BoundaryOperator{T,M,Upper}, v::AbstractVector{T}, i::Index{Upper}) where {T,M}
+    @boundscheck if length(v) < closuresize(e) #TODO: Use domain_size here?
+        throw(BoundsError())
+    end
+    apply_stencil_backwards(e.closure,v,Int(i))
+end
+
+function LazyTensors.apply_transpose(e::BoundaryOperator{T}, v::AbstractVector{T}, i::Index) where T
+    @boundscheck if length(v) < closuresize(e) #TODO: Use domain_size here?
+        throw(BoundsError())
+    end
+    return eltype(v)(0)
+end
+
+#TODO: Implement apply in a meaningful way. Should it return a vector or a single value (preferable?) Should fit into the
+function LazyTensors.apply(e::BoundaryOperator, v::AbstractVector, i::Index)
+    @boundscheck if !(0 < Int(i) <= length(v))
+        throw(BoundsError())
+    end
+    return e.closure[Int(i)].*v
+end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/SbpOperators/SbpOperators.jl Sat Sep 26 15:22:13 2020 +0200 @@ -0,0 +1,16 @@ +module SbpOperators + +using Sbplib.RegionIndices +using Sbplib.LazyTensors + +include("stencil.jl") +include("constantstenciloperator.jl") +include("d2.jl") +include("readoperator.jl") +include("laplace/secondderivative.jl") +include("laplace/laplace.jl") +include("quadrature/diagonal_inner_product.jl") +include("quadrature/quadrature.jl") +include("quadrature/inverse_diagonal_inner_product.jl") +include("quadrature/inverse_quadrature.jl") +end # module
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/SbpOperators/constantstenciloperator.jl	Sat Sep 26 15:22:13 2020 +0200
@@ -0,0 +1,79 @@
+abstract type ConstantStencilOperator end
+
+# Apply for different regions Lower/Interior/Upper or Unknown region
+@inline function apply_2nd_derivative(op::ConstantStencilOperator, h_inv::Real, v::AbstractVector, i::Index{Lower})
+    return @inbounds h_inv*h_inv*apply_stencil(op.closureStencils[Int(i)], v, Int(i))
+end
+
+@inline function apply_2nd_derivative(op::ConstantStencilOperator, h_inv::Real, v::AbstractVector, i::Index{Interior})
+    return @inbounds h_inv*h_inv*apply_stencil(op.innerStencil, v, Int(i))
+end
+
+@inline function apply_2nd_derivative(op::ConstantStencilOperator, h_inv::Real, v::AbstractVector, i::Index{Upper})
+    N = length(v)
+    return @inbounds h_inv*h_inv*Int(op.parity)*apply_stencil_backwards(op.closureStencils[N-Int(i)+1], v, Int(i))
+end
+
+@inline function apply_2nd_derivative(op::ConstantStencilOperator, h_inv::Real, v::AbstractVector, index::Index{Unknown})
+    N = length(v)
+    r = getregion(Int(index), closuresize(op), N)
+    i = Index(Int(index), r)
+    return apply_2nd_derivative(op, h_inv, v, i)
+end
+export apply_2nd_derivative
+
+apply_quadrature(op::ConstantStencilOperator, h::Real, v::T, i::Index{Lower}, N::Integer) where T = v*h*op.quadratureClosure[Int(i)]
+apply_quadrature(op::ConstantStencilOperator, h::Real, v::T, i::Index{Upper}, N::Integer) where T = v*h*op.quadratureClosure[N-Int(i)+1]
+apply_quadrature(op::ConstantStencilOperator, h::Real, v::T, i::Index{Interior}, N::Integer) where T = v*h
+
+function apply_quadrature(op::ConstantStencilOperator, h::Real, v::T, index::Index{Unknown}, N::Integer) where T
+    r = getregion(Int(index), closuresize(op), N)
+    i = Index(Int(index), r)
+    return apply_quadrature(op, h, v, i, N)
+end
+export apply_quadrature
+
+# TODO: Evaluate if divisions affect performance
+apply_inverse_quadrature(op::ConstantStencilOperator, h_inv::Real, v::T, i::Index{Lower}, N::Integer) where T = h_inv*v/op.quadratureClosure[Int(i)]
+apply_inverse_quadrature(op::ConstantStencilOperator, h_inv::Real, v::T, i::Index{Upper}, N::Integer) where T = h_inv*v/op.quadratureClosure[N-Int(i)+1]
+apply_inverse_quadrature(op::ConstantStencilOperator, h_inv::Real, v::T, i::Index{Interior}, N::Integer) where T = v*h_inv
+
+function apply_inverse_quadrature(op::ConstantStencilOperator, h_inv::Real, v::T, index::Index{Unknown}, N::Integer) where T
+    r = getregion(Int(index), closuresize(op), N)
+    i = Index(Int(index), r)
+    return apply_inverse_quadrature(op, h_inv, v, i, N)
+end
+
+export apply_inverse_quadrature
+
+function apply_normal_derivative_transpose(op::ConstantStencilOperator, h_inv::Real, v::AbstractVector, ::Type{Lower})
+    @boundscheck if length(v) < closuresize(op)
+        throw(BoundsError())
+    end
+    h_inv*apply_stencil(op.dClosure,v,1)
+end
+
+function apply_normal_derivative_transpose(op::ConstantStencilOperator, h_inv::Real, v::AbstractVector, ::Type{Upper})
+    @boundscheck if length(v) < closuresize(op)
+        throw(BoundsError())
+    end
+    -h_inv*apply_stencil_backwards(op.dClosure,v,length(v))
+end
+
+export apply_normal_derivative_transpose
+
+function apply_normal_derivative(op::ConstantStencilOperator, h_inv::Real, v::Number, i::Index, N::Integer, ::Type{Lower})
+    @boundscheck if !(0 < Int(i) <= N)
+        throw(BoundsError())
+    end
+    h_inv*op.dClosure[Int(i)-1]*v
+end
+
+function apply_normal_derivative(op::ConstantStencilOperator, h_inv::Real, v::Number, i::Index, N::Integer, ::Type{Upper})
+    @boundscheck if !(0 < Int(i) <= N)
+        throw(BoundsError())
+    end
+    -h_inv*op.dClosure[N-Int(i)]*v
+end
+
+export apply_normal_derivative
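A sketch of how the free functions above are meant to be called on a constant-stencil operator. It uses `readOperator` and `sbp_operators_path`, which are defined later in this changeset; the grid size and grid function are made up for illustration:

    using Sbplib.SbpOperators
    using Sbplib.RegionIndices

    op = readOperator(sbp_operators_path()*"d2_4th.txt", sbp_operators_path()*"h_4th.txt")
    N = 101
    h = 1/(N-1)
    v = sin.(range(0, stop=1, length=N))

    # Second derivative at an interior point and at a point inside the lower closure;
    # Index{Unknown} lets apply_2nd_derivative resolve the region itself via getregion.
    apply_2nd_derivative(op, 1/h, v, Index{Unknown}(50))
    apply_2nd_derivative(op, 1/h, v, Index{Unknown}(2))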
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/SbpOperators/d2.jl Sat Sep 26 15:22:13 2020 +0200 @@ -0,0 +1,19 @@ +export D2, closuresize, readOperator + +@enum Parity begin + odd = -1 + even = 1 +end + +struct D2{T,N,M,K} <: ConstantStencilOperator + quadratureClosure::NTuple{M,T} + innerStencil::Stencil{T,N} + closureStencils::NTuple{M,Stencil{T,K}} + eClosure::Stencil{T,M} + dClosure::Stencil{T,M} + parity::Parity +end + +function closuresize(D::D2)::Int + return length(D.quadratureClosure) +end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/SbpOperators/laplace/laplace.jl Sat Sep 26 15:22:13 2020 +0200 @@ -0,0 +1,138 @@ +export Laplace +""" + Laplace{Dim,T<:Real,N,M,K} <: TensorOperator{T,Dim} + +Implements the Laplace operator `L` in Dim dimensions as a tensor operator +The multi-dimensional tensor operator consists of a tuple of 1D SecondDerivative +tensor operators. +""" +#export quadrature, inverse_quadrature, boundary_quadrature, boundary_value, normal_derivative +struct Laplace{Dim,T,N,M,K} <: TensorOperator{T,Dim} + D2::NTuple{Dim,SecondDerivative{T,N,M,K}} + #TODO: Write a good constructor +end + +LazyTensors.domain_size(L::Laplace{Dim}, range_size::NTuple{Dim,Integer}) where {Dim} = range_size + +function LazyTensors.apply(L::Laplace{Dim,T}, v::AbstractArray{T,Dim}, I::Vararg{Index,Dim}) where {T,Dim} + error("not implemented") +end + +# u = L*v +function LazyTensors.apply(L::Laplace{1,T}, v::AbstractVector{T}, I::Index) where T + @inbounds u = LazyTensors.apply(L.D2[1],v,I) + return u +end + +function LazyTensors.apply(L::Laplace{2,T}, v::AbstractArray{T,2}, I::Index, J::Index) where T + # 2nd x-derivative + @inbounds vx = view(v, :, Int(J)) + @inbounds uᵢ = LazyTensors.apply(L.D2[1], vx , I) + + # 2nd y-derivative + @inbounds vy = view(v, Int(I), :) + @inbounds uᵢ += LazyTensors.apply(L.D2[2], vy , J) + + return uᵢ +end + +LazyTensors.apply_transpose(L::Laplace{Dim,T}, v::AbstractArray{T,Dim}, I::Vararg{Index,Dim}) where {T,Dim} = LazyTensors.apply(L, v, I...) + +# quadrature(L::Laplace) = Quadrature(L.op, L.grid) +# inverse_quadrature(L::Laplace) = InverseQuadrature(L.op, L.grid) +# boundary_value(L::Laplace, bId::CartesianBoundary) = BoundaryValue(L.op, L.grid, bId) +# normal_derivative(L::Laplace, bId::CartesianBoundary) = NormalDerivative(L.op, L.grid, bId) +# boundary_quadrature(L::Laplace, bId::CartesianBoundary) = BoundaryQuadrature(L.op, L.grid, bId) +# export NormalDerivative +# """ +# NormalDerivative{T,N,M,K} <: TensorMapping{T,2,1} +# +# Implements the boundary operator `d` as a TensorMapping +# """ +# struct NormalDerivative{T,N,M,K} <: TensorMapping{T,2,1} +# op::D2{T,N,M,K} +# grid::EquidistantGrid{2} +# bId::CartesianBoundary +# end +# +# # TODO: This is obviouly strange. Is domain_size just discarded? Is there a way to avoid storing grid in BoundaryValue? +# # Can we give special treatment to TensorMappings that go to a higher dim? 
+# function LazyTensors.range_size(e::NormalDerivative, domain_size::NTuple{1,Integer}) +# if dim(e.bId) == 1 +# return (UnknownDim, domain_size[1]) +# elseif dim(e.bId) == 2 +# return (domain_size[1], UnknownDim) +# end +# end +# LazyTensors.domain_size(e::NormalDerivative, range_size::NTuple{2,Integer}) = (range_size[3-dim(e.bId)],) +# +# # TODO: Not type stable D:< +# # TODO: Make this independent of dimension +# function LazyTensors.apply(d::NormalDerivative{T}, v::AbstractArray{T}, I::NTuple{2,Index}) where T +# i = I[dim(d.bId)] +# j = I[3-dim(d.bId)] +# N_i = size(d.grid)[dim(d.bId)] +# h_inv = inverse_spacing(d.grid)[dim(d.bId)] +# return apply_normal_derivative(d.op, h_inv, v[j], i, N_i, region(d.bId)) +# end +# +# function LazyTensors.apply_transpose(d::NormalDerivative{T}, v::AbstractArray{T}, I::NTuple{1,Index}) where T +# u = selectdim(v,3-dim(d.bId),Int(I[1])) +# return apply_normal_derivative_transpose(d.op, inverse_spacing(d.grid)[dim(d.bId)], u, region(d.bId)) +# end +# +# """ +# BoundaryQuadrature{T,N,M,K} <: TensorOperator{T,1} +# +# Implements the boundary operator `q` as a TensorOperator +# """ +# export BoundaryQuadrature +# struct BoundaryQuadrature{T,N,M,K} <: TensorOperator{T,1} +# op::D2{T,N,M,K} +# grid::EquidistantGrid{2} +# bId::CartesianBoundary +# end +# +# +# # TODO: Make this independent of dimension +# function LazyTensors.apply(q::BoundaryQuadrature{T}, v::AbstractArray{T,1}, I::NTuple{1,Index}) where T +# h = spacing(q.grid)[3-dim(q.bId)] +# N = size(v) +# return apply_quadrature(q.op, h, v[I[1]], I[1], N[1]) +# end +# +# LazyTensors.apply_transpose(q::BoundaryQuadrature{T}, v::AbstractArray{T,1}, I::NTuple{1,Index}) where T = LazyTensors.apply(q,v,I) +# +# +# +# +# struct Neumann{Bid<:BoundaryIdentifier} <: BoundaryCondition end +# +# function sat(L::Laplace{2,T}, bc::Neumann{Bid}, v::AbstractArray{T,2}, g::AbstractVector{T}, I::CartesianIndex{2}) where {T,Bid} +# e = boundary_value(L, Bid()) +# d = normal_derivative(L, Bid()) +# Hᵧ = boundary_quadrature(L, Bid()) +# H⁻¹ = inverse_quadrature(L) +# return (-H⁻¹*e*Hᵧ*(d'*v - g))[I] +# end +# +# struct Dirichlet{Bid<:BoundaryIdentifier} <: BoundaryCondition +# tau::Float64 +# end +# +# function sat(L::Laplace{2,T}, bc::Dirichlet{Bid}, v::AbstractArray{T,2}, g::AbstractVector{T}, i::CartesianIndex{2}) where {T,Bid} +# e = boundary_value(L, Bid()) +# d = normal_derivative(L, Bid()) +# Hᵧ = boundary_quadrature(L, Bid()) +# H⁻¹ = inverse_quadrature(L) +# return (-H⁻¹*(tau/h*e + d)*Hᵧ*(e'*v - g))[I] +# # Need to handle scalar multiplication and addition of TensorMapping +# end + +# function apply(s::MyWaveEq{D}, v::AbstractArray{T,D}, i::CartesianIndex{D}) where D + # return apply(s.L, v, i) + +# sat(s.L, Dirichlet{CartesianBoundary{1,Lower}}(s.tau), v, s.g_w, i) + +# sat(s.L, Dirichlet{CartesianBoundary{1,Upper}}(s.tau), v, s.g_e, i) + +# sat(s.L, Dirichlet{CartesianBoundary{2,Lower}}(s.tau), v, s.g_s, i) + +# sat(s.L, Dirichlet{CartesianBoundary{2,Upper}}(s.tau), v, s.g_n, i) +# end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/SbpOperators/laplace/secondderivative.jl	Sat Sep 26 15:22:13 2020 +0200
@@ -0,0 +1,45 @@
+"""
+    SecondDerivative{T<:Real,N,M,K} <: TensorOperator{T,1}
+
+Implements the 1D second derivative tensor operator with constant grid spacing and coefficients.
+"""
+
+struct SecondDerivative{T,N,M,K} <: TensorOperator{T,1}
+    h_inv::T # The grid spacing could be included in the stencil already. Preferable?
+    innerStencil::Stencil{T,N}
+    closureStencils::NTuple{M,Stencil{T,K}}
+    parity::Parity
+    #TODO: Write a nice constructor
+end
+export SecondDerivative
+
+LazyTensors.domain_size(D2::SecondDerivative, range_size::NTuple{1,Integer}) = range_size
+
+#TODO: The 1D tensor mappings should not have to dispatch on 1D tuples if we write LazyTensor.apply for vararg right?!
+# Currently we have to index the Tuple{Index} in each method in order to call the stencil methods, which is ugly.
+# I thought I::Vararg{Index,R} fell back to just Index for R = 1.
+
+# Apply for different regions Lower/Interior/Upper or Unknown region
+function LazyTensors.apply(D2::SecondDerivative{T}, v::AbstractVector{T}, I::Index{Lower}) where T
+    return @inbounds D2.h_inv*D2.h_inv*apply_stencil(D2.closureStencils[Int(I)], v, Int(I))
+end
+
+function LazyTensors.apply(D2::SecondDerivative{T}, v::AbstractVector{T}, I::Index{Interior}) where T
+    return @inbounds D2.h_inv*D2.h_inv*apply_stencil(D2.innerStencil, v, Int(I))
+end
+
+function LazyTensors.apply(D2::SecondDerivative{T}, v::AbstractVector{T}, I::Index{Upper}) where T
+    N = length(v) # TODO: Use domain_size here instead? N = domain_size(D2,size(v))
+    return @inbounds D2.h_inv*D2.h_inv*Int(D2.parity)*apply_stencil_backwards(D2.closureStencils[N-Int(I)+1], v, Int(I))
+end
+
+function LazyTensors.apply(D2::SecondDerivative{T}, v::AbstractVector{T}, index::Index{Unknown}) where T
+    N = length(v) # TODO: Use domain_size here instead?
+    r = getregion(Int(index), closuresize(D2), N)
+    I = Index(Int(index), r)
+    return LazyTensors.apply(D2, v, I)
+end
+
+LazyTensors.apply_transpose(D2::SecondDerivative{T}, v::AbstractVector{T}, I::Index) where {T} = LazyTensors.apply(D2, v, I)
+
+closuresize(D2::SecondDerivative{T,N,M,K}) where {T<:Real,N,M,K} = M
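A construction sketch for the 1D building blocks and the 2D Laplace operator, mirroring the SecondDerivative and Laplace2D test sets in test/testSbpOperators.jl later in this changeset:

    using Sbplib.SbpOperators
    using Sbplib.Grids

    op = readOperator(sbp_operators_path()*"d2_4th.txt", sbp_operators_path()*"h_4th.txt")
    g = EquidistantGrid((101,101), (0.0,0.0), (1.0,1.0))
    h_inv = inverse_spacing(g)

    D_xx = SecondDerivative(h_inv[1], op.innerStencil, op.closureStencils, op.parity)
    D_yy = SecondDerivative(h_inv[2], op.innerStencil, op.closureStencils, op.parity)
    L = Laplace((D_xx, D_yy))

    v = evalOn(g, (x,y) -> sin(x) + cos(y))
    u = collect(L*v)    # L*v is lazy; collect materialises the result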
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/SbpOperators/operators/d2_2nd.txt Sat Sep 26 15:22:13 2020 +0200 @@ -0,0 +1,13 @@ +# D2 order 2 + +boundary_stencils +1 -2 1 + +inner_stencil +1 -2 1 + +e +1 + +d1 +-3/2 2 -1/2
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/SbpOperators/operators/d2_4th.txt Sat Sep 26 15:22:13 2020 +0200 @@ -0,0 +1,16 @@ +# D2 order 4 + +boundary_stencils + 2 -5 4 -1 0 0 + 1 -2 1 0 0 0 + -4/43 59/43 -110/43 59/43 -4/43 0 + -1/49 0 59/49 -118/49 64/49 -4/49 + +inner_stencil +-1/12 4/3 -5/2 4/3 -1/12 + +e +1 + +d1 +-11/6 3 -3/2 1/3 \ No newline at end of file
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/SbpOperators/operators/h_2nd.txt Sat Sep 26 15:22:13 2020 +0200 @@ -0,0 +1,7 @@ +# H order 2 + +closure +1/2 + +inner_stencil +1
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/SbpOperators/operators/h_4th.txt Sat Sep 26 15:22:13 2020 +0200 @@ -0,0 +1,7 @@ +# H order 4 + +closure +17/48 59/48 43/48 49/48 + +inner_stencil +1 \ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/SbpOperators/quadrature/diagonal_inner_product.jl	Sat Sep 26 15:22:13 2020 +0200
@@ -0,0 +1,37 @@
+export DiagonalInnerProduct, closuresize
+"""
+    DiagonalInnerProduct{T<:Real,M} <: TensorOperator{T,1}
+
+Implements the diagonal norm operator `H` as a 1D TensorOperator
+"""
+struct DiagonalInnerProduct{T,M} <: TensorOperator{T,1}
+    h::T # The grid spacing could be included in the stencil already. Preferable?
+    closure::NTuple{M,T}
+    #TODO: Write a nice constructor
+end
+
+LazyTensors.domain_size(H::DiagonalInnerProduct, range_size::NTuple{1,Integer}) = range_size
+
+function LazyTensors.apply(H::DiagonalInnerProduct{T}, v::AbstractVector{T}, I::Index{Lower}) where T
+    return @inbounds H.h*H.closure[Int(I)]*v[Int(I)]
+end
+
+function LazyTensors.apply(H::DiagonalInnerProduct{T}, v::AbstractVector{T}, I::Index{Upper}) where T
+    N = length(v);
+    return @inbounds H.h*H.closure[N-Int(I)+1]*v[Int(I)]
+end
+
+function LazyTensors.apply(H::DiagonalInnerProduct{T}, v::AbstractVector{T}, I::Index{Interior}) where T
+    return @inbounds H.h*v[Int(I)]
+end
+
+function LazyTensors.apply(H::DiagonalInnerProduct{T}, v::AbstractVector{T}, index::Index{Unknown}) where T
+    N = length(v);
+    r = getregion(Int(index), closuresize(H), N)
+    i = Index(Int(index), r)
+    return LazyTensors.apply(H, v, i)
+end
+
+LazyTensors.apply_transpose(H::DiagonalInnerProduct{T}, v::AbstractVector{T}, I::Index) where T = LazyTensors.apply(H,v,I)
+
+closuresize(H::DiagonalInnerProduct{T,M}) where {T,M} = M
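A usage sketch mirroring the DiagonalInnerProduct test set: applied to a grid function of ones, H approximates the length of the domain.

    using Sbplib.SbpOperators
    using Sbplib.Grids

    op = readOperator(sbp_operators_path()*"d2_4th.txt", sbp_operators_path()*"h_4th.txt")
    g = EquidistantGrid((77,), (0.0,), (2.3,))
    H = DiagonalInnerProduct(spacing(g)[1], op.quadratureClosure)

    v = ones(Float64, size(g))
    sum(collect(H*v))    # ≈ 2.3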
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/SbpOperators/quadrature/inverse_diagonal_inner_product.jl	Sat Sep 26 15:22:13 2020 +0200
@@ -0,0 +1,36 @@
+export InverseDiagonalInnerProduct, closuresize
+"""
+    InverseDiagonalInnerProduct{T<:Real,M} <: TensorOperator{T,1}
+
+Implements the inverse diagonal inner product operator `Hi` as a 1D TensorOperator
+"""
+struct InverseDiagonalInnerProduct{T<:Real,M} <: TensorOperator{T,1}
+    h_inv::T # The reciprocal grid spacing could be included in the stencil already. Preferable?
+    closure::NTuple{M,T}
+    #TODO: Write a nice constructor
+end
+
+function LazyTensors.apply(Hi::InverseDiagonalInnerProduct{T}, v::AbstractVector{T}, I::Index{Lower}) where T
+    return @inbounds Hi.h_inv*Hi.closure[Int(I)]*v[Int(I)]
+end
+
+function LazyTensors.apply(Hi::InverseDiagonalInnerProduct{T}, v::AbstractVector{T}, I::Index{Upper}) where T
+    N = length(v);
+    return @inbounds Hi.h_inv*Hi.closure[N-Int(I)+1]*v[Int(I)]
+end
+
+function LazyTensors.apply(Hi::InverseDiagonalInnerProduct{T}, v::AbstractVector{T}, I::Index{Interior}) where T
+    return @inbounds Hi.h_inv*v[Int(I)]
+end
+
+function LazyTensors.apply(Hi::InverseDiagonalInnerProduct, v::AbstractVector{T}, index::Index{Unknown}) where T
+    N = length(v);
+    r = getregion(Int(index), closuresize(Hi), N)
+    i = Index(Int(index), r)
+    return LazyTensors.apply(Hi, v, i)
+end
+
+LazyTensors.apply_transpose(Hi::InverseDiagonalInnerProduct{T}, v::AbstractVector{T}, I::Index) where T = LazyTensors.apply(Hi,v,I)
+
+
+closuresize(Hi::InverseDiagonalInnerProduct{T,M}) where {T,M} = M
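Continuing the sketch after diagonal_inner_product.jl above, the inverse operator is built from the reciprocal spacing and reciprocal closure weights, as in the InverseDiagonalInnerProduct test set:

    Hi = InverseDiagonalInnerProduct(inverse_spacing(g)[1], 1 ./ op.quadratureClosure)
    collect(Hi*(H*v)) ≈ v    # Hi undoes H pointwise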
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/SbpOperators/quadrature/inverse_quadrature.jl	Sat Sep 26 15:22:13 2020 +0200
@@ -0,0 +1,34 @@
+export InverseQuadrature
+"""
+    InverseQuadrature{Dim,T<:Real,M} <: TensorOperator{T,Dim}
+
+Implements the inverse quadrature operator `Qi` in Dim dimensions as a TensorOperator.
+The multi-dimensional tensor operator consists of a tuple of 1D InverseDiagonalInnerProduct
+tensor operators.
+"""
+struct InverseQuadrature{Dim,T<:Real,M} <: TensorOperator{T,Dim}
+    Hi::NTuple{Dim,InverseDiagonalInnerProduct{T,M}}
+end
+
+LazyTensors.domain_size(Qi::InverseQuadrature{Dim}, range_size::NTuple{Dim,Integer}) where Dim = range_size
+
+function LazyTensors.apply(Qi::InverseQuadrature{Dim,T}, v::AbstractArray{T,Dim}, I::Vararg{Index,Dim}) where {T,Dim}
+    error("not implemented")
+end
+
+@inline function LazyTensors.apply(Qi::InverseQuadrature{1,T}, v::AbstractVector{T}, I::Index) where T
+    @inbounds q = apply(Qi.Hi[1], v, I)
+    return q
+end
+
+@inline function LazyTensors.apply(Qi::InverseQuadrature{2,T}, v::AbstractArray{T,2}, I::Index, J::Index) where T
+    # InverseQuadrature in x-direction
+    @inbounds vx = view(v, :, Int(J))
+    @inbounds qx_inv = apply(Qi.Hi[1], vx, I)
+    # InverseQuadrature in y-direction
+    @inbounds vy = view(v, Int(I), :)
+    @inbounds qy_inv = apply(Qi.Hi[2], vy, J)
+    return qx_inv*qy_inv
+end
+
+LazyTensors.apply_transpose(Qi::InverseQuadrature{Dim,T}, v::AbstractArray{T,Dim}, I::Vararg{Index,Dim}) where {Dim,T} = LazyTensors.apply(Qi,v,I...)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/SbpOperators/quadrature/quadrature.jl	Sat Sep 26 15:22:13 2020 +0200
@@ -0,0 +1,34 @@
+export Quadrature
+"""
+    Quadrature{Dim,T<:Real,M} <: TensorOperator{T,Dim}
+
+Implements the quadrature operator `Q` in Dim dimensions as a TensorOperator.
+The multi-dimensional tensor operator consists of a tuple of 1D DiagonalInnerProduct
+tensor operators.
+"""
+struct Quadrature{Dim,T<:Real,M} <: TensorOperator{T,Dim}
+    H::NTuple{Dim,DiagonalInnerProduct{T,M}}
+end
+
+LazyTensors.domain_size(Q::Quadrature{Dim}, range_size::NTuple{Dim,Integer}) where {Dim} = range_size
+
+function LazyTensors.apply(Q::Quadrature{Dim,T}, v::AbstractArray{T,Dim}, I::Vararg{Index,Dim}) where {T,Dim}
+    error("not implemented")
+end
+
+function LazyTensors.apply(Q::Quadrature{1,T}, v::AbstractVector{T}, I::Index) where T
+    @inbounds q = apply(Q.H[1], v, I)
+    return q
+end
+
+function LazyTensors.apply(Q::Quadrature{2,T}, v::AbstractArray{T,2}, I::Index, J::Index) where T
+    # Quadrature in x-direction
+    @inbounds vx = view(v, :, Int(J))
+    @inbounds qx = apply(Q.H[1], vx, I)
+    # Quadrature in y-direction
+    @inbounds vy = view(v, Int(I), :)
+    @inbounds qy = apply(Q.H[2], vy, J)
+    return qx*qy
+end
+
+LazyTensors.apply_transpose(Q::Quadrature{Dim,T}, v::AbstractArray{T,Dim}, I::Vararg{Index,Dim}) where {Dim,T} = LazyTensors.apply(Q,v,I...)
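A 2D sketch mirroring the Quadrature test set; the 1D inner products along each coordinate direction are combined into a tensor operator:

    using Sbplib.SbpOperators
    using Sbplib.Grids

    op = readOperator(sbp_operators_path()*"d2_4th.txt", sbp_operators_path()*"h_4th.txt")
    g2 = EquidistantGrid((77,66), (0.0,0.0), (2.3,5.2))
    h = spacing(g2)

    Hx = DiagonalInnerProduct(h[1], op.quadratureClosure)
    Hy = DiagonalInnerProduct(h[2], op.quadratureClosure)
    Q = Quadrature((Hx, Hy))

    sum(collect(Q*ones(Float64, size(g2))))    # ≈ 2.3*5.2, the area of the domain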
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/SbpOperators/readoperator.jl Sat Sep 26 15:22:13 2020 +0200 @@ -0,0 +1,80 @@ +function readOperator(D2fn, Hfn) + d = readSectionedFile(D2fn) + h = readSectionedFile(Hfn) + + # Create inner stencil + innerStencilWeights = stringToTuple(Float64, d["inner_stencil"][1]) + width = length(innerStencilWeights) + r = (-div(width,2), div(width,2)) + + innerStencil = Stencil(r, innerStencilWeights) + + # Create boundary stencils + boundarySize = length(d["boundary_stencils"]) + closureStencils = Vector{typeof(innerStencil)}() # TBD: is the the right way to get the correct type? + + for i ∈ 1:boundarySize + stencilWeights = stringToTuple(Float64, d["boundary_stencils"][i]) + width = length(stencilWeights) + r = (1-i,width-i) + closureStencils = (closureStencils..., Stencil(r, stencilWeights)) + end + + quadratureClosure = pad_tuple(stringToTuple(Float64, h["closure"][1]), boundarySize) + eClosure = Stencil((0,boundarySize-1), pad_tuple(stringToTuple(Float64, d["e"][1]), boundarySize)) + dClosure = Stencil((0,boundarySize-1), pad_tuple(stringToTuple(Float64, d["d1"][1]), boundarySize)) + + d2 = D2( + quadratureClosure, + innerStencil, + closureStencils, + eClosure, + dClosure, + even + ) + + return d2 +end + +function readSectionedFile(filename)::Dict{String, Vector{String}} + f = open(filename) + sections = Dict{String, Vector{String}}() + currentKey = "" + + for ln ∈ eachline(f) + if ln == "" || ln[1] == '#' # Skip comments and empty lines + continue + end + + if isletter(ln[1]) # Found start of new section + if ~haskey(sections, ln) + sections[ln] = Vector{String}() + end + currentKey = ln + continue + end + + push!(sections[currentKey], ln) + end + + return sections +end + +function stringToTuple(T::DataType, s::String) + return Tuple(stringToVector(T,s)) +end + +function stringToVector(T::DataType, s::String) + return T.(eval.(Meta.parse.(split(s)))) +end + +function pad_tuple(t::NTuple{N, T}, n::Integer) where {N,T} + if N >= n + return t + else + return pad_tuple((t..., zero(T)), n) + end +end + +sbp_operators_path() = (@__DIR__) * "/operators/" +export sbp_operators_path
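To illustrate what readOperator produces from the sectioned text files in this changeset (the values below are taken from d2_4th.txt and h_4th.txt):

    using Sbplib.SbpOperators

    op = readOperator(sbp_operators_path()*"d2_4th.txt", sbp_operators_path()*"h_4th.txt")

    op.innerStencil              # Stencil with range (-2,2) and weights (-1/12, 4/3, -5/2, 4/3, -1/12)
    length(op.closureStencils)   # 4 boundary closure rows
    op.quadratureClosure         # (17/48, 59/48, 43/48, 49/48)
    closuresize(op)              # 4
    op.parity                    # even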
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/SbpOperators/stencil.jl Sat Sep 26 15:22:13 2020 +0200 @@ -0,0 +1,38 @@ +struct Stencil{T<:Real,N} + range::Tuple{Int,Int} + weights::NTuple{N,T} + + function Stencil(range::Tuple{Int,Int},weights::NTuple{N,T}) where {T <: Real, N} + @assert range[2]-range[1]+1 == N + new{T,N}(range,weights) + end +end + +function flip(s::Stencil) + range = (-s.range[2], -s.range[1]) + return Stencil(range, reverse(s.weights)) +end + +# Provides index into the Stencil based on offset for the root element +@inline function Base.getindex(s::Stencil, i::Int) + @boundscheck if i < s.range[1] || s.range[2] < i + return eltype(s.weights)(0) + end + return s.weights[1 + i - s.range[1]] +end + +Base.@propagate_inbounds @inline function apply_stencil(s::Stencil{T,N}, v::AbstractVector, i::Int) where {T,N} + w = s.weights[1]*v[i + s.range[1]] + @simd for k ∈ 2:N + w += s.weights[k]*v[i + s.range[1] + k-1] + end + return w +end + +Base.@propagate_inbounds @inline function apply_stencil_backwards(s::Stencil{T,N}, v::AbstractVector, i::Int) where {T,N} + w = s.weights[N]*v[i - s.range[2]] + @simd for k ∈ N-1:-1:1 + w += s.weights[k]*v[i - s.range[1] - k + 1] + end + return w +end
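A small sketch of the Stencil type above (it is not exported, hence the qualified names); the offsets in `range` are taken relative to the index the stencil is applied at:

    using Sbplib
    const SO = Sbplib.SbpOperators

    s = SO.Stencil((-1,1), (1.0, -2.0, 1.0))   # centered second-difference weights
    v = [0.0, 1.0, 4.0, 9.0, 16.0]             # v[i] = (i-1)^2

    SO.apply_stencil(s, v, 3)    # 1*v[2] - 2*v[3] + 1*v[4] = 2.0
    s[0]                         # weight at the root index, -2.0
    s[5]                         # outside the range, returns 0.0
    SO.flip(s)                   # mirrored range, reversed weights (equal to s here, since s is symmetric)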
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/Sbplib.jl Sat Sep 26 15:22:13 2020 +0200 @@ -0,0 +1,15 @@ +module Sbplib + +include("RegionIndices/RegionIndices.jl") +include("LazyTensors/LazyTensors.jl") +include("Grids/Grids.jl") +include("SbpOperators/SbpOperators.jl") +include("DiffOps/DiffOps.jl") + +export RegionIndices +export LazyTensors +export Grids +export SbpOperators +export DiffOps + +end
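After this changeset the former sub-packages are plain submodules, so everything is reached through the single top-level package. A minimal sketch:

    using Sbplib
    using Sbplib.Grids
    using Sbplib.SbpOperators

    g = EquidistantGrid(4, 0, 1)   # the constructor exercised in test/testGrids.jl
    dimension(g)                   # 1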
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/Manifest.toml Sat Sep 26 15:22:13 2020 +0200 @@ -0,0 +1,44 @@ +# This file is machine-generated - editing it directly is not advised + +[[Base64]] +uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" + +[[DeepDiffs]] +git-tree-sha1 = "9824894295b62a6a4ab6adf1c7bf337b3a9ca34c" +uuid = "ab62b9b5-e342-54a8-a765-a90f495de1a6" +version = "1.2.0" + +[[Distributed]] +deps = ["Random", "Serialization", "Sockets"] +uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b" + +[[InteractiveUtils]] +deps = ["Markdown"] +uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240" + +[[Logging]] +uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" + +[[Markdown]] +deps = ["Base64"] +uuid = "d6f4376e-aef5-505a-96c1-9c027394607a" + +[[Random]] +deps = ["Serialization"] +uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" + +[[Serialization]] +uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b" + +[[Sockets]] +uuid = "6462fe0b-24de-5631-8697-dd941f90decc" + +[[Test]] +deps = ["Distributed", "InteractiveUtils", "Logging", "Random"] +uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40" + +[[TestSetExtensions]] +deps = ["DeepDiffs", "Distributed", "Test"] +git-tree-sha1 = "3a2919a78b04c29a1a57b05e1618e473162b15d0" +uuid = "98d24dd4-01ad-11ea-1b02-c9a08f80db04" +version = "2.0.0"
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/Project.toml Sat Sep 26 15:22:13 2020 +0200 @@ -0,0 +1,3 @@ +[deps] +Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" +TestSetExtensions = "98d24dd4-01ad-11ea-1b02-c9a08f80db04"
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/runtests.jl Sat Sep 26 15:22:13 2020 +0200 @@ -0,0 +1,6 @@ +using Test +using TestSetExtensions + +@testset ExtendedTestSet "All" begin + @includetests ARGS +end
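The test runner forwards ARGS to @includetests, so individual test files can be selected by name. A sketch of how it might be invoked with the package environment activated (the test_args keyword is standard Pkg functionality, assumed available in the Julia version used here):

    using Pkg
    Pkg.test()                          # run every test file via test/runtests.jl
    Pkg.test(test_args=["testGrids"])   # run only test/testGrids.jl through @includetests ARGS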
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testDiffOps.jl Sat Sep 26 15:22:13 2020 +0200 @@ -0,0 +1,273 @@ +using Test +using Sbplib.DiffOps +using Sbplib.Grids +using Sbplib.SbpOperators +using Sbplib.RegionIndices +using Sbplib.LazyTensors + +@testset "DiffOps" begin + +@testset "Laplace2D" begin + op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") + Lx = 3.5 + Ly = 7.2 + g = EquidistantGrid((42,41), (0.0, 0.0), (Lx,Ly)) + L = Laplace(g, 1., op) + H = quadrature(L) + + f0(x::Float64,y::Float64) = 2. + f1(x::Float64,y::Float64) = x+y + f2(x::Float64,y::Float64) = 1/2*x^2 + 1/2*y^2 + f3(x::Float64,y::Float64) = 1/6*x^3 + 1/6*y^3 + f4(x::Float64,y::Float64) = 1/24*x^4 + 1/24*y^4 + f5(x::Float64,y::Float64) = sin(x) + cos(y) + f5ₓₓ(x::Float64,y::Float64) = -f5(x,y) + + v0 = evalOn(g,f0) + v1 = evalOn(g,f1) + v2 = evalOn(g,f2) + v3 = evalOn(g,f3) + v4 = evalOn(g,f4) + v5 = evalOn(g,f5) + v5ₓₓ = evalOn(g,f5ₓₓ) + + @test L isa TensorOperator{T,2} where T + @test L' isa TensorMapping{T,2,2} where T + + # TODO: Should perhaps set tolerance level for isapporx instead? + # Are these tolerance levels resonable or should tests be constructed + # differently? + equalitytol = 0.5*1e-10 + accuracytol = 0.5*1e-3 + # 4th order interior stencil, 2nd order boundary stencil, + # implies that L*v should be exact for v - monomial up to order 3. + # Exact differentiation is measured point-wise. For other grid functions + # the error is measured in the H-norm. + @test all(abs.(collect(L*v0)) .<= equalitytol) + @test all(abs.(collect(L*v1)) .<= equalitytol) + @test all(collect(L*v2) .≈ v0) # Seems to be more accurate + @test all(abs.((collect(L*v3) - v1)) .<= equalitytol) + e4 = collect(L*v4) - v2 + e5 = collect(L*v5) - v5ₓₓ + @test sum(collect(H*e4.^2)) <= accuracytol + @test sum(collect(H*e5.^2)) <= accuracytol +end + +@testset "Quadrature" begin + op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") + Lx = 2.3 + Ly = 5.2 + g = EquidistantGrid((77,66), (0.0, 0.0), (Lx,Ly)) + H = Quadrature(op,g) + v = ones(Float64, size(g)) + + @test H isa TensorOperator{T,2} where T + @test H' isa TensorMapping{T,2,2} where T + @test sum(collect(H*v)) ≈ (Lx*Ly) + @test collect(H*v) == collect(H'*v) +end + +@testset "InverseQuadrature" begin + op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") + Lx = 7.3 + Ly = 8.2 + g = EquidistantGrid((77,66), (0.0, 0.0), (Lx,Ly)) + H = Quadrature(op,g) + Hinv = InverseQuadrature(op,g) + v = evalOn(g, (x,y)-> x^2 + (y-1)^2 + x*y) + + @test Hinv isa TensorOperator{T,2} where T + @test Hinv' isa TensorMapping{T,2,2} where T + @test collect(Hinv*H*v) ≈ v + @test collect(Hinv*v) == collect(Hinv'*v) +end + +@testset "BoundaryValue" begin + op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") + g = EquidistantGrid((4,5), (0.0, 0.0), (1.0,1.0)) + + e_w = BoundaryValue(op, g, CartesianBoundary{1,Lower}()) + e_e = BoundaryValue(op, g, CartesianBoundary{1,Upper}()) + e_s = BoundaryValue(op, g, CartesianBoundary{2,Lower}()) + e_n = BoundaryValue(op, g, CartesianBoundary{2,Upper}()) + + v = zeros(Float64, 4, 5) + v[:,5] = [1, 2, 3,4] + v[:,4] = [1, 2, 3,4] + v[:,3] = [4, 5, 6, 7] + v[:,2] = [7, 8, 9, 10] + v[:,1] = [10, 11, 12, 13] + + @test e_w isa TensorMapping{T,2,1} where T + @test e_w' isa TensorMapping{T,1,2} where T + + @test domain_size(e_w, (3,2)) == (2,) + @test domain_size(e_e, (3,2)) == (2,) + @test domain_size(e_s, (3,2)) == (3,) + @test 
domain_size(e_n, (3,2)) == (3,) + + @test size(e_w'*v) == (5,) + @test size(e_e'*v) == (5,) + @test size(e_s'*v) == (4,) + @test size(e_n'*v) == (4,) + + @test collect(e_w'*v) == [10,7,4,1.0,1] + @test collect(e_e'*v) == [13,10,7,4,4.0] + @test collect(e_s'*v) == [10,11,12,13.0] + @test collect(e_n'*v) == [1,2,3,4.0] + + g_x = [1,2,3,4.0] + g_y = [5,4,3,2,1.0] + + G_w = zeros(Float64, (4,5)) + G_w[1,:] = g_y + + G_e = zeros(Float64, (4,5)) + G_e[4,:] = g_y + + G_s = zeros(Float64, (4,5)) + G_s[:,1] = g_x + + G_n = zeros(Float64, (4,5)) + G_n[:,5] = g_x + + @test size(e_w*g_y) == (UnknownDim,5) + @test size(e_e*g_y) == (UnknownDim,5) + @test size(e_s*g_x) == (4,UnknownDim) + @test size(e_n*g_x) == (4,UnknownDim) + + # These tests should be moved to where they are possible (i.e we know what the grid should be) + @test_broken collect(e_w*g_y) == G_w + @test_broken collect(e_e*g_y) == G_e + @test_broken collect(e_s*g_x) == G_s + @test_broken collect(e_n*g_x) == G_n +end + +@testset "NormalDerivative" begin + op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") + g = EquidistantGrid((5,6), (0.0, 0.0), (4.0,5.0)) + + d_w = NormalDerivative(op, g, CartesianBoundary{1,Lower}()) + d_e = NormalDerivative(op, g, CartesianBoundary{1,Upper}()) + d_s = NormalDerivative(op, g, CartesianBoundary{2,Lower}()) + d_n = NormalDerivative(op, g, CartesianBoundary{2,Upper}()) + + + v = evalOn(g, (x,y)-> x^2 + (y-1)^2 + x*y) + v∂x = evalOn(g, (x,y)-> 2*x + y) + v∂y = evalOn(g, (x,y)-> 2*(y-1) + x) + + @test d_w isa TensorMapping{T,2,1} where T + @test d_w' isa TensorMapping{T,1,2} where T + + @test domain_size(d_w, (3,2)) == (2,) + @test domain_size(d_e, (3,2)) == (2,) + @test domain_size(d_s, (3,2)) == (3,) + @test domain_size(d_n, (3,2)) == (3,) + + @test size(d_w'*v) == (6,) + @test size(d_e'*v) == (6,) + @test size(d_s'*v) == (5,) + @test size(d_n'*v) == (5,) + + @test collect(d_w'*v) ≈ v∂x[1,:] + @test collect(d_e'*v) ≈ v∂x[5,:] + @test collect(d_s'*v) ≈ v∂y[:,1] + @test collect(d_n'*v) ≈ v∂y[:,6] + + + d_x_l = zeros(Float64, 5) + d_x_u = zeros(Float64, 5) + for i ∈ eachindex(d_x_l) + d_x_l[i] = op.dClosure[i-1] + d_x_u[i] = -op.dClosure[length(d_x_u)-i] + end + + d_y_l = zeros(Float64, 6) + d_y_u = zeros(Float64, 6) + for i ∈ eachindex(d_y_l) + d_y_l[i] = op.dClosure[i-1] + d_y_u[i] = -op.dClosure[length(d_y_u)-i] + end + + function prod_matrix(x,y) + G = zeros(Float64, length(x), length(y)) + for I ∈ CartesianIndices(G) + G[I] = x[I[1]]*y[I[2]] + end + + return G + end + + g_x = [1,2,3,4.0,5] + g_y = [5,4,3,2,1.0,11] + + G_w = prod_matrix(d_x_l, g_y) + G_e = prod_matrix(d_x_u, g_y) + G_s = prod_matrix(g_x, d_y_l) + G_n = prod_matrix(g_x, d_y_u) + + + @test size(d_w*g_y) == (UnknownDim,6) + @test size(d_e*g_y) == (UnknownDim,6) + @test size(d_s*g_x) == (5,UnknownDim) + @test size(d_n*g_x) == (5,UnknownDim) + + # These tests should be moved to where they are possible (i.e we know what the grid should be) + @test_broken collect(d_w*g_y) ≈ G_w + @test_broken collect(d_e*g_y) ≈ G_e + @test_broken collect(d_s*g_x) ≈ G_s + @test_broken collect(d_n*g_x) ≈ G_n +end + +@testset "BoundaryQuadrature" begin + op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") + g = EquidistantGrid((10,11), (0.0, 0.0), (1.0,1.0)) + + H_w = BoundaryQuadrature(op, g, CartesianBoundary{1,Lower}()) + H_e = BoundaryQuadrature(op, g, CartesianBoundary{1,Upper}()) + H_s = BoundaryQuadrature(op, g, CartesianBoundary{2,Lower}()) + H_n = BoundaryQuadrature(op, g, 
CartesianBoundary{2,Upper}()) + + v = evalOn(g, (x,y)-> x^2 + (y-1)^2 + x*y) + + function get_quadrature(N) + qc = op.quadratureClosure + q = (qc..., ones(N-2*closuresize(op))..., reverse(qc)...) + @assert length(q) == N + return q + end + + v_w = v[1,:] + v_e = v[10,:] + v_s = v[:,1] + v_n = v[:,11] + + q_x = spacing(g)[1].*get_quadrature(10) + q_y = spacing(g)[2].*get_quadrature(11) + + @test H_w isa TensorOperator{T,1} where T + + @test domain_size(H_w, (3,)) == (3,) + @test domain_size(H_n, (3,)) == (3,) + + @test range_size(H_w, (3,)) == (3,) + @test range_size(H_n, (3,)) == (3,) + + @test size(H_w*v_w) == (11,) + @test size(H_e*v_e) == (11,) + @test size(H_s*v_s) == (10,) + @test size(H_n*v_n) == (10,) + + @test collect(H_w*v_w) ≈ q_y.*v_w + @test collect(H_e*v_e) ≈ q_y.*v_e + @test collect(H_s*v_s) ≈ q_x.*v_s + @test collect(H_n*v_n) ≈ q_x.*v_n + + @test collect(H_w'*v_w) == collect(H_w'*v_w) + @test collect(H_e'*v_e) == collect(H_e'*v_e) + @test collect(H_s'*v_s) == collect(H_s'*v_s) + @test collect(H_n'*v_n) == collect(H_n'*v_n) +end + +end \ No newline at end of file
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testGrids.jl Sat Sep 26 15:22:13 2020 +0200 @@ -0,0 +1,12 @@ +using Sbplib.Grids +using Test + +@testset "Grids" begin + +@testset "EquidistantGrid" begin + @test EquidistantGrid(4,0,1) isa EquidistantGrid + @test dimension(EquidistantGrid(4,0,1)) == 1 + @test EquidistantGrid(4,0,1) == EquidistantGrid((4,),(0,),(1,)) +end + +end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testLazyTensors.jl Sat Sep 26 15:22:13 2020 +0200 @@ -0,0 +1,195 @@ +using Test +using Sbplib.LazyTensors +using Sbplib.RegionIndices + +@testset "LazyTensors" begin + +@testset "Generic Mapping methods" begin + struct DummyMapping{T,R,D} <: TensorMapping{T,R,D} end + LazyTensors.apply(m::DummyMapping{T,R,D}, v, i::NTuple{R,Index{<:Region}}) where {T,R,D} = :apply + @test range_dim(DummyMapping{Int,2,3}()) == 2 + @test domain_dim(DummyMapping{Int,2,3}()) == 3 + @test apply(DummyMapping{Int,2,3}(), zeros(Int, (0,0,0)),(Index{Unknown}(0),Index{Unknown}(0))) == :apply +end + +@testset "Generic Operator methods" begin + struct DummyOperator{T,D} <: TensorOperator{T,D} end + @test range_size(DummyOperator{Int,2}(), (3,5)) == (3,5) + @test domain_size(DummyOperator{Float64, 3}(), (3,3,1)) == (3,3,1) +end + +@testset "Mapping transpose" begin + struct DummyMapping{T,R,D} <: TensorMapping{T,R,D} end + + LazyTensors.apply(m::DummyMapping{T,R,D}, v, I::Vararg{Index{<:Region},R}) where {T,R,D} = :apply + LazyTensors.apply_transpose(m::DummyMapping{T,R,D}, v, I::Vararg{Index{<:Region},D}) where {T,R,D} = :apply_transpose + + LazyTensors.range_size(m::DummyMapping{T,R,D}, domain_size::NTuple{D,Integer}) where {T,R,D} = :range_size + LazyTensors.domain_size(m::DummyMapping{T,R,D}, range_size::NTuple{R,Integer}) where {T,R,D} = :domain_size + + m = DummyMapping{Float64,2,3}() + I = Index{Unknown}(0) + @test m' isa TensorMapping{Float64, 3,2} + @test m'' == m + @test apply(m',zeros(Float64,(0,0)), I, I, I) == :apply_transpose + @test apply(m'',zeros(Float64,(0,0,0)), I, I) == :apply + @test apply_transpose(m', zeros(Float64,(0,0,0)), I, I) == :apply + + @test range_size(m', (0,0)) == :domain_size + @test domain_size(m', (0,0,0)) == :range_size +end + +@testset "TensorApplication" begin + struct DummyMapping{T,R,D} <: TensorMapping{T,R,D} end + + LazyTensors.apply(m::DummyMapping{T,R,D}, v, i::Vararg{Index{<:Region},R}) where {T,R,D} = (:apply,v,i) + LazyTensors.range_size(m::DummyMapping{T,R,D}, domain_size::NTuple{D,Integer}) where {T,R,D} = 2 .* domain_size + LazyTensors.domain_size(m::DummyMapping{T,R,D}, range_size::NTuple{R,Integer}) where {T,R,D} = range_size.÷2 + + + m = DummyMapping{Int, 1, 1}() + v = [0,1,2] + @test m*v isa AbstractVector{Int} + @test size(m*v) == 2 .*size(v) + @test (m*v)[Index{Upper}(0)] == (:apply,v,(Index{Upper}(0),)) + @test (m*v)[0] == (:apply,v,(Index{Unknown}(0),)) + @test m*m*v isa AbstractVector{Int} + @test (m*m*v)[Index{Upper}(1)] == (:apply,m*v,(Index{Upper}(1),)) + @test (m*m*v)[1] == (:apply,m*v,(Index{Unknown}(1),)) + @test (m*m*v)[Index{Interior}(3)] == (:apply,m*v,(Index{Interior}(3),)) + @test (m*m*v)[3] == (:apply,m*v,(Index{Unknown}(3),)) + @test (m*m*v)[Index{Lower}(6)] == (:apply,m*v,(Index{Lower}(6),)) + @test (m*m*v)[6] == (:apply,m*v,(Index{Unknown}(6),)) + @test_broken BoundsError == (m*m*v)[0] + @test_broken BoundsError == (m*m*v)[7] + + m = DummyMapping{Int, 2, 1}() + @test_throws MethodError m*ones(Int,2,2) + @test_throws MethodError m*m*v + + m = DummyMapping{Float64, 2, 2}() + v = ones(3,3) + I = (Index{Lower}(1),Index{Interior}(2)); + @test size(m*v) == 2 .*size(v) + @test (m*v)[I] == (:apply,v,I) + + struct ScalingOperator{T,D} <: TensorOperator{T,D} + λ::T + end + + LazyTensors.apply(m::ScalingOperator{T,D}, v, I::Vararg{Index,D}) where {T,D} = m.λ*v[I] + + m = ScalingOperator{Int,1}(2) + v = [1,2,3] + @test m*v isa AbstractVector + @test m*v == [2,4,6] + + m = ScalingOperator{Int,2}(2) + v 
= [[1 2];[3 4]] + @test m*v == [[2 4];[6 8]] + I = (Index{Upper}(2),Index{Lower}(1)) + @test (m*v)[I] == 6 +end + +@testset "TensorMapping binary operations" begin + struct ScalarMapping{T,R,D} <: TensorMapping{T,R,D} + λ::T + end + + LazyTensors.apply(m::ScalarMapping{T,R,D}, v, I::Vararg{Index{<:Region}}) where {T,R,D} = m.λ*v[I...] + LazyTensors.range_size(m::ScalarMapping, domain_size) = domain_size + LazyTensors.domain_size(m::ScalarMapping, range_sizes) = range_sizes + + A = ScalarMapping{Float64,1,1}(2.0) + B = ScalarMapping{Float64,1,1}(3.0) + + v = [1.1,1.2,1.3] + for i ∈ eachindex(v) + @test ((A+B)*v)[i] == 2*v[i] + 3*v[i] + end + + for i ∈ eachindex(v) + @test ((A-B)*v)[i] == 2*v[i] - 3*v[i] + end + + @test range_size(A+B, (3,)) == range_size(A, (3,)) == range_size(B,(3,)) + @test domain_size(A+B, (3,)) == domain_size(A, (3,)) == domain_size(B,(3,)) +end + +@testset "LazyArray" begin + @testset "LazyConstantArray" begin + @test LazyTensors.LazyConstantArray(3,(3,2)) isa LazyArray{Int,2} + + lca = LazyTensors.LazyConstantArray(3.0,(3,2)) + @test eltype(lca) == Float64 + @test ndims(lca) == 2 + @test size(lca) == (3,2) + @test lca[2] == 3.0 + end + struct DummyArray{T,D, T1<:AbstractArray{T,D}} <: LazyArray{T,D} + data::T1 + end + Base.size(v::DummyArray) = size(v.data) + Base.getindex(v::DummyArray{T,D}, I::Vararg{Int,D}) where {T,D} = v.data[I...] + + # Test lazy operations + v1 = [1, 2.3, 4] + v2 = [1., 2, 3] + s = 3.4 + r_add_v = v1 .+ v2 + r_sub_v = v1 .- v2 + r_times_v = v1 .* v2 + r_div_v = v1 ./ v2 + r_add_s = v1 .+ s + r_sub_s = v1 .- s + r_times_s = v1 .* s + r_div_s = v1 ./ s + @test isa(v1 +̃ v2, LazyArray) + @test isa(v1 -̃ v2, LazyArray) + @test isa(v1 *̃ v2, LazyArray) + @test isa(v1 /̃ v2, LazyArray) + @test isa(v1 +̃ s, LazyArray) + @test isa(v1 -̃ s, LazyArray) + @test isa(v1 *̃ s, LazyArray) + @test isa(v1 /̃ s, LazyArray) + @test isa(s +̃ v1, LazyArray) + @test isa(s -̃ v1, LazyArray) + @test isa(s *̃ v1, LazyArray) + @test isa(s /̃ v1, LazyArray) + for i ∈ eachindex(v1) + @test (v1 +̃ v2)[i] == r_add_v[i] + @test (v1 -̃ v2)[i] == r_sub_v[i] + @test (v1 *̃ v2)[i] == r_times_v[i] + @test (v1 /̃ v2)[i] == r_div_v[i] + @test (v1 +̃ s)[i] == r_add_s[i] + @test (v1 -̃ s)[i] == r_sub_s[i] + @test (v1 *̃ s)[i] == r_times_s[i] + @test (v1 /̃ s)[i] == r_div_s[i] + @test (s +̃ v1)[i] == r_add_s[i] + @test (s -̃ v1)[i] == -r_sub_s[i] + @test (s *̃ v1)[i] == r_times_s[i] + @test (s /̃ v1)[i] == 1/r_div_s[i] + end + @test_throws BoundsError (v1 +̃ v2)[4] + v2 = [1., 2, 3, 4] + # Test that size of arrays is asserted when not specified inbounds + @test_throws DimensionMismatch v1 +̃ v2 + + # Test operations on LazyArray + v1 = DummyArray([1, 2.3, 4]) + v2 = [1., 2, 3] + @test isa(v1 + v2, LazyArray) + @test isa(v2 + v1, LazyArray) + @test isa(v1 - v2, LazyArray) + @test isa(v2 - v1, LazyArray) + for i ∈ eachindex(v2) + @test (v1 + v2)[i] == (v2 + v1)[i] == r_add_v[i] + @test (v1 - v2)[i] == -(v2 - v1)[i] == r_sub_v[i] + end + @test_throws BoundsError (v1 + v2)[4] + v2 = [1., 2, 3, 4] + # Test that size of arrays is asserted when not specified inbounds + @test_throws DimensionMismatch v1 + v2 +end + +end \ No newline at end of file
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testRegionInidices.jl Sat Sep 26 15:22:13 2020 +0200 @@ -0,0 +1,6 @@ +using Sbplib.RegionIndices +using Test + +@testset "RegionIndices" begin + @test_broken false +end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/testSbpOperators.jl Sat Sep 26 15:22:13 2020 +0200 @@ -0,0 +1,386 @@ +using Test +using Sbplib.SbpOperators +using Sbplib.Grids +using Sbplib.RegionIndices +using Sbplib.LazyTensors + +@testset "SbpOperators" begin + +# @testset "apply_quadrature" begin +# op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") +# h = 0.5 +# +# @test apply_quadrature(op, h, 1.0, 10, 100) == h +# +# N = 10 +# qc = op.quadratureClosure +# q = h.*(qc..., ones(N-2*closuresize(op))..., reverse(qc)...) +# @assert length(q) == N +# +# for i ∈ 1:N +# @test apply_quadrature(op, h, 1.0, i, N) == q[i] +# end +# +# v = [2.,3.,2.,4.,5.,4.,3.,4.,5.,4.5] +# for i ∈ 1:N +# @test apply_quadrature(op, h, v[i], i, N) == q[i]*v[i] +# end +# end + +@testset "SecondDerivative" begin + op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") + L = 3.5 + g = EquidistantGrid((101,), (0.0,), (L,)) + h_inv = inverse_spacing(g) + h = 1/h_inv[1]; + Dₓₓ = SecondDerivative(h_inv[1],op.innerStencil,op.closureStencils,op.parity) + + f0(x::Float64) = 1. + f1(x::Float64) = x + f2(x::Float64) = 1/2*x^2 + f3(x::Float64) = 1/6*x^3 + f4(x::Float64) = 1/24*x^4 + f5(x::Float64) = sin(x) + f5ₓₓ(x::Float64) = -f5(x) + + v0 = evalOn(g,f0) + v1 = evalOn(g,f1) + v2 = evalOn(g,f2) + v3 = evalOn(g,f3) + v4 = evalOn(g,f4) + v5 = evalOn(g,f5) + + @test Dₓₓ isa TensorOperator{T,1} where T + @test Dₓₓ' isa TensorMapping{T,1,1} where T + + # TODO: Should perhaps set tolerance level for isapporx instead? + # Are these tolerance levels resonable or should tests be constructed + # differently? + equalitytol = 0.5*1e-10 + accuracytol = 0.5*1e-3 + # 4th order interior stencil, 2nd order boundary stencil, + # implies that L*v should be exact for v - monomial up to order 3. + # Exact differentiation is measured point-wise. For other grid functions + # the error is measured in the l2-norm. + @test all(abs.(collect(Dₓₓ*v0)) .<= equalitytol) + @test all(abs.(collect(Dₓₓ*v1)) .<= equalitytol) + @test all(abs.((collect(Dₓₓ*v2) - v0)) .<= equalitytol) + @test all(abs.((collect(Dₓₓ*v3) - v1)) .<= equalitytol) + e4 = collect(Dₓₓ*v4) - v2 + e5 = collect(Dₓₓ*v5) + v5 + @test sqrt(h*sum(collect(e4.^2))) <= accuracytol + @test sqrt(h*sum(collect(e5.^2))) <= accuracytol +end + +@testset "Laplace2D" begin + op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") + Lx = 1.5 + Ly = 3.2 + g = EquidistantGrid((102,131), (0.0, 0.0), (Lx,Ly)) + + h_inv = inverse_spacing(g) + h = spacing(g) + D_xx = SecondDerivative(h_inv[1],op.innerStencil,op.closureStencils,op.parity) + D_yy = SecondDerivative(h_inv[2],op.innerStencil,op.closureStencils,op.parity) + L = Laplace((D_xx,D_yy)) + + f0(x::Float64,y::Float64) = 2. + f1(x::Float64,y::Float64) = x+y + f2(x::Float64,y::Float64) = 1/2*x^2 + 1/2*y^2 + f3(x::Float64,y::Float64) = 1/6*x^3 + 1/6*y^3 + f4(x::Float64,y::Float64) = 1/24*x^4 + 1/24*y^4 + f5(x::Float64,y::Float64) = sin(x) + cos(y) + f5ₓₓ(x::Float64,y::Float64) = -f5(x,y) + + v0 = evalOn(g,f0) + v1 = evalOn(g,f1) + v2 = evalOn(g,f2) + v3 = evalOn(g,f3) + v4 = evalOn(g,f4) + v5 = evalOn(g,f5) + v5ₓₓ = evalOn(g,f5ₓₓ) + + @test L isa TensorOperator{T,2} where T + @test L' isa TensorMapping{T,2,2} where T + + # TODO: Should perhaps set tolerance level for isapporx instead? + # Are these tolerance levels resonable or should tests be constructed + # differently? 
+ equalitytol = 0.5*1e-10 + accuracytol = 0.5*1e-3 + # 4th order interior stencil, 2nd order boundary stencil, + # implies that L*v should be exact for v - monomial up to order 3. + # Exact differentiation is measured point-wise. For other grid functions + # the error is measured in the H-norm. + @test all(abs.(collect(L*v0)) .<= equalitytol) + @test all(abs.(collect(L*v1)) .<= equalitytol) + @test all(collect(L*v2) .≈ v0) # Seems to be more accurate + @test all(abs.((collect(L*v3) - v1)) .<= equalitytol) + e4 = collect(L*v4) - v2 + e5 = collect(L*v5) - v5ₓₓ + @test sqrt(prod(h)*sum(collect(e4.^2))) <= accuracytol + @test sqrt(prod(h)*sum(collect(e5.^2))) <= accuracytol +end + +@testset "DiagonalInnerProduct" begin + op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") + L = 2.3 + g = EquidistantGrid((77,), (0.0,), (L,)) + h = spacing(g) + H = DiagonalInnerProduct(h[1],op.quadratureClosure) + v = ones(Float64, size(g)) + + @test H isa TensorOperator{T,1} where T + @test H' isa TensorMapping{T,1,1} where T + @test sum(collect(H*v)) ≈ L + @test collect(H*v) == collect(H'*v) +end + +@testset "Quadrature" begin + op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") + Lx = 2.3 + Ly = 5.2 + g = EquidistantGrid((77,66), (0.0, 0.0), (Lx,Ly)) + + h = spacing(g) + Hx = DiagonalInnerProduct(h[1],op.quadratureClosure); + Hy = DiagonalInnerProduct(h[2],op.quadratureClosure); + Q = Quadrature((Hx,Hy)) + + v = ones(Float64, size(g)) + + @test Q isa TensorOperator{T,2} where T + @test Q' isa TensorMapping{T,2,2} where T + @test sum(collect(Q*v)) ≈ (Lx*Ly) + @test collect(Q*v) == collect(Q'*v) +end + +@testset "InverseDiagonalInnerProduct" begin + op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") + L = 2.3 + g = EquidistantGrid((77,), (0.0,), (L,)) + h = spacing(g) + H = DiagonalInnerProduct(h[1],op.quadratureClosure) + + h_i = inverse_spacing(g) + Hi = InverseDiagonalInnerProduct(h_i[1],1 ./ op.quadratureClosure) + v = evalOn(g, x->sin(x)) + + @test Hi isa TensorOperator{T,1} where T + @test Hi' isa TensorMapping{T,1,1} where T + @test collect(Hi*H*v) ≈ v + @test collect(Hi*v) == collect(Hi'*v) +end + +@testset "InverseQuadrature" begin + op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") + Lx = 7.3 + Ly = 8.2 + g = EquidistantGrid((77,66), (0.0, 0.0), (Lx,Ly)) + + h = spacing(g) + Hx = DiagonalInnerProduct(h[1], op.quadratureClosure); + Hy = DiagonalInnerProduct(h[2], op.quadratureClosure); + Q = Quadrature((Hx,Hy)) + + hi = inverse_spacing(g) + Hix = InverseDiagonalInnerProduct(hi[1], 1 ./ op.quadratureClosure); + Hiy = InverseDiagonalInnerProduct(hi[2], 1 ./ op.quadratureClosure); + Qinv = InverseQuadrature((Hix,Hiy)) + v = evalOn(g, (x,y)-> x^2 + (y-1)^2 + x*y) + + @test Qinv isa TensorOperator{T,2} where T + @test Qinv' isa TensorMapping{T,2,2} where T + @test collect(Qinv*Q*v) ≈ v + @test collect(Qinv*v) == collect(Qinv'*v) +end +# +# @testset "BoundaryValue" begin +# op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") +# g = EquidistantGrid((4,5), (0.0, 0.0), (1.0,1.0)) +# +# e_w = BoundaryValue(op, g, CartesianBoundary{1,Lower}()) +# e_e = BoundaryValue(op, g, CartesianBoundary{1,Upper}()) +# e_s = BoundaryValue(op, g, CartesianBoundary{2,Lower}()) +# e_n = BoundaryValue(op, g, CartesianBoundary{2,Upper}()) +# +# v = zeros(Float64, 4, 5) +# v[:,5] = [1, 2, 3,4] +# v[:,4] = [1, 2, 3,4] +# v[:,3] = [4, 5, 6, 7] +# v[:,2] = [7, 8, 
9, 10] +# v[:,1] = [10, 11, 12, 13] +# +# @test e_w isa TensorMapping{T,2,1} where T +# @test e_w' isa TensorMapping{T,1,2} where T +# +# @test domain_size(e_w, (3,2)) == (2,) +# @test domain_size(e_e, (3,2)) == (2,) +# @test domain_size(e_s, (3,2)) == (3,) +# @test domain_size(e_n, (3,2)) == (3,) +# +# @test size(e_w'*v) == (5,) +# @test size(e_e'*v) == (5,) +# @test size(e_s'*v) == (4,) +# @test size(e_n'*v) == (4,) +# +# @test collect(e_w'*v) == [10,7,4,1.0,1] +# @test collect(e_e'*v) == [13,10,7,4,4.0] +# @test collect(e_s'*v) == [10,11,12,13.0] +# @test collect(e_n'*v) == [1,2,3,4.0] +# +# g_x = [1,2,3,4.0] +# g_y = [5,4,3,2,1.0] +# +# G_w = zeros(Float64, (4,5)) +# G_w[1,:] = g_y +# +# G_e = zeros(Float64, (4,5)) +# G_e[4,:] = g_y +# +# G_s = zeros(Float64, (4,5)) +# G_s[:,1] = g_x +# +# G_n = zeros(Float64, (4,5)) +# G_n[:,5] = g_x +# +# @test size(e_w*g_y) == (UnknownDim,5) +# @test size(e_e*g_y) == (UnknownDim,5) +# @test size(e_s*g_x) == (4,UnknownDim) +# @test size(e_n*g_x) == (4,UnknownDim) +# +# # These tests should be moved to where they are possible (i.e we know what the grid should be) +# @test_broken collect(e_w*g_y) == G_w +# @test_broken collect(e_e*g_y) == G_e +# @test_broken collect(e_s*g_x) == G_s +# @test_broken collect(e_n*g_x) == G_n +# end +# +# @testset "NormalDerivative" begin +# op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") +# g = EquidistantGrid((5,6), (0.0, 0.0), (4.0,5.0)) +# +# d_w = NormalDerivative(op, g, CartesianBoundary{1,Lower}()) +# d_e = NormalDerivative(op, g, CartesianBoundary{1,Upper}()) +# d_s = NormalDerivative(op, g, CartesianBoundary{2,Lower}()) +# d_n = NormalDerivative(op, g, CartesianBoundary{2,Upper}()) +# +# +# v = evalOn(g, (x,y)-> x^2 + (y-1)^2 + x*y) +# v∂x = evalOn(g, (x,y)-> 2*x + y) +# v∂y = evalOn(g, (x,y)-> 2*(y-1) + x) +# +# @test d_w isa TensorMapping{T,2,1} where T +# @test d_w' isa TensorMapping{T,1,2} where T +# +# @test domain_size(d_w, (3,2)) == (2,) +# @test domain_size(d_e, (3,2)) == (2,) +# @test domain_size(d_s, (3,2)) == (3,) +# @test domain_size(d_n, (3,2)) == (3,) +# +# @test size(d_w'*v) == (6,) +# @test size(d_e'*v) == (6,) +# @test size(d_s'*v) == (5,) +# @test size(d_n'*v) == (5,) +# +# @test collect(d_w'*v) ≈ v∂x[1,:] +# @test collect(d_e'*v) ≈ v∂x[5,:] +# @test collect(d_s'*v) ≈ v∂y[:,1] +# @test collect(d_n'*v) ≈ v∂y[:,6] +# +# +# d_x_l = zeros(Float64, 5) +# d_x_u = zeros(Float64, 5) +# for i ∈ eachindex(d_x_l) +# d_x_l[i] = op.dClosure[i-1] +# d_x_u[i] = -op.dClosure[length(d_x_u)-i] +# end +# +# d_y_l = zeros(Float64, 6) +# d_y_u = zeros(Float64, 6) +# for i ∈ eachindex(d_y_l) +# d_y_l[i] = op.dClosure[i-1] +# d_y_u[i] = -op.dClosure[length(d_y_u)-i] +# end +# +# function prod_matrix(x,y) +# G = zeros(Float64, length(x), length(y)) +# for I ∈ CartesianIndices(G) +# G[I] = x[I[1]]*y[I[2]] +# end +# +# return G +# end +# +# g_x = [1,2,3,4.0,5] +# g_y = [5,4,3,2,1.0,11] +# +# G_w = prod_matrix(d_x_l, g_y) +# G_e = prod_matrix(d_x_u, g_y) +# G_s = prod_matrix(g_x, d_y_l) +# G_n = prod_matrix(g_x, d_y_u) +# +# +# @test size(d_w*g_y) == (UnknownDim,6) +# @test size(d_e*g_y) == (UnknownDim,6) +# @test size(d_s*g_x) == (5,UnknownDim) +# @test size(d_n*g_x) == (5,UnknownDim) +# +# # These tests should be moved to where they are possible (i.e we know what the grid should be) +# @test_broken collect(d_w*g_y) ≈ G_w +# @test_broken collect(d_e*g_y) ≈ G_e +# @test_broken collect(d_s*g_x) ≈ G_s +# @test_broken collect(d_n*g_x) ≈ G_n +# end +# +# @testset "BoundaryQuadrature" begin 
+# op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") +# g = EquidistantGrid((10,11), (0.0, 0.0), (1.0,1.0)) +# +# H_w = BoundaryQuadrature(op, g, CartesianBoundary{1,Lower}()) +# H_e = BoundaryQuadrature(op, g, CartesianBoundary{1,Upper}()) +# H_s = BoundaryQuadrature(op, g, CartesianBoundary{2,Lower}()) +# H_n = BoundaryQuadrature(op, g, CartesianBoundary{2,Upper}()) +# +# v = evalOn(g, (x,y)-> x^2 + (y-1)^2 + x*y) +# +# function get_quadrature(N) +# qc = op.quadratureClosure +# q = (qc..., ones(N-2*closuresize(op))..., reverse(qc)...) +# @assert length(q) == N +# return q +# end +# +# v_w = v[1,:] +# v_e = v[10,:] +# v_s = v[:,1] +# v_n = v[:,11] +# +# q_x = spacing(g)[1].*get_quadrature(10) +# q_y = spacing(g)[2].*get_quadrature(11) +# +# @test H_w isa TensorOperator{T,1} where T +# +# @test domain_size(H_w, (3,)) == (3,) +# @test domain_size(H_n, (3,)) == (3,) +# +# @test range_size(H_w, (3,)) == (3,) +# @test range_size(H_n, (3,)) == (3,) +# +# @test size(H_w*v_w) == (11,) +# @test size(H_e*v_e) == (11,) +# @test size(H_s*v_s) == (10,) +# @test size(H_n*v_n) == (10,) +# +# @test collect(H_w*v_w) ≈ q_y.*v_w +# @test collect(H_e*v_e) ≈ q_y.*v_e +# @test collect(H_s*v_s) ≈ q_x.*v_s +# @test collect(H_n*v_n) ≈ q_x.*v_n +# +# @test collect(H_w'*v_w) == collect(H_w'*v_w) +# @test collect(H_e'*v_e) == collect(H_e'*v_e) +# @test collect(H_s'*v_s) == collect(H_s'*v_s) +# @test collect(H_n'*v_n) == collect(H_n'*v_n) +# end + +end