changeset 357:e22b061f5299 refactor/remove_dynamic_size_tensormapping
Merge in default
author   | Jonatan Werpers <jonatan@werpers.com>
date     | Mon, 28 Sep 2020 21:54:33 +0200
parents  | 0844069ab5ff (diff) ffddaf053085 (current diff)
children | ba46a952a450
diffstat | 14 files changed, 185 insertions(+), 118 deletions(-)
--- a/src/Grids/EquidistantGrid.jl	Sun Sep 27 15:26:05 2020 +0200
+++ b/src/Grids/EquidistantGrid.jl	Mon Sep 28 21:54:33 2020 +0200
@@ -66,6 +66,20 @@
     return broadcast(I -> grid.limit_lower .+ (I.-1).*h, indices)
 end
 
+"""
+    subgrid(::EquidistantGrid, dim)
+
+Pick out the given dimensions from the grid and return a grid for them.
+"""
+function subgrid(grid::EquidistantGrid, dim)
+    size = grid.size[dim]
+    limit_lower = grid.limit_lower[dim]
+    limit_upper = grid.limit_upper[dim]
+
+    return EquidistantGrid(size, limit_lower, limit_upper)
+end
+export subgrid
+
 function pointsalongdim(grid::EquidistantGrid, dim::Integer)
     @assert dim<=dimension(grid)
     @assert dim>0
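For orientation, a minimal usage sketch of the new `subgrid`; the grid values mirror the cases exercised in test/testGrids.jl below:

    using Sbplib.Grids
    g = EquidistantGrid((2,5,3), (0.0,0.0,0.0), (2.0,1.0,3.0))
    subgrid(g, 2)      # == EquidistantGrid(5, 0.0, 1.0)
    subgrid(g, 1:2)    # == EquidistantGrid((2,5), (0.0,0.0), (2.0,1.0))
    subgrid(g, [2,1])  # dimensions may be picked in any order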
--- a/src/LazyTensors/lazy_tensor_operations.jl	Sun Sep 27 15:26:05 2020 +0200
+++ b/src/LazyTensors/lazy_tensor_operations.jl	Mon Sep 28 21:54:33 2020 +0200
@@ -11,12 +11,13 @@
     t::TensorMapping{T,R,D}
     o::AbstractArray{T,D}
 end
+# TODO: Do boundschecking on creation!
 export LazyTensorMappingApplication
 
 Base.:*(tm::TensorMapping{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = LazyTensorMappingApplication(tm,o)
 Base.getindex(ta::LazyTensorMappingApplication{T,R,D}, I::Vararg{Index,R}) where {T,R,D} = apply(ta.t, ta.o, I...)
 Base.getindex(ta::LazyTensorMappingApplication{T,R,D}, I::Vararg{Int,R}) where {T,R,D} = apply(ta.t, ta.o, Index{Unknown}.(I)...)
-Base.size(ta::LazyTensorMappingApplication{T,R,D}) where {T,R,D} = range_size(ta.t,size(ta.o))
+Base.size(ta::LazyTensorMappingApplication{T,R,D}) where {T,R,D} = range_size(ta.t)
 # TODO: What else is needed to implement the AbstractArray interface?
 
 # # We need the associativity to be a→b→c = a→(b→c), which is the case for '→'
@@ -41,16 +42,15 @@
 export LazyTensorMappingTranspose
 
 # # TBD: Should this be implemented on a type by type basis or through a trait to provide earlier errors?
+# Jonatan 2020-09-25: Is the problem that you can take the transpose of any TensorMapping even if it doesn't implement `apply_transpose`?
 Base.adjoint(tm::TensorMapping) = LazyTensorMappingTranspose(tm)
 Base.adjoint(tmt::LazyTensorMappingTranspose) = tmt.tm
 
 apply(tmt::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{T,R}, I::Vararg{Index,D}) where {T,R,D} = apply_transpose(tmt.tm, v, I...)
 apply_transpose(tmt::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{T,D}, I::Vararg{Index,R}) where {T,R,D} = apply(tmt.tm, v, I...)
 
-range_size(tmt::LazyTensorMappingTranspose{T,R,D}, d_size::NTuple{R,Integer}) where {T,R,D} = domain_size(tmt.tm, d_size)
-domain_size(tmt::LazyTensorMappingTranspose{T,R,D}, r_size::NTuple{D,Integer}) where {T,R,D} = range_size(tmt.tm, r_size)
-
-
+range_size(tmt::LazyTensorMappingTranspose{T,R,D}) where {T,R,D} = domain_size(tmt.tm)
+domain_size(tmt::LazyTensorMappingTranspose{T,R,D}) where {T,R,D} = range_size(tmt.tm)
 
 
 struct LazyTensorMappingBinaryOperation{Op,T,R,D,T1<:TensorMapping{T,R,D},T2<:TensorMapping{T,R,D}} <: TensorMapping{T,D,R}
@@ -61,12 +61,13 @@
         return new{Op,T,R,D,T1,T2}(tm1,tm2)
     end
 end
+# TODO: Boundschecking in constructor.
 
 apply(tmBinOp::LazyTensorMappingBinaryOperation{:+,T,R,D}, v::AbstractArray{T,D}, I::Vararg{Index,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) + apply(tmBinOp.tm2, v, I...)
 apply(tmBinOp::LazyTensorMappingBinaryOperation{:-,T,R,D}, v::AbstractArray{T,D}, I::Vararg{Index,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) - apply(tmBinOp.tm2, v, I...)
 
-range_size(tmBinOp::LazyTensorMappingBinaryOperation{Op,T,R,D}, domain_size::NTuple{D,Integer}) where {Op,T,R,D} = range_size(tmBinOp.tm1, domain_size)
-domain_size(tmBinOp::LazyTensorMappingBinaryOperation{Op,T,R,D}, range_size::NTuple{R,Integer}) where {Op,T,R,D} = domain_size(tmBinOp.tm2, range_size)
+range_size(tmBinOp::LazyTensorMappingBinaryOperation{Op,T,R,D}) where {Op,T,R,D} = range_size(tmBinOp.tm1)
+domain_size(tmBinOp::LazyTensorMappingBinaryOperation{Op,T,R,D}) where {Op,T,R,D} = domain_size(tmBinOp.tm1)
 
 Base.:+(tm1::TensorMapping{T,R,D}, tm2::TensorMapping{T,R,D}) where {T,R,D} = LazyTensorMappingBinaryOperation{:+,T,R,D}(tm1,tm2)
 Base.:-(tm1::TensorMapping{T,R,D}, tm2::TensorMapping{T,R,D}) where {T,R,D} = LazyTensorMappingBinaryOperation{:-,T,R,D}(tm1,tm2)
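With the parameter-free size API, the size of a lazy application and of a transpose follows from the mapping alone. A small sketch, reusing the `SizeDoublingMapping` defined in test/testLazyTensors.jl below:

    m = SizeDoublingMapping{Int,1,1}((3,))
    size(m * [0,1,2])  # (6,); Base.size now delegates to range_size(m), not size of the operand
    range_size(m')     # == domain_size(m) == (3,)
    domain_size(m')    # == range_size(m) == (6,)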
--- a/src/LazyTensors/tensor_mapping.jl	Sun Sep 27 15:26:05 2020 +0200
+++ b/src/LazyTensors/tensor_mapping.jl	Mon Sep 28 21:54:33 2020 +0200
@@ -6,11 +6,11 @@
     apply(t::TensorMapping{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {R,D,T}
 
-The size of range tensor should be dependent on the size of the domain tensor
-and the type should implement the methods
+The size of the range and domain that the operator works with should be returned by
+the functions
 
-    range_size(::TensorMapping{T,R,D}, domain_size::NTuple{D,Integer}) where {T,R,D}
-    domain_size(::TensorMapping{T,R,D}, range_size::NTuple{R,Integer}) where {T,R,D}
+    range_size(::TensorMapping)
+    domain_size(::TensorMapping)
 
 to allow querying for one or the other.
@@ -49,35 +49,19 @@
 export range_dim, domain_dim
 
 """
-    range_size(M::TensorMapping, domain_size)
+    range_size(M::TensorMapping)
 
-Return the resulting range size for the mapping applied to a given domain_size
+Return the range size for the mapping.
 """
 function range_size end
 
 """
-    domain_size(M::TensorMapping, range_size)
+    domain_size(M::TensorMapping)
 
-Return the resulting domain size for the mapping applied to a given range_size
+Return the domain size for the mapping.
 """
 function domain_size end
 
-"""
-    Dummy type for representing dimensions of tensormappings when domain_size is unknown
-"""
-struct UnknownDim end
-export range_size, domain_size, TensorMappingDim, UnknownDim
+export range_size, domain_size
 
 # TODO: Think about boundschecking!
-
-
-"""
-    TensorOperator{T,D}
-
-A `TensorMapping{T,D,D}` where the range and domain tensor have the same number of
-dimensions and the same size.
-"""
-abstract type TensorOperator{T,D} <: TensorMapping{T,D,D} end
-export TensorOperator
-domain_size(::TensorOperator{T,D}, range_size::NTuple{D,Integer}) where {T,D} = range_size
-range_size(::TensorOperator{T,D}, domain_size::NTuple{D,Integer}) where {T,D} = domain_size
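Under the new interface a mapping stores (or derives) its own sizes instead of computing one from the other. A minimal implementation, matching the `ScalingOperator` used in test/testLazyTensors.jl below:

    struct ScalingOperator{T,D} <: TensorMapping{T,D,D}
        λ::T
        size::NTuple{D,Int}
    end
    LazyTensors.range_size(m::ScalingOperator) = m.size
    LazyTensors.domain_size(m::ScalingOperator) = m.size
    LazyTensors.apply(m::ScalingOperator{T,D}, v, I::Vararg{Index,D}) where {T,D} = m.λ*v[I]

    m = ScalingOperator{Int,1}(2, (3,))
    m * [1,2,3]  # lazily scales; equals [2,4,6]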
--- a/src/SbpOperators/SbpOperators.jl	Sun Sep 27 15:26:05 2020 +0200
+++ b/src/SbpOperators/SbpOperators.jl	Mon Sep 28 21:54:33 2020 +0200
@@ -2,6 +2,7 @@
 
 using Sbplib.RegionIndices
 using Sbplib.LazyTensors
+using Sbplib.Grids
 
 include("stencil.jl")
 include("constantstenciloperator.jl")
@@ -13,4 +14,21 @@
 include("quadrature/quadrature.jl")
 include("quadrature/inverse_diagonal_inner_product.jl")
 include("quadrature/inverse_quadrature.jl")
+
+abstract type SbpOperator{T,R,D} <: TensorMapping{T,R,D} end
+
+"""
+    grid(::ColocationOperator)
+
+Return the grid which the SBP operator lives on.
+"""
+function grid end
+
+abstract type ColocationOperator{T,R,D} <: SbpOperator{T,R,D} end
+
+LazyTensors.range_size(co::ColocationOperator) = size(grid(co))
+LazyTensors.domain_size(co::ColocationOperator) = size(grid(co))
+
+# All of the above might be overkill. It might be easier and clearer to just let every type define its range and domain size however it wants. (In practice, pretty much always by having the grid as a field of the struct.)
+
 end # module
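To illustrate the intended pattern, a hypothetical concrete subtype; the name `IdentityOperator` and its definition are illustrative only, not part of this changeset:

    struct IdentityOperator{T} <: ColocationOperator{T,1,1}
        g::EquidistantGrid{1}
    end
    grid(op::IdentityOperator) = op.g  # range_size/domain_size then fall back to size(grid(op))
    LazyTensors.apply(op::IdentityOperator{T}, v, I::Index) where T = v[Int(I)]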
--- a/src/SbpOperators/laplace/laplace.jl	Sun Sep 27 15:26:05 2020 +0200
+++ b/src/SbpOperators/laplace/laplace.jl	Mon Sep 28 21:54:33 2020 +0200
@@ -1,18 +1,27 @@
 export Laplace
 
 """
-    Laplace{Dim,T<:Real,N,M,K} <: TensorOperator{T,Dim}
+    Laplace{Dim,T<:Real,N,M,K} <: TensorMapping{T,Dim,Dim}
 
 Implements the Laplace operator `L` in Dim dimensions as a tensor operator
 The multi-dimensional tensor operator consists of a tuple of 1D SecondDerivative
 tensor operators.
 """
 #export quadrature, inverse_quadrature, boundary_quadrature, boundary_value, normal_derivative
-struct Laplace{Dim,T,N,M,K} <: TensorOperator{T,Dim}
+struct Laplace{Dim,T,N,M,K} <: TensorMapping{T,Dim,Dim}
     D2::NTuple{Dim,SecondDerivative{T,N,M,K}}
-    #TODO: Write a good constructor
 end
 
-LazyTensors.domain_size(L::Laplace{Dim}, range_size::NTuple{Dim,Integer}) where {Dim} = range_size
+function Laplace(g::EquidistantGrid{Dim}, innerStencil, closureStencils) where Dim
+    D2 = ()
+    for i ∈ 1:Dim
+        D2 = (D2..., SecondDerivative(subgrid(g,i), innerStencil, closureStencils))
+    end
+
+    return Laplace(D2)
+end
+
+LazyTensors.range_size(L::Laplace) = getindex.(range_size.(L.D2),1)
+LazyTensors.domain_size(L::Laplace) = getindex.(domain_size.(L.D2),1)
 
 function LazyTensors.apply(L::Laplace{Dim,T}, v::AbstractArray{T,Dim}, I::Vararg{Index,Dim}) where {T,Dim}
     error("not implemented")
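Construction now happens directly from a grid, with one 1D SecondDerivative built per dimension via `subgrid`. A sketch mirroring test/testSbpOperators.jl below:

    op = readOperator(sbp_operators_path()*"d2_4th.txt", sbp_operators_path()*"h_4th.txt")
    g = EquidistantGrid((102,131), (0.0, 0.0), (1.5, 3.2))
    L = Laplace(g, op.innerStencil, op.closureStencils)
    L isa TensorMapping{T,2,2} where T  # true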
--- a/src/SbpOperators/laplace/secondderivative.jl	Sun Sep 27 15:26:05 2020 +0200
+++ b/src/SbpOperators/laplace/secondderivative.jl	Mon Sep 28 21:54:33 2020 +0200
@@ -4,16 +4,22 @@
 in 1D dimension
 """
-struct SecondDerivative{T,N,M,K} <: TensorOperator{T,1}
+struct SecondDerivative{T,N,M,K} <: TensorMapping{T,1,1}
     h_inv::T # The grid spacing could be included in the stencil already. Preferable?
     innerStencil::Stencil{T,N}
     closureStencils::NTuple{M,Stencil{T,K}}
     parity::Parity
-    #TODO: Write a nice constructor
+    size::NTuple{1,Int}
 end
 export SecondDerivative
 
-LazyTensors.domain_size(D2::SecondDerivative, range_size::NTuple{1,Integer}) = range_size
+function SecondDerivative(grid::EquidistantGrid{1}, innerStencil, closureStencils)
+    h_inv = grid.inverse_spacing[1]
+    return SecondDerivative(h_inv, innerStencil, closureStencils, even, size(grid))
+end
+
+LazyTensors.range_size(D2::SecondDerivative) = D2.size
+LazyTensors.domain_size(D2::SecondDerivative) = D2.size
 
 #TODO: The 1D tensor mappings should not have to dispatch on 1D tuples if we write LazyTensor.apply for vararg right?!?!
 # Currently have to index the Tuple{Index} in each method in order to call the stencil methods which is ugly.
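The 1D operator is likewise built from a 1D grid; note the constructor currently hardcodes `even` parity. A sketch with `op` read via readOperator as above (the interval length is chosen for illustration):

    g = EquidistantGrid((101,), (0.0,), (1.0,))
    Dₓₓ = SecondDerivative(g, op.innerStencil, op.closureStencils)
    Dₓₓ isa TensorMapping{T,1,1} where T  # true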
--- a/src/SbpOperators/quadrature/diagonal_inner_product.jl	Sun Sep 27 15:26:05 2020 +0200
+++ b/src/SbpOperators/quadrature/diagonal_inner_product.jl	Mon Sep 28 21:54:33 2020 +0200
@@ -4,13 +4,18 @@
 
 Implements the diagonal norm operator `H` of Dim dimension as a TensorMapping
 """
-struct DiagonalInnerProduct{T,M} <: TensorOperator{T,1}
-    h::T # The grid spacing could be included in the stencil already. Preferable?
+struct DiagonalInnerProduct{T,M} <: TensorMapping{T,1,1}
+    h::T
     closure::NTuple{M,T}
-    #TODO: Write a nice constructor
+    size::Tuple{Int}
 end
 
-LazyTensors.domain_size(H::DiagonalInnerProduct, range_size::NTuple{1,Integer}) = range_size
+function DiagonalInnerProduct(g::EquidistantGrid{1}, closure)
+    return DiagonalInnerProduct(spacing(g)[1], closure, size(g))
+end
+
+LazyTensors.range_size(H::DiagonalInnerProduct) = H.size
+LazyTensors.domain_size(H::DiagonalInnerProduct) = H.size
 
 function LazyTensors.apply(H::DiagonalInnerProduct{T}, v::AbstractVector{T}, I::Index) where T
     return @inbounds apply(H, v, I)
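Applied to a vector of ones, the diagonal norm integrates the constant function 1 and should therefore sum to the interval length, which is exactly what the tests below check (with `op` as above):

    g = EquidistantGrid((77,), (0.0,), (2.3,))
    H = DiagonalInnerProduct(g, op.quadratureClosure)
    sum(collect(H * ones(Float64, size(g)))) ≈ 2.3  # true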
--- a/src/SbpOperators/quadrature/inverse_diagonal_inner_product.jl	Sun Sep 27 15:26:05 2020 +0200
+++ b/src/SbpOperators/quadrature/inverse_diagonal_inner_product.jl	Mon Sep 28 21:54:33 2020 +0200
@@ -1,22 +1,30 @@
 export InverseDiagonalInnerProduct, closuresize
 """
-    InverseDiagonalInnerProduct{Dim,T<:Real,M} <: TensorOperator{T,1}
+    InverseDiagonalInnerProduct{Dim,T<:Real,M} <: TensorMapping{T,1,1}
 
 Implements the inverse diagonal inner product operator `Hi` as a 1D TensorOperator
 """
-struct InverseDiagonalInnerProduct{T<:Real,M} <: TensorOperator{T,1}
-    h_inv::T # The reciprocl grid spacing could be included in the stencil already. Preferable?
-    closure::NTuple{M,T}
-    #TODO: Write a nice constructor
+struct InverseDiagonalInnerProduct{T<:Real,M} <: TensorMapping{T,1,1}
+    h_inv::T
+    inverseQuadratureClosure::NTuple{M,T}
+    size::Tuple{Int}
 end
 
+function InverseDiagonalInnerProduct(g::EquidistantGrid{1}, quadratureClosure)
+    return InverseDiagonalInnerProduct(inverse_spacing(g)[1], 1 ./ quadratureClosure, size(g))
+end
+
+LazyTensors.range_size(Hi::InverseDiagonalInnerProduct) = Hi.size
+LazyTensors.domain_size(Hi::InverseDiagonalInnerProduct) = Hi.size
+
+
 function LazyTensors.apply(Hi::InverseDiagonalInnerProduct{T}, v::AbstractVector{T}, I::Index{Lower}) where T
-    return @inbounds Hi.h_inv*Hi.closure[Int(I)]*v[Int(I)]
+    return @inbounds Hi.h_inv*Hi.inverseQuadratureClosure[Int(I)]*v[Int(I)]
 end
 
 function LazyTensors.apply(Hi::InverseDiagonalInnerProduct{T}, v::AbstractVector{T}, I::Index{Upper}) where T
     N = length(v);
-    return @inbounds Hi.h_inv*Hi.closure[N-Int(I)+1]v[Int(I)]
+    return @inbounds Hi.h_inv*Hi.inverseQuadratureClosure[N-Int(I)+1]v[Int(I)]
 end
 
 function LazyTensors.apply(Hi::InverseDiagonalInnerProduct{T}, v::AbstractVector{T}, I::Index{Interior}) where T
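Note that the constructor now takes the plain quadrature closure and inverts it itself (`1 ./ quadratureClosure`); callers no longer pre-invert. Mirroring the test below:

    g = EquidistantGrid((77,), (0.0,), (2.3,))
    H  = DiagonalInnerProduct(g, op.quadratureClosure)
    Hi = InverseDiagonalInnerProduct(g, op.quadratureClosure)
    v = evalOn(g, x->sin(x))
    collect(Hi*H*v) ≈ v  # Hi is the inverse of H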
--- a/src/SbpOperators/quadrature/inverse_quadrature.jl	Sun Sep 27 15:26:05 2020 +0200
+++ b/src/SbpOperators/quadrature/inverse_quadrature.jl	Mon Sep 28 21:54:33 2020 +0200
@@ -2,14 +2,27 @@
 """
     InverseQuadrature{Dim,T<:Real,M,K} <: TensorMapping{T,Dim,Dim}
 
-Implements the inverse quadrature operator `Qi` of Dim dimension as a TensorOperator
+Implements the inverse quadrature operator `Qi` of Dim dimension as a TensorMapping
 The multi-dimensional tensor operator consists of a tuple of 1D InverseDiagonalInnerProduct
 tensor operators.
 """
-struct InverseQuadrature{Dim,T<:Real,M} <: TensorOperator{T,Dim}
+struct InverseQuadrature{Dim,T<:Real,M} <: TensorMapping{T,Dim,Dim}
     Hi::NTuple{Dim,InverseDiagonalInnerProduct{T,M}}
 end
 
+function InverseQuadrature(g::EquidistantGrid{Dim}, quadratureClosure) where Dim
+    Hi = ()
+    for i ∈ 1:Dim
+        Hi = (Hi..., InverseDiagonalInnerProduct(subgrid(g,i), quadratureClosure))
+    end
+
+    return InverseQuadrature(Hi)
+end
+
+LazyTensors.range_size(Hi::InverseQuadrature) = getindex.(range_size.(Hi.Hi),1)
+LazyTensors.domain_size(Hi::InverseQuadrature) = getindex.(domain_size.(Hi.Hi),1)
+
 LazyTensors.domain_size(Qi::InverseQuadrature{Dim}, range_size::NTuple{Dim,Integer}) where Dim = range_size
 
 function LazyTensors.apply(Qi::InverseQuadrature{Dim,T}, v::AbstractArray{T,Dim}, I::Vararg{Index,Dim}) where {T,Dim}
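As with the other multi-dimensional operators, the constructor assembles one 1D factor per dimension via `subgrid`. A sketch mirroring the 2D test below (Lx, Ly as in the tests):

    g = EquidistantGrid((77,66), (0.0, 0.0), (Lx,Ly))
    Q = Quadrature(g, op.quadratureClosure)
    Qinv = InverseQuadrature(g, op.quadratureClosure)
    v = evalOn(g, (x,y)-> x^2 + (y-1)^2 + x*y)
    collect(Qinv*(Q*v)) ≈ v  # Qinv undoes Q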
--- a/src/SbpOperators/quadrature/quadrature.jl	Sun Sep 27 15:26:05 2020 +0200
+++ b/src/SbpOperators/quadrature/quadrature.jl	Mon Sep 28 21:54:33 2020 +0200
@@ -6,11 +6,21 @@
 The multi-dimensional tensor operator consists of a tuple of 1D DiagonalInnerProduct H
 tensor operators.
 """
-struct Quadrature{Dim,T<:Real,M} <: TensorOperator{T,Dim}
+struct Quadrature{Dim,T<:Real,M} <: TensorMapping{T,Dim,Dim}
     H::NTuple{Dim,DiagonalInnerProduct{T,M}}
 end
 
-LazyTensors.domain_size(Q::Quadrature{Dim}, range_size::NTuple{Dim,Integer}) where {Dim} = range_size
+function Quadrature(g::EquidistantGrid{Dim}, quadratureClosure) where Dim
+    H = ()
+    for i ∈ 1:Dim
+        H = (H..., DiagonalInnerProduct(subgrid(g,i), quadratureClosure))
+    end
+
+    return Quadrature(H)
+end
+
+LazyTensors.range_size(H::Quadrature) = getindex.(range_size.(H.H),1)
+LazyTensors.domain_size(H::Quadrature) = getindex.(domain_size.(H.H),1)
 
 function LazyTensors.apply(Q::Quadrature{Dim,T}, v::AbstractArray{T,Dim}, I::Vararg{Index,Dim}) where {T,Dim}
     error("not implemented")
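Applied to a grid function of ones, the 2D quadrature should integrate to the area of the domain, as the tests below check:

    g = EquidistantGrid((77,66), (0.0, 0.0), (Lx,Ly))
    Q = Quadrature(g, op.quadratureClosure)
    sum(collect(Q * ones(Float64, size(g)))) ≈ Lx*Ly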
--- a/src/Sbplib.jl	Sun Sep 27 15:26:05 2020 +0200
+++ b/src/Sbplib.jl	Mon Sep 28 21:54:33 2020 +0200
@@ -4,7 +4,7 @@
 include("LazyTensors/LazyTensors.jl")
 include("Grids/Grids.jl")
 include("SbpOperators/SbpOperators.jl")
-include("DiffOps/DiffOps.jl")
+# include("DiffOps/DiffOps.jl")
 
 export RegionIndices
 export LazyTensors
--- a/test/testGrids.jl	Sun Sep 27 15:26:05 2020 +0200
+++ b/test/testGrids.jl	Mon Sep 28 21:54:33 2020 +0200
@@ -4,9 +4,23 @@
 @testset "Grids" begin
 
 @testset "EquidistantGrid" begin
-    @test EquidistantGrid(4,0,1) isa EquidistantGrid
-    @test dimension(EquidistantGrid(4,0,1)) == 1
-    @test EquidistantGrid(4,0,1) == EquidistantGrid((4,),(0,),(1,))
+    @test EquidistantGrid(4,0.0,1.0) isa EquidistantGrid
+    @test EquidistantGrid(4,0.0,8.0) isa EquidistantGrid
+    @test dimension(EquidistantGrid(4,0.0,1.0)) == 1
+    @test EquidistantGrid(4,0.0,1.0) == EquidistantGrid((4,),(0.0,),(1.0,))
+
+    g = EquidistantGrid((5,3), (0.0,0.0), (2.0,1.0))
+    @test subgrid(g, 1) == EquidistantGrid(5,0.0,2.0)
+    @test subgrid(g, 2) == EquidistantGrid(3,0.0,1.0)
+
+    g = EquidistantGrid((2,5,3), (0.0,0.0,0.0), (2.0,1.0,3.0))
+    @test subgrid(g, 1) == EquidistantGrid(2,0.0,2.0)
+    @test subgrid(g, 2) == EquidistantGrid(5,0.0,1.0)
+    @test subgrid(g, 3) == EquidistantGrid(3,0.0,3.0)
+    @test subgrid(g, 1:2) == EquidistantGrid((2,5),(0.0,0.0),(2.0,1.0))
+    @test subgrid(g, 2:3) == EquidistantGrid((5,3),(0.0,0.0),(1.0,3.0))
+    @test subgrid(g, [1,3]) == EquidistantGrid((2,3),(0.0,0.0),(2.0,3.0))
+    @test subgrid(g, [2,1]) == EquidistantGrid((5,2),(0.0,0.0),(1.0,2.0))
 end
 
 end
--- a/test/testLazyTensors.jl	Sun Sep 27 15:26:05 2020 +0200
+++ b/test/testLazyTensors.jl	Mon Sep 28 21:54:33 2020 +0200
@@ -12,20 +12,14 @@
     @test apply(DummyMapping{Int,2,3}(), zeros(Int, (0,0,0)),(Index{Unknown}(0),Index{Unknown}(0))) == :apply
 end
 
-@testset "Generic Operator methods" begin
-    struct DummyOperator{T,D} <: TensorOperator{T,D} end
-    @test range_size(DummyOperator{Int,2}(), (3,5)) == (3,5)
-    @test domain_size(DummyOperator{Float64, 3}(), (3,3,1)) == (3,3,1)
-end
-
 @testset "Mapping transpose" begin
     struct DummyMapping{T,R,D} <: TensorMapping{T,R,D} end
 
     LazyTensors.apply(m::DummyMapping{T,R,D}, v, I::Vararg{Index{<:Region},R}) where {T,R,D} = :apply
     LazyTensors.apply_transpose(m::DummyMapping{T,R,D}, v, I::Vararg{Index{<:Region},D}) where {T,R,D} = :apply_transpose
 
-    LazyTensors.range_size(m::DummyMapping{T,R,D}, domain_size::NTuple{D,Integer}) where {T,R,D} = :range_size
-    LazyTensors.domain_size(m::DummyMapping{T,R,D}, range_size::NTuple{R,Integer}) where {T,R,D} = :domain_size
+    LazyTensors.range_size(m::DummyMapping{T,R,D}) where {T,R,D} = :range_size
+    LazyTensors.domain_size(m::DummyMapping{T,R,D}) where {T,R,D} = :domain_size
 
     m = DummyMapping{Float64,2,3}()
     I = Index{Unknown}(0)
@@ -35,19 +29,21 @@
     @test apply(m'',zeros(Float64,(0,0,0)), I, I) == :apply
     @test apply_transpose(m', zeros(Float64,(0,0,0)), I, I) == :apply
 
-    @test range_size(m', (0,0)) == :domain_size
-    @test domain_size(m', (0,0,0)) == :range_size
+    @test range_size(m') == :domain_size
+    @test domain_size(m') == :range_size
 end
 
 @testset "TensorApplication" begin
-    struct DummyMapping{T,R,D} <: TensorMapping{T,R,D} end
+    struct SizeDoublingMapping{T,R,D} <: TensorMapping{T,R,D}
+        domain_size::NTuple{D,Int}
+    end
 
-    LazyTensors.apply(m::DummyMapping{T,R,D}, v, i::Vararg{Index{<:Region},R}) where {T,R,D} = (:apply,v,i)
-    LazyTensors.range_size(m::DummyMapping{T,R,D}, domain_size::NTuple{D,Integer}) where {T,R,D} = 2 .* domain_size
-    LazyTensors.domain_size(m::DummyMapping{T,R,D}, range_size::NTuple{R,Integer}) where {T,R,D} = range_size.÷2
+    LazyTensors.apply(m::SizeDoublingMapping{T,R,D}, v, i::Vararg{Index{<:Region},R}) where {T,R,D} = (:apply,v,i)
+    LazyTensors.range_size(m::SizeDoublingMapping) = 2 .* m.domain_size
+    LazyTensors.domain_size(m::SizeDoublingMapping) = m.domain_size
 
-    m = DummyMapping{Int, 1, 1}()
+    m = SizeDoublingMapping{Int, 1, 1}((3,))
     v = [0,1,2]
     @test m*v isa AbstractVector{Int}
     @test size(m*v) == 2 .*size(v)
@@ -63,28 +59,31 @@
     @test_broken BoundsError == (m*m*v)[0]
    @test_broken BoundsError == (m*m*v)[7]
 
-    m = DummyMapping{Int, 2, 1}()
+    m = SizeDoublingMapping{Int, 2, 1}((3,))
     @test_throws MethodError m*ones(Int,2,2)
     @test_throws MethodError m*m*v
 
-    m = DummyMapping{Float64, 2, 2}()
+    m = SizeDoublingMapping{Float64, 2, 2}((3,3))
     v = ones(3,3)
     I = (Index{Lower}(1),Index{Interior}(2));
     @test size(m*v) == 2 .*size(v)
     @test (m*v)[I] == (:apply,v,I)
 
-    struct ScalingOperator{T,D} <: TensorOperator{T,D}
+    struct ScalingOperator{T,D} <: TensorMapping{T,D,D}
         λ::T
+        size::NTuple{D,Int}
     end
 
     LazyTensors.apply(m::ScalingOperator{T,D}, v, I::Vararg{Index,D}) where {T,D} = m.λ*v[I]
+    LazyTensors.range_size(m::ScalingOperator) = m.size
+    LazyTensors.domain_size(m::ScalingOperator) = m.size
 
-    m = ScalingOperator{Int,1}(2)
+    m = ScalingOperator{Int,1}(2,(3,))
     v = [1,2,3]
     @test m*v isa AbstractVector
     @test m*v == [2,4,6]
 
-    m = ScalingOperator{Int,2}(2)
+    m = ScalingOperator{Int,2}(2,(2,2))
     v = [[1 2];[3 4]]
     @test m*v == [[2 4];[6 8]]
     I = (Index{Upper}(2),Index{Lower}(1))
@@ -94,14 +93,16 @@
"TensorMapping binary operations" begin struct ScalarMapping{T,R,D} <: TensorMapping{T,R,D} λ::T + range_size::NTuple{R,Int} + domain_size::NTuple{D,Int} end LazyTensors.apply(m::ScalarMapping{T,R,D}, v, I::Vararg{Index{<:Region}}) where {T,R,D} = m.λ*v[I...] - LazyTensors.range_size(m::ScalarMapping, domain_size) = domain_size - LazyTensors.domain_size(m::ScalarMapping, range_sizes) = range_sizes + LazyTensors.range_size(m::ScalarMapping) = m.domain_size + LazyTensors.domain_size(m::ScalarMapping) = m.range_size - A = ScalarMapping{Float64,1,1}(2.0) - B = ScalarMapping{Float64,1,1}(3.0) + A = ScalarMapping{Float64,1,1}(2.0, (3,), (3,)) + B = ScalarMapping{Float64,1,1}(3.0, (3,), (3,)) v = [1.1,1.2,1.3] for i ∈ eachindex(v) @@ -112,8 +113,8 @@ @test ((A-B)*v)[i] == 2*v[i] - 3*v[i] end - @test range_size(A+B, (3,)) == range_size(A, (3,)) == range_size(B,(3,)) - @test domain_size(A+B, (3,)) == domain_size(A, (3,)) == domain_size(B,(3,)) + @test range_size(A+B) == range_size(A) == range_size(B) + @test domain_size(A+B) == domain_size(A) == domain_size(B) end @testset "LazyArray" begin @@ -192,4 +193,4 @@ @test_throws DimensionMismatch v1 + v2 end -end \ No newline at end of file +end
--- a/test/testSbpOperators.jl	Sun Sep 27 15:26:05 2020 +0200
+++ b/test/testSbpOperators.jl	Mon Sep 28 21:54:33 2020 +0200
@@ -33,7 +33,7 @@
     g = EquidistantGrid((101,), (0.0,), (L,))
     h_inv = inverse_spacing(g)
     h = 1/h_inv[1];
-    Dₓₓ = SecondDerivative(h_inv[1],op.innerStencil,op.closureStencils,op.parity)
+    Dₓₓ = SecondDerivative(g,op.innerStencil,op.closureStencils)
 
     f0(x::Float64) = 1.
     f1(x::Float64) = x
@@ -50,7 +50,7 @@
     v4 = evalOn(g,f4)
     v5 = evalOn(g,f5)
 
-    @test Dₓₓ isa TensorOperator{T,1} where T
+    @test Dₓₓ isa TensorMapping{T,1,1} where T
     @test Dₓₓ' isa TensorMapping{T,1,1} where T
 
     # TODO: Should perhaps set tolerance level for isapporx instead?
@@ -77,12 +77,8 @@
     Lx = 1.5
     Ly = 3.2
     g = EquidistantGrid((102,131), (0.0, 0.0), (Lx,Ly))
+    L = Laplace(g, op.innerStencil, op.closureStencils)
 
-    h_inv = inverse_spacing(g)
-    h = spacing(g)
-    D_xx = SecondDerivative(h_inv[1],op.innerStencil,op.closureStencils,op.parity)
-    D_yy = SecondDerivative(h_inv[2],op.innerStencil,op.closureStencils,op.parity)
-    L = Laplace((D_xx,D_yy))
 
     f0(x::Float64,y::Float64) = 2.
     f1(x::Float64,y::Float64) = x+y
@@ -100,7 +96,7 @@
     v5 = evalOn(g,f5)
     v5ₓₓ = evalOn(g,f5ₓₓ)
 
-    @test L isa TensorOperator{T,2} where T
+    @test L isa TensorMapping{T,2,2} where T
     @test L' isa TensorMapping{T,2,2} where T
 
     # TODO: Should perhaps set tolerance level for isapporx instead?
@@ -118,6 +114,8 @@
     @test all(abs.((collect(L*v3) - v1)) .<= equalitytol)
     e4 = collect(L*v4) - v2
     e5 = collect(L*v5) - v5ₓₓ
+
+    h = spacing(g)
     @test sqrt(prod(h)*sum(collect(e4.^2))) <= accuracytol
     @test sqrt(prod(h)*sum(collect(e5.^2))) <= accuracytol
 end
@@ -126,11 +124,10 @@
     op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt")
     L = 2.3
     g = EquidistantGrid((77,), (0.0,), (L,))
-    h = spacing(g)
-    H = DiagonalInnerProduct(h[1],op.quadratureClosure)
+    H = DiagonalInnerProduct(g,op.quadratureClosure)
     v = ones(Float64, size(g))
 
-    @test H isa TensorOperator{T,1} where T
+    @test H isa TensorMapping{T,1,1} where T
     @test H' isa TensorMapping{T,1,1} where T
     @test sum(collect(H*v)) ≈ L
     @test collect(H*v) == collect(H'*v)
@@ -142,14 +139,11 @@
     Ly = 5.2
     g = EquidistantGrid((77,66), (0.0, 0.0), (Lx,Ly))
 
-    h = spacing(g)
-    Hx = DiagonalInnerProduct(h[1],op.quadratureClosure);
-    Hy = DiagonalInnerProduct(h[2],op.quadratureClosure);
-    Q = Quadrature((Hx,Hy))
+    Q = Quadrature(g, op.quadratureClosure)
 
     v = ones(Float64, size(g))
 
-    @test Q isa TensorOperator{T,2} where T
+    @test Q isa TensorMapping{T,2,2} where T
     @test Q' isa TensorMapping{T,2,2} where T
     @test sum(collect(Q*v)) ≈ (Lx*Ly)
     @test collect(Q*v) == collect(Q'*v)
@@ -159,14 +153,11 @@
     op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt")
     L = 2.3
     g = EquidistantGrid((77,), (0.0,), (L,))
-    h = spacing(g)
-    H = DiagonalInnerProduct(h[1],op.quadratureClosure)
-
-    h_i = inverse_spacing(g)
-    Hi = InverseDiagonalInnerProduct(h_i[1],1 ./ op.quadratureClosure)
+    H = DiagonalInnerProduct(g, op.quadratureClosure)
+    Hi = InverseDiagonalInnerProduct(g,op.quadratureClosure)
     v = evalOn(g, x->sin(x))
 
-    @test Hi isa TensorOperator{T,1} where T
+    @test Hi isa TensorMapping{T,1,1} where T
     @test Hi' isa TensorMapping{T,1,1} where T
     @test collect(Hi*H*v) ≈ v
     @test collect(Hi*v) == collect(Hi'*v)
@@ -178,20 +169,13 @@
     Ly = 8.2
     g = EquidistantGrid((77,66), (0.0, 0.0), (Lx,Ly))
 
-    h = spacing(g)
-    Hx = DiagonalInnerProduct(h[1], op.quadratureClosure);
-    Hy = DiagonalInnerProduct(h[2], op.quadratureClosure);
-    Q = Quadrature((Hx,Hy))
-
-    hi = inverse_spacing(g)
-    Hix = InverseDiagonalInnerProduct(hi[1], 1 ./ op.quadratureClosure);
-    Hiy = InverseDiagonalInnerProduct(hi[2], 1 ./ op.quadratureClosure);
-    Qinv = InverseQuadrature((Hix,Hiy))
+    Q = Quadrature(g, op.quadratureClosure)
+    Qinv = InverseQuadrature(g, op.quadratureClosure)
 
     v = evalOn(g, (x,y)-> x^2 + (y-1)^2 + x*y)
 
-    @test Qinv isa TensorOperator{T,2} where T
+    @test Qinv isa TensorMapping{T,2,2} where T
     @test Qinv' isa TensorMapping{T,2,2} where T
-    @test collect(Qinv*Q*v) ≈ v
+    @test collect(Qinv*(Q*v)) ≈ v
     @test collect(Qinv*v) == collect(Qinv'*v)
 end
 #