changeset 370:8e55dee6a1a1

Merge branch refactor/remove_dynamic_size_tensormapping
author Jonatan Werpers <jonatan@werpers.com>
date Mon, 28 Sep 2020 22:56:54 +0200
parents 0af6da238d95 (current diff) 4ded0f543976 (diff)
children 241bd2512c20 99296cbb7bcd
files
diffstat 11 files changed, 140 insertions(+), 121 deletions(-) [+]
line wrap: on
line diff
--- a/src/LazyTensors/lazy_tensor_operations.jl	Mon Sep 28 22:49:21 2020 +0200
+++ b/src/LazyTensors/lazy_tensor_operations.jl	Mon Sep 28 22:56:54 2020 +0200
@@ -11,12 +11,13 @@
     t::TensorMapping{T,R,D}
     o::AbstractArray{T,D}
 end
+# TODO: Do boundschecking on creation!
 export LazyTensorMappingApplication
 
 Base.:*(tm::TensorMapping{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = LazyTensorMappingApplication(tm,o)
 Base.getindex(ta::LazyTensorMappingApplication{T,R,D}, I::Vararg{Index,R}) where {T,R,D} = apply(ta.t, ta.o, I...)
 Base.getindex(ta::LazyTensorMappingApplication{T,R,D}, I::Vararg{Int,R}) where {T,R,D} = apply(ta.t, ta.o, Index{Unknown}.(I)...)
-Base.size(ta::LazyTensorMappingApplication{T,R,D}) where {T,R,D} = range_size(ta.t,size(ta.o))
+Base.size(ta::LazyTensorMappingApplication) = range_size(ta.t)
 # TODO: What else is needed to implement the AbstractArray interface?
 
 # # We need the associativity to be a→b→c = a→(b→c), which is the case for '→'
@@ -41,16 +42,15 @@
 export LazyTensorMappingTranspose
 
 # # TBD: Should this be implemented on a type by type basis or through a trait to provide earlier errors?
+# Jonatan 2020-09-25: Is the problem that you can take the transpose of any TensorMapping even if it doesn't implement `apply_transpose`?
 Base.adjoint(tm::TensorMapping) = LazyTensorMappingTranspose(tm)
 Base.adjoint(tmt::LazyTensorMappingTranspose) = tmt.tm
 
 apply(tmt::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{T,R}, I::Vararg{Index,D}) where {T,R,D} = apply_transpose(tmt.tm, v, I...)
 apply_transpose(tmt::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{T,D}, I::Vararg{Index,R}) where {T,R,D} = apply(tmt.tm, v, I...)
 
-range_size(tmt::LazyTensorMappingTranspose{T,R,D}, d_size::NTuple{R,Integer}) where {T,R,D} = domain_size(tmt.tm, d_size)
-domain_size(tmt::LazyTensorMappingTranspose{T,R,D}, r_size::NTuple{D,Integer}) where {T,R,D} = range_size(tmt.tm, r_size)
-
-
+range_size(tmt::LazyTensorMappingTranspose) = domain_size(tmt.tm)
+domain_size(tmt::LazyTensorMappingTranspose) = range_size(tmt.tm)
 
 
 struct LazyTensorMappingBinaryOperation{Op,T,R,D,T1<:TensorMapping{T,R,D},T2<:TensorMapping{T,R,D}} <: TensorMapping{T,D,R}
@@ -61,12 +61,13 @@
         return new{Op,T,R,D,T1,T2}(tm1,tm2)
     end
 end
+# TODO: Boundschecking in constructor.
 
 apply(tmBinOp::LazyTensorMappingBinaryOperation{:+,T,R,D}, v::AbstractArray{T,D}, I::Vararg{Index,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) + apply(tmBinOp.tm2, v, I...)
 apply(tmBinOp::LazyTensorMappingBinaryOperation{:-,T,R,D}, v::AbstractArray{T,D}, I::Vararg{Index,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) - apply(tmBinOp.tm2, v, I...)
 
-range_size(tmBinOp::LazyTensorMappingBinaryOperation{Op,T,R,D}, domain_size::NTuple{D,Integer}) where {Op,T,R,D} = range_size(tmBinOp.tm1, domain_size)
-domain_size(tmBinOp::LazyTensorMappingBinaryOperation{Op,T,R,D}, range_size::NTuple{R,Integer}) where {Op,T,R,D} = domain_size(tmBinOp.tm2, range_size)
+range_size(tmBinOp::LazyTensorMappingBinaryOperation{Op,T,R,D}) where {Op,T,R,D} = range_size(tmBinOp.tm1)
+domain_size(tmBinOp::LazyTensorMappingBinaryOperation{Op,T,R,D}) where {Op,T,R,D} = domain_size(tmBinOp.tm1)
 
 Base.:+(tm1::TensorMapping{T,R,D}, tm2::TensorMapping{T,R,D}) where {T,R,D} = LazyTensorMappingBinaryOperation{:+,T,R,D}(tm1,tm2)
 Base.:-(tm1::TensorMapping{T,R,D}, tm2::TensorMapping{T,R,D}) where {T,R,D} = LazyTensorMappingBinaryOperation{:-,T,R,D}(tm1,tm2)
--- a/src/LazyTensors/tensor_mapping.jl	Mon Sep 28 22:49:21 2020 +0200
+++ b/src/LazyTensors/tensor_mapping.jl	Mon Sep 28 22:56:54 2020 +0200
@@ -6,11 +6,11 @@
 
     apply(t::TensorMapping{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {R,D,T}
 
-The size of range tensor should be dependent on the size of the domain tensor
-and the type should implement the methods
+The size of the range and domain that the operator works with should be returned by
+the functions
 
-    range_size(::TensorMapping{T,R,D}, domain_size::NTuple{D,Integer}) where {T,R,D}
-    domain_size(::TensorMapping{T,R,D}, range_size::NTuple{R,Integer}) where {T,R,D}
+    range_size(::TensorMapping)
+    domain_size(::TensorMapping)
 
 to allow querying for one or the other.
 
@@ -49,35 +49,19 @@
 export range_dim, domain_dim
 
 """
-    range_size(M::TensorMapping, domain_size)
+    range_size(M::TensorMapping)
 
-Return the resulting range size for the mapping applied to a given domain_size
+Return the range size for the mapping.
 """
 function range_size end
 
 """
-    domain_size(M::TensorMapping, range_size)
+    domain_size(M::TensorMapping)
 
-Return the resulting domain size for the mapping applied to a given range_size
+Return the domain size for the mapping.
 """
 function domain_size end
 
-"""
-    Dummy type for representing dimensions of tensormappings when domain_size is unknown
-"""
-struct UnknownDim end
-export range_size, domain_size, TensorMappingDim, UnknownDim
+export range_size, domain_size
 
 # TODO: Think about boundschecking!
-
-
-"""
-    TensorOperator{T,D}
-
-A `TensorMapping{T,D,D}` where the range and domain tensor have the same number of
-dimensions and the same size.
-"""
-abstract type TensorOperator{T,D} <: TensorMapping{T,D,D} end
-export TensorOperator
-domain_size(::TensorOperator{T,D}, range_size::NTuple{D,Integer}) where {T,D} = range_size
-range_size(::TensorOperator{T,D}, domain_size::NTuple{D,Integer}) where {T,D} = domain_size
--- a/src/SbpOperators/SbpOperators.jl	Mon Sep 28 22:49:21 2020 +0200
+++ b/src/SbpOperators/SbpOperators.jl	Mon Sep 28 22:56:54 2020 +0200
@@ -2,6 +2,7 @@
 
 using Sbplib.RegionIndices
 using Sbplib.LazyTensors
+using Sbplib.Grids
 
 include("stencil.jl")
 include("constantstenciloperator.jl")
@@ -13,4 +14,5 @@
 include("quadrature/quadrature.jl")
 include("quadrature/inverse_diagonal_inner_product.jl")
 include("quadrature/inverse_quadrature.jl")
+
 end # module
--- a/src/SbpOperators/laplace/laplace.jl	Mon Sep 28 22:49:21 2020 +0200
+++ b/src/SbpOperators/laplace/laplace.jl	Mon Sep 28 22:56:54 2020 +0200
@@ -1,18 +1,27 @@
 export Laplace
 """
-    Laplace{Dim,T<:Real,N,M,K} <: TensorOperator{T,Dim}
+    Laplace{Dim,T<:Real,N,M,K} <: TensorMapping{T,Dim,Dim}
 
 Implements the Laplace operator `L` in Dim dimensions as a tensor operator.
 The multi-dimensional tensor operator consists of a tuple of 1D SecondDerivative
 tensor operators.
 """
 #export quadrature, inverse_quadrature, boundary_quadrature, boundary_value, normal_derivative
-struct Laplace{Dim,T,N,M,K} <: TensorOperator{T,Dim}
+struct Laplace{Dim,T,N,M,K} <: TensorMapping{T,Dim,Dim}
     D2::NTuple{Dim,SecondDerivative{T,N,M,K}}
-    #TODO: Write a good constructor
 end
 
-LazyTensors.domain_size(L::Laplace{Dim}, range_size::NTuple{Dim,Integer}) where {Dim} = range_size
+function Laplace(g::EquidistantGrid{Dim}, innerStencil, closureStencils) where Dim
+    D2 = ()
+    for i ∈ 1:Dim
+        D2 = (D2..., SecondDerivative(restrict(g,i), innerStencil, closureStencils))
+    end
+
+    return Laplace(D2)
+end
+
+LazyTensors.range_size(L::Laplace) = getindex.(range_size.(L.D2),1)
+LazyTensors.domain_size(L::Laplace) = getindex.(domain_size.(L.D2),1)
 
 function LazyTensors.apply(L::Laplace{Dim,T}, v::AbstractArray{T,Dim}, I::Vararg{Index,Dim}) where {T,Dim}
     error("not implemented")
@@ -36,8 +45,6 @@
     return uᵢ
 end
 
-LazyTensors.apply_transpose(L::Laplace{Dim,T}, v::AbstractArray{T,Dim}, I::Vararg{Index,Dim}) where {T,Dim} = LazyTensors.apply(L, v, I...)
-
 # quadrature(L::Laplace) = Quadrature(L.op, L.grid)
 # inverse_quadrature(L::Laplace) = InverseQuadrature(L.op, L.grid)
 # boundary_value(L::Laplace, bId::CartesianBoundary) = BoundaryValue(L.op, L.grid, bId)
--- a/src/SbpOperators/laplace/secondderivative.jl	Mon Sep 28 22:49:21 2020 +0200
+++ b/src/SbpOperators/laplace/secondderivative.jl	Mon Sep 28 22:56:54 2020 +0200
@@ -4,16 +4,22 @@
 in one dimension
 """
 
-struct SecondDerivative{T,N,M,K} <: TensorOperator{T,1}
+struct SecondDerivative{T,N,M,K} <: TensorMapping{T,1,1}
     h_inv::T # The grid spacing could be included in the stencil already. Preferable?
     innerStencil::Stencil{T,N}
     closureStencils::NTuple{M,Stencil{T,K}}
     parity::Parity
-    #TODO: Write a nice constructor
+    size::NTuple{1,Int}
 end
 export SecondDerivative
 
-LazyTensors.domain_size(D2::SecondDerivative, range_size::NTuple{1,Integer}) = range_size
+function SecondDerivative(grid::EquidistantGrid{1}, innerStencil, closureStencils)
+    h_inv = grid.inverse_spacing[1]
+    return SecondDerivative(h_inv, innerStencil, closureStencils, even, size(grid))
+end
+
+LazyTensors.range_size(D2::SecondDerivative) = D2.size
+LazyTensors.domain_size(D2::SecondDerivative) = D2.size
 
 #TODO: The 1D tensor mappings should not have to dispatch on 1D tuples if we write LazyTensor.apply for vararg right?!?!
 #      Currently have to index the Tuple{Index} in each method in order to call the stencil methods which is ugly.
@@ -40,6 +46,4 @@
     return LazyTensors.apply(D2, v, I)
 end
 
-LazyTensors.apply_transpose(D2::SecondDerivative{T}, v::AbstractVector{T}, I::Index) where {T} = LazyTensors.apply(D2, v, I)
-
 closuresize(D2::SecondDerivative{T,N,M,K}) where {T<:Real,N,M,K} = M
--- a/src/SbpOperators/quadrature/diagonal_inner_product.jl	Mon Sep 28 22:49:21 2020 +0200
+++ b/src/SbpOperators/quadrature/diagonal_inner_product.jl	Mon Sep 28 22:56:54 2020 +0200
@@ -4,25 +4,30 @@
 
 Implements the diagonal norm operator `H` of Dim dimension as a TensorMapping
 """
-struct DiagonalInnerProduct{T,M} <: TensorOperator{T,1}
-    h::T # The grid spacing could be included in the stencil already. Preferable?
-    closure::NTuple{M,T}
-    #TODO: Write a nice constructor
+struct DiagonalInnerProduct{T,M} <: TensorMapping{T,1,1}
+    h::T
+    quadratureClosure::NTuple{M,T}
+    size::Tuple{Int}
 end
 
-LazyTensors.domain_size(H::DiagonalInnerProduct, range_size::NTuple{1,Integer}) = range_size
+function DiagonalInnerProduct(g::EquidistantGrid{1}, quadratureClosure)
+    return DiagonalInnerProduct(spacing(g)[1], quadratureClosure, size(g))
+end
+
+LazyTensors.range_size(H::DiagonalInnerProduct) = H.size
+LazyTensors.domain_size(H::DiagonalInnerProduct) = H.size
 
 function LazyTensors.apply(H::DiagonalInnerProduct{T}, v::AbstractVector{T}, I::Index) where T
     return @inbounds apply(H, v, I)
 end
 
 function LazyTensors.apply(H::DiagonalInnerProduct{T}, v::AbstractVector{T}, I::Index{Lower}) where T
-    return @inbounds H.h*H.closure[Int(I)]*v[Int(I)]
+    return @inbounds H.h*H.quadratureClosure[Int(I)]*v[Int(I)]
 end
 
 function LazyTensors.apply(H::DiagonalInnerProduct{T},v::AbstractVector{T}, I::Index{Upper}) where T
     N = length(v);
-    return @inbounds H.h*H.closure[N-Int(I)+1]v[Int(I)]
+    return @inbounds H.h*H.quadratureClosure[N-Int(I)+1]*v[Int(I)]
 end
 
 function LazyTensors.apply(H::DiagonalInnerProduct{T}, v::AbstractVector{T}, I::Index{Interior}) where T
--- a/src/SbpOperators/quadrature/inverse_diagonal_inner_product.jl	Mon Sep 28 22:49:21 2020 +0200
+++ b/src/SbpOperators/quadrature/inverse_diagonal_inner_product.jl	Mon Sep 28 22:56:54 2020 +0200
@@ -1,22 +1,30 @@
 export InverseDiagonalInnerProduct, closuresize
 """
-    InverseDiagonalInnerProduct{Dim,T<:Real,M} <: TensorOperator{T,1}
+    InverseDiagonalInnerProduct{Dim,T<:Real,M} <: TensorMapping{T,1,1}
 
 Implements the inverse diagonal inner product operator `Hi` as a 1D TensorMapping
 """
-struct InverseDiagonalInnerProduct{T<:Real,M} <: TensorOperator{T,1}
-    h_inv::T # The reciprocl grid spacing could be included in the stencil already. Preferable?
-    closure::NTuple{M,T}
-    #TODO: Write a nice constructor
+struct InverseDiagonalInnerProduct{T<:Real,M} <: TensorMapping{T,1,1}
+    h_inv::T
+    inverseQuadratureClosure::NTuple{M,T}
+    size::Tuple{Int}
 end
 
+function InverseDiagonalInnerProduct(g::EquidistantGrid{1}, quadratureClosure)
+    return InverseDiagonalInnerProduct(inverse_spacing(g)[1], 1 ./ quadratureClosure, size(g))
+end
+
+LazyTensors.range_size(Hi::InverseDiagonalInnerProduct) = Hi.size
+LazyTensors.domain_size(Hi::InverseDiagonalInnerProduct) = Hi.size
+
+
 function LazyTensors.apply(Hi::InverseDiagonalInnerProduct{T}, v::AbstractVector{T}, I::Index{Lower}) where T
-    return @inbounds Hi.h_inv*Hi.closure[Int(I)]*v[Int(I)]
+    return @inbounds Hi.h_inv*Hi.inverseQuadratureClosure[Int(I)]*v[Int(I)]
 end
 
 function LazyTensors.apply(Hi::InverseDiagonalInnerProduct{T}, v::AbstractVector{T}, I::Index{Upper}) where T
     N = length(v);
-    return @inbounds Hi.h_inv*Hi.closure[N-Int(I)+1]v[Int(I)]
+    return @inbounds Hi.h_inv*Hi.inverseQuadratureClosure[N-Int(I)+1]*v[Int(I)]
 end
 
 function LazyTensors.apply(Hi::InverseDiagonalInnerProduct{T}, v::AbstractVector{T}, I::Index{Interior}) where T
--- a/src/SbpOperators/quadrature/inverse_quadrature.jl	Mon Sep 28 22:49:21 2020 +0200
+++ b/src/SbpOperators/quadrature/inverse_quadrature.jl	Mon Sep 28 22:56:54 2020 +0200
@@ -2,14 +2,27 @@
 """
     InverseQuadrature{Dim,T<:Real,M,K} <: TensorMapping{T,Dim,Dim}
 
-Implements the inverse quadrature operator `Qi` of Dim dimension as a TensorOperator
+Implements the inverse quadrature operator `Qi` in Dim dimensions as a TensorMapping
 The multi-dimensional tensor operator consists of a tuple of 1D InverseDiagonalInnerProduct
 tensor operators.
 """
-struct InverseQuadrature{Dim,T<:Real,M} <: TensorOperator{T,Dim}
+struct InverseQuadrature{Dim,T<:Real,M} <: TensorMapping{T,Dim,Dim}
     Hi::NTuple{Dim,InverseDiagonalInnerProduct{T,M}}
 end
 
+
+function InverseQuadrature(g::EquidistantGrid{Dim}, quadratureClosure) where Dim
+    Hi = ()
+    for i ∈ 1:Dim
+        Hi = (Hi..., InverseDiagonalInnerProduct(restrict(g,i), quadratureClosure))
+    end
+
+    return InverseQuadrature(Hi)
+end
+
+LazyTensors.range_size(Hi::InverseQuadrature) = getindex.(range_size.(Hi.Hi),1)
+LazyTensors.domain_size(Hi::InverseQuadrature) = getindex.(domain_size.(Hi.Hi),1)
+
 LazyTensors.domain_size(Qi::InverseQuadrature{Dim}, range_size::NTuple{Dim,Integer}) where Dim = range_size
 
 function LazyTensors.apply(Qi::InverseQuadrature{Dim,T}, v::AbstractArray{T,Dim}, I::Vararg{Index,Dim}) where {T,Dim}
--- a/src/SbpOperators/quadrature/quadrature.jl	Mon Sep 28 22:49:21 2020 +0200
+++ b/src/SbpOperators/quadrature/quadrature.jl	Mon Sep 28 22:56:54 2020 +0200
@@ -6,11 +6,21 @@
 The multi-dimensional tensor operator consists of a tuple of 1D DiagonalInnerProduct H
 tensor operators.
 """
-struct Quadrature{Dim,T<:Real,M} <: TensorOperator{T,Dim}
+struct Quadrature{Dim,T<:Real,M} <: TensorMapping{T,Dim,Dim}
     H::NTuple{Dim,DiagonalInnerProduct{T,M}}
 end
 
-LazyTensors.domain_size(Q::Quadrature{Dim}, range_size::NTuple{Dim,Integer}) where {Dim} = range_size
+function Quadrature(g::EquidistantGrid{Dim}, quadratureClosure) where Dim
+    H = ()
+    for i ∈ 1:Dim
+        H = (H..., DiagonalInnerProduct(restrict(g,i), quadratureClosure))
+    end
+
+    return Quadrature(H)
+end
+
+LazyTensors.range_size(H::Quadrature) = getindex.(range_size.(H.H),1)
+LazyTensors.domain_size(H::Quadrature) = getindex.(domain_size.(H.H),1)
 
 function LazyTensors.apply(Q::Quadrature{Dim,T}, v::AbstractArray{T,Dim}, I::Vararg{Index,Dim}) where {T,Dim}
     error("not implemented")
--- a/test/testLazyTensors.jl	Mon Sep 28 22:49:21 2020 +0200
+++ b/test/testLazyTensors.jl	Mon Sep 28 22:56:54 2020 +0200
@@ -12,20 +12,14 @@
     @test apply(DummyMapping{Int,2,3}(), zeros(Int, (0,0,0)),(Index{Unknown}(0),Index{Unknown}(0))) == :apply
 end
 
-@testset "Generic Operator methods" begin
-    struct DummyOperator{T,D} <: TensorOperator{T,D} end
-    @test range_size(DummyOperator{Int,2}(), (3,5)) == (3,5)
-    @test domain_size(DummyOperator{Float64, 3}(), (3,3,1)) == (3,3,1)
-end
-
 @testset "Mapping transpose" begin
     struct DummyMapping{T,R,D} <: TensorMapping{T,R,D} end
 
     LazyTensors.apply(m::DummyMapping{T,R,D}, v, I::Vararg{Index{<:Region},R}) where {T,R,D} = :apply
     LazyTensors.apply_transpose(m::DummyMapping{T,R,D}, v, I::Vararg{Index{<:Region},D}) where {T,R,D} = :apply_transpose
 
-    LazyTensors.range_size(m::DummyMapping{T,R,D}, domain_size::NTuple{D,Integer}) where {T,R,D} = :range_size
-    LazyTensors.domain_size(m::DummyMapping{T,R,D}, range_size::NTuple{R,Integer}) where {T,R,D} = :domain_size
+    LazyTensors.range_size(m::DummyMapping{T,R,D}) where {T,R,D} = :range_size
+    LazyTensors.domain_size(m::DummyMapping{T,R,D}) where {T,R,D} = :domain_size
 
     m = DummyMapping{Float64,2,3}()
     I = Index{Unknown}(0)
@@ -35,19 +29,21 @@
     @test apply(m'',zeros(Float64,(0,0,0)), I, I) == :apply
     @test apply_transpose(m', zeros(Float64,(0,0,0)), I, I) == :apply
 
-    @test range_size(m', (0,0)) == :domain_size
-    @test domain_size(m', (0,0,0)) == :range_size
+    @test range_size(m') == :domain_size
+    @test domain_size(m') == :range_size
 end
 
 @testset "TensorApplication" begin
-    struct DummyMapping{T,R,D} <: TensorMapping{T,R,D} end
+    struct SizeDoublingMapping{T,R,D} <: TensorMapping{T,R,D}
+        domain_size::NTuple{D,Int}
+    end
 
-    LazyTensors.apply(m::DummyMapping{T,R,D}, v, i::Vararg{Index{<:Region},R}) where {T,R,D} = (:apply,v,i)
-    LazyTensors.range_size(m::DummyMapping{T,R,D}, domain_size::NTuple{D,Integer}) where {T,R,D} = 2 .* domain_size
-    LazyTensors.domain_size(m::DummyMapping{T,R,D}, range_size::NTuple{R,Integer}) where {T,R,D} = range_size.÷2
+    LazyTensors.apply(m::SizeDoublingMapping{T,R,D}, v, i::Vararg{Index{<:Region},R}) where {T,R,D} = (:apply,v,i)
+    LazyTensors.range_size(m::SizeDoublingMapping) = 2 .* m.domain_size
+    LazyTensors.domain_size(m::SizeDoublingMapping) = m.domain_size
 
 
-    m = DummyMapping{Int, 1, 1}()
+    m = SizeDoublingMapping{Int, 1, 1}((3,))
     v = [0,1,2]
     @test m*v isa AbstractVector{Int}
     @test size(m*v) == 2 .*size(v)
@@ -63,28 +59,31 @@
     @test_broken BoundsError == (m*m*v)[0]
     @test_broken BoundsError == (m*m*v)[7]
 
-    m = DummyMapping{Int, 2, 1}()
+    m = SizeDoublingMapping{Int, 2, 1}((3,))
     @test_throws MethodError m*ones(Int,2,2)
     @test_throws MethodError m*m*v
 
-    m = DummyMapping{Float64, 2, 2}()
+    m = SizeDoublingMapping{Float64, 2, 2}((3,3))
     v = ones(3,3)
     I = (Index{Lower}(1),Index{Interior}(2));
     @test size(m*v) == 2 .*size(v)
     @test (m*v)[I] == (:apply,v,I)
 
-    struct ScalingOperator{T,D} <: TensorOperator{T,D}
+    struct ScalingOperator{T,D} <: TensorMapping{T,D,D}
         λ::T
+        size::NTuple{D,Int}
     end
 
     LazyTensors.apply(m::ScalingOperator{T,D}, v, I::Vararg{Index,D}) where {T,D} = m.λ*v[I]
+    LazyTensors.range_size(m::ScalingOperator) = m.size
+    LazyTensors.domain_size(m::ScalingOperator) = m.size
 
-    m = ScalingOperator{Int,1}(2)
+    m = ScalingOperator{Int,1}(2,(3,))
     v = [1,2,3]
     @test m*v isa AbstractVector
     @test m*v == [2,4,6]
 
-    m = ScalingOperator{Int,2}(2)
+    m = ScalingOperator{Int,2}(2,(2,2))
     v = [[1 2];[3 4]]
     @test m*v == [[2 4];[6 8]]
     I = (Index{Upper}(2),Index{Lower}(1))
@@ -94,14 +93,16 @@
 @testset "TensorMapping binary operations" begin
     struct ScalarMapping{T,R,D} <: TensorMapping{T,R,D}
         λ::T
+        range_size::NTuple{R,Int}
+        domain_size::NTuple{D,Int}
     end
 
     LazyTensors.apply(m::ScalarMapping{T,R,D}, v, I::Vararg{Index{<:Region}}) where {T,R,D} = m.λ*v[I...]
-    LazyTensors.range_size(m::ScalarMapping, domain_size) = domain_size
-    LazyTensors.domain_size(m::ScalarMapping, range_sizes) = range_sizes
+    LazyTensors.range_size(m::ScalarMapping) = m.domain_size
+    LazyTensors.domain_size(m::ScalarMapping) = m.range_size
 
-    A = ScalarMapping{Float64,1,1}(2.0)
-    B = ScalarMapping{Float64,1,1}(3.0)
+    A = ScalarMapping{Float64,1,1}(2.0, (3,), (3,))
+    B = ScalarMapping{Float64,1,1}(3.0, (3,), (3,))
 
     v = [1.1,1.2,1.3]
     for i ∈ eachindex(v)
@@ -112,8 +113,8 @@
         @test ((A-B)*v)[i] == 2*v[i] - 3*v[i]
     end
 
-    @test range_size(A+B, (3,)) == range_size(A, (3,)) == range_size(B,(3,))
-    @test domain_size(A+B, (3,)) == domain_size(A, (3,)) == domain_size(B,(3,))
+    @test range_size(A+B) == range_size(A) == range_size(B)
+    @test domain_size(A+B) == domain_size(A) == domain_size(B)
 end
 
 @testset "LazyArray" begin
@@ -192,4 +193,4 @@
     @test_throws DimensionMismatch v1 + v2
 end
 
-end
\ No newline at end of file
+end
--- a/test/testSbpOperators.jl	Mon Sep 28 22:49:21 2020 +0200
+++ b/test/testSbpOperators.jl	Mon Sep 28 22:56:54 2020 +0200
@@ -33,7 +33,7 @@
     g = EquidistantGrid((101,), (0.0,), (L,))
     h_inv = inverse_spacing(g)
     h = 1/h_inv[1];
-    Dₓₓ = SecondDerivative(h_inv[1],op.innerStencil,op.closureStencils,op.parity)
+    Dₓₓ = SecondDerivative(g,op.innerStencil,op.closureStencils)
 
     f0(x::Float64) = 1.
     f1(x::Float64) = x
@@ -50,7 +50,7 @@
     v4 = evalOn(g,f4)
     v5 = evalOn(g,f5)
 
-    @test Dₓₓ isa TensorOperator{T,1} where T
+    @test Dₓₓ isa TensorMapping{T,1,1} where T
     @test Dₓₓ' isa TensorMapping{T,1,1} where T
 
     # TODO: Should perhaps set tolerance level for isapprox instead?
@@ -77,12 +77,8 @@
     Lx = 1.5
     Ly = 3.2
     g = EquidistantGrid((102,131), (0.0, 0.0), (Lx,Ly))
+    L = Laplace(g, op.innerStencil, op.closureStencils)
 
-    h_inv = inverse_spacing(g)
-    h = spacing(g)
-    D_xx = SecondDerivative(h_inv[1],op.innerStencil,op.closureStencils,op.parity)
-    D_yy = SecondDerivative(h_inv[2],op.innerStencil,op.closureStencils,op.parity)
-    L = Laplace((D_xx,D_yy))
 
     f0(x::Float64,y::Float64) = 2.
     f1(x::Float64,y::Float64) = x+y
@@ -100,7 +96,7 @@
     v5 = evalOn(g,f5)
     v5ₓₓ = evalOn(g,f5ₓₓ)
 
-    @test L isa TensorOperator{T,2} where T
+    @test L isa TensorMapping{T,2,2} where T
     @test L' isa TensorMapping{T,2,2} where T
 
     # TODO: Should perhaps set tolerance level for isapprox instead?
@@ -118,6 +114,8 @@
     @test all(abs.((collect(L*v3) - v1)) .<= equalitytol)
     e4 = collect(L*v4) - v2
     e5 = collect(L*v5) - v5ₓₓ
+
+    h = spacing(g)
     @test sqrt(prod(h)*sum(collect(e4.^2))) <= accuracytol
     @test sqrt(prod(h)*sum(collect(e5.^2))) <= accuracytol
 end
@@ -126,11 +124,10 @@
     op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt")
     L = 2.3
     g = EquidistantGrid((77,), (0.0,), (L,))
-    h = spacing(g)
-    H = DiagonalInnerProduct(h[1],op.quadratureClosure)
+    H = DiagonalInnerProduct(g,op.quadratureClosure)
     v = ones(Float64, size(g))
 
-    @test H isa TensorOperator{T,1} where T
+    @test H isa TensorMapping{T,1,1} where T
     @test H' isa TensorMapping{T,1,1} where T
     @test sum(collect(H*v)) ≈ L
     @test collect(H*v) == collect(H'*v)
@@ -142,14 +139,11 @@
     Ly = 5.2
     g = EquidistantGrid((77,66), (0.0, 0.0), (Lx,Ly))
 
-    h = spacing(g)
-    Hx = DiagonalInnerProduct(h[1],op.quadratureClosure);
-    Hy = DiagonalInnerProduct(h[2],op.quadratureClosure);
-    Q = Quadrature((Hx,Hy))
+    Q = Quadrature(g, op.quadratureClosure)
 
     v = ones(Float64, size(g))
 
-    @test Q isa TensorOperator{T,2} where T
+    @test Q isa TensorMapping{T,2,2} where T
     @test Q' isa TensorMapping{T,2,2} where T
     @test sum(collect(Q*v)) ≈ (Lx*Ly)
     @test collect(Q*v) == collect(Q'*v)
@@ -159,14 +153,11 @@
     op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt")
     L = 2.3
     g = EquidistantGrid((77,), (0.0,), (L,))
-    h = spacing(g)
-    H = DiagonalInnerProduct(h[1],op.quadratureClosure)
-
-    h_i = inverse_spacing(g)
-    Hi = InverseDiagonalInnerProduct(h_i[1],1 ./ op.quadratureClosure)
+    H = DiagonalInnerProduct(g, op.quadratureClosure)
+    Hi = InverseDiagonalInnerProduct(g,op.quadratureClosure)
     v = evalOn(g, x->sin(x))
 
-    @test Hi isa TensorOperator{T,1} where T
+    @test Hi isa TensorMapping{T,1,1} where T
     @test Hi' isa TensorMapping{T,1,1} where T
     @test collect(Hi*H*v)  ≈ v
     @test collect(Hi*v) == collect(Hi'*v)
@@ -178,20 +169,13 @@
     Ly = 8.2
     g = EquidistantGrid((77,66), (0.0, 0.0), (Lx,Ly))
 
-    h = spacing(g)
-    Hx = DiagonalInnerProduct(h[1], op.quadratureClosure);
-    Hy = DiagonalInnerProduct(h[2], op.quadratureClosure);
-    Q = Quadrature((Hx,Hy))
-
-    hi = inverse_spacing(g)
-    Hix = InverseDiagonalInnerProduct(hi[1], 1 ./ op.quadratureClosure);
-    Hiy = InverseDiagonalInnerProduct(hi[2], 1 ./ op.quadratureClosure);
-    Qinv = InverseQuadrature((Hix,Hiy))
+    Q = Quadrature(g, op.quadratureClosure)
+    Qinv = InverseQuadrature(g, op.quadratureClosure)
     v = evalOn(g, (x,y)-> x^2 + (y-1)^2 + x*y)
 
-    @test Qinv isa TensorOperator{T,2} where T
+    @test Qinv isa TensorMapping{T,2,2} where T
     @test Qinv' isa TensorMapping{T,2,2} where T
-    @test collect(Qinv*Q*v)  ≈ v
+    @test collect(Qinv*(Q*v)) ≈ v
     @test collect(Qinv*v) == collect(Qinv'*v)
 end
 #