changeset 1044:f857057e61e6 refactor/sbpoperators/inflation

Merge default
author Jonatan Werpers <jonatan@werpers.com>
date Tue, 22 Mar 2022 22:05:34 +0100
parents 5be17f647018 (current diff) c16116e403e2 (diff)
children 0e31b9901160 62f321caa964
files src/LazyTensors/lazy_tensor_operations.jl test/LazyTensors/lazy_tensor_operations_test.jl
diffstat 25 files changed, 556 insertions(+), 530 deletions(-)
line diff
--- a/Notes.md	Mon Mar 21 10:04:15 2022 +0100
+++ b/Notes.md	Tue Mar 22 22:05:34 2022 +0100
@@ -136,12 +136,9 @@
 
 ## Research and thinking
  - [ ] Use a trait to indicate that a LazyTensor has the same range and domain?
- - [ ] Rename all the Tensor stuff to just LazyOperator, LazyApplication and so on?
  - [ ] Check how the native julia doc generator works
    - [ ] Check if Vidar's design docs fit in there
  - [ ] Create a macro @lazy which replaces a binary op (+,-) by its lazy equivalent? Would be a neat way to indicate which evaluations are lazy without cluttering/confusing with special characters.
- - [ ] Specify operators in TOML or something similar?
- H.. H_gamma etc.)
  - [ ] Dispatch on Lower() instead of the type Lower so `::Lower` instead of `::Type{Lower}` ???
  	Seems better unless there is some specific reason to use the type instead of the value.
  - [ ] How do we handle mixes of periodic and non-periodic grids? Seems it should be supported on the grid level and on the 1D operator level. In between, it should be transparent.
--- a/TODO.md	Mon Mar 21 10:04:15 2022 +0100
+++ b/TODO.md	Tue Mar 22 22:05:34 2022 +0100
@@ -10,10 +10,7 @@
  - [ ] Replace getindex hack for flattening tuples with flatten_tuple. (eg. `getindex.(range_size.(L.D2),1)`)
  - [ ] Use `@inferred` in a lot of tests.
  - [ ] Make sure we are setting tolerances in tests in a consistent way
- - [ ] Add check for correct domain sizes to lazy tensor operations using SizeMismatch
  - [ ] Write down some coding guideline or checklist for code conventions. For example i,j,... for indices and I for multi-index
- - [ ] Add boundschecking in LazyTensorApplication
- - [ ] Start renaming things in LazyTensors
  - [ ] Clean up RegionIndices
     1. [ ] Write tests for how things should work
     2. [ ] Update RegionIndices accordingly
@@ -23,7 +20,6 @@
  - [ ] Add custom pretty printing to LazyTensors/SbpOperators to enhance readability of e.g. error messages.
        See (https://docs.julialang.org/en/v1/manual/types/#man-custom-pretty-printing)
  - [ ] Move export statements to top of each module
- - [ ] Add a type StencilSet for easier dispatch
 
 ## Repo
  - [ ] Rename repo to Sbplib.jl
--- a/src/LazyTensors/LazyTensors.jl	Mon Mar 21 10:04:15 2022 +0100
+++ b/src/LazyTensors/LazyTensors.jl	Tue Mar 22 22:05:34 2022 +0100
@@ -1,12 +1,12 @@
 module LazyTensors
 
-export LazyTensorApplication
-export LazyTensorTranspose
-export LazyTensorComposition
-export LazyLinearMap
+export TensorApplication
+export TensorTranspose
+export TensorComposition
+export DenseTensor
 export IdentityTensor
 export ScalingTensor
-export InflatedLazyTensor
+export InflatedTensor
 export LazyOuterProduct
 export ⊗
 export DomainSizeMismatch
@@ -19,16 +19,16 @@
 include("tuple_manipulation.jl")
 
 # Applying lazy tensors to vectors
-Base.:*(a::LazyTensor, v::AbstractArray) = LazyTensorApplication(a,v)
+Base.:*(a::LazyTensor, v::AbstractArray) = TensorApplication(a,v)
 Base.:*(a::LazyTensor, b::LazyTensor) = throw(MethodError(Base.:*,(a,b)))
 Base.:*(a::LazyTensor, args::Union{LazyTensor, AbstractArray}...) = foldr(*,(a,args...))
 
 # Addition and subtraction of lazy tensors
-Base.:+(s::LazyTensor, t::LazyTensor) = LazyTensorBinaryOperation{:+}(s,t)
-Base.:-(s::LazyTensor, t::LazyTensor) = LazyTensorBinaryOperation{:-}(s,t)
+Base.:+(s::LazyTensor, t::LazyTensor) = ElementwiseTensorOperation{:+}(s,t)
+Base.:-(s::LazyTensor, t::LazyTensor) = ElementwiseTensorOperation{:-}(s,t)
 
 # Composing lazy tensors
-Base.:∘(s::LazyTensor, t::LazyTensor) = LazyTensorComposition(s,t)
+Base.:∘(s::LazyTensor, t::LazyTensor) = TensorComposition(s,t)
 
 # Outer products of tensors
 ⊗(a::LazyTensor, b::LazyTensor) = LazyOuterProduct(a,b)
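
An illustrative usage sketch of the renamed API through the overloads above; names and element-type defaults are taken from the exports and tests in this changeset, so treat it as a sketch rather than documented behaviour:

    using Sbplib.LazyTensors

    s = ScalingTensor(2.0, (5,))    # a LazyTensor scaling size-5 vectors by 2
    v = rand(5)

    s*v                      # a TensorApplication; nothing is computed yet
    (s*v)[3]                 # elements are evaluated on indexing
    s + s                    # lazy elementwise sum (ElementwiseTensorOperation{:+})
    s ∘ s                    # a TensorComposition
    s ⊗ IdentityTensor(3)    # LazyOuterProduct, yielding an InflatedTensor
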
--- a/src/LazyTensors/lazy_array.jl	Mon Mar 21 10:04:15 2022 +0100
+++ b/src/LazyTensors/lazy_array.jl	Tue Mar 22 22:05:34 2022 +0100
@@ -36,7 +36,7 @@
 
 function Base.getindex(lfa::LazyFunctionArray{F,T,D}, I::Vararg{Int,D}) where {F,T,D}
     @boundscheck checkbounds(lfa, I...)
-    return lfa.f(I...)
+    return @inbounds lfa.f(I...)
 end
 
 
@@ -64,14 +64,14 @@
 
 Base.size(v::LazyElementwiseOperation) = size(v.a)
 
-evaluate(leo::LazyElementwiseOperation{T,D,:+}, I::Vararg{Int,D}) where {T,D} = @inbounds leo.a[I...] + leo.b[I...]
-evaluate(leo::LazyElementwiseOperation{T,D,:-}, I::Vararg{Int,D}) where {T,D} = @inbounds leo.a[I...] - leo.b[I...]
-evaluate(leo::LazyElementwiseOperation{T,D,:*}, I::Vararg{Int,D}) where {T,D} = @inbounds leo.a[I...] * leo.b[I...]
-evaluate(leo::LazyElementwiseOperation{T,D,:/}, I::Vararg{Int,D}) where {T,D} = @inbounds leo.a[I...] / leo.b[I...]
+Base.@propagate_inbounds evaluate(leo::LazyElementwiseOperation{T,D,:+}, I::Vararg{Int,D}) where {T,D} = leo.a[I...] + leo.b[I...]
+Base.@propagate_inbounds evaluate(leo::LazyElementwiseOperation{T,D,:-}, I::Vararg{Int,D}) where {T,D} = leo.a[I...] - leo.b[I...]
+Base.@propagate_inbounds evaluate(leo::LazyElementwiseOperation{T,D,:*}, I::Vararg{Int,D}) where {T,D} = leo.a[I...] * leo.b[I...]
+Base.@propagate_inbounds evaluate(leo::LazyElementwiseOperation{T,D,:/}, I::Vararg{Int,D}) where {T,D} = leo.a[I...] / leo.b[I...]
 
 function Base.getindex(leo::LazyElementwiseOperation{T,D}, I::Vararg{Int,D}) where {T,D}
     @boundscheck checkbounds(leo.a, I...)
-    return evaluate(leo, I...)
+    return @inbounds evaluate(leo, I...)
 end
 
 # Define lazy operations for AbstractArrays. Operations constructs a LazyElementwiseOperation which
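
The hunk above moves the bounds check to `getindex` and lets the `evaluate` helpers inherit it through `Base.@propagate_inbounds`. A self-contained sketch of that pattern on a toy array type (illustrative only, not Sbplib code):

    # getindex checks bounds once; the @inbounds forward lets the
    # @propagate_inbounds helper skip its own check when called from here.
    struct Doubled{T,N} <: AbstractArray{T,N}
        a::Array{T,N}
    end
    Base.size(d::Doubled) = size(d.a)
    Base.@propagate_inbounds evaluate(d::Doubled{T,N}, I::Vararg{Int,N}) where {T,N} = 2*d.a[I...]
    function Base.getindex(d::Doubled{T,N}, I::Vararg{Int,N}) where {T,N}
        @boundscheck checkbounds(d.a, I...)
        return @inbounds evaluate(d, I...)
    end

    Doubled(rand(3))[2]    # in-bounds access; index 4 would throw a BoundsError
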
--- a/src/LazyTensors/lazy_tensor_operations.jl	Mon Mar 21 10:04:15 2022 +0100
+++ b/src/LazyTensors/lazy_tensor_operations.jl	Tue Mar 22 22:05:34 2022 +0100
@@ -1,17 +1,17 @@
 """
-    LazyTensorApplication{T,R,D} <: LazyArray{T,R}
+    TensorApplication{T,R,D} <: LazyArray{T,R}
 
 Struct for lazy application of a LazyTensor. Created using `*`.
 
 Allows the result of a `LazyTensor` applied to a vector to be treated as an `AbstractArray`.
-With a mapping `m` and a vector `v` the LazyTensorApplication object can be created by `m*v`.
+With a mapping `m` and a vector `v`, the `TensorApplication` object can be created by `m*v`.
 The actual result will be calculated when indexing into `m*v`.
 """
-struct LazyTensorApplication{T,R,D, TM<:LazyTensor{<:Any,R,D}, AA<:AbstractArray{<:Any,D}} <: LazyArray{T,R}
+struct TensorApplication{T,R,D, TM<:LazyTensor{<:Any,R,D}, AA<:AbstractArray{<:Any,D}} <: LazyArray{T,R}
     t::TM
     o::AA
 
-    function LazyTensorApplication(t::LazyTensor{<:Any,R,D}, o::AbstractArray{<:Any,D}) where {R,D}
+    function TensorApplication(t::LazyTensor{<:Any,R,D}, o::AbstractArray{<:Any,D}) where {R,D}
         @boundscheck check_domain_size(t, size(o))
         I = ntuple(i->1, range_dim(t))
         T = typeof(apply(t,o,I...))
@@ -19,16 +19,16 @@
     end
 end
 
-function Base.getindex(ta::LazyTensorApplication{T,R}, I::Vararg{Any,R}) where {T,R}
+function Base.getindex(ta::TensorApplication{T,R}, I::Vararg{Any,R}) where {T,R}
     @boundscheck checkbounds(ta, Int.(I)...)
-    return apply(ta.t, ta.o, I...)
+    return @inbounds apply(ta.t, ta.o, I...)
 end
-Base.getindex(ta::LazyTensorApplication{T,1} where T, I::CartesianIndex{1}) = ta[Tuple(I)...] # Would otherwise be caught in the previous method.
-Base.size(ta::LazyTensorApplication) = range_size(ta.t)
+Base.@propagate_inbounds Base.getindex(ta::TensorApplication{T,1} where T, I::CartesianIndex{1}) = ta[Tuple(I)...] # Would otherwise be caught in the previous method.
+Base.size(ta::TensorApplication) = range_size(ta.t)
 
 
 """
-    LazyTensorTranspose{T,R,D} <: LazyTensor{T,D,R}
+    TensorTranspose{T,R,D} <: LazyTensor{T,D,R}
 
 Struct for lazy transpose of a LazyTensor.
 
@@ -36,102 +36,102 @@
 the transpose of mapping `m` by using `m'`. `m'` will work as a regular LazyTensor lazily calling
 the appropriate methods of `m`.
 """
-struct LazyTensorTranspose{T,R,D, TM<:LazyTensor{T,R,D}} <: LazyTensor{T,D,R}
+struct TensorTranspose{T,R,D, TM<:LazyTensor{T,R,D}} <: LazyTensor{T,D,R}
     tm::TM
 end
 
 # # TBD: Should this be implemented on a type by type basis or through a trait to provide earlier errors?
 # Jonatan 2020-09-25: Is the problem that you can take the transpose of any LazyTensor even if it doesn't implement `apply_transpose`?
-Base.adjoint(tm::LazyTensor) = LazyTensorTranspose(tm)
-Base.adjoint(tmt::LazyTensorTranspose) = tmt.tm
+Base.adjoint(tm::LazyTensor) = TensorTranspose(tm)
+Base.adjoint(tmt::TensorTranspose) = tmt.tm
 
-apply(tmt::LazyTensorTranspose{T,R,D}, v::AbstractArray{<:Any,R}, I::Vararg{Any,D}) where {T,R,D} = apply_transpose(tmt.tm, v, I...)
-apply_transpose(tmt::LazyTensorTranspose{T,R,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,R}) where {T,R,D} = apply(tmt.tm, v, I...)
+apply(tmt::TensorTranspose{T,R,D}, v::AbstractArray{<:Any,R}, I::Vararg{Any,D}) where {T,R,D} = apply_transpose(tmt.tm, v, I...)
+apply_transpose(tmt::TensorTranspose{T,R,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,R}) where {T,R,D} = apply(tmt.tm, v, I...)
 
-range_size(tmt::LazyTensorTranspose) = domain_size(tmt.tm)
-domain_size(tmt::LazyTensorTranspose) = range_size(tmt.tm)
+range_size(tmt::TensorTranspose) = domain_size(tmt.tm)
+domain_size(tmt::TensorTranspose) = range_size(tmt.tm)
 
 
-struct LazyTensorBinaryOperation{Op,T,R,D,T1<:LazyTensor{T,R,D},T2<:LazyTensor{T,R,D}} <: LazyTensor{T,D,R}
+struct ElementwiseTensorOperation{Op,T,R,D,T1<:LazyTensor{T,R,D},T2<:LazyTensor{T,R,D}} <: LazyTensor{T,D,R}
     tm1::T1
     tm2::T2
 
-    function LazyTensorBinaryOperation{Op,T,R,D}(tm1::T1,tm2::T2) where {Op,T,R,D, T1<:LazyTensor{T,R,D},T2<:LazyTensor{T,R,D}}
+    function ElementwiseTensorOperation{Op,T,R,D}(tm1::T1,tm2::T2) where {Op,T,R,D, T1<:LazyTensor{T,R,D},T2<:LazyTensor{T,R,D}}
         @boundscheck check_domain_size(tm2, domain_size(tm1))
         @boundscheck check_range_size(tm2, range_size(tm1))
         return new{Op,T,R,D,T1,T2}(tm1,tm2)
     end
 end
 
-LazyTensorBinaryOperation{Op}(s,t) where Op = LazyTensorBinaryOperation{Op,eltype(s), range_dim(s), domain_dim(s)}(s,t)
+ElementwiseTensorOperation{Op}(s,t) where Op = ElementwiseTensorOperation{Op,eltype(s), range_dim(s), domain_dim(s)}(s,t)
 
-apply(tmBinOp::LazyTensorBinaryOperation{:+,T,R,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) + apply(tmBinOp.tm2, v, I...)
-apply(tmBinOp::LazyTensorBinaryOperation{:-,T,R,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) - apply(tmBinOp.tm2, v, I...)
+apply(tmBinOp::ElementwiseTensorOperation{:+,T,R,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) + apply(tmBinOp.tm2, v, I...)
+apply(tmBinOp::ElementwiseTensorOperation{:-,T,R,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) - apply(tmBinOp.tm2, v, I...)
 
-range_size(tmBinOp::LazyTensorBinaryOperation) = range_size(tmBinOp.tm1)
-domain_size(tmBinOp::LazyTensorBinaryOperation) = domain_size(tmBinOp.tm1)
+range_size(tmBinOp::ElementwiseTensorOperation) = range_size(tmBinOp.tm1)
+domain_size(tmBinOp::ElementwiseTensorOperation) = domain_size(tmBinOp.tm1)
 
 
 """
-    LazyTensorComposition{T,R,K,D}
+    TensorComposition{T,R,K,D}
 
 Lazily compose two `LazyTensor`s, so that they can be handled as a single `LazyTensor`.
 """
-struct LazyTensorComposition{T,R,K,D, TM1<:LazyTensor{T,R,K}, TM2<:LazyTensor{T,K,D}} <: LazyTensor{T,R,D}
+struct TensorComposition{T,R,K,D, TM1<:LazyTensor{T,R,K}, TM2<:LazyTensor{T,K,D}} <: LazyTensor{T,R,D}
     t1::TM1
     t2::TM2
 
-    function LazyTensorComposition(t1::LazyTensor{T,R,K}, t2::LazyTensor{T,K,D}) where {T,R,K,D}
+    function TensorComposition(t1::LazyTensor{T,R,K}, t2::LazyTensor{T,K,D}) where {T,R,K,D}
         @boundscheck check_domain_size(t1, range_size(t2))
         return new{T,R,K,D, typeof(t1), typeof(t2)}(t1,t2)
     end
 end
 
-range_size(tm::LazyTensorComposition) = range_size(tm.t1)
-domain_size(tm::LazyTensorComposition) = domain_size(tm.t2)
+range_size(tm::TensorComposition) = range_size(tm.t1)
+domain_size(tm::TensorComposition) = domain_size(tm.t2)
 
-function apply(c::LazyTensorComposition{T,R,K,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,R}) where {T,R,K,D}
+function apply(c::TensorComposition{T,R,K,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,R}) where {T,R,K,D}
     apply(c.t1, c.t2*v, I...)
 end
 
-function apply_transpose(c::LazyTensorComposition{T,R,K,D}, v::AbstractArray{<:Any,R}, I::Vararg{Any,D}) where {T,R,K,D}
+function apply_transpose(c::TensorComposition{T,R,K,D}, v::AbstractArray{<:Any,R}, I::Vararg{Any,D}) where {T,R,K,D}
     apply_transpose(c.t2, c.t1'*v, I...)
 end
 
 
 """
-    LazyTensorComposition(tm, tmi::IdentityTensor)
-    LazyTensorComposition(tmi::IdentityTensor, tm)
+    TensorComposition(tm, tmi::IdentityTensor)
+    TensorComposition(tmi::IdentityTensor, tm)
 
 Composes a `LazyTensor` `tm` with an `IdentityTensor` `tmi` by returning `tm`.
 """
-function LazyTensorComposition(tm::LazyTensor{T,R,D}, tmi::IdentityTensor{T,D}) where {T,R,D}
+function TensorComposition(tm::LazyTensor{T,R,D}, tmi::IdentityTensor{T,D}) where {T,R,D}
     @boundscheck check_domain_size(tm, range_size(tmi))
     return tm
 end
 
-function LazyTensorComposition(tmi::IdentityTensor{T,R}, tm::LazyTensor{T,R,D}) where {T,R,D}
+function TensorComposition(tmi::IdentityTensor{T,R}, tm::LazyTensor{T,R,D}) where {T,R,D}
     @boundscheck check_domain_size(tmi, range_size(tm))
     return tm
 end
 # Specialization for the case where tm is an IdentityTensor. Required to resolve ambiguity.
-function LazyTensorComposition(tm::IdentityTensor{T,D}, tmi::IdentityTensor{T,D}) where {T,D}
+function TensorComposition(tm::IdentityTensor{T,D}, tmi::IdentityTensor{T,D}) where {T,D}
     @boundscheck check_domain_size(tm, range_size(tmi))
     return tmi
 end
 
 
 """
-    InflatedLazyTensor{T,R,D} <: LazyTensor{T,R,D}
+    InflatedTensor{T,R,D} <: LazyTensor{T,R,D}
 
 An inflated `LazyTensor` with dimensions added before and after its actual dimensions.
 """
-struct InflatedLazyTensor{T,R,D,D_before,R_middle,D_middle,D_after, TM<:LazyTensor{T,R_middle,D_middle}} <: LazyTensor{T,R,D}
+struct InflatedTensor{T,R,D,D_before,R_middle,D_middle,D_after, TM<:LazyTensor{T,R_middle,D_middle}} <: LazyTensor{T,R,D}
     before::IdentityTensor{T,D_before}
     tm::TM
     after::IdentityTensor{T,D_after}
 
-    function InflatedLazyTensor(before, tm::LazyTensor{T}, after) where T
+    function InflatedTensor(before, tm::LazyTensor{T}, after) where T
         R_before = range_dim(before)
         R_middle = range_dim(tm)
         R_after = range_dim(after)
@@ -146,35 +146,35 @@
 end
 
 """
-    InflatedLazyTensor(before, tm, after)
-    InflatedLazyTensor(before,tm)
-    InflatedLazyTensor(tm,after)
+    InflatedTensor(before, tm, after)
+    InflatedTensor(before,tm)
+    InflatedTensor(tm,after)
 
 The outer product of `before`, `tm` and `after`, where `before` and `after` are `IdentityTensor`s.
 
 If one of `before` or `after` is left out, a 0-dimensional `IdentityTensor` is used as the default value.
 
-If `tm` already is an `InflatedLazyTensor`, `before` and `after` will be extended instead of
-creating a nested `InflatedLazyTensor`.
+If `tm` is already an `InflatedTensor`, `before` and `after` will be extended instead of
+creating a nested `InflatedTensor`.
 """
-InflatedLazyTensor(::IdentityTensor, ::LazyTensor, ::IdentityTensor)
+InflatedTensor(::IdentityTensor, ::LazyTensor, ::IdentityTensor)
 
-function InflatedLazyTensor(before, itm::InflatedLazyTensor, after)
-    return InflatedLazyTensor(
+function InflatedTensor(before, itm::InflatedTensor, after)
+    return InflatedTensor(
         IdentityTensor(before.size...,  itm.before.size...),
         itm.tm,
         IdentityTensor(itm.after.size..., after.size...),
     )
 end
 
-InflatedLazyTensor(before::IdentityTensor, tm::LazyTensor{T}) where T = InflatedLazyTensor(before,tm,IdentityTensor{T}())
-InflatedLazyTensor(tm::LazyTensor{T}, after::IdentityTensor) where T = InflatedLazyTensor(IdentityTensor{T}(),tm,after)
+InflatedTensor(before::IdentityTensor, tm::LazyTensor{T}) where T = InflatedTensor(before,tm,IdentityTensor{T}())
+InflatedTensor(tm::LazyTensor{T}, after::IdentityTensor) where T = InflatedTensor(IdentityTensor{T}(),tm,after)
 # Resolve ambiguity between the two previous methods
-InflatedLazyTensor(I1::IdentityTensor{T}, I2::IdentityTensor{T}) where T = InflatedLazyTensor(I1,I2,IdentityTensor{T}())
+InflatedTensor(I1::IdentityTensor{T}, I2::IdentityTensor{T}) where T = InflatedTensor(I1,I2,IdentityTensor{T}())
 
-# TODO: Implement some pretty printing in terms of ⊗. E.g InflatedLazyTensor(I(3),B,I(2)) -> I(3)⊗B⊗I(2)
+# TODO: Implement some pretty printing in terms of ⊗. E.g. InflatedTensor(I(3),B,I(2)) -> I(3)⊗B⊗I(2)
 
-function range_size(itm::InflatedLazyTensor)
+function range_size(itm::InflatedTensor)
     return flatten_tuple(
         range_size(itm.before),
         range_size(itm.tm),
@@ -182,7 +182,7 @@
     )
 end
 
-function domain_size(itm::InflatedLazyTensor)
+function domain_size(itm::InflatedTensor)
     return flatten_tuple(
         domain_size(itm.before),
         domain_size(itm.tm),
@@ -190,7 +190,7 @@
     )
 end
 
-function apply(itm::InflatedLazyTensor{T,R,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,R}) where {T,R,D}
+function apply(itm::InflatedTensor{T,R,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,R}) where {T,R,D}
     dim_before = range_dim(itm.before)
     dim_domain = domain_dim(itm.tm)
     dim_range = range_dim(itm.tm)
@@ -202,7 +202,7 @@
     return apply(itm.tm, v_inner, inner_index...)
 end
 
-function apply_transpose(itm::InflatedLazyTensor{T,R,D}, v::AbstractArray{<:Any,R}, I::Vararg{Any,D}) where {T,R,D}
+function apply_transpose(itm::InflatedTensor{T,R,D}, v::AbstractArray{<:Any,R}, I::Vararg{Any,D}) where {T,R,D}
     dim_before = range_dim(itm.before)
     dim_domain = domain_dim(itm.tm)
     dim_range = range_dim(itm.tm)
@@ -218,7 +218,7 @@
 @doc raw"""
     LazyOuterProduct(tms...)
 
-Creates a `LazyTensorComposition` for the outerproduct of `tms...`.
+Creates a `TensorComposition` for the outer product of `tms...`.
 This is done by separating the outer product into regular products of outer products involving only identity mappings and one non-identity mapping.
 
 First let
@@ -255,15 +255,15 @@
 function LazyOuterProduct end
 
 function LazyOuterProduct(tm1::LazyTensor{T}, tm2::LazyTensor{T}) where T
-    itm1 = InflatedLazyTensor(tm1, IdentityTensor{T}(range_size(tm2)))
-    itm2 = InflatedLazyTensor(IdentityTensor{T}(domain_size(tm1)),tm2)
+    itm1 = InflatedTensor(tm1, IdentityTensor{T}(range_size(tm2)))
+    itm2 = InflatedTensor(IdentityTensor{T}(domain_size(tm1)),tm2)
 
     return itm1∘itm2
 end
 
 LazyOuterProduct(t1::IdentityTensor{T}, t2::IdentityTensor{T}) where T = IdentityTensor{T}(t1.size...,t2.size...)
-LazyOuterProduct(t1::LazyTensor, t2::IdentityTensor) = InflatedLazyTensor(t1, t2)
-LazyOuterProduct(t1::IdentityTensor, t2::LazyTensor) = InflatedLazyTensor(t1, t2)
+LazyOuterProduct(t1::LazyTensor, t2::IdentityTensor) = InflatedTensor(t1, t2)
+LazyOuterProduct(t1::IdentityTensor, t2::LazyTensor) = InflatedTensor(t1, t2)
 
 LazyOuterProduct(tms::Vararg{LazyTensor}) = foldl(LazyOuterProduct, tms)
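
An illustrative sketch of the inflation and outer-product API above; the final equality is expected to hold based on the tests later in this changeset, but the example itself is not from the repository:

    using Sbplib.LazyTensors

    A  = DenseTensor(rand(4,2), (1,), (2,))    # maps size-2 vectors to size-4 vectors
    tm = InflatedTensor(IdentityTensor(3), A, IdentityTensor(5))

    domain_size(tm)    # (3, 2, 5)
    range_size(tm)     # (3, 4, 5)

    v = rand(domain_size(tm)...)
    (tm*v)[1,2,3]      # A acts only along the middle dimension

    IdentityTensor(3) ⊗ A ⊗ IdentityTensor(5) == tm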
 
--- a/src/LazyTensors/tensor_types.jl	Mon Mar 21 10:04:15 2022 +0100
+++ b/src/LazyTensors/tensor_types.jl	Tue Mar 22 22:05:34 2022 +0100
@@ -2,7 +2,7 @@
     IdentityTensor{T,D} <: LazyTensor{T,D,D}
 
 The lazy identity LazyTensor for a given size. Useful for building up higher dimensional tensor mappings from lower
-dimensional ones through outer products. Also used in the Implementation for InflatedLazyTensor.
+dimensional ones through outer products. Also used in the implementation of InflatedTensor.
 """
 struct IdentityTensor{T,D} <: LazyTensor{T,D,D}
     size::NTuple{D,Int}
@@ -37,20 +37,20 @@
 
 
 """
-    LazyLinearMap{T,R,D,...}(A, range_indicies, domain_indicies)
+    DenseTensor{T,R,D,...}(A, range_indicies, domain_indicies)
 
 LazyTensor defined by the AbstractArray A. `range_indicies` and `domain_indicies` define which indices of A should
 be considered the range and domain of the LazyTensor. Each set of indices must be sorted in ascending order.
 
-For instance, if A is a m x n matrix, and range_size = (1,), domain_size = (2,), then the LazyLinearMap performs the
+For instance, if A is an m x n matrix and range_indicies = (1,), domain_indicies = (2,), then the DenseTensor performs the
 standard matrix-vector product on vectors of size n.
 """
-struct LazyLinearMap{T,R,D, RD, AA<:AbstractArray{T,RD}} <: LazyTensor{T,R,D}
+struct DenseTensor{T,R,D, RD, AA<:AbstractArray{T,RD}} <: LazyTensor{T,R,D}
     A::AA
     range_indicies::NTuple{R,Int}
     domain_indicies::NTuple{D,Int}
 
-    function LazyLinearMap(A::AA, range_indicies::NTuple{R,Int}, domain_indicies::NTuple{D,Int}) where {T,R,D, RD, AA<:AbstractArray{T,RD}}
+    function DenseTensor(A::AA, range_indicies::NTuple{R,Int}, domain_indicies::NTuple{D,Int}) where {T,R,D, RD, AA<:AbstractArray{T,RD}}
         if !issorted(range_indicies) || !issorted(domain_indicies)
             throw(DomainError("range_indicies and domain_indicies must be sorted in ascending order"))
         end
@@ -59,10 +59,10 @@
     end
 end
 
-range_size(llm::LazyLinearMap) = size(llm.A)[[llm.range_indicies...]]
-domain_size(llm::LazyLinearMap) = size(llm.A)[[llm.domain_indicies...]]
+range_size(llm::DenseTensor) = size(llm.A)[[llm.range_indicies...]]
+domain_size(llm::DenseTensor) = size(llm.A)[[llm.domain_indicies...]]
 
-function apply(llm::LazyLinearMap{T,R,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,R}) where {T,R,D}
+function apply(llm::DenseTensor{T,R,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,R}) where {T,R,D}
     view_index = ntuple(i->:,ndims(llm.A))
     for i ∈ 1:R
         view_index = Base.setindex(view_index, Int(I[i]), llm.range_indicies[i])
@@ -71,6 +71,6 @@
     return sum(A_view.*v)
 end
 
-function apply_transpose(llm::LazyLinearMap{T,R,D}, v::AbstractArray{<:Any,R}, I::Vararg{Any,D}) where {T,R,D}
-    apply(LazyLinearMap(llm.A, llm.domain_indicies, llm.range_indicies), v, I...)
+function apply_transpose(llm::DenseTensor{T,R,D}, v::AbstractArray{<:Any,R}, I::Vararg{Any,D}) where {T,R,D}
+    apply(DenseTensor(llm.A, llm.domain_indicies, llm.range_indicies), v, I...)
 end
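
An illustrative sketch of `DenseTensor` (the renamed `LazyLinearMap`) beyond the plain matrix-vector case, mirroring the tests near the end of this changeset:

    using Sbplib.LazyTensors

    B = rand(3,4,2)
    B̃ = DenseTensor(B, (1,2), (3,))    # maps size-2 vectors to 3×4 matrices
    v = rand(2)
    size(B̃*v)     # (3, 4)

    w = rand(3,4)
    size(B̃'*w)    # (2,); the transpose maps matrices back to vectors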
--- a/src/SbpOperators/SbpOperators.jl	Mon Mar 21 10:04:15 2022 +0100
+++ b/src/SbpOperators/SbpOperators.jl	Tue Mar 22 22:05:34 2022 +0100
@@ -1,5 +1,15 @@
 module SbpOperators
 
+# Stencil set
+export StencilSet
+export read_stencil_set
+export get_stencil_set
+export parse_stencil
+export parse_scalar
+export parse_tuple
+export sbp_operators_path
+
+# Operators
 export boundary_quadrature
 export boundary_restriction
 export inner_product
@@ -20,7 +30,7 @@
 end
 
 include("stencil.jl")
-include("readoperator.jl")
+include("stencil_set.jl")
 include("volumeops/volume_operator.jl")
 include("volumeops/constant_interior_scaling_operator.jl")
 include("volumeops/derivatives/first_derivative.jl")
--- a/src/SbpOperators/boundaryops/boundary_restriction.jl	Mon Mar 21 10:04:15 2022 +0100
+++ b/src/SbpOperators/boundaryops/boundary_restriction.jl	Tue Mar 22 22:05:34 2022 +0100
@@ -1,6 +1,3 @@
-# TODO: The type parameter closure_stencil::Stencil is required since there isnt any suitable type
-# for stencil_set. We should consider adding type ::StencilSet and dispatch on that instead.
-# The same goes for other operators
 """
     boundary_restriction(grid, closure_stencil::Stencil, boundary)
 
@@ -13,7 +10,7 @@
 
 See also: [`boundary_operator`](@ref).
 """
-function boundary_restriction(grid, closure_stencil::Stencil, boundary)
+function boundary_restriction(grid, closure_stencil, boundary)
     converted_stencil = convert(Stencil{eltype(grid)}, closure_stencil)
     return SbpOperators.boundary_operator(grid, converted_stencil, boundary)
 end
@@ -21,7 +18,6 @@
 """
     boundary_restriction(grid, stencil_set, boundary)
 
-Creates a `boundary_restriction` operator on `grid` given a parsed TOML
-`stencil_set`.
+Creates a `boundary_restriction` operator on `grid` given a `stencil_set`.
 """
-boundary_restriction(grid, stencil_set, boundary) = boundary_restriction(grid, parse_stencil(stencil_set["e"]["closure"]), boundary)
+boundary_restriction(grid, stencil_set::StencilSet, boundary) = boundary_restriction(grid, parse_stencil(stencil_set["e"]["closure"]), boundary)
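
An illustrative sketch of the new `StencilSet` method, following the test file further down; the `EquidistantGrid` constructor form is an assumption:

    using Sbplib.Grids, Sbplib.SbpOperators, Sbplib.RegionIndices

    stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order = 4)
    g_2D = EquidistantGrid((11,15), (0.,0.), (1.,1.))    # constructor form assumed
    e_w  = boundary_restriction(g_2D, stencil_set, CartesianBoundary{1,Upper}())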
--- a/src/SbpOperators/boundaryops/normal_derivative.jl	Mon Mar 21 10:04:15 2022 +0100
+++ b/src/SbpOperators/boundaryops/normal_derivative.jl	Tue Mar 22 22:05:34 2022 +0100
@@ -10,7 +10,7 @@
 
 See also: [`boundary_operator`](@ref).
 """
-function normal_derivative(grid, closure_stencil::Stencil, boundary)
+function normal_derivative(grid, closure_stencil, boundary)
     direction = dim(boundary)
     h_inv = inverse_spacing(grid)[direction]
     return SbpOperators.boundary_operator(grid, scale(closure_stencil,h_inv), boundary)
@@ -19,7 +19,6 @@
 """
     normal_derivative(grid, stencil_set, boundary)
 
-Creates a `normal_derivative` operator on `grid` given a parsed TOML
-`stencil_set`.
+Creates a `normal_derivative` operator on `grid` given a `stencil_set`.
 """
-normal_derivative(grid, stencil_set, boundary) = normal_derivative(grid, parse_stencil(stencil_set["d1"]["closure"]), boundary)
+normal_derivative(grid, stencil_set::StencilSet, boundary) = normal_derivative(grid, parse_stencil(stencil_set["d1"]["closure"]), boundary)
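
Similarly for `normal_derivative`: the `StencilSet` method above is a shorthand for parsing the "d1" closure and calling the stencil-based method (grid constructor form assumed):

    using Sbplib.Grids, Sbplib.SbpOperators, Sbplib.RegionIndices

    stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order = 4)
    g = EquidistantGrid((11,15), (0.,0.), (1.,1.))    # constructor form assumed
    d_w = normal_derivative(g, stencil_set, CartesianBoundary{1,Lower}())
    # ...which reduces to the closure-stencil method:
    normal_derivative(g, parse_stencil(stencil_set["d1"]["closure"]), CartesianBoundary{1,Lower}())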
--- a/src/SbpOperators/readoperator.jl	Mon Mar 21 10:04:15 2022 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,160 +0,0 @@
-using TOML
-
-export read_stencil_set
-export get_stencil_set
-
-export parse_stencil
-export parse_scalar
-export parse_tuple
-
-export sbp_operators_path
-
-
-"""
-    read_stencil_set(filename; filters)
-
-Picks out a stencil set from a TOML file based on some key-value
-filters. If more than one set matches the filters an error is raised. The
-returned stencil set contains parsed TOML intended for functions like
-`parse_scalar` and `parse_stencil`.
-
-The stencil set is not parsed beyond the inital TOML parse. To get usable
-stencils use the `parse_stencil` functions on the fields of the stencil set.
-
-The reason for this is that since stencil sets are intended to be very
-general, and currently do not include any way to specify how to parse a given
-section, the exact parsing is left to the user.
-
-For more information see [Operator file format](@ref) in the documentation.
-
-See also [`sbp_operators_path`](@ref), [`get_stencil_set`](@ref), [`parse_stencil`](@ref), [`parse_scalar`](@ref), [`parse_tuple`](@ref),.
-"""
-read_stencil_set(filename; filters...) = get_stencil_set(TOML.parsefile(filename); filters...)
-
-"""
-    get_stencil_set(parsed_toml; filters...)
-
-Picks out a stencil set from an already parsed TOML based on some key-value
-filters.
-
-See also [`read_stencil_set`](@ref).
-"""
-function get_stencil_set(parsed_toml; filters...)
-    matches = findall(parsed_toml["stencil_set"]) do set
-        for (key, val) ∈ filters
-            if set[string(key)] != val
-                return false
-            end
-        end
-
-        return true
-    end
-
-    if length(matches) != 1
-        throw(ArgumentError("filters must pick out a single stencil set"))
-    end
-
-    i = matches[1]
-    return parsed_toml["stencil_set"][i]
-end
-
-"""
-    parse_stencil(parsed_toml)
-
-Accepts parsed TOML and reads it as a stencil.
-
-See also [`read_stencil_set`](@ref), [`parse_scalar`](@ref), [`parse_tuple`](@ref).
-"""
-function parse_stencil(parsed_toml)
-    check_stencil_toml(parsed_toml)
-
-    if parsed_toml isa Array
-        weights = parse_rational.(parsed_toml)
-        return CenteredStencil(weights...)
-    end
-
-    weights = parse_rational.(parsed_toml["s"])
-    return Stencil(weights..., center = parsed_toml["c"])
-end
-
-"""
-    parse_stencil(T, parsed_toml)
-
-Parses the input as a stencil with element type `T`.
-"""
-parse_stencil(T, parsed_toml) = Stencil{T}(parse_stencil(parsed_toml))
-
-function check_stencil_toml(parsed_toml)
-    if !(parsed_toml isa Dict || parsed_toml isa Vector{String})
-        throw(ArgumentError("the TOML for a stencil must be a vector of strings or a table."))
-    end
-
-    if parsed_toml isa Vector{String}
-        return
-    end
-
-    if !(haskey(parsed_toml, "s") && haskey(parsed_toml, "c"))
-        throw(ArgumentError("the table form of a stencil must have fields `s` and `c`."))
-    end
-
-    if !(parsed_toml["s"] isa Vector{String})
-        throw(ArgumentError("a stencil must be specified as a vector of strings."))
-    end
-
-    if !(parsed_toml["c"] isa Int)
-        throw(ArgumentError("the center of a stencil must be specified as an integer."))
-    end
-end
-
-"""
-    parse_scalar(parsed_toml)
-
-Parse a scalar, represented as a string or a number in the TOML, and return it as a `Rational`
-
-See also [`read_stencil_set`](@ref), [`parse_stencil`](@ref) [`parse_tuple`](@ref).
-"""
-function parse_scalar(parsed_toml)
-    try
-        return parse_rational(parsed_toml)
-    catch e
-        throw(ArgumentError("must be a number or a string representing a number."))
-    end
-end
-
-"""
-    parse_tuple(parsed_toml)
-
-Parse an array as a tuple of scalars.
-
-See also [`read_stencil_set`](@ref), [`parse_stencil`](@ref), [`parse_scalar`](@ref).
-"""
-function parse_tuple(parsed_toml)
-    if !(parsed_toml isa Array)
-        throw(ArgumentError("argument must be an array"))
-    end
-    return Tuple(parse_scalar.(parsed_toml))
-end
-
-
-"""
-    parse_rational(parsed_toml)
-
-Parse a string or a number as a rational.
-"""
-function parse_rational(parsed_toml)
-    if parsed_toml isa String
-        expr = Meta.parse(replace(parsed_toml, "/"=>"//"))
-        return eval(:(Rational($expr)))
-    else
-        return Rational(parsed_toml)
-    end
-end
-
-"""
-    sbp_operators_path()
-
-Calculate the path for the operators folder with included stencil sets.
-
-See also [`read_stencil_set`](@ref)
-"""
-sbp_operators_path() = (@__DIR__) * "/operators/"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/src/SbpOperators/stencil_set.jl	Tue Mar 22 22:05:34 2022 +0100
@@ -0,0 +1,164 @@
+using TOML
+
+
+"""
+    StencilSet
+
+A `StencilSet` contains a set of associated stencils. The stencils
+are stored in a table and can be accessed by indexing into the `StencilSet`.
+"""
+struct StencilSet
+    table
+end
+Base.getindex(set::StencilSet,I...) = set.table[I...]
+
+
+"""
+    read_stencil_set(filename; filters)
+
+Creates a `StencilSet` from a TOML file based on some key-value
+filters. If more than one set matches the filters, an error is raised. The
+table of the `StencilSet` holds the parsed TOML, intended for functions like
+`parse_scalar` and `parse_stencil`.
+
+The `StencilSet` table is not parsed beyond the initial TOML parse. To get usable
+stencils, use the `parse_stencil` functions on the fields of the stencil set.
+
+The reason for this is that stencil sets are intended to be very
+general and currently do not include any way to specify how to parse a given
+section, so the exact parsing is left to the user.
+
+For more information see [Operator file format](@ref) in the documentation.
+
+See also [`StencilSet`](@ref), [`sbp_operators_path`](@ref), [`get_stencil_set`](@ref), [`parse_stencil`](@ref), [`parse_scalar`](@ref), [`parse_tuple`](@ref).
+"""
+read_stencil_set(filename; filters...) = StencilSet(get_stencil_set(TOML.parsefile(filename); filters...))
+
+
+"""
+    get_stencil_set(parsed_toml; filters...)
+
+Picks out a stencil set from an already parsed TOML based on some key-value
+filters.
+
+See also [`read_stencil_set`](@ref).
+"""
+function get_stencil_set(parsed_toml; filters...)
+    matches = findall(parsed_toml["stencil_set"]) do set
+        for (key, val) ∈ filters
+            if set[string(key)] != val
+                return false
+            end
+        end
+
+        return true
+    end
+
+    if length(matches) != 1
+        throw(ArgumentError("filters must pick out a single stencil set"))
+    end
+
+    i = matches[1]
+    return parsed_toml["stencil_set"][i]
+end
+
+"""
+    parse_stencil(parsed_toml)
+
+Accepts parsed TOML and reads it as a stencil.
+
+See also [`read_stencil_set`](@ref), [`parse_scalar`](@ref), [`parse_tuple`](@ref).
+"""
+function parse_stencil(parsed_toml)
+    check_stencil_toml(parsed_toml)
+
+    if parsed_toml isa Array
+        weights = parse_rational.(parsed_toml)
+        return CenteredStencil(weights...)
+    end
+
+    weights = parse_rational.(parsed_toml["s"])
+    return Stencil(weights..., center = parsed_toml["c"])
+end
+
+"""
+    parse_stencil(T, parsed_toml)
+
+Parses the input as a stencil with element type `T`.
+"""
+parse_stencil(T, parsed_toml) = Stencil{T}(parse_stencil(parsed_toml))
+
+function check_stencil_toml(parsed_toml)
+    if !(parsed_toml isa Dict || parsed_toml isa Vector{String})
+        throw(ArgumentError("the TOML for a stencil must be a vector of strings or a table."))
+    end
+
+    if parsed_toml isa Vector{String}
+        return
+    end
+
+    if !(haskey(parsed_toml, "s") && haskey(parsed_toml, "c"))
+        throw(ArgumentError("the table form of a stencil must have fields `s` and `c`."))
+    end
+
+    if !(parsed_toml["s"] isa Vector{String})
+        throw(ArgumentError("a stencil must be specified as a vector of strings."))
+    end
+
+    if !(parsed_toml["c"] isa Int)
+        throw(ArgumentError("the center of a stencil must be specified as an integer."))
+    end
+end
+
+"""
+    parse_scalar(parsed_toml)
+
+Parse a scalar, represented as a string or a number in the TOML, and return it as a `Rational`.
+
+See also [`read_stencil_set`](@ref), [`parse_stencil`](@ref), [`parse_tuple`](@ref).
+"""
+function parse_scalar(parsed_toml)
+    try
+        return parse_rational(parsed_toml)
+    catch e
+        throw(ArgumentError("must be a number or a string representing a number."))
+    end
+end
+
+"""
+    parse_tuple(parsed_toml)
+
+Parse an array as a tuple of scalars.
+
+See also [`read_stencil_set`](@ref), [`parse_stencil`](@ref), [`parse_scalar`](@ref).
+"""
+function parse_tuple(parsed_toml)
+    if !(parsed_toml isa Array)
+        throw(ArgumentError("argument must be an array"))
+    end
+    return Tuple(parse_scalar.(parsed_toml))
+end
+
+
+"""
+    parse_rational(parsed_toml)
+
+Parse a string or a number as a rational.
+"""
+function parse_rational(parsed_toml)
+    if parsed_toml isa String
+        expr = Meta.parse(replace(parsed_toml, "/"=>"//"))
+        return eval(:(Rational($expr)))
+    else
+        return Rational(parsed_toml)
+    end
+end
+
+"""
+    sbp_operators_path()
+
+Calculate the path for the operators folder with included stencil sets.
+
+See also [`StencilSet`](@ref), [`read_stencil_set`](@ref).
+"""
+sbp_operators_path() = (@__DIR__) * "/operators/"
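
An illustrative sketch of the `StencilSet` workflow defined above; the TOML snippet is made up for illustration and mirrors the format used in the test files:

    using TOML
    using Sbplib.SbpOperators

    parsed = TOML.parse("""
        [[stencil_set]]
        order = 2
        H.inner = ["1"]
        H.closure = ["1/2"]
        D2.inner_stencil = ["1", "-2", "1"]
        D2.closure_stencils = [{s = ["1", "-2", "1"], c = 1}]
        """)

    set = StencilSet(get_stencil_set(parsed; order = 2))
    parse_scalar(set["H"]["inner"][1])           # 1//1
    parse_tuple(set["H"]["closure"])             # (1//2,)
    parse_stencil(set["D2"]["inner_stencil"])    # a CenteredStencil with weights 1//1, -2//1, 1//1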
--- a/src/SbpOperators/volumeops/derivatives/first_derivative.jl	Mon Mar 21 10:04:15 2022 +0100
+++ b/src/SbpOperators/volumeops/derivatives/first_derivative.jl	Tue Mar 22 22:05:34 2022 +0100
@@ -16,16 +16,31 @@
     h_inv = inverse_spacing(grid)[direction]
     return SbpOperators.volume_operator(grid, scale(inner_stencil,h_inv), scale.(closure_stencils,h_inv), odd, direction)
 end
-first_derivative(grid::EquidistantGrid{1}, inner_stencil::Stencil, closure_stencils) = first_derivative(grid,inner_stencil,closure_stencils,1)
+
 
 """
-    first_derivative(grid, stencil_set, direction)
+    first_derivative(grid, inner_stencil, closure_stencils)
+
+Creates a `first_derivative` operator on a 1D `grid` given `inner_stencil` and `closure_stencils`.
+"""
+first_derivative(grid::EquidistantGrid{1}, inner_stencil::Stencil, closure_stencils) = first_derivative(grid, inner_stencil, closure_stencils, 1)
+
 
-Creates a `first_derivative` operator on `grid` along coordinate dimension `direction` given a parsed TOML
-`stencil_set`.
 """
-function first_derivative(grid::EquidistantGrid, stencil_set, direction)
+    first_derivative(grid, stencil_set::StencilSet, direction)
+
+Creates a `first_derivative` operator on `grid` along coordinate dimension `direction` given a `stencil_set`.
+"""
+function first_derivative(grid::EquidistantGrid, stencil_set::StencilSet, direction)
     inner_stencil = parse_stencil(stencil_set["D1"]["inner_stencil"])
     closure_stencils = parse_stencil.(stencil_set["D1"]["closure_stencils"])
     first_derivative(grid,inner_stencil,closure_stencils,direction);
 end
+
+
+"""
+    first_derivative(grid, stencil_set)
+
+Creates a `first_derivative` operator on a 1D `grid` given a `stencil_set`.
+"""
+first_derivative(grid::EquidistantGrid{1}, stencil_set::StencilSet) = first_derivative(grid, stencil_set, 1)
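
An illustrative sketch of the three `first_derivative` methods documented above; the grid constructor forms and the presence of `D1` stencils in the order-2 set of standard_diagonal.toml are assumptions:

    using Sbplib.Grids, Sbplib.SbpOperators

    stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order = 2)
    g_1D = EquidistantGrid(101, 0., 1.)                  # constructor form assumed
    g_2D = EquidistantGrid((21,21), (0.,0.), (1.,1.))    # constructor form assumed

    D1  = first_derivative(g_1D, stencil_set)        # 1D convenience method
    D1x = first_derivative(g_2D, stencil_set, 1)     # along the first coordinate direction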
--- a/src/SbpOperators/volumeops/derivatives/second_derivative.jl	Mon Mar 21 10:04:15 2022 +0100
+++ b/src/SbpOperators/volumeops/derivatives/second_derivative.jl	Tue Mar 22 22:05:34 2022 +0100
@@ -16,16 +16,31 @@
     h_inv = inverse_spacing(grid)[direction]
     return SbpOperators.volume_operator(grid, scale(inner_stencil,h_inv^2), scale.(closure_stencils,h_inv^2), even, direction)
 end
-second_derivative(grid::EquidistantGrid{1}, inner_stencil::Stencil, closure_stencils) = second_derivative(grid,inner_stencil,closure_stencils,1)
+
+
+"""
+    second_derivative(grid, inner_stencil, closure_stencils)
+
+Creates a `second_derivative` operator on a 1D `grid` given `inner_stencil` and `closure_stencils`.
+"""
+second_derivative(grid::EquidistantGrid{1}, inner_stencil::Stencil, closure_stencils) = second_derivative(grid, inner_stencil, closure_stencils, 1)
+
 
 """
     second_derivative(grid, stencil_set, direction)
 
-Creates a `second_derivative` operator on `grid` along coordinate dimension `direction` given a parsed TOML
-`stencil_set`.
+Creates a `second_derivative` operator on `grid` along coordinate dimension `direction` given a `stencil_set`.
 """
-function second_derivative(grid::EquidistantGrid, stencil_set, direction)
+function second_derivative(grid::EquidistantGrid, stencil_set::StencilSet, direction)
     inner_stencil = parse_stencil(stencil_set["D2"]["inner_stencil"])
     closure_stencils = parse_stencil.(stencil_set["D2"]["closure_stencils"])
     second_derivative(grid,inner_stencil,closure_stencils,direction);
-end 
+end
+
+
+"""
+    second_derivative(grid, stencil_set)
+
+Creates a `second_derivative` operator on a 1D `grid` given a `stencil_set`.
+"""
+second_derivative(grid::EquidistantGrid{1}, stencil_set::StencilSet) = second_derivative(grid, stencil_set, 1)
\ No newline at end of file
--- a/src/SbpOperators/volumeops/inner_products/inner_product.jl	Mon Mar 21 10:04:15 2022 +0100
+++ b/src/SbpOperators/volumeops/inner_products/inner_product.jl	Tue Mar 22 22:05:34 2022 +0100
@@ -37,10 +37,9 @@
 """
     inner_product(grid, stencil_set)
 
-Creates a `inner_product` operator on `grid` given a parsed TOML
-`stencil_set`.
+Creates an `inner_product` operator on `grid` given a `stencil_set`.
 """
-function inner_product(grid, stencil_set)
+function inner_product(grid, stencil_set::StencilSet)
     inner_stencil = parse_scalar(stencil_set["H"]["inner"])
     closure_stencils = parse_tuple(stencil_set["H"]["closure"])
     return inner_product(grid, inner_stencil, closure_stencils)
--- a/src/SbpOperators/volumeops/inner_products/inverse_inner_product.jl	Mon Mar 21 10:04:15 2022 +0100
+++ b/src/SbpOperators/volumeops/inner_products/inverse_inner_product.jl	Tue Mar 22 22:05:34 2022 +0100
@@ -33,10 +33,9 @@
 """
     inverse_inner_product(grid, stencil_set)
 
-Creates a `inverse_inner_product` operator on `grid` given a parsed TOML
-`stencil_set`.
+Creates an `inverse_inner_product` operator on `grid` given a `stencil_set`.
 """
-function inverse_inner_product(grid, stencil_set)
+function inverse_inner_product(grid, stencil_set::StencilSet)
     inner_stencil = parse_scalar(stencil_set["H"]["inner"])
     closure_stencils = parse_tuple(stencil_set["H"]["closure"])
     return inverse_inner_product(grid, inner_stencil, closure_stencils)
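
An illustrative sketch covering `inner_product` and `inverse_inner_product` with the new `StencilSet` argument (grid constructor form assumed):

    using Sbplib.Grids, Sbplib.SbpOperators

    stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order = 2)
    g = EquidistantGrid(101, 0., 1.)    # constructor form assumed
    H    = inner_product(g, stencil_set)
    Hinv = inverse_inner_product(g, stencil_set)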
--- a/src/SbpOperators/volumeops/laplace/laplace.jl	Mon Mar 21 10:04:15 2022 +0100
+++ b/src/SbpOperators/volumeops/laplace/laplace.jl	Tue Mar 22 22:05:34 2022 +0100
@@ -2,21 +2,22 @@
     Laplace{T, Dim, TM} <: LazyTensor{T, Dim, Dim}
 
 Implements the Laplace operator, approximating ∑d²/dxᵢ², i = 1,...,`Dim`, as a
-`LazyTensor`. Additionally `Laplace` stores the stencil set (parsed from TOML)
-used to construct the `LazyTensor`.
+`LazyTensor`. Additionally `Laplace` stores the `StencilSet`
+used to construct the `LazyTensor`.
 """
 struct Laplace{T, Dim, TM<:LazyTensor{T, Dim, Dim}} <: LazyTensor{T, Dim, Dim}
     D::TM       # Difference operator
-    stencil_set # Stencil set of the operator
+    stencil_set::StencilSet # Stencil set of the operator
 end
 
 """
     Laplace(grid::EquidistantGrid, stencil_set)
 
-Creates the `Laplace` operator `Δ` on `grid` given a parsed TOML
-`stencil_set`. See also [`laplace`](@ref).
+Creates the `Laplace` operator `Δ` on `grid` given a `stencil_set`. 
+
+See also [`laplace`](@ref).
 """
-function Laplace(grid::EquidistantGrid, stencil_set)
+function Laplace(grid::EquidistantGrid, stencil_set::StencilSet)
     inner_stencil = parse_stencil(stencil_set["D2"]["inner_stencil"])
     closure_stencils = parse_stencil.(stencil_set["D2"]["closure_stencils"])
     Δ = laplace(grid, inner_stencil,closure_stencils)
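
An illustrative sketch of constructing `Laplace` from a `StencilSet`; the grid constructor form, and that `Δ` can be applied like any other `LazyTensor`, are assumptions based on the surrounding code:

    using Sbplib.Grids, Sbplib.SbpOperators

    stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order = 4)
    g = EquidistantGrid((30,40), (0.,0.), (1.,2.))    # constructor form assumed
    Δ = Laplace(g, stencil_set)

    u = rand(30, 40)    # matches the grid size above
    (Δ*u)[2,3]          # lazily evaluated sum of second derivatives at one grid point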
--- a/test/LazyTensors/lazy_tensor_operations_test.jl	Mon Mar 21 10:04:15 2022 +0100
+++ b/test/LazyTensors/lazy_tensor_operations_test.jl	Tue Mar 22 22:05:34 2022 +0100
@@ -36,7 +36,7 @@
 end
 
 
-@testset "LazyTensorApplication" begin
+@testset "TensorApplication" begin
     m = SizeDoublingMapping{Int, 1, 1}((3,))
     mm = SizeDoublingMapping{Int, 1, 1}((6,))
     v = [0,1,2]
@@ -159,14 +159,14 @@
 end
 
 
-@testset "LazyTensorComposition" begin
+@testset "TensorComposition" begin
     A = rand(2,3)
     B = rand(3,4)
 
-    Ã = LazyLinearMap(A, (1,), (2,))
-    B̃ = LazyLinearMap(B, (1,), (2,))
+    Ã = DenseTensor(A, (1,), (2,))
+    B̃ = DenseTensor(B, (1,), (2,))
 
-    @test Ã∘B̃ isa LazyTensorComposition
+    @test Ã∘B̃ isa TensorComposition
     @test range_size(Ã∘B̃) == (2,)
     @test domain_size(Ã∘B̃) == (4,)
     @test_throws DomainSizeMismatch B̃∘Ã
@@ -184,38 +184,38 @@
 end
 
 
-@testset "InflatedLazyTensor" begin
+@testset "InflatedTensor" begin
     I(sz...) = IdentityTensor(sz...)
 
     Ã = rand(4,2)
     B̃ = rand(4,2,3)
     C̃ = rand(4,2,3)
 
-    A = LazyLinearMap(Ã,(1,),(2,))
-    B = LazyLinearMap(B̃,(1,2),(3,))
-    C = LazyLinearMap(C̃,(1,),(2,3))
+    A = DenseTensor(Ã,(1,),(2,))
+    B = DenseTensor(B̃,(1,2),(3,))
+    C = DenseTensor(C̃,(1,),(2,3))
 
     @testset "Constructors" begin
-        @test InflatedLazyTensor(I(3,2), A, I(4)) isa LazyTensor{Float64, 4, 4}
-        @test InflatedLazyTensor(I(3,2), B, I(4)) isa LazyTensor{Float64, 5, 4}
-        @test InflatedLazyTensor(I(3), C, I(2,3)) isa LazyTensor{Float64, 4, 5}
-        @test InflatedLazyTensor(C, I(2,3)) isa LazyTensor{Float64, 3, 4}
-        @test InflatedLazyTensor(I(3), C) isa LazyTensor{Float64, 2, 3}
-        @test InflatedLazyTensor(I(3), I(2,3)) isa LazyTensor{Float64, 3, 3}
+        @test InflatedTensor(I(3,2), A, I(4)) isa LazyTensor{Float64, 4, 4}
+        @test InflatedTensor(I(3,2), B, I(4)) isa LazyTensor{Float64, 5, 4}
+        @test InflatedTensor(I(3), C, I(2,3)) isa LazyTensor{Float64, 4, 5}
+        @test InflatedTensor(C, I(2,3)) isa LazyTensor{Float64, 3, 4}
+        @test InflatedTensor(I(3), C) isa LazyTensor{Float64, 2, 3}
+        @test InflatedTensor(I(3), I(2,3)) isa LazyTensor{Float64, 3, 3}
     end
 
     @testset "Range and domain size" begin
-        @test range_size(InflatedLazyTensor(I(3,2), A, I(4))) == (3,2,4,4)
-        @test domain_size(InflatedLazyTensor(I(3,2), A, I(4))) == (3,2,2,4)
+        @test range_size(InflatedTensor(I(3,2), A, I(4))) == (3,2,4,4)
+        @test domain_size(InflatedTensor(I(3,2), A, I(4))) == (3,2,2,4)
 
-        @test range_size(InflatedLazyTensor(I(3,2), B, I(4))) == (3,2,4,2,4)
-        @test domain_size(InflatedLazyTensor(I(3,2), B, I(4))) == (3,2,3,4)
+        @test range_size(InflatedTensor(I(3,2), B, I(4))) == (3,2,4,2,4)
+        @test domain_size(InflatedTensor(I(3,2), B, I(4))) == (3,2,3,4)
 
-        @test range_size(InflatedLazyTensor(I(3), C, I(2,3))) == (3,4,2,3)
-        @test domain_size(InflatedLazyTensor(I(3), C, I(2,3))) == (3,2,3,2,3)
+        @test range_size(InflatedTensor(I(3), C, I(2,3))) == (3,4,2,3)
+        @test domain_size(InflatedTensor(I(3), C, I(2,3))) == (3,2,3,2,3)
 
-        @inferred range_size(InflatedLazyTensor(I(3,2), A, I(4))) == (3,2,4,4)
-        @inferred domain_size(InflatedLazyTensor(I(3,2), A, I(4))) == (3,2,2,4)
+        @inferred range_size(InflatedTensor(I(3,2), A, I(4))) == (3,2,4,4)
+        @inferred domain_size(InflatedTensor(I(3,2), A, I(4))) == (3,2,2,4)
     end
 
     @testset "Application" begin
@@ -223,47 +223,47 @@
         # The inflated tensor mappings are chosen to preserve, reduce and increase the dimension of the result compared to the input.
         cases = [
             (
-                InflatedLazyTensor(I(3,2), A, I(4)),
+                InflatedTensor(I(3,2), A, I(4)),
                 (v-> @tullio res[a,b,c,d] := Ã[c,i]*v[a,b,i,d]), # Expected result of apply
                 (v-> @tullio res[a,b,c,d] := Ã[i,c]*v[a,b,i,d]), # Expected result of apply_transpose
             ),
             (
-                InflatedLazyTensor(I(3,2), B, I(4)),
+                InflatedTensor(I(3,2), B, I(4)),
                 (v-> @tullio res[a,b,c,d,e] := B̃[c,d,i]*v[a,b,i,e]),
                 (v-> @tullio res[a,b,c,d] := B̃[i,j,c]*v[a,b,i,j,d]),
             ),
             (
-                InflatedLazyTensor(I(3,2), C, I(4)),
+                InflatedTensor(I(3,2), C, I(4)),
                 (v-> @tullio res[a,b,c,d] := C̃[c,i,j]*v[a,b,i,j,d]),
                 (v-> @tullio res[a,b,c,d,e] := C̃[i,c,d]*v[a,b,i,e]),
             ),
             (
-                InflatedLazyTensor(I(3,2), A),
+                InflatedTensor(I(3,2), A),
                 (v-> @tullio res[a,b,c] := Ã[c,i]*v[a,b,i]),
                 (v-> @tullio res[a,b,c] := Ã[i,c]*v[a,b,i]),
             ),
             (
-                InflatedLazyTensor(I(3,2), B),
+                InflatedTensor(I(3,2), B),
                 (v-> @tullio res[a,b,c,d] := B̃[c,d,i]*v[a,b,i]),
                 (v-> @tullio res[a,b,c] := B̃[i,j,c]*v[a,b,i,j]),
             ),
             (
-                InflatedLazyTensor(I(3,2), C),
+                InflatedTensor(I(3,2), C),
                 (v-> @tullio res[a,b,c] := C̃[c,i,j]*v[a,b,i,j]),
                 (v-> @tullio res[a,b,c,d] := C̃[i,c,d]*v[a,b,i]),
             ),
             (
-                InflatedLazyTensor(A,I(4)),
+                InflatedTensor(A,I(4)),
                 (v-> @tullio res[a,b] := Ã[a,i]*v[i,b]),
                 (v-> @tullio res[a,b] := Ã[i,a]*v[i,b]),
             ),
             (
-                InflatedLazyTensor(B,I(4)),
+                InflatedTensor(B,I(4)),
                 (v-> @tullio res[a,b,c] := B̃[a,b,i]*v[i,c]),
                 (v-> @tullio res[a,b] := B̃[i,j,a]*v[i,j,b]),
             ),
             (
-                InflatedLazyTensor(C,I(4)),
+                InflatedTensor(C,I(4)),
                 (v-> @tullio res[a,b] := C̃[a,i,j]*v[i,j,b]),
                 (v-> @tullio res[a,b,c] := C̃[i,a,b]*v[i,c]),
             ),
@@ -278,7 +278,7 @@
         end
 
         @testset "application to other type" begin
-            tm = InflatedLazyTensor(I(3,2), A, I(4))
+            tm = InflatedTensor(I(3,2), A, I(4))
 
             v = rand(ComplexF64, domain_size(tm)...)
             @test (tm*v)[1,2,3,1] isa ComplexF64
@@ -288,7 +288,7 @@
         end
 
         @testset "Inference of application" begin
-            tm = InflatedLazyTensor(I(2,3),ScalingTensor(2.0, (3,2)),I(3,4))
+            tm = InflatedTensor(I(2,3),ScalingTensor(2.0, (3,2)),I(3,4))
             v = rand(domain_size(tm)...)
 
             @inferred apply(tm,v,1,2,3,2,2,4)
@@ -296,14 +296,14 @@
         end
     end
 
-    @testset "InflatedLazyTensor of InflatedLazyTensor" begin
+    @testset "InflatedTensor of InflatedTensor" begin
         A = ScalingTensor(2.0,(2,3))
-        itm = InflatedLazyTensor(I(3,2), A, I(4))
-        @test  InflatedLazyTensor(I(4), itm, I(2)) == InflatedLazyTensor(I(4,3,2), A, I(4,2))
-        @test  InflatedLazyTensor(itm, I(2)) == InflatedLazyTensor(I(3,2), A, I(4,2))
-        @test  InflatedLazyTensor(I(4), itm) == InflatedLazyTensor(I(4,3,2), A, I(4))
+        itm = InflatedTensor(I(3,2), A, I(4))
+        @test  InflatedTensor(I(4), itm, I(2)) == InflatedTensor(I(4,3,2), A, I(4,2))
+        @test  InflatedTensor(itm, I(2)) == InflatedTensor(I(3,2), A, I(4,2))
+        @test  InflatedTensor(I(4), itm) == InflatedTensor(I(4,3,2), A, I(4))
 
-        @test InflatedLazyTensor(I(2), I(2), I(2)) isa InflatedLazyTensor # The constructor should always return its type.
+        @test InflatedTensor(I(2), I(2), I(2)) isa InflatedTensor # The constructor should always return its type.
     end
 end
 
@@ -335,8 +335,8 @@
     v₁ = rand(2,4,3)
     v₂ = rand(4,3,2)
 
-    Ã = LazyLinearMap(A,(1,),(2,))
-    B̃ = LazyLinearMap(B,(1,),(2,3))
+    Ã = DenseTensor(A,(1,),(2,))
+    B̃ = DenseTensor(B,(1,),(2,3))
 
     ÃB̃ = LazyOuterProduct(Ã,B̃)
     @tullio ABv[i,k] := A[i,j]*B[k,l,m]*v₁[j,l,m]
@@ -349,13 +349,13 @@
     @testset "Indentity mapping arguments" begin
         @test LazyOuterProduct(IdentityTensor(3,2), IdentityTensor(1,2)) == IdentityTensor(3,2,1,2)
 
-        Ã = LazyLinearMap(A,(1,),(2,))
-        @test LazyOuterProduct(IdentityTensor(3,2), Ã) == InflatedLazyTensor(IdentityTensor(3,2),Ã)
-        @test LazyOuterProduct(Ã, IdentityTensor(3,2)) == InflatedLazyTensor(Ã,IdentityTensor(3,2))
+        Ã = DenseTensor(A,(1,),(2,))
+        @test LazyOuterProduct(IdentityTensor(3,2), Ã) == InflatedTensor(IdentityTensor(3,2),Ã)
+        @test LazyOuterProduct(Ã, IdentityTensor(3,2)) == InflatedTensor(Ã,IdentityTensor(3,2))
 
         I1 = IdentityTensor(3,2)
         I2 = IdentityTensor(4)
-        @test I1⊗Ã⊗I2 == InflatedLazyTensor(I1, Ã, I2)
+        @test I1⊗Ã⊗I2 == InflatedTensor(I1, Ã, I2)
     end
 end
 
@@ -365,9 +365,9 @@
     @test range_size(I) == (3,5,6)
     @test domain_size(I) == (3,5,6)
 
-    @test LazyTensors.inflate(ScalingTensor(1., (4,)),(3,4,5,6), 1) == InflatedLazyTensor(IdentityTensor{Float64}(),ScalingTensor(1., (4,)),IdentityTensor(4,5,6))
-    @test LazyTensors.inflate(ScalingTensor(2., (1,)),(3,4,5,6), 2) == InflatedLazyTensor(IdentityTensor(3),ScalingTensor(2., (1,)),IdentityTensor(5,6))
-    @test LazyTensors.inflate(ScalingTensor(3., (6,)),(3,4,5,6), 4) == InflatedLazyTensor(IdentityTensor(3,4,5),ScalingTensor(3., (6,)),IdentityTensor{Float64}())
+    @test LazyTensors.inflate(ScalingTensor(1., (4,)),(3,4,5,6), 1) == InflatedTensor(IdentityTensor{Float64}(),ScalingTensor(1., (4,)),IdentityTensor(4,5,6))
+    @test LazyTensors.inflate(ScalingTensor(2., (1,)),(3,4,5,6), 2) == InflatedTensor(IdentityTensor(3),ScalingTensor(2., (1,)),IdentityTensor(5,6))
+    @test LazyTensors.inflate(ScalingTensor(3., (6,)),(3,4,5,6), 4) == InflatedTensor(IdentityTensor(3,4,5),ScalingTensor(3., (6,)),IdentityTensor{Float64}())
 
     @test_throws BoundsError LazyTensors.inflate(ScalingTensor(1., (4,)),(3,4,5,6), 0)
     @test_throws BoundsError LazyTensors.inflate(ScalingTensor(1., (4,)),(3,4,5,6), 5)
--- a/test/LazyTensors/tensor_types_test.jl	Mon Mar 21 10:04:15 2022 +0100
+++ b/test/LazyTensors/tensor_types_test.jl	Tue Mar 22 22:05:34 2022 +0100
@@ -32,7 +32,7 @@
     @inferred domain_dim(I)
 
     Ã = rand(4,2)
-    A = LazyLinearMap(Ã,(1,),(2,))
+    A = DenseTensor(Ã,(1,),(2,))
     I1 = IdentityTensor{Float64}(2)
     I2 = IdentityTensor{Float64}(4)
     @test A∘I1 == A
@@ -59,15 +59,15 @@
 end
 
 
-@testset "LazyLinearMap" begin
+@testset "DenseTensor" begin
     # Test a standard matrix-vector product
     # mapping vectors of size 4 to vectors of size 3.
     A = rand(3,4)
-    Ã = LazyLinearMap(A, (1,), (2,))
+    Ã = DenseTensor(A, (1,), (2,))
     v = rand(4)
     w = rand(3)
 
-    @test à isa LazyLinearMap{T,1,1} where T
+    @test à isa DenseTensor{T,1,1} where T
     @test à isa LazyTensor{T,1,1} where T
     @test range_size(Ã) == (3,)
     @test domain_size(Ã) == (4,)
@@ -77,12 +77,12 @@
     @test Ã'*w ≈ A'*w
 
     A = rand(2,3,4)
-    @test_throws DomainError LazyLinearMap(A, (3,1), (2,))
+    @test_throws DomainError DenseTensor(A, (3,1), (2,))
 
     # Test more exotic mappings
     B = rand(3,4,2)
     # Map vectors of size 2 to matrices of size (3,4)
-    B̃ = LazyLinearMap(B, (1,2), (3,))
+    B̃ = DenseTensor(B, (1,2), (3,))
     v = rand(2)
 
     @test range_size(B̃) == (3,4)
@@ -92,7 +92,7 @@
     @test B̃*v ≈ B[:,:,1]*v[1] + B[:,:,2]*v[2] atol=5e-13
 
     # Map matrices of size (3,2) to vectors of size 4
-    B̃ = LazyLinearMap(B, (2,), (1,3))
+    B̃ = DenseTensor(B, (2,), (1,3))
     v = rand(3,2)
 
     @test range_size(B̃) == (4,)
--- a/test/SbpOperators/boundaryops/boundary_operator_test.jl	Mon Mar 21 10:04:15 2022 +0100
+++ b/test/SbpOperators/boundaryops/boundary_operator_test.jl	Tue Mar 22 22:05:34 2022 +0100
@@ -28,7 +28,7 @@
 
         @testset "2D" begin
             e_w = boundary_operator(g_2D,closure_stencil,CartesianBoundary{1,Upper}())
-            @test e_w isa InflatedLazyTensor
+            @test e_w isa InflatedTensor
             @test e_w isa LazyTensor{T,1,2} where T
         end
     end
--- a/test/SbpOperators/boundaryops/boundary_restriction_test.jl	Mon Mar 21 10:04:15 2022 +0100
+++ b/test/SbpOperators/boundaryops/boundary_restriction_test.jl	Tue Mar 22 22:05:34 2022 +0100
@@ -4,7 +4,7 @@
 using Sbplib.Grids
 using Sbplib.LazyTensors
 using Sbplib.RegionIndices
-import Sbplib.SbpOperators.BoundaryOperator
+using Sbplib.SbpOperators: BoundaryOperator, Stencil
 
 @testset "boundary_restriction" begin
 	stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order = 4)
@@ -30,7 +30,7 @@
         @testset "2D" begin
             e_w = boundary_restriction(g_2D,e_closure,CartesianBoundary{1,Upper}())
             @test e_w == boundary_restriction(g_2D,stencil_set,CartesianBoundary{1,Upper}())
-            @test e_w isa InflatedLazyTensor
+            @test e_w isa InflatedTensor
             @test e_w isa LazyTensor{T,1,2} where T
         end
     end
--- a/test/SbpOperators/readoperator_test.jl	Mon Mar 21 10:04:15 2022 +0100
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,172 +0,0 @@
-using Test
-
-using TOML
-using Sbplib.SbpOperators
-
-import Sbplib.SbpOperators.Stencil
-
-@testset "readoperator" begin
-    toml_str = """
-        [meta]
-        authors = "Ken Mattson"
-        description = "Standard operators for equidistant grids"
-        type = "equidistant"
-        cite = "A paper a long time ago in a galaxy far far away."
-
-        [[stencil_set]]
-
-        order = 2
-        test = 2
-
-        H.inner = ["1"]
-        H.closure = ["1/2"]
-
-        D1.inner_stencil = ["-1/2", "0", "1/2"]
-        D1.closure_stencils = [
-            {s = ["-1", "1"], c = 1},
-        ]
-
-        D2.inner_stencil = ["1", "-2", "1"]
-        D2.closure_stencils = [
-            {s = ["1", "-2", "1"], c = 1},
-        ]
-
-        e.closure = ["1"]
-        d1.closure = {s = ["-3/2", "2", "-1/2"], c = 1}
-
-        [[stencil_set]]
-
-        order = 4
-        test = 1
-        H.inner = ["1"]
-        H.closure = ["17/48", "59/48", "43/48", "49/48"]
-
-        D2.inner_stencil = ["-1/12","4/3","-5/2","4/3","-1/12"]
-        D2.closure_stencils = [
-            {s = [     "2",    "-5",      "4",       "-1",     "0",     "0"], c = 1},
-            {s = [     "1",    "-2",      "1",        "0",     "0",     "0"], c = 2},
-            {s = [ "-4/43", "59/43", "-110/43",   "59/43", "-4/43",     "0"], c = 3},
-            {s = [ "-1/49",     "0",   "59/49", "-118/49", "64/49", "-4/49"], c = 4},
-        ]
-
-        e.closure = ["1"]
-        d1.closure = {s = ["-11/6", "3", "-3/2", "1/3"], c = 1}
-
-        [[stencil_set]]
-        order = 4
-        test = 2
-
-        H.closure = ["-1/49", "0", "59/49", "-118/49", "64/49", "-4/49"]
-    """
-
-    parsed_toml = TOML.parse(toml_str)
-
-    @testset "get_stencil_set" begin
-        @test get_stencil_set(parsed_toml; order = 2) isa Dict
-        @test get_stencil_set(parsed_toml; order = 2) == parsed_toml["stencil_set"][1]
-        @test get_stencil_set(parsed_toml; test = 1) == parsed_toml["stencil_set"][2]
-        @test get_stencil_set(parsed_toml; order = 4, test = 2) == parsed_toml["stencil_set"][3]
-
-        @test_throws ArgumentError get_stencil_set(parsed_toml; test = 2)
-        @test_throws ArgumentError get_stencil_set(parsed_toml; order = 4)
-    end
-
-    @testset "parse_stencil" begin
-        toml = """
-            s1 = ["-1/12","4/3","-5/2","4/3","-1/12"]
-            s2 = {s = ["2", "-5", "4", "-1", "0", "0"], c = 1}
-            s3 = {s = ["1", "-2", "1", "0", "0", "0"], c = 2}
-            s4 = "not a stencil"
-            s5 = [-1, 4, 3]
-            s6 = {k = ["1", "-2", "1", "0", "0", "0"], c = 2}
-            s7 = {s = [-1, 4, 3], c = 2}
-            s8 = {s = ["1", "-2", "1", "0", "0", "0"], c = [2,2]}
-        """
-
-        @test parse_stencil(TOML.parse(toml)["s1"]) == CenteredStencil(-1//12, 4//3, -5//2, 4//3, -1//12)
-        @test parse_stencil(TOML.parse(toml)["s2"]) == Stencil(2//1, -5//1, 4//1, -1//1, 0//1, 0//1; center=1)
-        @test parse_stencil(TOML.parse(toml)["s3"]) == Stencil(1//1, -2//1, 1//1, 0//1, 0//1, 0//1; center=2)
-
-        @test_throws ArgumentError parse_stencil(TOML.parse(toml)["s4"])
-        @test_throws ArgumentError parse_stencil(TOML.parse(toml)["s5"])
-        @test_throws ArgumentError parse_stencil(TOML.parse(toml)["s6"])
-        @test_throws ArgumentError parse_stencil(TOML.parse(toml)["s7"])
-        @test_throws ArgumentError parse_stencil(TOML.parse(toml)["s8"])
-
-        stencil_set = get_stencil_set(parsed_toml; order = 4, test = 1)
-
-        @test parse_stencil.(stencil_set["D2"]["closure_stencils"]) == [
-            Stencil(  2//1,  -5//1,     4//1,    -1//1,   0//1,   0//1; center=1),
-            Stencil(  1//1,  -2//1,     1//1,     0//1,   0//1,   0//1; center=2),
-            Stencil(-4//43, 59//43, -110//43,   59//43, -4//43,   0//1; center=3),
-            Stencil(-1//49,   0//1,   59//49, -118//49, 64//49, -4//49; center=4),
-        ]
-
-
-        @test parse_stencil(Float64, TOML.parse(toml)["s1"]) == CenteredStencil(-1/12, 4/3, -5/2, 4/3, -1/12)
-        @test parse_stencil(Float64, TOML.parse(toml)["s2"]) == Stencil(2/1, -5/1, 4/1, -1/1, 0/1, 0/1; center=1)
-        @test parse_stencil(Float64, TOML.parse(toml)["s3"]) == Stencil(1/1, -2/1, 1/1, 0/1, 0/1, 0/1; center=2)
-    end
-
-    @testset "parse_scalar" begin
-        toml = TOML.parse("""
-            a1 = 1
-            a2 = 1.5
-            a3 = 1.0
-            a4 = 10
-            a5 = "1/2"
-            a6 = "1.5"
-
-            e1 = [1,2,3]
-            e2 = "a string value"
-        """)
-
-        @test parse_scalar(toml["a1"]) == 1//1
-        @test parse_scalar(toml["a2"]) == 3//2
-        @test parse_scalar(toml["a3"]) == 1//1
-        @test parse_scalar(toml["a4"]) == 10//1
-        @test parse_scalar(toml["a5"]) == 1//2
-        @test parse_scalar(toml["a6"]) == 3//2
-
-        @test_throws ArgumentError parse_scalar(toml["e1"])
-        @test_throws ArgumentError parse_scalar(toml["e2"])
-    end
-
-    @testset "parse_tuple" begin
-        toml = TOML.parse("""
-            t1 = [1,3,4]
-            t2 = ["1/2","3/4","2/1"]
-
-            e1 = "not a tuple"
-            e2.a="1"
-            e3 = 1
-            e4 = ["1/2","3/4","not a number"]
-        """)
-
-        @test parse_tuple(toml["t1"]) == (1//1,3//1,4//1)
-        @test parse_tuple(toml["t2"]) == (1//2,3//4,2//1)
-
-        @test_throws ArgumentError parse_tuple(toml["e1"])
-        @test_throws ArgumentError parse_tuple(toml["e2"])
-        @test_throws ArgumentError parse_tuple(toml["e3"])
-        @test_throws ArgumentError parse_tuple(toml["e4"])
-    end
-end
-
-@testset "parse_rational" begin
-    @test SbpOperators.parse_rational("1") isa Rational
-    @test SbpOperators.parse_rational("1") == 1//1
-    @test SbpOperators.parse_rational("1/2") isa Rational
-    @test SbpOperators.parse_rational("1/2") == 1//2
-    @test SbpOperators.parse_rational("37/13") isa Rational
-    @test SbpOperators.parse_rational("37/13") == 37//13
-
-    @test SbpOperators.parse_rational(0.5) isa Rational
-    @test SbpOperators.parse_rational(0.5) == 1//2
-
-    @test SbpOperators.parse_rational("0.5") isa Rational
-    @test SbpOperators.parse_rational("0.5") == 1//2
-
-    @test SbpOperators.parse_rational(2) isa Rational
-    @test SbpOperators.parse_rational(2) == 2//1
-end
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/test/SbpOperators/stencil_set_test.jl	Tue Mar 22 22:05:34 2022 +0100
@@ -0,0 +1,172 @@
+using Test
+
+using TOML
+using Sbplib.SbpOperators
+
+import Sbplib.SbpOperators.Stencil
+
+@testset "readoperator" begin
+    toml_str = """
+        [meta]
+        authors = "Ken Mattson"
+        description = "Standard operators for equidistant grids"
+        type = "equidistant"
+        cite = "A paper a long time ago in a galaxy far far away."
+
+        [[stencil_set]]
+
+        order = 2
+        test = 2
+
+        H.inner = ["1"]
+        H.closure = ["1/2"]
+
+        D1.inner_stencil = ["-1/2", "0", "1/2"]
+        D1.closure_stencils = [
+            {s = ["-1", "1"], c = 1},
+        ]
+
+        D2.inner_stencil = ["1", "-2", "1"]
+        D2.closure_stencils = [
+            {s = ["1", "-2", "1"], c = 1},
+        ]
+
+        e.closure = ["1"]
+        d1.closure = {s = ["-3/2", "2", "-1/2"], c = 1}
+
+        [[stencil_set]]
+
+        order = 4
+        test = 1
+        H.inner = ["1"]
+        H.closure = ["17/48", "59/48", "43/48", "49/48"]
+
+        D2.inner_stencil = ["-1/12","4/3","-5/2","4/3","-1/12"]
+        D2.closure_stencils = [
+            {s = [     "2",    "-5",      "4",       "-1",     "0",     "0"], c = 1},
+            {s = [     "1",    "-2",      "1",        "0",     "0",     "0"], c = 2},
+            {s = [ "-4/43", "59/43", "-110/43",   "59/43", "-4/43",     "0"], c = 3},
+            {s = [ "-1/49",     "0",   "59/49", "-118/49", "64/49", "-4/49"], c = 4},
+        ]
+
+        e.closure = ["1"]
+        d1.closure = {s = ["-11/6", "3", "-3/2", "1/3"], c = 1}
+
+        [[stencil_set]]
+        order = 4
+        test = 2
+
+        H.closure = ["-1/49", "0", "59/49", "-118/49", "64/49", "-4/49"]
+    """
+
+    parsed_toml = TOML.parse(toml_str)
+
+    @testset "get_stencil_set" begin
+        @test get_stencil_set(parsed_toml; order = 2) isa Dict
+        @test get_stencil_set(parsed_toml; order = 2) == parsed_toml["stencil_set"][1]
+        @test get_stencil_set(parsed_toml; test = 1) == parsed_toml["stencil_set"][2]
+        @test get_stencil_set(parsed_toml; order = 4, test = 2) == parsed_toml["stencil_set"][3]
+
+        @test_throws ArgumentError get_stencil_set(parsed_toml; test = 2)
+        @test_throws ArgumentError get_stencil_set(parsed_toml; order = 4)
+    end
+
+    @testset "parse_stencil" begin
+        toml = """
+            s1 = ["-1/12","4/3","-5/2","4/3","-1/12"]
+            s2 = {s = ["2", "-5", "4", "-1", "0", "0"], c = 1}
+            s3 = {s = ["1", "-2", "1", "0", "0", "0"], c = 2}
+            s4 = "not a stencil"
+            s5 = [-1, 4, 3]
+            s6 = {k = ["1", "-2", "1", "0", "0", "0"], c = 2}
+            s7 = {s = [-1, 4, 3], c = 2}
+            s8 = {s = ["1", "-2", "1", "0", "0", "0"], c = [2,2]}
+        """
+
+        @test parse_stencil(TOML.parse(toml)["s1"]) == CenteredStencil(-1//12, 4//3, -5//2, 4//3, -1//12)
+        @test parse_stencil(TOML.parse(toml)["s2"]) == Stencil(2//1, -5//1, 4//1, -1//1, 0//1, 0//1; center=1)
+        @test parse_stencil(TOML.parse(toml)["s3"]) == Stencil(1//1, -2//1, 1//1, 0//1, 0//1, 0//1; center=2)
+
+        @test_throws ArgumentError parse_stencil(TOML.parse(toml)["s4"])
+        @test_throws ArgumentError parse_stencil(TOML.parse(toml)["s5"])
+        @test_throws ArgumentError parse_stencil(TOML.parse(toml)["s6"])
+        @test_throws ArgumentError parse_stencil(TOML.parse(toml)["s7"])
+        @test_throws ArgumentError parse_stencil(TOML.parse(toml)["s8"])
+
+        stencil_set = get_stencil_set(parsed_toml; order = 4, test = 1)
+
+        @test parse_stencil.(stencil_set["D2"]["closure_stencils"]) == [
+            Stencil(  2//1,  -5//1,     4//1,    -1//1,   0//1,   0//1; center=1),
+            Stencil(  1//1,  -2//1,     1//1,     0//1,   0//1,   0//1; center=2),
+            Stencil(-4//43, 59//43, -110//43,   59//43, -4//43,   0//1; center=3),
+            Stencil(-1//49,   0//1,   59//49, -118//49, 64//49, -4//49; center=4),
+        ]
+
+
+        @test parse_stencil(Float64, TOML.parse(toml)["s1"]) == CenteredStencil(-1/12, 4/3, -5/2, 4/3, -1/12)
+        @test parse_stencil(Float64, TOML.parse(toml)["s2"]) == Stencil(2/1, -5/1, 4/1, -1/1, 0/1, 0/1; center=1)
+        @test parse_stencil(Float64, TOML.parse(toml)["s3"]) == Stencil(1/1, -2/1, 1/1, 0/1, 0/1, 0/1; center=2)
+    end
+
+    @testset "parse_scalar" begin
+        toml = TOML.parse("""
+            a1 = 1
+            a2 = 1.5
+            a3 = 1.0
+            a4 = 10
+            a5 = "1/2"
+            a6 = "1.5"
+
+            e1 = [1,2,3]
+            e2 = "a string value"
+        """)
+
+        @test parse_scalar(toml["a1"]) == 1//1
+        @test parse_scalar(toml["a2"]) == 3//2
+        @test parse_scalar(toml["a3"]) == 1//1
+        @test parse_scalar(toml["a4"]) == 10//1
+        @test parse_scalar(toml["a5"]) == 1//2
+        @test parse_scalar(toml["a6"]) == 3//2
+
+        @test_throws ArgumentError parse_scalar(toml["e1"])
+        @test_throws ArgumentError parse_scalar(toml["e2"])
+    end
+
+    @testset "parse_tuple" begin
+        toml = TOML.parse("""
+            t1 = [1,3,4]
+            t2 = ["1/2","3/4","2/1"]
+
+            e1 = "not a tuple"
+            e2.a="1"
+            e3 = 1
+            e4 = ["1/2","3/4","not a number"]
+        """)
+
+        @test parse_tuple(toml["t1"]) == (1//1,3//1,4//1)
+        @test parse_tuple(toml["t2"]) == (1//2,3//4,2//1)
+
+        @test_throws ArgumentError parse_tuple(toml["e1"])
+        @test_throws ArgumentError parse_tuple(toml["e2"])
+        @test_throws ArgumentError parse_tuple(toml["e3"])
+        @test_throws ArgumentError parse_tuple(toml["e4"])
+    end
+end
+
+@testset "parse_rational" begin
+    @test SbpOperators.parse_rational("1") isa Rational
+    @test SbpOperators.parse_rational("1") == 1//1
+    @test SbpOperators.parse_rational("1/2") isa Rational
+    @test SbpOperators.parse_rational("1/2") == 1//2
+    @test SbpOperators.parse_rational("37/13") isa Rational
+    @test SbpOperators.parse_rational("37/13") == 37//13
+
+    @test SbpOperators.parse_rational(0.5) isa Rational
+    @test SbpOperators.parse_rational(0.5) == 1//2
+
+    @test SbpOperators.parse_rational("0.5") isa Rational
+    @test SbpOperators.parse_rational("0.5") == 1//2
+
+    @test SbpOperators.parse_rational(2) isa Rational
+    @test SbpOperators.parse_rational(2) == 2//1
+end
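
Taken together, the two hunks above are a straight file move: readoperator_test.jl is deleted and its contents re-added verbatim as stencil_set_test.jl. For reference, a condensed sketch of the TOML-parsing API the moved tests exercise; the small TOML string below is illustrative, not taken from the shipped operator files:

```julia
using TOML
using Sbplib.SbpOperators

toml_str = """
    [[stencil_set]]
    order = 2
    H.inner = ["1"]
    H.closure = ["1/2"]
    D2.inner_stencil = ["1", "-2", "1"]
    D2.closure_stencils = [
        {s = ["1", "-2", "1"], c = 1},
    ]
"""
parsed_toml = TOML.parse(toml_str)

# Select a stencil set by its metadata keys, then parse its entries.
stencil_set = get_stencil_set(parsed_toml; order = 2)
inner   = parse_stencil(stencil_set["D2"]["inner_stencil"])       # expected: CenteredStencil(1//1, -2//1, 1//1)
closure = parse_stencil.(stencil_set["D2"]["closure_stencils"])   # vector of Stencil
H_inner = parse_tuple(stencil_set["H"]["inner"])                  # expected: (1//1,)
```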
--- a/test/SbpOperators/volumeops/derivatives/first_derivative_test.jl	Mon Mar 21 10:04:15 2022 +0100
+++ b/test/SbpOperators/volumeops/derivatives/first_derivative_test.jl	Tue Mar 22 22:05:34 2022 +0100
@@ -29,12 +29,14 @@
         
         @test first_derivative(g₁, stencil_set, 1) isa LazyTensor{Float64,1,1}
         @test first_derivative(g₂, stencil_set, 2) isa LazyTensor{Float64,2,2}
+        @test first_derivative(g₁, stencil_set, 1) == first_derivative(g₁, stencil_set)
 
         interior_stencil = CenteredStencil(-1,0,1)
         closure_stencils = [Stencil(-1,1, center=1)]
 
         @test first_derivative(g₁, interior_stencil, closure_stencils, 1) isa LazyTensor{Float64,1,1}
         @test first_derivative(g₁, interior_stencil, closure_stencils, 1) isa VolumeOperator
+        @test first_derivative(g₁, interior_stencil, closure_stencils, 1) == first_derivative(g₁, interior_stencil, closure_stencils)
         @test first_derivative(g₂, interior_stencil, closure_stencils, 2) isa LazyTensor{Float64,2,2}
     end
 
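
The added assertions state that, on a 1D grid, the direction argument to first_derivative can be omitted. A sketch of that equivalence; the 1D `EquidistantGrid` call for `g₁` is an assumption, since the grid setup lies outside this hunk:

```julia
using Sbplib.Grids
using Sbplib.SbpOperators
using Sbplib.SbpOperators: Stencil

# Assumed 1D grid; the real g₁ is defined earlier in the test file.
g₁ = EquidistantGrid(101, 0.0, 1.0)

stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order = 4)

# On a 1D grid the direction argument defaults to 1.
first_derivative(g₁, stencil_set, 1) == first_derivative(g₁, stencil_set)

interior_stencil = CenteredStencil(-1, 0, 1)
closure_stencils = [Stencil(-1, 1, center=1)]
first_derivative(g₁, interior_stencil, closure_stencils, 1) ==
    first_derivative(g₁, interior_stencil, closure_stencils)
```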
--- a/test/SbpOperators/volumeops/derivatives/second_derivative_test.jl	Mon Mar 21 10:04:15 2022 +0100
+++ b/test/SbpOperators/volumeops/derivatives/second_derivative_test.jl	Tue Mar 22 22:05:34 2022 +0100
@@ -21,11 +21,12 @@
             Dₓₓ = second_derivative(g_1D,inner_stencil,closure_stencils,1)
             @test Dₓₓ == second_derivative(g_1D,inner_stencil,closure_stencils)
             @test Dₓₓ == second_derivative(g_1D,stencil_set,1)
+            @test Dₓₓ == second_derivative(g_1D,stencil_set)
             @test Dₓₓ isa VolumeOperator
         end
         @testset "2D" begin
             Dₓₓ = second_derivative(g_2D,inner_stencil,closure_stencils,1)
-            D2 = second_derivative(g_1D,inner_stencil,closure_stencils)
+            D2 = second_derivative(g_1D,inner_stencil,closure_stencils,1)
             I = IdentityTensor{Float64}(size(g_2D)[2])
             @test Dₓₓ == D2⊗I
             @test Dₓₓ == second_derivative(g_2D,stencil_set,1)
@@ -51,9 +52,7 @@
             # implies that L*v should be exact for monomials up to order 2.
             @testset "2nd order" begin
                 stencil_set = read_stencil_set(operator_path; order=2)
-                inner_stencil = parse_stencil(stencil_set["D2"]["inner_stencil"])
-			    closure_stencils = parse_stencil.(stencil_set["D2"]["closure_stencils"])
-                Dₓₓ = second_derivative(g_1D,inner_stencil,closure_stencils)
+                Dₓₓ = second_derivative(g_1D,stencil_set)
                 @test Dₓₓ*monomials[1] ≈ zeros(Float64,size(g_1D)...) atol = 5e-10
                 @test Dₓₓ*monomials[2] ≈ zeros(Float64,size(g_1D)...) atol = 5e-10
                 @test Dₓₓ*monomials[3] ≈ monomials[1] atol = 5e-10
@@ -64,9 +63,7 @@
             # implies that L*v should be exact for monomials up to order 3.
             @testset "4th order" begin
                 stencil_set = read_stencil_set(operator_path; order=4)
-                inner_stencil = parse_stencil(stencil_set["D2"]["inner_stencil"])
-			    closure_stencils = parse_stencil.(stencil_set["D2"]["closure_stencils"])
-                Dₓₓ = second_derivative(g_1D,inner_stencil,closure_stencils)
+                Dₓₓ = second_derivative(g_1D,stencil_set)
                 # NOTE: high tolerances for checking the "exact" differentiation
                 # due to accumulation of round-off errors/cancellation errors?
                 @test Dₓₓ*monomials[1] ≈ zeros(Float64,size(g_1D)...) atol = 5e-10
@@ -92,9 +89,7 @@
             # implies that L*v should be exact for binomials up to order 2.
             @testset "2nd order" begin
                 stencil_set = read_stencil_set(operator_path; order=2)
-                inner_stencil = parse_stencil(stencil_set["D2"]["inner_stencil"])
-                closure_stencils = parse_stencil.(stencil_set["D2"]["closure_stencils"])
-                Dyy = second_derivative(g_2D,inner_stencil,closure_stencils,2)
+                Dyy = second_derivative(g_2D,stencil_set,2)
                 @test Dyy*binomials[1] ≈ zeros(Float64,size(g_2D)...) atol = 5e-9
                 @test Dyy*binomials[2] ≈ zeros(Float64,size(g_2D)...) atol = 5e-9
                 @test Dyy*binomials[3] ≈ evalOn(g_2D,(x,y)->1.) atol = 5e-9
@@ -105,9 +100,7 @@
             # implies that L*v should be exact for binomials up to order 3.
             @testset "4th order" begin
                 stencil_set = read_stencil_set(operator_path; order=4)
-                inner_stencil = parse_stencil(stencil_set["D2"]["inner_stencil"])
-                closure_stencils = parse_stencil.(stencil_set["D2"]["closure_stencils"])
-                Dyy = second_derivative(g_2D,inner_stencil,closure_stencils,2)
+                Dyy = second_derivative(g_2D,stencil_set,2)
                 # NOTE: high tolerances for checking the "exact" differentiation
                 # due to accumulation of round-off errors/cancellation errors?
                 @test Dyy*binomials[1] ≈ zeros(Float64,size(g_2D)...) atol = 5e-9
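
The accuracy testsets now build the operator straight from the stencil set instead of parsing the D2 inner and closure stencils by hand. A sketch of the before/after on a 1D grid; the `EquidistantGrid` call and the value of `operator_path` are assumptions taken from the surrounding tests:

```julia
using Sbplib.Grids
using Sbplib.SbpOperators

# Assumed setup; the real g_1D and operator_path are defined earlier in the test file.
g_1D = EquidistantGrid(77, 0.0, 1.0)
operator_path = sbp_operators_path()*"standard_diagonal.toml"

stencil_set = read_stencil_set(operator_path; order = 2)

# Before: parse the D2 stencils by hand, then pass them explicitly.
inner_stencil    = parse_stencil(stencil_set["D2"]["inner_stencil"])
closure_stencils = parse_stencil.(stencil_set["D2"]["closure_stencils"])
Dₓₓ_manual = second_derivative(g_1D, inner_stencil, closure_stencils, 1)

# After: let second_derivative pick the D2 stencils out of the stencil set.
Dₓₓ = second_derivative(g_1D, stencil_set)

Dₓₓ == Dₓₓ_manual
```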
--- a/test/SbpOperators/volumeops/laplace/laplace_test.jl	Mon Mar 21 10:04:15 2022 +0100
+++ b/test/SbpOperators/volumeops/laplace/laplace_test.jl	Tue Mar 22 22:05:34 2022 +0100
@@ -69,7 +69,7 @@
 @testset "laplace" begin
     @testset "1D" begin
         Δ = laplace(g_1D, inner_stencil, closure_stencils)
-        @test Δ == second_derivative(g_1D, inner_stencil, closure_stencils)
+        @test Δ == second_derivative(g_1D, inner_stencil, closure_stencils, 1)
         @test Δ isa LazyTensor{T,1,1}  where T
     end
     @testset "3D" begin