changeset 509:b7e42384053a feature/boundary_ops

Merge w. default
author Vidar Stiernström <vidar.stiernstrom@it.uu.se>
date Sun, 08 Nov 2020 16:01:39 +0100
parents 2ab687b1d221 (current diff) fbbb3733650c (diff)
children db64cfe4d9de
files
diffstat 7 files changed, 543 insertions(+), 12 deletions(-)
--- a/TODO.md	Mon Oct 19 19:02:48 2020 +0200
+++ b/TODO.md	Sun Nov 08 16:01:39 2020 +0100
@@ -13,6 +13,7 @@
 - [ ] Fix indexing signatures. We should make sure we are not too specific. For the "in-between" layers we don't know what type of index is coming, so we should use `I...` instead of `I::Vararg{Int,R}`, or probably better `I::Vararg{Any,R}`
  - [ ] Use `@inferred` in a lot of tests.
  - [ ] Make sure we are setting tolerances in tests in a consistent way
+ - [ ] Add check for correct domain sizes to lazy tensor operations using `SizeMismatch`
 
 ## Repo
  - [ ] Add Vidar to the authors list
--- a/src/LazyTensors/lazy_tensor_operations.jl	Mon Oct 19 19:02:48 2020 +0200
+++ b/src/LazyTensors/lazy_tensor_operations.jl	Sun Nov 08 16:01:39 2020 +0100
@@ -16,14 +16,16 @@
 
 # TODO: Go through and remove unnecessary type parameters on functions
 
-Base.:*(tm::TensorMapping{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = LazyTensorMappingApplication(tm,o)
 Base.getindex(ta::LazyTensorMappingApplication{T,R,D}, I::Vararg{Index,R}) where {T,R,D} = apply(ta.t, ta.o, I...)
 Base.getindex(ta::LazyTensorMappingApplication{T,R,D}, I::Vararg{Int,R}) where {T,R,D} = apply(ta.t, ta.o, Index{Unknown}.(I)...)
 Base.size(ta::LazyTensorMappingApplication) = range_size(ta.t)
 # TODO: What else is needed to implement the AbstractArray interface?
 
+Base.:*(a::TensorMapping, v::AbstractArray) = LazyTensorMappingApplication(a,v)
+Base.:*(a::TensorMapping, b::TensorMapping) = throw(MethodError(Base.:*,(a,b)))
+Base.:*(a::TensorMapping, args::Union{TensorMapping, AbstractArray}...) = foldr(*,(a,args...))
+
 # # We need the associativity to be a→b→c = a→(b→c), which is the case for '→'
-Base.:*(a::TensorMapping{T,R,D}, b::TensorMapping{T,D,K}, args::Union{TensorMapping{T}, AbstractArray{T}}...) where {T,R,D,K} = foldr(*,(a,b,args...))
 # # Should we overload some other infix binary operator?
 # →(tm::TensorMapping{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = LazyTensorMappingApplication(tm,o)
 # TODO: We need to be really careful about good error messages.
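For orientation, a minimal sketch of how the new `*` methods behave (illustrative only, not part of the changeset; it uses the `IdentityMapping` introduced further down in this diff):

```julia
# Sketch only: the variadic * folds right-to-left, so a chain stays a lazy application.
Id = IdentityMapping{Float64}(3)
v  = rand(3)

Id * v          # lazy: a LazyTensorMappingApplication
Id * Id * v     # parsed as *(Id, Id, v); folds as Id * (Id * v)
# Id * Id       # throws MethodError: two TensorMappings cannot be combined with *
```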
@@ -38,8 +40,8 @@
 the transpose of mapping `m` by using `m'`. `m'` will work as a regular TensorMapping lazily calling
 the appropriate methods of `m`.
 """
-struct LazyTensorMappingTranspose{T,R,D} <: TensorMapping{T,D,R}
-    tm::TensorMapping{T,R,D}
+struct LazyTensorMappingTranspose{T,R,D, TM<:TensorMapping{T,R,D}} <: TensorMapping{T,D,R}
+    tm::TM
 end
 export LazyTensorMappingTranspose
 
@@ -84,12 +86,9 @@
     t2::TM2
 
     @inline function TensorMappingComposition(t1::TensorMapping{T,R,K}, t2::TensorMapping{T,K,D}) where {T,R,K,D}
-        @boundscheck if domain_size(t1) != range_size(t2)
-            throw(DimensionMismatch("the first argument has domain size $(domain_size(t1)) while the second has range size $(range_size(t2)) "))
-        end
+        @boundscheck check_domain_size(t1, range_size(t2))
         return new{T,R,K,D, typeof(t1), typeof(t2)}(t1,t2)
     end
-    # Add check for matching sizes as a boundscheck
 end
 export TensorMappingComposition
 
@@ -145,3 +144,238 @@
 function apply_transpose(llm::LazyLinearMap{T,R,D}, v::AbstractArray{T,R}, I::Vararg{Index,D}) where {T,R,D}
     apply(LazyLinearMap(llm.A, llm.domain_indicies, llm.range_indicies), v, I...)
 end
+
+
+"""
+    IdentityMapping{T,D} <: TensorMapping{T,D,D}
+
+The lazy identity TensorMapping for a given size. Useful for building up higher-dimensional tensor mappings
+from lower-dimensional ones through outer products. Also used in the implementation of `InflatedTensorMapping`.
+"""
+struct IdentityMapping{T,D} <: TensorMapping{T,D,D}
+    size::NTuple{D,Int}
+end
+export IdentityMapping
+
+IdentityMapping{T}(size::NTuple{D,Int}) where {T,D} = IdentityMapping{T,D}(size)
+IdentityMapping{T}(size::Vararg{Int,D}) where {T,D} = IdentityMapping{T,D}(size)
+IdentityMapping(size::Vararg{Int,D}) where D = IdentityMapping{Float64,D}(size)
+
+range_size(tmi::IdentityMapping) = tmi.size
+domain_size(tmi::IdentityMapping) = tmi.size
+
+apply(tmi::IdentityMapping{T,D}, v::AbstractArray{T,D}, I::Vararg{Any,D}) where {T,D} = v[I...]
+apply_transpose(tmi::IdentityMapping{T,D}, v::AbstractArray{T,D}, I::Vararg{Any,D}) where {T,D} = v[I...]
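A quick usage sketch of `IdentityMapping` (illustrative, mirroring the tests added in test/testLazyTensors.jl below):

```julia
# Sketch only; Id acts as the identity on arrays of size (4, 5).
Id = IdentityMapping{Float64}(4, 5)
v  = rand(4, 5)

Id * v == v        # lazy application returns the same elements
Id' * v == v       # so does the lazy transpose
range_size(Id)     # (4, 5)
domain_size(Id)    # (4, 5)
```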
+
+"""
+    Base.:∘(tm, tmi)
+    Base.:∘(tmi, tm)
+
+Composes a `TensorMapping` `tm` with an `IdentityMapping` `tmi` by returning `tm`.
+"""
+@inline function Base.:∘(tm::TensorMapping{T,R,D}, tmi::IdentityMapping{T,D}) where {T,R,D}
+    @boundscheck check_domain_size(tm, range_size(tmi))
+    return tm
+end
+
+@inline function Base.:∘(tmi::IdentityMapping{T,R}, tm::TensorMapping{T,R,D}) where {T,R,D}
+    @boundscheck check_domain_size(tmi, range_size(tm))
+    return tm
+end
+# Specialization for the case where tm is an IdentityMapping. Required to resolve ambiguity.
+@inline function Base.:∘(tm::IdentityMapping{T,D}, tmi::IdentityMapping{T,D}) where {T,D}
+    @boundscheck check_domain_size(tm, range_size(tmi))
+    return tmi
+end
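A sketch of the composition rules above (illustrative; `A` is an assumed 1D `LazyLinearMap` with range size (4,) and domain size (2,), as in the tests below):

```julia
# Sketch only: composing with a size-matched identity returns the other mapping.
A  = LazyLinearMap(rand(4, 2), (1,), (2,))
I2 = IdentityMapping{Float64}(2)
I4 = IdentityMapping{Float64}(4)

A ∘ I2 == A      # identity on the domain side is absorbed
I4 ∘ A == A      # identity on the range side is absorbed
# I2 ∘ A         # throws SizeMismatch: domain size (2,) does not match range size (4,)
```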
+
+
+"""
+    InflatedTensorMapping{T,R,D} <: TensorMapping{T,R,D}
+
+An inflated `TensorMapping` with dimensions added before and after its actual dimensions.
+"""
+struct InflatedTensorMapping{T,R,D,D_before,R_middle,D_middle,D_after, TM<:TensorMapping{T,R_middle,D_middle}} <: TensorMapping{T,R,D}
+    before::IdentityMapping{T,D_before}
+    tm::TM
+    after::IdentityMapping{T,D_after}
+
+    function InflatedTensorMapping(before, tm::TensorMapping{T}, after) where T
+        R_before = range_dim(before)
+        R_middle = range_dim(tm)
+        R_after = range_dim(after)
+        R = R_before+R_middle+R_after
+
+        D_before = domain_dim(before)
+        D_middle = domain_dim(tm)
+        D_after = domain_dim(after)
+        D = D_before+D_middle+D_after
+        return new{T,R,D,D_before,R_middle,D_middle,D_after, typeof(tm)}(before, tm, after)
+    end
+end
+export InflatedTensorMapping
+"""
+    InflatedTensorMapping(before, tm, after)
+    InflatedTensorMapping(before,tm)
+    InflatedTensorMapping(tm,after)
+
+The outer product of `before`, `tm` and `after`, where `before` and `after` are `IdentityMapping`s.
+
+If one of `before` or `after` is left out, a 0-dimensional `IdentityMapping` is used as the default value.
+
+If `tm` is already an `InflatedTensorMapping`, `before` and `after` will be extended instead of
+creating a nested `InflatedTensorMapping`.
+"""
+InflatedTensorMapping(::IdentityMapping, ::TensorMapping, ::IdentityMapping)
+
+function InflatedTensorMapping(before, itm::InflatedTensorMapping, after)
+    return InflatedTensorMapping(
+        IdentityMapping(before.size...,  itm.before.size...),
+        itm.tm,
+        IdentityMapping(itm.after.size..., after.size...),
+    )
+end
+
+InflatedTensorMapping(before::IdentityMapping, tm::TensorMapping{T}) where T = InflatedTensorMapping(before,tm,IdentityMapping{T}())
+InflatedTensorMapping(tm::TensorMapping{T}, after::IdentityMapping) where T = InflatedTensorMapping(IdentityMapping{T}(),tm,after)
+# Resolve ambiguity between the two previous methods
+InflatedTensorMapping(I1::IdentityMapping{T}, I2::IdentityMapping{T}) where T = InflatedTensorMapping(I1,I2,IdentityMapping{T}())
+
+# TODO: Implement syntax and constructors for products of different combinations of InflatedTensorMapping and IdentityMapping
+
+# TODO: Implement some pretty printing in terms of ⊗. E.g InflatedTensorMapping(I(3),B,I(2)) -> I(3)⊗B⊗I(2)
+
+function range_size(itm::InflatedTensorMapping)
+    return flatten_tuple(
+        range_size(itm.before),
+        range_size(itm.tm),
+        range_size(itm.after),
+    )
+end
+
+function domain_size(itm::InflatedTensorMapping)
+    return flatten_tuple(
+        domain_size(itm.before),
+        domain_size(itm.tm),
+        domain_size(itm.after),
+    )
+end
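A rough sketch of the constructors and sizes (illustrative; `A` is an assumed 1D `LazyLinearMap` taking length-2 vectors to length-4 vectors):

```julia
# Sketch only: inflating a 1D mapping with identities before and after.
A   = LazyLinearMap(rand(4, 2), (1,), (2,))
itm = InflatedTensorMapping(IdentityMapping(3, 2), A, IdentityMapping(4))

itm isa TensorMapping{Float64,4,4}   # 2 + 1 + 1 dimensions on both sides
range_size(itm)     # (3, 2, 4, 4)
domain_size(itm)    # (3, 2, 2, 4)
```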
+
+function apply(itm::InflatedTensorMapping{T,R,D}, v::AbstractArray{T,D}, I::Vararg{Any,R}) where {T,R,D}
+    view_index, inner_index = split_index(itm, I...)
+
+    v_inner = view(v, view_index...)
+    return apply(itm.tm, v_inner, inner_index...)
+end
+
+
+"""
+    split_index(...)
+
+Splits the multi-index into two parts: one part for the view that the inner TensorMapping acts on, and one part for indexing the result.
+E.g.
+```
+(1,2,3,4) -> (1,:,:,4), (2,3)
+```
+"""
+function split_index(itm::InflatedTensorMapping{T,R,D}, I::Vararg{Any,R}) where {T,R,D}
+    I_before = slice_tuple(I, Val(1), Val(range_dim(itm.before)))
+    I_after = slice_tuple(I, Val(R-range_dim(itm.after)+1), Val(R))
+
+    view_index = (I_before..., ntuple((i)->:,domain_dim(itm.tm))..., I_after...)
+    inner_index = slice_tuple(I, Val(range_dim(itm.before)+1), Val(R-range_dim(itm.after)))
+
+    return (view_index, inner_index)
+end
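A concrete sketch of `split_index` for the inflated mapping from the previous sketch (illustrative values only):

```julia
# Sketch only; itm = InflatedTensorMapping(IdentityMapping(3,2), A, IdentityMapping(4)),
# i.e. range dims (before, middle, after) = (2, 1, 1) and inner domain dimension 1.
view_index, inner_index = LazyTensors.split_index(itm, 1, 2, 3, 4)
view_index     # (1, 2, :, 4)  -- selects the slice of v the inner mapping acts on
inner_index    # (3,)          -- index into the result of the inner mapping
```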
+
+# TODO: Can this be replaced by something more elegant while still being type stable? 2020-10-21
+# See:
+# https://github.com/JuliaLang/julia/issues/34884
+# https://github.com/JuliaLang/julia/issues/30386
+"""
+    slice_tuple(t, Val(l), Val(u))
+
+Get a slice of the tuple `t` in a type stable way.
+Equivalent to `t[l:u]`, but type stable since the length is known at compile time.
+"""
+function slice_tuple(t,::Val{L},::Val{U}) where {L,U}
+    return ntuple(i->t[i+L-1], U-L+1)
+end
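For example (mirroring the tests added below):

```julia
# Sketch only; the Val arguments make the slice length known at compile time.
LazyTensors.slice_tuple((1, 2, 3, 4, 5, 6), Val(2), Val(5))   # == (2, 3, 4, 5)
```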
+
+"""
+    flatten_tuple(t)
+
+Takes a nested tuple and flattens the whole structure into a single flat tuple.
+"""
+flatten_tuple(t::NTuple{N, Number} where N) = t
+flatten_tuple(t::Tuple) = ((flatten_tuple.(t)...)...,) # simplify?
+flatten_tuple(ts::Vararg) = flatten_tuple(ts)
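For example (mirroring the tests added below):

```julia
# Sketch only; nested tuples are flattened into a single flat tuple.
LazyTensors.flatten_tuple((1, 2, (3, (4, 5)), 6))   # == (1, 2, 3, 4, 5, 6)
```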
+
+"""
+    LazyOuterProduct(tms...)
+
+Creates a `TensorMappingComposition` for the outer product of `tms...`.
+This is done by decomposing the outer product into a composition of outer products, each involving only identity mappings and a single non-identity mapping.
+
+First let
+```math
+A = A_{I,J}
+B = B_{M,N}
+C = C_{P,Q}
+```
+
+where ``I``, ``M``, ``P`` are multi-indexes for the ranges of ``A``, ``B``, ``C``, and ``J``, ``N``, ``Q`` are multi-indexes for the domains.
+
+We use ``⊗`` to denote the outer product
+```math
+(A⊗B)_{IM,JN} = A_{I,J}B_{M,N}
+```
+
+We note that
+```math
+A⊗B⊗C = (A⊗B⊗C)_{IMP,JNQ} = A_{I,J}B_{M,N}C_{P,Q}
+```
+And that
+```math
+A⊗B⊗C = (A⊗I_{|M|}⊗I_{|P|})(I_{|J|}⊗B⊗I_{|P|})(I_{|J|}⊗I_{|N|}⊗C)
+```
+where ``|\cdot|`` of a multi-index is the vector of sizes in each dimension, and ``I_v`` denotes the identity tensor of size ``v[i]`` in each direction.
+To apply ``A⊗B⊗C`` we evaluate
+```math
+(A⊗B⊗C)v = (A⊗I_{|M|}⊗I_{|P|})\left[(I_{|J|}⊗B⊗I_{|P|})\left[(I_{|J|}⊗I_{|N|}⊗C)v\right]\right]
+```
+"""
+function LazyOuterProduct end
+export LazyOuterProduct
+
+function LazyOuterProduct(tm1::TensorMapping{T}, tm2::TensorMapping{T}) where T
+    itm1 = InflatedTensorMapping(tm1, IdentityMapping{T}(range_size(tm2)))
+    itm2 = InflatedTensorMapping(IdentityMapping{T}(domain_size(tm1)),tm2)
+
+    return itm1∘itm2
+end
+
+LazyOuterProduct(t1::IdentityMapping{T}, t2::IdentityMapping{T}) where T = IdentityMapping{T}(t1.size...,t2.size...)
+LazyOuterProduct(t1::TensorMapping, t2::IdentityMapping) = InflatedTensorMapping(t1, t2)
+LazyOuterProduct(t1::IdentityMapping, t2::TensorMapping) = InflatedTensorMapping(t1, t2)
+
+LazyOuterProduct(tms::Vararg{TensorMapping}) = foldl(LazyOuterProduct, tms)
+
+⊗(a::TensorMapping, b::TensorMapping) = LazyOuterProduct(a,b)
+export ⊗
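A sketch of how the decomposition above plays out for two 1D mappings (illustrative; `Ã` and `B̃` are assumed random matrices wrapped as `LazyLinearMap`s):

```julia
# Sketch only: A ⊗ B is realized as the composition (A⊗I)∘(I⊗B).
Ã = rand(3, 2);  B̃ = rand(4, 5)
A = LazyLinearMap(Ã, (1,), (2,))    # range size (3,), domain size (2,)
B = LazyLinearMap(B̃, (1,), (2,))    # range size (4,), domain size (5,)

AB = A ⊗ B                          # == LazyOuterProduct(A, B)
AB isa TensorMappingComposition
range_size(AB)     # (3, 4)
domain_size(AB)    # (2, 5)

v = rand(2, 5)
w = AB * v         # (AB*v)[i,k] ≈ Σⱼ Σₗ Ã[i,j] * B̃[k,l] * v[j,l]
```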
+
+
+function check_domain_size(tm::TensorMapping, sz)
+    if domain_size(tm) != sz
+        throw(SizeMismatch(tm,sz))
+    end
+end
+
+struct SizeMismatch <: Exception
+    tm::TensorMapping
+    sz
+end
+export SizeMismatch
+
+function Base.showerror(io::IO, err::SizeMismatch)
+    print(io, "SizeMismatch: ")
+    print(io, "domain size $(domain_size(err.tm)) of TensorMapping not matching size $(err.sz)")
+end
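A small sketch of the new error path (illustrative; the mismatched mappings are assumed to be 1D `LazyLinearMap`s):

```julia
# Sketch only: size mismatches in compositions now throw SizeMismatch
# instead of the generic DimensionMismatch.
A = LazyLinearMap(rand(2, 3), (1,), (2,))   # domain size (3,)
B = LazyLinearMap(rand(4, 2), (1,), (2,))   # range size (4,)

try
    A ∘ B                        # domain size of A ≠ range size of B
catch err
    err isa SizeMismatch         # true
    showerror(stdout, err)       # prints both the domain size and the expected size
end
```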
--- a/src/LazyTensors/tensor_mapping.jl	Mon Oct 19 19:02:48 2020 +0200
+++ b/src/LazyTensors/tensor_mapping.jl	Sun Nov 08 16:01:39 2020 +0100
@@ -64,4 +64,11 @@
 
 export range_size, domain_size
 
+"""
+    eltype(::TensorMapping{T})
+
+The type of elements the TensorMapping acts on.
+"""
+Base.eltype(::TensorMapping{T}) where T = T
+
 # TODO: Think about boundschecking!
--- a/test/Manifest.toml	Mon Oct 19 19:02:48 2020 +0200
+++ b/test/Manifest.toml	Sun Nov 08 16:01:39 2020 +0100
@@ -1,13 +1,35 @@
 # This file is machine-generated - editing it directly is not advised
 
+[[Artifacts]]
+deps = ["Pkg"]
+git-tree-sha1 = "c30985d8821e0cd73870b17b0ed0ce6dc44cb744"
+uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33"
+version = "1.3.0"
+
 [[Base64]]
 uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
 
+[[CompilerSupportLibraries_jll]]
+deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"]
+git-tree-sha1 = "8e695f735fca77e9708e795eda62afdb869cbb70"
+uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae"
+version = "0.3.4+0"
+
+[[Dates]]
+deps = ["Printf"]
+uuid = "ade2ca70-3891-5945-98fb-dc099432e06a"
+
 [[DeepDiffs]]
 git-tree-sha1 = "9824894295b62a6a4ab6adf1c7bf337b3a9ca34c"
 uuid = "ab62b9b5-e342-54a8-a765-a90f495de1a6"
 version = "1.2.0"
 
+[[DiffRules]]
+deps = ["NaNMath", "Random", "SpecialFunctions"]
+git-tree-sha1 = "eb0c34204c8410888844ada5359ac8b96292cfd1"
+uuid = "b552c78f-8df3-52c6-915a-8e097449b14b"
+version = "1.0.1"
+
 [[Distributed]]
 deps = ["Random", "Serialization", "Sockets"]
 uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
@@ -16,6 +38,15 @@
 deps = ["Markdown"]
 uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
 
+[[JLLWrappers]]
+git-tree-sha1 = "7cec881362e5b4e367ff0279dd99a06526d51a55"
+uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210"
+version = "1.1.2"
+
+[[LibGit2]]
+deps = ["Printf"]
+uuid = "76f85450-5226-5b5a-8eaa-529ad045b433"
+
 [[Libdl]]
 uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
 
@@ -30,16 +61,54 @@
 deps = ["Base64"]
 uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
 
+[[NaNMath]]
+git-tree-sha1 = "c84c576296d0e2fbb3fc134d3e09086b3ea617cd"
+uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3"
+version = "0.3.4"
+
+[[OpenSpecFun_jll]]
+deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"]
+git-tree-sha1 = "9db77584158d0ab52307f8c04f8e7c08ca76b5b3"
+uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e"
+version = "0.5.3+4"
+
+[[Pkg]]
+deps = ["Dates", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "UUIDs"]
+uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f"
+
+[[Printf]]
+deps = ["Unicode"]
+uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7"
+
+[[REPL]]
+deps = ["InteractiveUtils", "Markdown", "Sockets"]
+uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb"
+
 [[Random]]
 deps = ["Serialization"]
 uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
 
+[[Requires]]
+deps = ["UUIDs"]
+git-tree-sha1 = "28faf1c963ca1dc3ec87f166d92982e3c4a1f66d"
+uuid = "ae029012-a4dd-5104-9daa-d747884805df"
+version = "1.1.0"
+
+[[SHA]]
+uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce"
+
 [[Serialization]]
 uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
 
 [[Sockets]]
 uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
 
+[[SpecialFunctions]]
+deps = ["OpenSpecFun_jll"]
+git-tree-sha1 = "d8d8b8a9f4119829410ecd706da4cc8594a1e020"
+uuid = "276daf66-3868-5448-9aa4-cd146d93841b"
+version = "0.10.3"
+
 [[Test]]
 deps = ["Distributed", "InteractiveUtils", "Logging", "Random"]
 uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
@@ -49,3 +118,16 @@
 git-tree-sha1 = "3a2919a78b04c29a1a57b05e1618e473162b15d0"
 uuid = "98d24dd4-01ad-11ea-1b02-c9a08f80db04"
 version = "2.0.0"
+
+[[Tullio]]
+deps = ["DiffRules", "LinearAlgebra", "Requires"]
+git-tree-sha1 = "b27ec3ce782f69c1c24f373bfb6aa60300ed57c7"
+uuid = "bc48ee85-29a4-5162-ae0b-a64e1601d4bc"
+version = "0.2.8"
+
+[[UUIDs]]
+deps = ["Random", "SHA"]
+uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4"
+
+[[Unicode]]
+uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5"
--- a/test/Project.toml	Mon Oct 19 19:02:48 2020 +0200
+++ b/test/Project.toml	Sun Nov 08 16:01:39 2020 +0100
@@ -2,3 +2,4 @@
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 TestSetExtensions = "98d24dd4-01ad-11ea-1b02-c9a08f80db04"
+Tullio = "bc48ee85-29a4-5162-ae0b-a64e1601d4bc"
--- a/test/runtests.jl	Mon Oct 19 19:02:48 2020 +0200
+++ b/test/runtests.jl	Sun Nov 08 16:01:39 2020 +0100
@@ -1,6 +1,6 @@
 using Test
 using TestSetExtensions
 
-@testset ExtendedTestSet "All" begin
+@testset "All" begin
     @includetests ARGS
 end
--- a/test/testLazyTensors.jl	Mon Oct 19 19:02:48 2020 +0200
+++ b/test/testLazyTensors.jl	Sun Nov 08 16:01:39 2020 +0100
@@ -2,6 +2,8 @@
 using Sbplib.LazyTensors
 using Sbplib.RegionIndices
 
+using Tullio
+
 @testset "LazyTensors" begin
 
 @testset "Generic Mapping methods" begin
@@ -10,6 +12,8 @@
     @test range_dim(DummyMapping{Int,2,3}()) == 2
     @test domain_dim(DummyMapping{Int,2,3}()) == 3
     @test apply(DummyMapping{Int,2,3}(), zeros(Int, (0,0,0)),(Index{Unknown}(0),Index{Unknown}(0))) == :apply
+    @test eltype(DummyMapping{Int,2,3}()) == Int
+    @test eltype(DummyMapping{Float64,2,3}()) == Float64
 end
 
 @testset "Mapping transpose" begin
@@ -58,6 +62,7 @@
     @test (m*m*v)[6] == (:apply,m*v,(Index{Unknown}(6),))
     @test_broken BoundsError == (m*m*v)[0]
     @test_broken BoundsError == (m*m*v)[7]
+    @test_throws MethodError m*m
 
     m = SizeDoublingMapping{Int, 2, 1}((3,))
     @test_throws MethodError m*ones(Int,2,2)
@@ -174,6 +179,7 @@
     @test_throws BoundsError (v1 +̃  v2)[4]
     v2 = [1., 2, 3, 4]
     # Test that size of arrays is asserted when not specified inbounds
+    # TODO: Replace these errors with SizeMismatch
     @test_throws DimensionMismatch v1 +̃ v2
 
     # Test operations on LazyArray
@@ -190,6 +196,7 @@
     @test_throws BoundsError (v1 + v2)[4]
     v2 = [1., 2, 3, 4]
     # Test that size of arrays is asserted when not specified inbounds
+    # TODO: Replace these errors with SizeMismatch
     @test_throws DimensionMismatch v1 + v2
 end
 
@@ -223,15 +230,15 @@
     @test Ã∘B̃ isa TensorMappingComposition
     @test range_size(Ã∘B̃) == (2,)
     @test domain_size(Ã∘B̃) == (4,)
-    @test_throws DimensionMismatch B̃∘Ã
+    @test_throws SizeMismatch B̃∘Ã
 
     # @test @inbounds B̃∘Ã # Should not error even though dimensions don't match. (Since ]test runs with forced boundschecking this is currently not testable 2020-10-16)
 
     v = rand(4)
-    @test Ã∘B̃*v ≈ A*B*v rtol=1e-16
+    @test Ã∘B̃*v ≈ A*B*v rtol=1e-14
 
     v = rand(2)
-    @test (Ã∘B̃)'*v ≈ B'*A'*v rtol=1e-16
+    @test (Ã∘B̃)'*v ≈ B'*A'*v rtol=1e-14
 end
 
 @testset "LazyLinearMap" begin
@@ -278,6 +285,205 @@
     @test B̃*v ≈ B[1,:,1]*v[1,1] + B[2,:,1]*v[2,1] + B[3,:,1]*v[3,1] +
                B[1,:,2]*v[1,2] + B[2,:,2]*v[2,2] + B[3,:,2]*v[3,2] atol=5e-13
 
+
+    # TODO:
+    # @inferred (B̃*v)[2]
+end
+
+
+@testset "IdentityMapping" begin
+    @test IdentityMapping{Float64}((4,5)) isa IdentityMapping{T,2} where T
+    @test IdentityMapping{Float64}((4,5)) isa TensorMapping{T,2,2} where T
+    @test IdentityMapping{Float64}((4,5)) == IdentityMapping{Float64}(4,5)
+
+    @test IdentityMapping(3,2) isa IdentityMapping{Float64,2}
+
+    for sz ∈ [(4,5),(3,),(5,6,4)]
+        I = IdentityMapping{Float64}(sz)
+        v = rand(sz...)
+        @test I*v == v
+        @test I'*v == v
+
+        @test range_size(I) == sz
+        @test domain_size(I) == sz
+    end
+
+    I = IdentityMapping{Float64}((4,5))
+    v = rand(4,5)
+    @inferred (I*v)[3,2]
+    @inferred (I'*v)[3,2]
+    @inferred range_size(I)
+
+    @inferred range_dim(I)
+    @inferred domain_dim(I)
+
+    Ã = rand(4,2)
+    A = LazyLinearMap(Ã,(1,),(2,))
+    I1 = IdentityMapping{Float64}(2)
+    I2 = IdentityMapping{Float64}(4)
+    @test A∘I1 == A
+    @test I2∘A == A
+    @test I1∘I1 == I1
+    @test_throws SizeMismatch I1∘A
+    @test_throws SizeMismatch A∘I2
+    @test_throws SizeMismatch I1∘I2
+end
+
+@testset "InflatedTensorMapping" begin
+    I(sz...) = IdentityMapping(sz...)
+
+    Ã = rand(4,2)
+    B̃ = rand(4,2,3)
+    C̃ = rand(4,2,3)
+
+    A = LazyLinearMap(Ã,(1,),(2,))
+    B = LazyLinearMap(B̃,(1,2),(3,))
+    C = LazyLinearMap(C̃,(1,),(2,3))
+
+    @test InflatedTensorMapping(I(3,2), A, I(4)) isa TensorMapping{Float64, 4, 4}
+    @test InflatedTensorMapping(I(3,2), B, I(4)) isa TensorMapping{Float64, 5, 4}
+    @test InflatedTensorMapping(I(3), C, I(2,3)) isa TensorMapping{Float64, 4, 5}
+    @test InflatedTensorMapping(C, I(2,3)) isa TensorMapping{Float64, 3, 4}
+    @test InflatedTensorMapping(I(3), C) isa TensorMapping{Float64, 2, 3}
+    @test InflatedTensorMapping(I(3), I(2,3)) isa TensorMapping{Float64, 3, 3}
+
+    @test range_size(InflatedTensorMapping(I(3,2), A, I(4))) == (3,2,4,4)
+    @test domain_size(InflatedTensorMapping(I(3,2), A, I(4))) == (3,2,2,4)
+
+    @test range_size(InflatedTensorMapping(I(3,2), B, I(4))) == (3,2,4,2,4)
+    @test domain_size(InflatedTensorMapping(I(3,2), B, I(4))) == (3,2,3,4)
+
+    @test range_size(InflatedTensorMapping(I(3), C, I(2,3))) == (3,4,2,3)
+    @test domain_size(InflatedTensorMapping(I(3), C, I(2,3))) == (3,2,3,2,3)
+
+    @inferred range_size(InflatedTensorMapping(I(3,2), A, I(4))) == (3,2,4,4)
+    @inferred domain_size(InflatedTensorMapping(I(3,2), A, I(4))) == (3,2,2,4)
+
+    # Test InflatedTensorMapping with both before and after
+    tm = InflatedTensorMapping(I(3,2), A, I(4))
+    v = rand(domain_size(tm)...)
+    @tullio IAIv[a,b,c,d] := Ã[c,i]*v[a,b,i,d]
+    @test tm*v ≈ IAIv rtol=1e-14
+    @inferred LazyTensors.split_index(tm,1,1,1,1)
+
+    # Test InflatedTensorMapping with before only
+    tm = InflatedTensorMapping(I(3,2), A)
+    v = rand(domain_size(tm)...)
+    @tullio IAIv[a,b,c] := Ã[c,i]*v[a,b,i]
+    @test tm*v ≈ IAIv rtol=1e-14
+    @inferred LazyTensors.split_index(tm,1,1,1)
+
+    # Test InflatedTensorMapping with after only
+    tm = InflatedTensorMapping(A,I(4))
+    v = rand(domain_size(tm)...)
+    @tullio IAIv[c,d] := Ã[c,i]*v[i,d]
+    @test tm*v ≈ IAIv rtol=1e-14
+    @inferred LazyTensors.split_index(tm,1,1)
+
+    struct ScalingOperator{T,D} <: TensorMapping{T,D,D}
+        λ::T
+        size::NTuple{D,Int}
+    end
+
+    LazyTensors.apply(m::ScalingOperator{T,D}, v, I::Vararg{Index,D}) where {T,D} = m.λ*v[I]
+    LazyTensors.range_size(m::ScalingOperator) = m.size
+    LazyTensors.domain_size(m::ScalingOperator) = m.size
+
+    tm = InflatedTensorMapping(I(2,3),ScalingOperator(2.0, (3,2)),I(3,4))
+    v = rand(domain_size(tm)...)
+
+    @inferred LazyTensors.split_index(tm,1,2,3,2,2,4)
+    @inferred apply(tm,v,Index{Unknown}.((1,2,3,2,2,4))...)
+    @inferred (tm*v)[1,2,3,2,2,4]
+
+    @testset "InflatedTensorMapping of InflatedTensorMapping" begin
+        A = ScalingOperator(2.0,(2,3))
+        itm = InflatedTensorMapping(I(3,2), A, I(4))
+        @test  InflatedTensorMapping(I(4), itm, I(2)) == InflatedTensorMapping(I(4,3,2), A, I(4,2))
+        @test  InflatedTensorMapping(itm, I(2)) == InflatedTensorMapping(I(3,2), A, I(4,2))
+        @test  InflatedTensorMapping(I(4), itm) == InflatedTensorMapping(I(4,3,2), A, I(4))
+
+        @test InflatedTensorMapping(I(2), I(2), I(2)) isa InflatedTensorMapping # The constructor should always return its type.
+    end
+
+end
+
+@testset "slice_tuple" begin
+    @test LazyTensors.slice_tuple((1,2,3),Val(1), Val(3)) == (1,2,3)
+    @test LazyTensors.slice_tuple((1,2,3,4,5,6),Val(2), Val(5)) == (2,3,4,5)
+    @test LazyTensors.slice_tuple((1,2,3,4,5,6),Val(1), Val(3)) == (1,2,3)
+    @test LazyTensors.slice_tuple((1,2,3,4,5,6),Val(4), Val(6)) == (4,5,6)
+end
+
+@testset "flatten_tuple" begin
+    @test LazyTensors.flatten_tuple((1,)) == (1,)
+    @test LazyTensors.flatten_tuple((1,2,3,4,5,6)) == (1,2,3,4,5,6)
+    @test LazyTensors.flatten_tuple((1,2,(3,4),5,6)) == (1,2,3,4,5,6)
+    @test LazyTensors.flatten_tuple((1,2,(3,(4,5)),6)) == (1,2,3,4,5,6)
+    @test LazyTensors.flatten_tuple(((1,2),(3,4),(5,),6)) == (1,2,3,4,5,6)
+end
+
+
+@testset "LazyOuterProduct" begin
+    struct ScalingOperator{T,D} <: TensorMapping{T,D,D}
+        λ::T
+        size::NTuple{D,Int}
+    end
+
+    LazyTensors.apply(m::ScalingOperator{T,D}, v, I::Vararg{Index,D}) where {T,D} = m.λ*v[I]
+    LazyTensors.range_size(m::ScalingOperator) = m.size
+    LazyTensors.domain_size(m::ScalingOperator) = m.size
+
+    A = ScalingOperator(2.0, (5,))
+    B = ScalingOperator(3.0, (3,))
+    C = ScalingOperator(5.0, (3,2))
+
+    AB = LazyOuterProduct(A,B)
+    @test AB isa TensorMapping{T,2,2} where T
+    @test range_size(AB) == (5,3)
+    @test domain_size(AB) == (5,3)
+
+    v = rand(range_size(AB)...)
+    @test AB*v == 6*v
+
+    ABC = LazyOuterProduct(A,B,C)
+
+    @test ABC isa TensorMapping{T,4,4} where T
+    @test range_size(ABC) == (5,3,3,2)
+    @test domain_size(ABC) == (5,3,3,2)
+
+    @test A⊗B == AB
+    @test A⊗B⊗C == ABC
+
+    A = rand(3,2)
+    B = rand(2,4,3)
+
+    v₁ = rand(2,4,3)
+    v₂ = rand(4,3,2)
+
+    Ã = LazyLinearMap(A,(1,),(2,))
+    B̃ = LazyLinearMap(B,(1,),(2,3))
+
+    ÃB̃ = LazyOuterProduct(Ã,B̃)
+    @tullio ABv[i,k] := A[i,j]*B[k,l,m]*v₁[j,l,m]
+    @test ÃB̃*v₁ ≈ ABv
+
+    B̃Ã = LazyOuterProduct(B̃,Ã)
+    @tullio BAv[k,i] := A[i,j]*B[k,l,m]*v₂[l,m,j]
+    @test B̃Ã*v₂ ≈ BAv
+
+    @testset "Indentity mapping arguments" begin
+        @test LazyOuterProduct(IdentityMapping(3,2), IdentityMapping(1,2)) == IdentityMapping(3,2,1,2)
+
+        Ã = LazyLinearMap(A,(1,),(2,))
+        @test LazyOuterProduct(IdentityMapping(3,2), Ã) == InflatedTensorMapping(IdentityMapping(3,2),Ã)
+        @test LazyOuterProduct(Ã, IdentityMapping(3,2)) == InflatedTensorMapping(Ã,IdentityMapping(3,2))
+
+        I1 = IdentityMapping(3,2)
+        I2 = IdentityMapping(4)
+        @test I1⊗Ã⊗I2 == InflatedTensorMapping(I1, Ã, I2)
+    end
+
 end
 
 end