sbplib_julia: diff src/LazyTensors/lazy_tensor_operations.jl @ 995:1ba8a398af9c refactor/lazy_tensors
Rename types
| author | Jonatan Werpers <jonatan@werpers.com> |
| --- | --- |
| date | Fri, 18 Mar 2022 21:14:47 +0100 |
| parents | 55ab7801c45f |
| children | 20c376dffe84 |
--- a/src/LazyTensors/lazy_tensor_operations.jl	Fri Mar 18 20:44:17 2022 +0100
+++ b/src/LazyTensors/lazy_tensor_operations.jl	Fri Mar 18 21:14:47 2022 +0100
@@ -1,19 +1,19 @@
 # TBD: Is there a good way to split this file?
 """
-    LazyTensorMappingApplication{T,R,D} <: LazyArray{T,R}
+    LazyTensorApplication{T,R,D} <: LazyArray{T,R}
 
-Struct for lazy application of a TensorMapping. Created using `*`.
+Struct for lazy application of a LazyTensor. Created using `*`.
 
-Allows the result of a `TensorMapping` applied to a vector to be treated as an `AbstractArray`.
-With a mapping `m` and a vector `v` the LazyTensorMappingApplication object can be created by `m*v`.
+Allows the result of a `LazyTensor` applied to a vector to be treated as an `AbstractArray`.
+With a mapping `m` and a vector `v` the LazyTensorApplication object can be created by `m*v`.
 The actual result will be calcualted when indexing into `m*v`.
 """
-struct LazyTensorMappingApplication{T,R,D, TM<:TensorMapping{<:Any,R,D}, AA<:AbstractArray{<:Any,D}} <: LazyArray{T,R}
+struct LazyTensorApplication{T,R,D, TM<:LazyTensor{<:Any,R,D}, AA<:AbstractArray{<:Any,D}} <: LazyArray{T,R}
     t::TM
     o::AA
 
-    function LazyTensorMappingApplication(t::TensorMapping{<:Any,R,D}, o::AbstractArray{<:Any,D}) where {R,D}
+    function LazyTensorApplication(t::LazyTensor{<:Any,R,D}, o::AbstractArray{<:Any,D}) where {R,D}
         I = ntuple(i->1, range_dim(t))
         T = typeof(apply(t,o,I...))
         return new{T,R,D,typeof(t), typeof(o)}(t,o)
@@ -21,103 +21,103 @@
 end
 
 # TODO: Do boundschecking on creation!
-Base.getindex(ta::LazyTensorMappingApplication{T,R}, I::Vararg{Any,R}) where {T,R} = apply(ta.t, ta.o, I...)
-Base.getindex(ta::LazyTensorMappingApplication{T,1}, I::CartesianIndex{1}) where {T} = apply(ta.t, ta.o, I.I...) # Would otherwise be caught in the previous method.
-Base.size(ta::LazyTensorMappingApplication) = range_size(ta.t)
+Base.getindex(ta::LazyTensorApplication{T,R}, I::Vararg{Any,R}) where {T,R} = apply(ta.t, ta.o, I...)
+Base.getindex(ta::LazyTensorApplication{T,1}, I::CartesianIndex{1}) where {T} = apply(ta.t, ta.o, I.I...) # Would otherwise be caught in the previous method.
+Base.size(ta::LazyTensorApplication) = range_size(ta.t)
 # TODO: What else is needed to implement the AbstractArray interface?
 
-Base.:*(a::TensorMapping, v::AbstractArray) = LazyTensorMappingApplication(a,v)
-Base.:*(a::TensorMapping, b::TensorMapping) = throw(MethodError(Base.:*,(a,b)))
-Base.:*(a::TensorMapping, args::Union{TensorMapping, AbstractArray}...) = foldr(*,(a,args...))
+Base.:*(a::LazyTensor, v::AbstractArray) = LazyTensorApplication(a,v)
+Base.:*(a::LazyTensor, b::LazyTensor) = throw(MethodError(Base.:*,(a,b)))
+Base.:*(a::LazyTensor, args::Union{LazyTensor, AbstractArray}...) = foldr(*,(a,args...))
 
 # # We need the associativity to be a→b→c = a→(b→c), which is the case for '→'
 # # Should we overload some other infix binary opesrator?
-# →(tm::TensorMapping{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = LazyTensorMappingApplication(tm,o)
+# →(tm::LazyTensor{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = LazyTensorApplication(tm,o)
 
 # TODO: We need to be really careful about good error messages.
-# For example what happens if you try to multiply LazyTensorMappingApplication with a TensorMapping(wrong order)?
+# For example what happens if you try to multiply LazyTensorApplication with a LazyTensor(wrong order)?
 
 """
-    LazyTensorMappingTranspose{T,R,D} <: TensorMapping{T,D,R}
+    LazyTensorTranspose{T,R,D} <: LazyTensor{T,D,R}
 
-Struct for lazy transpose of a TensorMapping.
+Struct for lazy transpose of a LazyTensor.
 
 If a mapping implements the the `apply_transpose` method this allows working with
-the transpose of mapping `m` by using `m'`. `m'` will work as a regular TensorMapping lazily calling
+the transpose of mapping `m` by using `m'`. `m'` will work as a regular LazyTensor lazily calling
 the appropriate methods of `m`.
 """
-struct LazyTensorMappingTranspose{T,R,D, TM<:TensorMapping{T,R,D}} <: TensorMapping{T,D,R}
+struct LazyTensorTranspose{T,R,D, TM<:LazyTensor{T,R,D}} <: LazyTensor{T,D,R}
     tm::TM
 end
 
 # # TBD: Should this be implemented on a type by type basis or through a trait to provide earlier errors?
-# Jonatan 2020-09-25: Is the problem that you can take the transpose of any TensorMapping even if it doesn't implement `apply_transpose`?
-Base.adjoint(tm::TensorMapping) = LazyTensorMappingTranspose(tm)
-Base.adjoint(tmt::LazyTensorMappingTranspose) = tmt.tm
+# Jonatan 2020-09-25: Is the problem that you can take the transpose of any LazyTensor even if it doesn't implement `apply_transpose`?
+Base.adjoint(tm::LazyTensor) = LazyTensorTranspose(tm)
+Base.adjoint(tmt::LazyTensorTranspose) = tmt.tm
 
-apply(tmt::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{<:Any,R}, I::Vararg{Any,D}) where {T,R,D} = apply_transpose(tmt.tm, v, I...)
-apply_transpose(tmt::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,R}) where {T,R,D} = apply(tmt.tm, v, I...)
+apply(tmt::LazyTensorTranspose{T,R,D}, v::AbstractArray{<:Any,R}, I::Vararg{Any,D}) where {T,R,D} = apply_transpose(tmt.tm, v, I...)
+apply_transpose(tmt::LazyTensorTranspose{T,R,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,R}) where {T,R,D} = apply(tmt.tm, v, I...)
 
-range_size(tmt::LazyTensorMappingTranspose) = domain_size(tmt.tm)
-domain_size(tmt::LazyTensorMappingTranspose) = range_size(tmt.tm)
+range_size(tmt::LazyTensorTranspose) = domain_size(tmt.tm)
+domain_size(tmt::LazyTensorTranspose) = range_size(tmt.tm)
 
-struct LazyTensorMappingBinaryOperation{Op,T,R,D,T1<:TensorMapping{T,R,D},T2<:TensorMapping{T,R,D}} <: TensorMapping{T,D,R}
+struct LazyLazyTensorBinaryOperation{Op,T,R,D,T1<:LazyTensor{T,R,D},T2<:LazyTensor{T,R,D}} <: LazyTensor{T,D,R}
     tm1::T1
    tm2::T2
 
-    @inline function LazyTensorMappingBinaryOperation{Op,T,R,D}(tm1::T1,tm2::T2) where {Op,T,R,D, T1<:TensorMapping{T,R,D},T2<:TensorMapping{T,R,D}}
+    @inline function LazyLazyTensorBinaryOperation{Op,T,R,D}(tm1::T1,tm2::T2) where {Op,T,R,D, T1<:LazyTensor{T,R,D},T2<:LazyTensor{T,R,D}}
         return new{Op,T,R,D,T1,T2}(tm1,tm2)
     end
 end
 # TODO: Boundschecking in constructor.
 
-apply(tmBinOp::LazyTensorMappingBinaryOperation{:+,T,R,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) + apply(tmBinOp.tm2, v, I...)
-apply(tmBinOp::LazyTensorMappingBinaryOperation{:-,T,R,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) - apply(tmBinOp.tm2, v, I...)
+apply(tmBinOp::LazyLazyTensorBinaryOperation{:+,T,R,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) + apply(tmBinOp.tm2, v, I...)
+apply(tmBinOp::LazyLazyTensorBinaryOperation{:-,T,R,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) - apply(tmBinOp.tm2, v, I...)
 
-range_size(tmBinOp::LazyTensorMappingBinaryOperation) = range_size(tmBinOp.tm1)
-domain_size(tmBinOp::LazyTensorMappingBinaryOperation) = domain_size(tmBinOp.tm1)
+range_size(tmBinOp::LazyLazyTensorBinaryOperation) = range_size(tmBinOp.tm1)
+domain_size(tmBinOp::LazyLazyTensorBinaryOperation) = domain_size(tmBinOp.tm1)
 
-Base.:+(tm1::TensorMapping{T,R,D}, tm2::TensorMapping{T,R,D}) where {T,R,D} = LazyTensorMappingBinaryOperation{:+,T,R,D}(tm1,tm2)
-Base.:-(tm1::TensorMapping{T,R,D}, tm2::TensorMapping{T,R,D}) where {T,R,D} = LazyTensorMappingBinaryOperation{:-,T,R,D}(tm1,tm2)
+Base.:+(tm1::LazyTensor{T,R,D}, tm2::LazyTensor{T,R,D}) where {T,R,D} = LazyLazyTensorBinaryOperation{:+,T,R,D}(tm1,tm2)
+Base.:-(tm1::LazyTensor{T,R,D}, tm2::LazyTensor{T,R,D}) where {T,R,D} = LazyLazyTensorBinaryOperation{:-,T,R,D}(tm1,tm2)
 
 """
-    TensorMappingComposition{T,R,K,D}
+    LazyTensorComposition{T,R,K,D}
 
-Lazily compose two `TensorMapping`s, so that they can be handled as a single `TensorMapping`.
+Lazily compose two `LazyTensor`s, so that they can be handled as a single `LazyTensor`.
 """
-struct TensorMappingComposition{T,R,K,D, TM1<:TensorMapping{T,R,K}, TM2<:TensorMapping{T,K,D}} <: TensorMapping{T,R,D}
+struct LazyTensorComposition{T,R,K,D, TM1<:LazyTensor{T,R,K}, TM2<:LazyTensor{T,K,D}} <: LazyTensor{T,R,D}
    t1::TM1
    t2::TM2
 
-    @inline function TensorMappingComposition(t1::TensorMapping{T,R,K}, t2::TensorMapping{T,K,D}) where {T,R,K,D}
+    @inline function LazyTensorComposition(t1::LazyTensor{T,R,K}, t2::LazyTensor{T,K,D}) where {T,R,K,D}
        @boundscheck check_domain_size(t1, range_size(t2))
        return new{T,R,K,D, typeof(t1), typeof(t2)}(t1,t2)
    end
 end
 
-range_size(tm::TensorMappingComposition) = range_size(tm.t1)
-domain_size(tm::TensorMappingComposition) = domain_size(tm.t2)
+range_size(tm::LazyTensorComposition) = range_size(tm.t1)
+domain_size(tm::LazyTensorComposition) = domain_size(tm.t2)
 
-function apply(c::TensorMappingComposition{T,R,K,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,R}) where {T,R,K,D}
+function apply(c::LazyTensorComposition{T,R,K,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,R}) where {T,R,K,D}
     apply(c.t1, c.t2*v, I...)
 end
 
-function apply_transpose(c::TensorMappingComposition{T,R,K,D}, v::AbstractArray{<:Any,R}, I::Vararg{Any,D}) where {T,R,K,D}
+function apply_transpose(c::LazyTensorComposition{T,R,K,D}, v::AbstractArray{<:Any,R}, I::Vararg{Any,D}) where {T,R,K,D}
     apply_transpose(c.t2, c.t1'*v, I...)
 end
 
-Base.@propagate_inbounds Base.:∘(s::TensorMapping, t::TensorMapping) = TensorMappingComposition(s,t)
+Base.@propagate_inbounds Base.:∘(s::LazyTensor, t::LazyTensor) = LazyTensorComposition(s,t)
 
 """
     LazyLinearMap{T,R,D,...}(A, range_indicies, domain_indicies)
 
-TensorMapping defined by the AbstractArray A. `range_indicies` and `domain_indicies` define which indicies of A should
-be considerd the range and domain of the TensorMapping. Each set of indices must be ordered in ascending order.
+LazyTensor defined by the AbstractArray A. `range_indicies` and `domain_indicies` define which indicies of A should
+be considerd the range and domain of the LazyTensor. Each set of indices must be ordered in ascending order.
 
 For instance, if A is a m x n matrix, and range_size = (1,), domain_size = (2,), then the LazyLinearMap performs the
 standard matrix-vector product on vectors of size n.
 """
-struct LazyLinearMap{T,R,D, RD, AA<:AbstractArray{T,RD}} <: TensorMapping{T,R,D}
+struct LazyLinearMap{T,R,D, RD, AA<:AbstractArray{T,RD}} <: LazyTensor{T,R,D}
     A::AA
     range_indicies::NTuple{R,Int}
     domain_indicies::NTuple{D,Int}
@@ -149,54 +149,54 @@
 
 """
-    IdentityMapping{T,D} <: TensorMapping{T,D,D}
+    IdentityTensor{T,D} <: LazyTensor{T,D,D}
 
-The lazy identity TensorMapping for a given size. Usefull for building up higher dimensional tensor mappings from lower
-dimensional ones through outer products. Also used in the Implementation for InflatedTensorMapping.
+The lazy identity LazyTensor for a given size. Usefull for building up higher dimensional tensor mappings from lower
+dimensional ones through outer products. Also used in the Implementation for InflatedLazyTensor.
 """
-struct IdentityMapping{T,D} <: TensorMapping{T,D,D}
+struct IdentityTensor{T,D} <: LazyTensor{T,D,D}
     size::NTuple{D,Int}
 end
 
-IdentityMapping{T}(size::NTuple{D,Int}) where {T,D} = IdentityMapping{T,D}(size)
-IdentityMapping{T}(size::Vararg{Int,D}) where {T,D} = IdentityMapping{T,D}(size)
-IdentityMapping(size::Vararg{Int,D}) where D = IdentityMapping{Float64,D}(size)
+IdentityTensor{T}(size::NTuple{D,Int}) where {T,D} = IdentityTensor{T,D}(size)
+IdentityTensor{T}(size::Vararg{Int,D}) where {T,D} = IdentityTensor{T,D}(size)
+IdentityTensor(size::Vararg{Int,D}) where D = IdentityTensor{Float64,D}(size)
 
-range_size(tmi::IdentityMapping) = tmi.size
-domain_size(tmi::IdentityMapping) = tmi.size
+range_size(tmi::IdentityTensor) = tmi.size
+domain_size(tmi::IdentityTensor) = tmi.size
 
-apply(tmi::IdentityMapping{T,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,D}) where {T,D} = v[I...]
-apply_transpose(tmi::IdentityMapping{T,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,D}) where {T,D} = v[I...]
+apply(tmi::IdentityTensor{T,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,D}) where {T,D} = v[I...]
+apply_transpose(tmi::IdentityTensor{T,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,D}) where {T,D} = v[I...]
 
 """
     Base.:∘(tm, tmi)
     Base.:∘(tmi, tm)
 
-Composes a `Tensormapping` `tm` with an `IdentityMapping` `tmi`, by returning `tm`
+Composes a `Tensormapping` `tm` with an `IdentityTensor` `tmi`, by returning `tm`
 """
-@inline function Base.:∘(tm::TensorMapping{T,R,D}, tmi::IdentityMapping{T,D}) where {T,R,D}
+@inline function Base.:∘(tm::LazyTensor{T,R,D}, tmi::IdentityTensor{T,D}) where {T,R,D}
     @boundscheck check_domain_size(tm, range_size(tmi))
     return tm
 end
 
-@inline function Base.:∘(tmi::IdentityMapping{T,R}, tm::TensorMapping{T,R,D}) where {T,R,D}
+@inline function Base.:∘(tmi::IdentityTensor{T,R}, tm::LazyTensor{T,R,D}) where {T,R,D}
    @boundscheck check_domain_size(tmi, range_size(tm))
    return tm
 end
 
-# Specialization for the case where tm is an IdentityMapping. Required to resolve ambiguity.
-@inline function Base.:∘(tm::IdentityMapping{T,D}, tmi::IdentityMapping{T,D}) where {T,D}
+# Specialization for the case where tm is an IdentityTensor. Required to resolve ambiguity.
+@inline function Base.:∘(tm::IdentityTensor{T,D}, tmi::IdentityTensor{T,D}) where {T,D}
    @boundscheck check_domain_size(tm, range_size(tmi))
    return tmi
 end
-# TODO: Implement the above as TensorMappingComposition instead
+# TODO: Implement the above as LazyTensorComposition instead
 
 # TODO: Move the operator definitions to one place
 
 """
-    ScalingTensor{T,D} <: TensorMapping{T,D,D}
+    ScalingTensor{T,D} <: LazyTensor{T,D,D}
 
 A lazy tensor that scales its input with `λ`.
 """
-struct ScalingTensor{T,D} <: TensorMapping{T,D,D}
+struct ScalingTensor{T,D} <: LazyTensor{T,D,D}
     λ::T
     size::NTuple{D,Int}
 end
@@ -211,16 +211,16 @@
 # TODO: Remove ScalingOperator from tests
 
 """
-    InflatedTensorMapping{T,R,D} <: TensorMapping{T,R,D}
+    InflatedLazyTensor{T,R,D} <: LazyTensor{T,R,D}
 
-An inflated `TensorMapping` with dimensions added before and afer its actual dimensions.
+An inflated `LazyTensor` with dimensions added before and afer its actual dimensions.
 """
-struct InflatedTensorMapping{T,R,D,D_before,R_middle,D_middle,D_after, TM<:TensorMapping{T,R_middle,D_middle}} <: TensorMapping{T,R,D}
-    before::IdentityMapping{T,D_before}
+struct InflatedLazyTensor{T,R,D,D_before,R_middle,D_middle,D_after, TM<:LazyTensor{T,R_middle,D_middle}} <: LazyTensor{T,R,D}
+    before::IdentityTensor{T,D_before}
     tm::TM
-    after::IdentityMapping{T,D_after}
+    after::IdentityTensor{T,D_after}
 
-    function InflatedTensorMapping(before, tm::TensorMapping{T}, after) where T
+    function InflatedLazyTensor(before, tm::LazyTensor{T}, after) where T
        R_before = range_dim(before)
        R_middle = range_dim(tm)
        R_after = range_dim(after)
@@ -234,35 +234,35 @@
    end
 end
 
 """
-    InflatedTensorMapping(before, tm, after)
-    InflatedTensorMapping(before,tm)
-    InflatedTensorMapping(tm,after)
+    InflatedLazyTensor(before, tm, after)
+    InflatedLazyTensor(before,tm)
+    InflatedLazyTensor(tm,after)
 
-The outer product of `before`, `tm` and `after`, where `before` and `after` are `IdentityMapping`s.
+The outer product of `before`, `tm` and `after`, where `before` and `after` are `IdentityTensor`s.
 
-If one of `before` or `after` is left out, a 0-dimensional `IdentityMapping` is used as the default value.
+If one of `before` or `after` is left out, a 0-dimensional `IdentityTensor` is used as the default value.
 
-If `tm` already is an `InflatedTensorMapping`, `before` and `after` will be extended instead of
-creating a nested `InflatedTensorMapping`.
+If `tm` already is an `InflatedLazyTensor`, `before` and `after` will be extended instead of
+creating a nested `InflatedLazyTensor`.
 """
-InflatedTensorMapping(::IdentityMapping, ::TensorMapping, ::IdentityMapping)
+InflatedLazyTensor(::IdentityTensor, ::LazyTensor, ::IdentityTensor)
 
-function InflatedTensorMapping(before, itm::InflatedTensorMapping, after)
-    return InflatedTensorMapping(
-        IdentityMapping(before.size..., itm.before.size...),
+function InflatedLazyTensor(before, itm::InflatedLazyTensor, after)
+    return InflatedLazyTensor(
+        IdentityTensor(before.size..., itm.before.size...),
         itm.tm,
-        IdentityMapping(itm.after.size..., after.size...),
+        IdentityTensor(itm.after.size..., after.size...),
     )
 end
-InflatedTensorMapping(before::IdentityMapping, tm::TensorMapping{T}) where T = InflatedTensorMapping(before,tm,IdentityMapping{T}())
-InflatedTensorMapping(tm::TensorMapping{T}, after::IdentityMapping) where T = InflatedTensorMapping(IdentityMapping{T}(),tm,after)
+InflatedLazyTensor(before::IdentityTensor, tm::LazyTensor{T}) where T = InflatedLazyTensor(before,tm,IdentityTensor{T}())
+InflatedLazyTensor(tm::LazyTensor{T}, after::IdentityTensor) where T = InflatedLazyTensor(IdentityTensor{T}(),tm,after)
 # Resolve ambiguity between the two previous methods
-InflatedTensorMapping(I1::IdentityMapping{T}, I2::IdentityMapping{T}) where T = InflatedTensorMapping(I1,I2,IdentityMapping{T}())
+InflatedLazyTensor(I1::IdentityTensor{T}, I2::IdentityTensor{T}) where T = InflatedLazyTensor(I1,I2,IdentityTensor{T}())
 
-# TODO: Implement some pretty printing in terms of ⊗. E.g InflatedTensorMapping(I(3),B,I(2)) -> I(3)⊗B⊗I(2)
+# TODO: Implement some pretty printing in terms of ⊗. E.g InflatedLazyTensor(I(3),B,I(2)) -> I(3)⊗B⊗I(2)
 
-function range_size(itm::InflatedTensorMapping)
+function range_size(itm::InflatedLazyTensor)
     return flatten_tuple(
         range_size(itm.before),
         range_size(itm.tm),
@@ -270,7 +270,7 @@
     )
 end
 
-function domain_size(itm::InflatedTensorMapping)
+function domain_size(itm::InflatedLazyTensor)
     return flatten_tuple(
        domain_size(itm.before),
        domain_size(itm.tm),
@@ -278,7 +278,7 @@
     )
 end
 
-function apply(itm::InflatedTensorMapping{T,R,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,R}) where {T,R,D}
+function apply(itm::InflatedLazyTensor{T,R,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,R}) where {T,R,D}
     dim_before = range_dim(itm.before)
     dim_domain = domain_dim(itm.tm)
     dim_range = range_dim(itm.tm)
@@ -290,7 +290,7 @@
     return apply(itm.tm, v_inner, inner_index...)
 end
 
-function apply_transpose(itm::InflatedTensorMapping{T,R,D}, v::AbstractArray{<:Any,R}, I::Vararg{Any,D}) where {T,R,D}
+function apply_transpose(itm::InflatedLazyTensor{T,R,D}, v::AbstractArray{<:Any,R}, I::Vararg{Any,D}) where {T,R,D}
     dim_before = range_dim(itm.before)
     dim_domain = domain_dim(itm.tm)
     dim_range = range_dim(itm.tm)
@@ -383,7 +383,7 @@
 @doc raw"""
     LazyOuterProduct(tms...)
 
-Creates a `TensorMappingComposition` for the outerproduct of `tms...`.
+Creates a `LazyTensorComposition` for the outerproduct of `tms...`.
 
 This is done by separating the outer product into regular products of outer products involving
 only identity mappings and one non-identity mapping. First let
@@ -419,34 +419,34 @@
 """
 function LazyOuterProduct end
 
-function LazyOuterProduct(tm1::TensorMapping{T}, tm2::TensorMapping{T}) where T
-    itm1 = InflatedTensorMapping(tm1, IdentityMapping{T}(range_size(tm2)))
-    itm2 = InflatedTensorMapping(IdentityMapping{T}(domain_size(tm1)),tm2)
+function LazyOuterProduct(tm1::LazyTensor{T}, tm2::LazyTensor{T}) where T
+    itm1 = InflatedLazyTensor(tm1, IdentityTensor{T}(range_size(tm2)))
+    itm2 = InflatedLazyTensor(IdentityTensor{T}(domain_size(tm1)),tm2)
 
     return itm1∘itm2
 end
 
-LazyOuterProduct(t1::IdentityMapping{T}, t2::IdentityMapping{T}) where T = IdentityMapping{T}(t1.size...,t2.size...)
-LazyOuterProduct(t1::TensorMapping, t2::IdentityMapping) = InflatedTensorMapping(t1, t2)
-LazyOuterProduct(t1::IdentityMapping, t2::TensorMapping) = InflatedTensorMapping(t1, t2)
+LazyOuterProduct(t1::IdentityTensor{T}, t2::IdentityTensor{T}) where T = IdentityTensor{T}(t1.size...,t2.size...)
+LazyOuterProduct(t1::LazyTensor, t2::IdentityTensor) = InflatedLazyTensor(t1, t2)
+LazyOuterProduct(t1::IdentityTensor, t2::LazyTensor) = InflatedLazyTensor(t1, t2)
 
-LazyOuterProduct(tms::Vararg{TensorMapping}) = foldl(LazyOuterProduct, tms)
+LazyOuterProduct(tms::Vararg{LazyTensor}) = foldl(LazyOuterProduct, tms)
 
-⊗(a::TensorMapping, b::TensorMapping) = LazyOuterProduct(a,b)
+⊗(a::LazyTensor, b::LazyTensor) = LazyOuterProduct(a,b)
 
-function check_domain_size(tm::TensorMapping, sz)
+function check_domain_size(tm::LazyTensor, sz)
     if domain_size(tm) != sz
         throw(SizeMismatch(tm,sz))
     end
 end
 
 struct SizeMismatch <: Exception
-    tm::TensorMapping
+    tm::LazyTensor
     sz
 end
 
 function Base.showerror(io::IO, err::SizeMismatch)
     print(io, "SizeMismatch: ")
-    print(io, "domain size $(domain_size(err.tm)) of TensorMapping not matching size $(err.sz)")
+    print(io, "domain size $(domain_size(err.tm)) of LazyTensor not matching size $(err.sz)")
 end
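For context, the renamed types compose exactly as the old ones did; only the names change. Below is a minimal usage sketch of the post-rename API from this changeset (`LazyTensorApplication` via `*`, `LazyTensorTranspose` via `'`, `LazyTensorComposition` via `∘`, `InflatedLazyTensor` via `⊗`). It is illustrative only and not part of the changeset: the `using` path and the export list are assumptions, and `ScalingTensor`'s `apply`/`apply_transpose` methods lie outside the hunks shown above.

```julia
# Minimal usage sketch of the renamed API. Assumptions: the package's top-level
# module is `Sbplib` and `LazyTensors` exports these names and operators;
# `ScalingTensor` implements `apply`/`apply_transpose` (not shown in this diff).
using Sbplib.LazyTensors

s  = ScalingTensor(2.0, (3,))      # a LazyTensor{Float64,1,1} that multiplies by 2
Id = IdentityTensor{Float64}(3)    # lazy identity on vectors of length 3
v  = [1.0, 2.0, 3.0]

app = s * v                        # LazyTensorApplication: nothing is computed yet
app[2]                             # == 4.0, evaluated lazily on getindex

(s' * v)[2]                        # LazyTensorTranspose forwards to apply_transpose; == 4.0

c = s ∘ s                          # LazyTensorComposition with a domain/range size check
(c * v)[1]                         # == 4.0

s ∘ Id === s                       # composing with the identity just returns `s`

S2 = Id ⊗ s                        # LazyOuterProduct -> InflatedLazyTensor on 3×3 arrays
range_size(S2)                     # == (3, 3)
(S2 * ones(3, 3))[1, 2]            # == 2.0
```

Apart from the new spellings, dispatch and lazy evaluation are unchanged, which is why the hunks above are one-to-one renames.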