sbplib_julia: changeset 189:e8e21db70112 (branch: boundary_conditions)
Merge
author | Jonatan Werpers <jonatan@werpers.com>
---|---
date | Thu, 20 Jun 2019 22:22:43 +0200
parents | 715ff09bb2ce (diff), 4558789b5948 (current diff)
children | 8964b3165097
files | LazyTensors/src/Lazy.jl
diffstat | 12 files changed, 313 insertions(+), 369 deletions(-)
--- a/LazyTensor/Manifest.toml	Thu Jun 20 21:36:31 2019 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,33 +0,0 @@
-# This file is machine-generated - editing it directly is not advised
-
-[[Base64]]
-uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
-
-[[Distributed]]
-deps = ["Random", "Serialization", "Sockets"]
-uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
-
-[[InteractiveUtils]]
-deps = ["Markdown"]
-uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
-
-[[Logging]]
-uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
-
-[[Markdown]]
-deps = ["Base64"]
-uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
-
-[[Random]]
-deps = ["Serialization"]
-uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
-
-[[Serialization]]
-uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
-
-[[Sockets]]
-uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
-
-[[Test]]
-deps = ["Distributed", "InteractiveUtils", "Logging", "Random"]
-uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
--- a/LazyTensor/Project.toml	Thu Jun 20 21:36:31 2019 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,10 +0,0 @@
-name = "LazyTensor"
-uuid = "62fbed2c-918d-11e9-279b-eb3a325b37d3"
-authors = ["Jonatan Werpers <jonatan.werpers@it.uu.se>"]
-version = "0.1.0"
-
-[extras]
-Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
-
-[targets]
-test = ["Test"]
--- a/LazyTensor/src/Lazy.jl	Thu Jun 20 21:36:31 2019 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,73 +0,0 @@
-module Lazy
-
-# Struct allowing for lazy evaluation of operations on AbstractArrays
-# A LazyElementwiseOperation is defined by two same-sized AbstractArrays
-# together with an operation. The operations are carried out when the
-# LazyElementwiseOperation is indexed.
-struct LazyElementwiseOperation{T,D,Op, T1<:AbstractArray{T,D}, T2 <: AbstractArray{T,D}} <: AbstractArray{T,D}
-    a::T1
-    b::T2
-
-    function LazyElementwiseOperation{T,D,Op}(a::T1,b::T2) where {T,D,Op, T1<:AbstractArray{T,D}, T2<:AbstractArray{T,D}}
-        #TODO: Remove assert? Asserts are not removed when compiling with
-        # optimization flags. If so, need to handle boundschecking proparly.
-        @assert size(a) == size(b)
-        return new{T,D,Op,T1,T2}(a,b)
-    end
-end
-
-Base.size(v::LazyElementwiseOperation) = size(v.a)
-
-# NOTE: Boundschecking in getindex functions now assumes that the size of the
-# vectors in the LazyElementwiseOperation are the same size. If we remove the
-# size assertion in the constructor we might have to handle
-# boundschecking differently.
-Base.@propagate_inbounds @inline function Base.getindex(leo::LazyElementwiseOperation{T,D,:+}, I...) where {T,D}
-    @boundscheck if !checkbounds(Bool,leo.a,I...)
-        throw(BoundsError([leo],[I...]))
-    end
-    return leo.a[I...] + leo.b[I...]
-end
-Base.@propagate_inbounds @inline function Base.getindex(leo::LazyElementwiseOperation{T,D,:-}, I...) where {T,D}
-    @boundscheck if !checkbounds(Bool,leo.a,I...)
-        throw(BoundsError([leo],[I...]))
-    end
-    return leo.a[I...] - leo.b[I...]
-end
-Base.@propagate_inbounds @inline function Base.getindex(leo::LazyElementwiseOperation{T,D,:*}, I...) where {T,D}
-    @boundscheck if !checkbounds(Bool,leo.a,I...)
-        throw(BoundsError([leo],[I...]))
-    end
-    return leo.a[I...] * leo.b[I...]
-end
-Base.@propagate_inbounds @inline function Base.getindex(leo::LazyElementwiseOperation{T,D,:/}, I...) where {T,D}
-    @boundscheck if !checkbounds(Bool,leo.a,I...)
-        throw(BoundsError([leo],[I...]))
-    end
-    return leo.a[I...] / leo.b[I...]
-end
-
-# Define lazy operations for AbstractArrays. Operations constructs a LazyElementwiseOperation which
-# can later be indexed into. Lazy operations are denoted by the usual operator followed by a tilde
-@inline +̃(a::AbstractArray{T,D},b::AbstractArray{T,D}) where {T,D} = LazyElementwiseOperation{T,D,:+}(a,b)
-@inline -̃(a::AbstractArray{T,D},b::AbstractArray{T,D}) where {T,D} = LazyElementwiseOperation{T,D,:-}(a,b)
-@inline *̃(a::AbstractArray{T,D},b::AbstractArray{T,D}) where {T,D} = LazyElementwiseOperation{T,D,:*}(a,b)
-@inline /̃(a::AbstractArray{T,D},b::AbstractArray{T,D}) where {T,D} = LazyElementwiseOperation{T,D,:/}(a,b)
-
-# Abstract type for which the normal operations are defined by their
-# lazy counterparts
-abstract type LazyArray{T,D} <: AbstractArray{T,D} end;
-
-Base.:+(a::LazyArray{T,D},b::AbstractArray{T,D}) where {T,D} = a +̃ b
-Base.:+(a::AbstractArray{T,D}, b::LazyArray{T,D}) where {T,D} = b + a
-Base.:-(a::LazyArray{T,D},b::AbstractArray{T,D}) where {T,D} = a -̃ b
-Base.:-(a::AbstractArray{T,D}, b::LazyArray{T,D}) where {T,D} = a -̃ b
-Base.:*(a::LazyArray{T,D},b::AbstractArray{T,D}) where {T,D} = a *̃ b
-Base.:*(a::AbstractArray{T,D},b::LazyArray{T,D}) where {T,D} = b * a
-# TODO: / seems to be ambiguous
-# Base.:/(a::LazyArray{T,D},b::AbstractArray{T,D}) where {T,D} = a /̃ b
-# Base.:/(a::AbstractArray{T,D},b::LazyArray{T,D}) where {T,D} = a /̃ b
-
-export +̃, -̃, *̃, /̃, +, -, * #, /
-
-end
--- a/LazyTensor/src/LazyTensor.jl	Thu Jun 20 21:36:31 2019 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,165 +0,0 @@
-module LazyTensor
-
-
-"""
-    Mapping{T,R,D}
-
-Describes a mapping of a D dimension tensor to an R dimension tensor.
-The action of the mapping is implemented through the method
-
-    apply(t::Mapping{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {R,D,T}
-
-The size of range tensor should be dependent on the size of the domain tensor
-and the type should implement the methods
-
-    range_size(::Mapping{T,R,D}, domain_size::NTuple{D,Integer}) where {T,R,D}
-    domain_size(::Mapping{T,R,D}, range_size::NTuple{R,Integer}) where {T,R,D}
-
-to allow querying for one or the other.
-
-Optionally the action of the transpose may be defined through
-    apply_transpose(t::Mapping{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {R,D,T}
-"""
-abstract type Mapping{T,R,D} end
-
-"""
-    apply(t::Mapping{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {R,D,T}
-
-Return the result of the mapping for a given index.
-"""
-function apply end
-export apply
-
-"""
-    apply_transpose(t::Mapping{T,R,D}, v::AbstractArray{T,R}, I::Vararg) where {R,D,T}
-
-Return the result of the transposed mapping for a given index.
-"""
-function apply_transpose end
-export apply_transpose
-
-"""
-Return the dimension of the range space of a given mapping
-"""
-range_dim(::Mapping{T,R,D}) where {T,R,D} = R
-
-"""
-Return the dimension of the domain space of a given mapping
-"""
-domain_dim(::Mapping{T,R,D}) where {T,R,D} = D
-
-export range_dim, domain_dim
-
-"""
-    range_size(M::Mapping, domain_size)
-
-Return the resulting range size for the mapping applied to a given domain_size
-"""
-function range_size end
-
-"""
-    domain_size(M::Mapping, range_size)
-
-Return the resulting domain size for the mapping applied to a given range_size
-"""
-function domain_size end
-
-export range_size, domain_size
-# TODO: Think about boundschecking!
-
-
-"""
-    Operator{T,D}
-
-A `Mapping{T,D,D}` where the range and domain tensor have the same number of
-dimensions and the same size.
-"""
-abstract type Operator{T,D} <: Mapping{T,D,D} end
-domain_size(::Operator{T,D}, range_size::NTuple{D,Integer}) where {T,D} = range_size
-range_size(::Operator{T,D}, domain_size::NTuple{D,Integer}) where {T,D} = domain_size
-
-
-
-"""
-    MappingTranspose{T,R,D} <: Mapping{T,D,R}
-
-Struct for lazy transpose of a Mapping.
-
-If a mapping implements the the `apply_transpose` method this allows working with
-the transpose of mapping `m` by using `m'`. `m'` will work as a regular Mapping lazily calling
-the appropriate methods of `m`.
-"""
-struct MappingTranspose{T,R,D} <: Mapping{T,D,R}
-    tm::Mapping{T,R,D}
-end
-
-# # TBD: Should this be implemented on a type by type basis or through a trait to provide earlier errors?
-Base.adjoint(t::Mapping) = MappingTranspose(t)
-Base.adjoint(t::MappingTranspose) = t.tm
-
-apply(tm::MappingTranspose{T,R,D}, v::AbstractArray{T,R}, I::Vararg) where {T,R,D} = apply_transpose(tm.tm, v, I...)
-apply_transpose(tm::MappingTranspose{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {T,R,D} = apply(tm.tm, v, I...)
-
-range_size(tmt::MappingTranspose{T,R,D}, d_size::NTuple{R,Integer}) where {T,R,D} = domain_size(tmt.tm, domain_size)
-domain_size(tmt::MappingTranspose{T,R,D}, r_size::NTuple{D,Integer}) where {T,R,D} = range_size(tmt.tm, range_size)
-
-
-"""
-    Application{T,R,D} <: AbstractArray{T,R}
-
-Struct for lazy application of a Mapping. Created using `*`.
-
-Allows the result of a `Mapping` applied to a vector to be treated as an `AbstractArray`.
-With a mapping `m` and a vector `v` the Application object can be created by `m*v`.
-The actual result will be calcualted when indexing into `m*v`.
-"""
-struct Application{T,R,D} <: AbstractArray{T,R}
-    t::Mapping{T,R,D}
-    o::AbstractArray{T,D}
-end
-
-Base.:*(tm::Mapping{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = Application(tm,o)
-
-Base.getindex(ta::Application{T,R,D}, I::Vararg) where {T,R,D} = apply(ta.t, ta.o, I...)
-Base.size(ta::Application{T,R,D}) where {T,R,D} = range_size(ta.t,size(ta.o))
-# TODO: What else is needed to implement the AbstractArray interface?
-
-
-# # We need the associativity to be a→b→c = a→(b→c), which is the case for '→'
-Base.:*(args::Union{Mapping{T}, AbstractArray{T}}...) where T = foldr(*,args)
-# # Should we overload some other infix binary operator?
-# →(tm::Mapping{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = Application(tm,o)
-# TODO: We need to be really careful about good error messages.
-# For example what happens if you try to multiply Application with a Mapping(wrong order)?
-
-
-
-# struct TensorMappingComposition{T,R,K,D} <: Mapping{T,R,D}
-#     t1::Mapping{T,R,K}
-#     t2::Mapping{T,K,D}
-# end
-
-# Base.:∘(s::Mapping{T,R,K}, t::Mapping{T,K,D}) where {T,R,K,D} = TensorMappingComposition(s,t)
-
-# function range_size(tm::TensorMappingComposition{T,R,K,D}, domain_size::NTuple{D,Integer}) where {T,R,K,D}
-#     range_size(tm.t1, domain_size(tm.t2, domain_size))
-# end
-
-# function domain_size(tm::TensorMappingComposition{T,R,K,D}, range_size::NTuple{R,Integer}) where {T,R,K,D}
-#     domain_size(tm.t1, domain_size(tm.t2, range_size))
-# end
-
-# function apply(c::TensorMappingComposition{T,R,K,D}, v::AbstractArray{T,D}, I::Vararg) where {T,R,K,D}
-#     apply(c.t1, Application(c.t2,v), I...)
-# end
-
-# function apply_transpose(c::TensorMappingComposition{T,R,K,D}, v::AbstractArray{T,D}, I::Vararg) where {T,R,K,D}
-#     apply_transpose(c.t2, Application(c.t1',v), I...)
-# end
-
-# # Have i gone too crazy with the type parameters? Maybe they aren't all needed?
-
-# export →
-
-
-end # module
--- a/LazyTensor/test/runtests.jl	Thu Jun 20 21:36:31 2019 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,56 +0,0 @@
-using Test
-using LazyTensor
-
-
-
-@testset "Generic Mapping methods" begin
-    struct DummyMapping{T,R,D} <: LazyTensor.Mapping{T,R,D} end
-    LazyTensor.apply(m::DummyMapping{T,R,D}, v, i) where {T,R,D} = :apply
-    @test range_dim(DummyMapping{Int,2,3}()) == 2
-    @test domain_dim(DummyMapping{Int,2,3}()) == 3
-    @test apply(DummyMapping{Int,2,3}(), zeros(Int, (0,0,0)),0) == :apply
-end
-
-struct DummyOperator{T,D} <: LazyTensor.Operator{T,D} end
-@testset "Generic Operator methods" begin
-    @test range_size(DummyOperator{Int,2}(), (3,5)) == (3,5)
-    @test domain_size(DummyOperator{Float64, 3}(), (3,3,1)) == (3,3,1)
-end
-
-@testset "Mapping transpose" begin
-    struct DummyMapping{T,R,D} <: LazyTensor.Mapping{T,R,D} end
-
-    LazyTensor.apply(m::DummyMapping{T,R,D}, v, i) where {T,R,D} = :apply
-    LazyTensor.apply_transpose(m::DummyMapping{T,R,D}, v, i) where {T,R,D} = :apply_transpose
-
-    LazyTensor.range_size(m::DummyMapping{T,R,D}, domain_size) where {T,R,D} = :range_size
-    LazyTensor.domain_size(m::DummyMapping{T,R,D}, range_size) where {T,R,D} = :domain_size
-
-    m = DummyMapping{Float64,2,3}()
-    @test m'' == m
-    @test apply(m',zeros(Float64,(0,0)),0) == :apply_transpose
-    @test apply(m'',zeros(Float64,(0,0,0)),0) == :apply
-    @test apply_transpose(m', zeros(Float64,(0,0,0)),0) == :apply
-
-    @test range_size(m', (0,0)) == :domain_size
-    @test domain_size(m', (0,0,0)) == :range_size
-end
-
-@testset "TensorApplication" begin
-    struct DummyMapping{T,R,D} <: LazyTensor.Mapping{T,R,D} end
-
-    LazyTensor.apply(m::DummyMapping{T,R,D}, v, i) where {T,R,D} = (:apply,v,i)
-    LazyTensor.apply_transpose(m::DummyMapping{T,R,D}, v, i) where {T,R,D} = :apply_transpose
-
-    LazyTensor.range_size(m::DummyMapping{T,R,D}, domain_size) where {T,R,D} = 2 .* domain_size
-    LazyTensor.domain_size(m::DummyMapping{T,R,D}, range_size) where {T,R,D} = range_size.÷2
-
-
-    m = DummyMapping{Int, 1, 1}()
-    v = [0,1,2]
-    @test m*v isa AbstractVector{Int}
-    @test size(m*v) == 2 .*size(v)
-    @test (m*v)[0] == (:apply,v,0)
-    @test m*m*v isa AbstractVector{Int}
-    @test (m*m*v)[0] == (:apply,m*v,0)
-end
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/LazyTensors/Manifest.toml	Thu Jun 20 22:22:43 2019 +0200
@@ -0,0 +1,2 @@
+# This file is machine-generated - editing it directly is not advised
+
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/LazyTensors/Project.toml	Thu Jun 20 22:22:43 2019 +0200
@@ -0,0 +1,10 @@
+name = "LazyTensors"
+uuid = "62fbed2c-918d-11e9-279b-eb3a325b37d3"
+authors = ["Jonatan Werpers <jonatan.werpers@it.uu.se>"]
+version = "0.1.0"
+
+[extras]
+Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
+
+[targets]
+test = ["Test"]
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/LazyTensors/src/Lazy.jl	Thu Jun 20 22:22:43 2019 +0200
@@ -0,0 +1,73 @@
+module Lazy
+
+# Struct allowing for lazy evaluation of operations on AbstractArrays
+# A LazyElementwiseOperation is defined by two same-sized AbstractArrays
+# together with an operation. The operations are carried out when the
+# LazyElementwiseOperation is indexed.
+struct LazyElementwiseOperation{T,D,Op, T1<:AbstractArray{T,D}, T2 <: AbstractArray{T,D}} <: AbstractArray{T,D}
+    a::T1
+    b::T2
+
+    function LazyElementwiseOperation{T,D,Op}(a::T1,b::T2) where {T,D,Op, T1<:AbstractArray{T,D}, T2<:AbstractArray{T,D}}
+        #TODO: Remove assert? Asserts are not removed when compiling with
+        # optimization flags. If so, need to handle boundschecking proparly.
+        @assert size(a) == size(b)
+        return new{T,D,Op,T1,T2}(a,b)
+    end
+end
+
+Base.size(v::LazyElementwiseOperation) = size(v.a)
+
+# NOTE: Boundschecking in getindex functions now assumes that the size of the
+# vectors in the LazyElementwiseOperation are the same size. If we remove the
+# size assertion in the constructor we might have to handle
+# boundschecking differently.
+Base.@propagate_inbounds @inline function Base.getindex(leo::LazyElementwiseOperation{T,D,:+}, I...) where {T,D}
+    @boundscheck if !checkbounds(Bool,leo.a,I...)
+        throw(BoundsError([leo],[I...]))
+    end
+    return leo.a[I...] + leo.b[I...]
+end
+Base.@propagate_inbounds @inline function Base.getindex(leo::LazyElementwiseOperation{T,D,:-}, I...) where {T,D}
+    @boundscheck if !checkbounds(Bool,leo.a,I...)
+        throw(BoundsError([leo],[I...]))
+    end
+    return leo.a[I...] - leo.b[I...]
+end
+Base.@propagate_inbounds @inline function Base.getindex(leo::LazyElementwiseOperation{T,D,:*}, I...) where {T,D}
+    @boundscheck if !checkbounds(Bool,leo.a,I...)
+        throw(BoundsError([leo],[I...]))
+    end
+    return leo.a[I...] * leo.b[I...]
+end
+Base.@propagate_inbounds @inline function Base.getindex(leo::LazyElementwiseOperation{T,D,:/}, I...) where {T,D}
+    @boundscheck if !checkbounds(Bool,leo.a,I...)
+        throw(BoundsError([leo],[I...]))
+    end
+    return leo.a[I...] / leo.b[I...]
+end
+
+# Define lazy operations for AbstractArrays. Operations constructs a LazyElementwiseOperation which
+# can later be indexed into. Lazy operations are denoted by the usual operator followed by a tilde
+@inline +̃(a::AbstractArray{T,D},b::AbstractArray{T,D}) where {T,D} = LazyElementwiseOperation{T,D,:+}(a,b)
+@inline -̃(a::AbstractArray{T,D},b::AbstractArray{T,D}) where {T,D} = LazyElementwiseOperation{T,D,:-}(a,b)
+@inline *̃(a::AbstractArray{T,D},b::AbstractArray{T,D}) where {T,D} = LazyElementwiseOperation{T,D,:*}(a,b)
+@inline /̃(a::AbstractArray{T,D},b::AbstractArray{T,D}) where {T,D} = LazyElementwiseOperation{T,D,:/}(a,b)
+
+# Abstract type for which the normal operations are defined by their
+# lazy counterparts
+abstract type LazyArray{T,D} <: AbstractArray{T,D} end;
+
+Base.:+(a::LazyArray{T,D},b::AbstractArray{T,D}) where {T,D} = a +̃ b
+Base.:+(a::AbstractArray{T,D}, b::LazyArray{T,D}) where {T,D} = b + a
+Base.:-(a::LazyArray{T,D},b::AbstractArray{T,D}) where {T,D} = a -̃ b
+Base.:-(a::AbstractArray{T,D}, b::LazyArray{T,D}) where {T,D} = a -̃ b
+Base.:*(a::LazyArray{T,D},b::AbstractArray{T,D}) where {T,D} = a *̃ b
+Base.:*(a::AbstractArray{T,D},b::LazyArray{T,D}) where {T,D} = b * a
+# TODO: / seems to be ambiguous
+# Base.:/(a::LazyArray{T,D},b::AbstractArray{T,D}) where {T,D} = a /̃ b
+# Base.:/(a::AbstractArray{T,D},b::LazyArray{T,D}) where {T,D} = a /̃ b
+
+export +̃, -̃, *̃, /̃, +, -, * #, /
+
+end
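For illustration, a minimal sketch of how the tilde operators defined in the new `Lazy` module are meant to be used. This example is not part of the changeset, and the `include` path is an assumption: `LazyTensors.jl` does not yet include `Lazy.jl` in this revision.

```julia
# Illustration only (not part of the changeset). Load the Lazy module manually,
# since LazyTensors.jl does not include it yet in this revision.
include("LazyTensors/src/Lazy.jl")
using .Lazy

a = [1.0, 2.0, 3.0]
b = [4.0, 5.0, 6.0]

c = a +̃ b   # builds a LazyElementwiseOperation{Float64,1,:+}; no work is done yet
c[2]        # evaluates a[2] + b[2] on demand, giving 7.0
size(c)     # (3,), the common size of the operands
```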
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/LazyTensors/src/LazyTensors.jl	Thu Jun 20 22:22:43 2019 +0200
@@ -0,0 +1,169 @@
+module LazyTensors
+
+
+"""
+    TensorMapping{T,R,D}
+
+Describes a mapping of a D dimension tensor to an R dimension tensor.
+The action of the mapping is implemented through the method
+
+    apply(t::TensorMapping{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {R,D,T}
+
+The size of range tensor should be dependent on the size of the domain tensor
+and the type should implement the methods
+
+    range_size(::TensorMapping{T,R,D}, domain_size::NTuple{D,Integer}) where {T,R,D}
+    domain_size(::TensorMapping{T,R,D}, range_size::NTuple{R,Integer}) where {T,R,D}
+
+to allow querying for one or the other.
+
+Optionally the action of the transpose may be defined through
+    apply_transpose(t::TensorMapping{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {R,D,T}
+"""
+abstract type TensorMapping{T,R,D} end
+export TensorMapping
+
+"""
+    apply(t::TensorMapping{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {R,D,T}
+
+Return the result of the mapping for a given index.
+"""
+function apply end
+export apply
+
+"""
+    apply_transpose(t::TensorMapping{T,R,D}, v::AbstractArray{T,R}, I::Vararg) where {R,D,T}
+
+Return the result of the transposed mapping for a given index.
+"""
+function apply_transpose end
+export apply_transpose
+
+"""
+Return the dimension of the range space of a given mapping
+"""
+range_dim(::TensorMapping{T,R,D}) where {T,R,D} = R
+
+"""
+Return the dimension of the domain space of a given mapping
+"""
+domain_dim(::TensorMapping{T,R,D}) where {T,R,D} = D
+
+export range_dim, domain_dim
+
+"""
+    range_size(M::TensorMapping, domain_size)
+
+Return the resulting range size for the mapping applied to a given domain_size
+"""
+function range_size end
+
+"""
+    domain_size(M::TensorMapping, range_size)
+
+Return the resulting domain size for the mapping applied to a given range_size
+"""
+function domain_size end
+
+export range_size, domain_size
+# TODO: Think about boundschecking!
+
+
+"""
+    TensorOperator{T,D}
+
+A `TensorMapping{T,D,D}` where the range and domain tensor have the same number of
+dimensions and the same size.
+"""
+abstract type TensorOperator{T,D} <: TensorMapping{T,D,D} end
+export TensorOperator
+domain_size(::TensorOperator{T,D}, range_size::NTuple{D,Integer}) where {T,D} = range_size
+range_size(::TensorOperator{T,D}, domain_size::NTuple{D,Integer}) where {T,D} = domain_size
+
+
+
+"""
+    LazyTensorMappingTranspose{T,R,D} <: TensorMapping{T,D,R}
+
+Struct for lazy transpose of a TensorMapping.
+
+If a mapping implements the the `apply_transpose` method this allows working with
+the transpose of mapping `m` by using `m'`. `m'` will work as a regular TensorMapping lazily calling
+the appropriate methods of `m`.
+"""
+struct LazyTensorMappingTranspose{T,R,D} <: TensorMapping{T,D,R}
+    tm::TensorMapping{T,R,D}
+end
+export LazyTensorMappingTranspose
+
+# # TBD: Should this be implemented on a type by type basis or through a trait to provide earlier errors?
+Base.adjoint(t::TensorMapping) = LazyTensorMappingTranspose(t)
+Base.adjoint(t::LazyTensorMappingTranspose) = t.tm
+
+apply(tm::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{T,R}, I::Vararg) where {T,R,D} = apply_transpose(tm.tm, v, I...)
+apply_transpose(tm::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {T,R,D} = apply(tm.tm, v, I...)
+
+range_size(tmt::LazyTensorMappingTranspose{T,R,D}, d_size::NTuple{R,Integer}) where {T,R,D} = domain_size(tmt.tm, domain_size)
+domain_size(tmt::LazyTensorMappingTranspose{T,R,D}, r_size::NTuple{D,Integer}) where {T,R,D} = range_size(tmt.tm, range_size)
+
+
+"""
+    LazyTensorMappingApplication{T,R,D} <: AbstractArray{T,R}
+
+Struct for lazy application of a TensorMapping. Created using `*`.
+
+Allows the result of a `TensorMapping` applied to a vector to be treated as an `AbstractArray`.
+With a mapping `m` and a vector `v` the LazyTensorMappingApplication object can be created by `m*v`.
+The actual result will be calcualted when indexing into `m*v`.
+"""
+struct LazyTensorMappingApplication{T,R,D} <: AbstractArray{T,R}
+    t::TensorMapping{T,R,D}
+    o::AbstractArray{T,D}
+end
+export LazyTensorMappingApplication
+
+Base.:*(tm::TensorMapping{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = LazyTensorMappingApplication(tm,o)
+
+Base.getindex(ta::LazyTensorMappingApplication{T,R,D}, I::Vararg) where {T,R,D} = apply(ta.t, ta.o, I...)
+Base.size(ta::LazyTensorMappingApplication{T,R,D}) where {T,R,D} = range_size(ta.t,size(ta.o))
+# TODO: What else is needed to implement the AbstractArray interface?
+
+
+# # We need the associativity to be a→b→c = a→(b→c), which is the case for '→'
+Base.:*(args::Union{TensorMapping{T}, AbstractArray{T}}...) where T = foldr(*,args)
+# # Should we overload some other infix binary operator?
+# →(tm::TensorMapping{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = LazyTensorMappingApplication(tm,o)
+# TODO: We need to be really careful about good error messages.
+# For example what happens if you try to multiply LazyTensorMappingApplication with a TensorMapping(wrong order)?
+
+
+
+# struct LazyTensorMappingComposition{T,R,K,D} <: TensorMapping{T,R,D}
+#     t1::TensorMapping{T,R,K}
+#     t2::TensorMapping{T,K,D}
+# end
+
+# Base.:∘(s::TensorMapping{T,R,K}, t::TensorMapping{T,K,D}) where {T,R,K,D} = LazyTensorMappingComposition(s,t)
+
+# function range_size(tm::LazyTensorMappingComposition{T,R,K,D}, domain_size::NTuple{D,Integer}) where {T,R,K,D}
+#     range_size(tm.t1, domain_size(tm.t2, domain_size))
+# end
+
+# function domain_size(tm::LazyTensorMappingComposition{T,R,K,D}, range_size::NTuple{R,Integer}) where {T,R,K,D}
+#     domain_size(tm.t1, domain_size(tm.t2, range_size))
+# end
+
+# function apply(c::LazyTensorMappingComposition{T,R,K,D}, v::AbstractArray{T,D}, I::Vararg) where {T,R,K,D}
+#     apply(c.t1, LazyTensorMappingApplication(c.t2,v), I...)
+# end
+
+# function apply_transpose(c::LazyTensorMappingComposition{T,R,K,D}, v::AbstractArray{T,D}, I::Vararg) where {T,R,K,D}
+#     apply_transpose(c.t2, LazyTensorMappingApplication(c.t1',v), I...)
+# end
+
+# # Have i gone too crazy with the type parameters? Maybe they aren't all needed?
+
+# export →
+
+
+end # module
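The docstrings above spell out the interface a concrete mapping has to implement (`apply`, optionally `apply_transpose`, plus `range_size`/`domain_size`). A minimal sketch of a concrete operator, modeled on the `DummyMapping` used in the test suite below; the `ScalingOperator` type is hypothetical and not part of the changeset.

```julia
# Illustration only (not part of the changeset): a hypothetical TensorOperator
# that scales every element, showing how the interface is meant to be used.
using LazyTensors

struct ScalingOperator{T,D} <: TensorOperator{T,D}
    λ::T
end

LazyTensors.apply(m::ScalingOperator{T,D}, v, I::Vararg) where {T,D} = m.λ * v[I...]
LazyTensors.apply_transpose(m::ScalingOperator{T,D}, v, I::Vararg) where {T,D} = m.λ * v[I...]

s = ScalingOperator{Float64,1}(2.0)
v = [1.0, 2.0, 3.0]

w = s * v   # a LazyTensorMappingApplication; nothing is computed yet
w[3]        # calls apply(s, v, 3), giving 6.0
size(w)     # (3,), since a TensorOperator preserves the size
s'          # lazy transpose; indexing s' * v would go through apply_transpose
```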
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/LazyTensors/test/runtests.jl	Thu Jun 20 22:22:43 2019 +0200
@@ -0,0 +1,56 @@
+using Test
+using LazyTensors
+
+
+
+@testset "Generic Mapping methods" begin
+    struct DummyMapping{T,R,D} <: TensorMapping{T,R,D} end
+    LazyTensors.apply(m::DummyMapping{T,R,D}, v, i) where {T,R,D} = :apply
+    @test range_dim(DummyMapping{Int,2,3}()) == 2
+    @test domain_dim(DummyMapping{Int,2,3}()) == 3
+    @test apply(DummyMapping{Int,2,3}(), zeros(Int, (0,0,0)),0) == :apply
+end
+
+@testset "Generic Operator methods" begin
+    struct DummyOperator{T,D} <: TensorOperator{T,D} end
+    @test range_size(DummyOperator{Int,2}(), (3,5)) == (3,5)
+    @test domain_size(DummyOperator{Float64, 3}(), (3,3,1)) == (3,3,1)
+end
+
+@testset "Mapping transpose" begin
+    struct DummyMapping{T,R,D} <: TensorMapping{T,R,D} end
+
+    LazyTensors.apply(m::DummyMapping{T,R,D}, v, i) where {T,R,D} = :apply
+    LazyTensors.apply_transpose(m::DummyMapping{T,R,D}, v, i) where {T,R,D} = :apply_transpose
+
+    LazyTensors.range_size(m::DummyMapping{T,R,D}, domain_size) where {T,R,D} = :range_size
+    LazyTensors.domain_size(m::DummyMapping{T,R,D}, range_size) where {T,R,D} = :domain_size
+
+    m = DummyMapping{Float64,2,3}()
+    @test m'' == m
+    @test apply(m',zeros(Float64,(0,0)),0) == :apply_transpose
+    @test apply(m'',zeros(Float64,(0,0,0)),0) == :apply
+    @test apply_transpose(m', zeros(Float64,(0,0,0)),0) == :apply
+
+    @test range_size(m', (0,0)) == :domain_size
+    @test domain_size(m', (0,0,0)) == :range_size
+end
+
+@testset "TensorApplication" begin
+    struct DummyMapping{T,R,D} <: TensorMapping{T,R,D} end
+
+    LazyTensors.apply(m::DummyMapping{T,R,D}, v, i) where {T,R,D} = (:apply,v,i)
+    LazyTensors.apply_transpose(m::DummyMapping{T,R,D}, v, i) where {T,R,D} = :apply_transpose
+
+    LazyTensors.range_size(m::DummyMapping{T,R,D}, domain_size) where {T,R,D} = 2 .* domain_size
+    LazyTensors.domain_size(m::DummyMapping{T,R,D}, range_size) where {T,R,D} = range_size.÷2
+
+
+    m = DummyMapping{Int, 1, 1}()
+    v = [0,1,2]
+    @test m*v isa AbstractVector{Int}
+    @test size(m*v) == 2 .*size(v)
+    @test (m*v)[0] == (:apply,v,0)
+    @test m*m*v isa AbstractVector{Int}
+    @test (m*m*v)[0] == (:apply,m*v,0)
+end
\ No newline at end of file
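The renamed package keeps the `Test` extra and test target from `Project.toml`, so the suite can be run through Pkg. A sketch, assuming the working directory is the root of the sbplib_julia repository:

```julia
# Sketch (not part of the changeset): run the LazyTensors test suite.
using Pkg
Pkg.activate("LazyTensors")   # assumes the current directory is the repository root
Pkg.test()
```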
--- a/Manifest.toml	Thu Jun 20 21:36:31 2019 +0200
+++ b/Manifest.toml	Thu Jun 20 22:22:43 2019 +0200
@@ -1,34 +1,6 @@
 # This file is machine-generated - editing it directly is not advised
 
-[[LazyTensor]]
-path = "LazyTensor"
+[[LazyTensors]]
+path = "LazyTensors"
 uuid = "62fbed2c-918d-11e9-279b-eb3a325b37d3"
 version = "0.1.0"
-
-[[Libdl]]
-uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
-
-[[LinearAlgebra]]
-deps = ["Libdl"]
-uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
-
-[[Random]]
-deps = ["Serialization"]
-uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
-
-[[Serialization]]
-uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
-
-[[SparseArrays]]
-deps = ["LinearAlgebra", "Random"]
-uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
-
-[[StaticArrays]]
-deps = ["LinearAlgebra", "Random", "Statistics"]
-git-tree-sha1 = "db23bbf50064c582b6f2b9b043c8e7e98ea8c0c6"
-uuid = "90137ffa-7385-5640-81b9-e52037218182"
-version = "0.11.0"
-
-[[Statistics]]
-deps = ["LinearAlgebra", "SparseArrays"]
-uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"