changeset 187:156bb18a2252 boundary_conditions

Merge with tip
author Vidar Stiernström <vidar.stiernstrom@it.uu.se>
date Thu, 20 Jun 2019 21:22:44 +0200
parents 64b9751b3cb2 (current diff) b7397ae8afaf (diff)
children 4558789b5948
files TensorMappings.jl
diffstat 7 files changed, 301 insertions(+), 134 deletions(-)
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/LazyTensor/Manifest.toml	Thu Jun 20 21:22:44 2019 +0200
@@ -0,0 +1,33 @@
+# This file is machine-generated - editing it directly is not advised
+
+[[Base64]]
+uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f"
+
+[[Distributed]]
+deps = ["Random", "Serialization", "Sockets"]
+uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b"
+
+[[InteractiveUtils]]
+deps = ["Markdown"]
+uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
+
+[[Logging]]
+uuid = "56ddb016-857b-54e1-b83d-db4d58db5568"
+
+[[Markdown]]
+deps = ["Base64"]
+uuid = "d6f4376e-aef5-505a-96c1-9c027394607a"
+
+[[Random]]
+deps = ["Serialization"]
+uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
+
+[[Serialization]]
+uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
+
+[[Sockets]]
+uuid = "6462fe0b-24de-5631-8697-dd941f90decc"
+
+[[Test]]
+deps = ["Distributed", "InteractiveUtils", "Logging", "Random"]
+uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/LazyTensor/Project.toml	Thu Jun 20 21:22:44 2019 +0200
@@ -0,0 +1,10 @@
+name = "LazyTensor"
+uuid = "62fbed2c-918d-11e9-279b-eb3a325b37d3"
+authors = ["Jonatan Werpers <jonatan.werpers@it.uu.se>"]
+version = "0.1.0"
+
+[extras]
+Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
+
+[targets]
+test = ["Test"]
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/LazyTensor/src/LazyTensor.jl	Thu Jun 20 21:22:44 2019 +0200
@@ -0,0 +1,165 @@
+module LazyTensor
+
+
+"""
+    Mapping{T,R,D}
+
+Describes a mapping from a D-dimensional tensor to an R-dimensional tensor.
+The action of the mapping is implemented through the method
+
+    apply(t::Mapping{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {R,D,T}
+
+The size of the range tensor depends on the size of the domain tensor,
+and the type should implement the methods
+
+    range_size(::Mapping{T,R,D}, domain_size::NTuple{D,Integer}) where {T,R,D}
+    domain_size(::Mapping{T,R,D}, range_size::NTuple{R,Integer}) where {T,R,D}
+
+to allow querying for one or the other.
+
+Optionally, the action of the transpose may be defined through
+
+    apply_transpose(t::Mapping{T,R,D}, v::AbstractArray{T,R}, I::Vararg) where {R,D,T}
+"""
+abstract type Mapping{T,R,D} end
+
+"""
+    apply(t::Mapping{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {R,D,T}
+
+Return the result of the mapping for a given index.
+"""
+function apply end
+export apply
+
+"""
+    apply_transpose(t::Mapping{T,R,D}, v::AbstractArray{T,R}, I::Vararg) where {R,D,T}
+
+Return the result of the transposed mapping for a given index.
+"""
+function apply_transpose end
+export apply_transpose
+
+"""
+Return the dimension of the range space of a given mapping
+"""
+range_dim(::Mapping{T,R,D}) where {T,R,D} = R
+
+"""
+Return the dimension of the domain space of a given mapping
+"""
+domain_dim(::Mapping{T,R,D}) where {T,R,D} = D
+
+export range_dim, domain_dim
+
+"""
+    range_size(M::Mapping, domain_size)
+
+Return the resulting range size for the mapping applied to a given `domain_size`.
+"""
+function range_size end
+
+"""
+    domain_size(M::Mapping, range_size)
+
+Return the resulting domain size for the mapping applied to a given `range_size`.
+"""
+function domain_size end
+
+export range_size, domain_size
+# TODO: Think about boundschecking!
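+
+# Example (hypothetical, for illustration only): a mapping that scales every
+# element by a constant implements the interface above as
+#
+#     struct Scaling{T,D} <: Mapping{T,D,D}
+#         λ::T
+#     end
+#     apply(s::Scaling{T,D}, v::AbstractArray{T,D}, I::Vararg) where {T,D} = s.λ*v[I...]
+#     range_size(::Scaling{T,D}, domain_size::NTuple{D,Integer}) where {T,D} = domain_size
+#     domain_size(::Scaling{T,D}, range_size::NTuple{D,Integer}) where {T,D} = range_size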
+
+
+"""
+    Operator{T,D}
+
+A `Mapping{T,D,D}` where the range and domain tensors have the same number of
+dimensions and the same size.
+"""
+abstract type Operator{T,D} <: Mapping{T,D,D} end
+domain_size(::Operator{T,D}, range_size::NTuple{D,Integer}) where {T,D} = range_size
+range_size(::Operator{T,D}, domain_size::NTuple{D,Integer}) where {T,D} = domain_size
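+
+# For example, the hypothetical `Scaling` sketched above is really an Operator:
+# declaring it as `struct Scaling{T,D} <: Operator{T,D}` inherits both size
+# methods, leaving only `apply` to implement.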
+
+
+
+"""
+    MappingTranspose{T,R,D} <: Mapping{T,D,R}
+
+Struct for lazy transpose of a Mapping.
+
+If a mapping implements the `apply_transpose` method, this allows working with
+the transpose of a mapping `m` by using `m'`. `m'` works as a regular Mapping,
+lazily calling the appropriate methods of `m`.
+"""
+struct MappingTranspose{T,R,D} <: Mapping{T,D,R}
+    tm::Mapping{T,R,D}
+end
+
+# TBD: Should this be implemented on a type-by-type basis or through a trait to provide earlier errors?
+Base.adjoint(t::Mapping) = MappingTranspose(t)
+Base.adjoint(t::MappingTranspose) = t.tm
+
+apply(tm::MappingTranspose{T,R,D}, v::AbstractArray{T,R}, I::Vararg) where {T,R,D} = apply_transpose(tm.tm, v, I...)
+apply_transpose(tm::MappingTranspose{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {T,R,D} = apply(tm.tm, v, I...)
+
+range_size(tmt::MappingTranspose{T,R,D}, d_size::NTuple{R,Integer}) where {T,R,D} = domain_size(tmt.tm, d_size)
+domain_size(tmt::MappingTranspose{T,R,D}, r_size::NTuple{D,Integer}) where {T,R,D} = range_size(tmt.tm, r_size)
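+
+# Example (assuming a mapping `m` that also implements `apply_transpose`):
+#
+#     m'                  # lazy transpose; no computation happens here
+#     m'' === m           # adjoint of the adjoint unwraps to the original mapping
+#     apply(m', v, I...)  # forwards to apply_transpose(m, v, I...)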
+
+
+"""
+    Application{T,R,D} <: AbstractArray{T,R}
+
+Struct for lazy application of a Mapping. Created using `*`.
+
+Allows the result of a `Mapping` applied to a vector to be treated as an `AbstractArray`.
+With a mapping `m` and a vector `v`, the Application object is created by `m*v`.
+The actual result is calculated when indexing into `m*v`.
+"""
+struct Application{T,R,D} <: AbstractArray{T,R}
+    t::Mapping{T,R,D}
+    o::AbstractArray{T,D}
+end
+
+Base.:*(tm::Mapping{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = Application(tm,o)
+
+Base.getindex(ta::Application{T,R,D}, I::Vararg) where {T,R,D} = apply(ta.t, ta.o, I...)
+Base.size(ta::Application{T,R,D}) where {T,R,D} = range_size(ta.t,size(ta.o))
+# TODO: What else is needed to implement the AbstractArray interface?
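+
+# Example: for a mapping `m` and a size-compatible array `v`, `w = m*v` only
+# stores the pair (m, v); indexing computes values on demand:
+#
+#     w = m*v             # lazy; nothing is computed yet
+#     w[1]                # == apply(m, v, 1)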
+
+
+# We need the associativity to be a→b→c = a→(b→c), which is the case for '→'
+Base.:*(args::Union{Mapping{T}, AbstractArray{T}}...) where T = foldr(*,args)
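+# E.g. `m1*m2*v` folds from the right into `m1*(m2*v)`, so each `*` pairs a
+# Mapping with an AbstractArray (the inner Application); the undefined
+# Mapping*Mapping product never arises.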
+# Should we overload some other infix binary operator?
+# →(tm::Mapping{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = Application(tm,o)
+# TODO: We need to be really careful about good error messages.
+# For example, what happens if you try to multiply an Application with a Mapping (wrong order)?
+
+
+
+# struct TensorMappingComposition{T,R,K,D} <: Mapping{T,R,D}
+#     t1::Mapping{T,R,K}
+#     t2::Mapping{T,K,D}
+# end
+
+# Base.:∘(s::Mapping{T,R,K}, t::Mapping{T,K,D}) where {T,R,K,D} = TensorMappingComposition(s,t)
+
+# function range_size(tm::TensorMappingComposition{T,R,K,D}, d_size::NTuple{D,Integer}) where {T,R,K,D}
+#     range_size(tm.t1, range_size(tm.t2, d_size))
+# end
+
+# function domain_size(tm::TensorMappingComposition{T,R,K,D}, r_size::NTuple{R,Integer}) where {T,R,K,D}
+#     domain_size(tm.t2, domain_size(tm.t1, r_size))
+# end
+
+# function apply(c::TensorMappingComposition{T,R,K,D}, v::AbstractArray{T,D}, I::Vararg) where {T,R,K,D}
+#     apply(c.t1, Application(c.t2,v), I...)
+# end
+
+# function apply_transpose(c::TensorMappingComposition{T,R,K,D}, v::AbstractArray{T,R}, I::Vararg) where {T,R,K,D}
+#     apply_transpose(c.t2, Application(c.t1',v), I...)
+# end
+
+# # Have I gone too crazy with the type parameters? Maybe they aren't all needed?
+
+# export →
+
+
+end # module
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/LazyTensor/test/runtests.jl	Thu Jun 20 21:22:44 2019 +0200
@@ -0,0 +1,56 @@
+using Test
+using LazyTensor
+
+
+
+@testset "Generic Mapping methods" begin
+    struct DummyMapping{T,R,D} <: LazyTensor.Mapping{T,R,D} end
+    LazyTensor.apply(m::DummyMapping{T,R,D}, v, i) where {T,R,D} = :apply
+    @test range_dim(DummyMapping{Int,2,3}()) == 2
+    @test domain_dim(DummyMapping{Int,2,3}()) == 3
+    @test apply(DummyMapping{Int,2,3}(), zeros(Int, (0,0,0)),0) == :apply
+end
+
+struct DummyOperator{T,D} <: LazyTensor.Operator{T,D} end
+@testset "Generic Operator methods" begin
+    @test range_size(DummyOperator{Int,2}(), (3,5)) == (3,5)
+    @test domain_size(DummyOperator{Float64, 3}(), (3,3,1)) == (3,3,1)
+end
+
+@testset "Mapping transpose" begin
+    struct DummyMapping{T,R,D} <: LazyTensor.Mapping{T,R,D} end
+
+    LazyTensor.apply(m::DummyMapping{T,R,D}, v, i) where {T,R,D} = :apply
+    LazyTensor.apply_transpose(m::DummyMapping{T,R,D}, v, i) where {T,R,D} = :apply_transpose
+
+    LazyTensor.range_size(m::DummyMapping{T,R,D}, domain_size) where {T,R,D} = :range_size
+    LazyTensor.domain_size(m::DummyMapping{T,R,D}, range_size) where {T,R,D} = :domain_size
+
+    m = DummyMapping{Float64,2,3}()
+    @test m'' == m
+    @test apply(m',zeros(Float64,(0,0)),0) == :apply_transpose
+    @test apply(m'',zeros(Float64,(0,0,0)),0) == :apply
+    @test apply_transpose(m', zeros(Float64,(0,0,0)),0) == :apply
+
+    @test range_size(m', (0,0)) == :domain_size
+    @test domain_size(m', (0,0,0)) == :range_size
+end
+
+@testset "TensorApplication" begin
+    struct DummyMapping{T,R,D} <: LazyTensor.Mapping{T,R,D} end
+
+    LazyTensor.apply(m::DummyMapping{T,R,D}, v, i) where {T,R,D} = (:apply,v,i)
+    LazyTensor.apply_transpose(m::DummyMapping{T,R,D}, v, i) where {T,R,D} = :apply_transpose
+
+    LazyTensor.range_size(m::DummyMapping{T,R,D}, domain_size) where {T,R,D} = 2 .* domain_size
+    LazyTensor.domain_size(m::DummyMapping{T,R,D}, range_size) where {T,R,D} = range_size.÷2
+
+
+    m = DummyMapping{Int, 1, 1}()
+    v = [0,1,2]
+    @test m*v isa AbstractVector{Int}
+    @test size(m*v) == 2 .*size(v)
+    @test (m*v)[0] == (:apply,v,0)
+    @test m*m*v isa AbstractVector{Int}
+    @test (m*m*v)[0] == (:apply,m*v,0)
+end
\ No newline at end of file
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Manifest.toml	Thu Jun 20 21:22:44 2019 +0200
@@ -0,0 +1,34 @@
+# This file is machine-generated - editing it directly is not advised
+
+[[LazyTensor]]
+path = "LazyTensor"
+uuid = "62fbed2c-918d-11e9-279b-eb3a325b37d3"
+version = "0.1.0"
+
+[[Libdl]]
+uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb"
+
+[[LinearAlgebra]]
+deps = ["Libdl"]
+uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
+
+[[Random]]
+deps = ["Serialization"]
+uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
+
+[[Serialization]]
+uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b"
+
+[[SparseArrays]]
+deps = ["LinearAlgebra", "Random"]
+uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf"
+
+[[StaticArrays]]
+deps = ["LinearAlgebra", "Random", "Statistics"]
+git-tree-sha1 = "db23bbf50064c582b6f2b9b043c8e7e98ea8c0c6"
+uuid = "90137ffa-7385-5640-81b9-e52037218182"
+version = "0.11.0"
+
+[[Statistics]]
+deps = ["LinearAlgebra", "SparseArrays"]
+uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2"
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/Project.toml	Thu Jun 20 21:22:44 2019 +0200
@@ -0,0 +1,3 @@
+[deps]
+LazyTensor = "62fbed2c-918d-11e9-279b-eb3a325b37d3"
+StaticArrays = "90137ffa-7385-5640-81b9-e52037218182"
--- a/TensorMappings.jl	Mon Jun 17 10:41:20 2019 +0200
+++ /dev/null	Thu Jan 01 00:00:00 1970 +0000
@@ -1,134 +0,0 @@
-module TensorMappings
-# Needs a better name ImplicitTensorMappings? Get rid of "Tensor" in the name_
-
-abstract type TensorMapping{T,R,D} end
-
-range_dim(::TensorMapping{T,R,D}) where {T,R,D} = R
-domain_dim(::TensorMapping{T,R,D}) where {T,R,D} = D
-# range_size(::TensorMapping{T,R,D}, domain_size::NTuple{D,Integer}) where {T,R,D}
-# domain_size(::TensorMapping{T,R,D}, range_size::NTuple{R,Integer}) where {T,R,D}
-
-# apply(t::TensorMapping{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {R,D,T} =
-# apply_transpose(t::TensorMapping{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {R,D,T} =
-# Implementing apply_transpose and domain_size is only needed if you want to take transposes of the TensorMapping.
-# TODO: Think about boundschecking!
-
-abstract type TensorOperator{T,D} <: TensorMapping{T,D,D} end
-domain_size(::TensorOperator{T,D}, range_size::NTuple{D,Integer}) where {T,D} = range_size
-range_size(::TensorOperator{T,D}, domain_size::NTuple{D,Integer}) where {T,D} = domain_size
-
-
-
-# Allow using the ' operator:
-struct TensorMappingTranspose{T,R,D} <: TensorMapping{T,D,R}
-	tm::TensorMapping{T,R,D}
-end
-
-Base.adjoint(t::TensorMapping) = TensorMappingTranspose(t)
-# TBD: Should this be implemented on a type by type basis or through a trait to provide earlier errors?
-Base.adjoint(t::TensorMappingTranspose) = t.tm
-
-apply(tm::TensorMappingTranspose{T,R,D}, v::AbstractArray{T,R}, I::Vararg) where {T,R,D} = apply_transpose(tm.tm, v, I...)
-apply_transpose(tm::TensorMappingTranspose{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {T,R,D} = apply(tm.tm, v, I...)
-
-range_size(tmt::TensorMappingTranspose{T,R,D}, domain_size::NTuple{D,Integer}) where {T,R,D} = domain_size(tmt.tm, domain_size)
-domain_size(tmt::TensorMappingTranspose{T,R,D}, range_size::NTuple{D,Integer}) where {T,R,D} = range_size(tmt.tm, range_size)
-
-
-
-struct TensorApplication{T,R,D} <: AbstractArray{T,R}
-	t::TensorMapping{R,D}
-	o::AbstractArray{T,D}
-end
-
-Base.size(ta::TensorApplication{T,R,D}) where {T,R,D} = range_size(ta.t,size(ta.o))
-Base.getindex(ta::TensorApplication{T,R,D}, I::Vararg) where {T,R,D} = apply(ta.t, ta.o, I...)
-# TODO: What else is needed to implement the AbstractArray interface?
-import Base.*
-→(tm::TensorMapping{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = TensorApplication(tm,o)
-# Should we overload some other infix binary operator?
-# We need the associativity to be a→b→c = a→(b→c), which is the case for '→'
-*(args::Union{TensorMapping{T}, AbstractArray{T}}...) where T = foldr(*,args)
-*(tm::TensorMapping{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = TensorApplication(tm,o)
-*(scalar, ta::TensorApplication{T,R,D}) where {T,R,D} = scalar*ta.o
-*(ta::TensorApplication{T,R,D}, scalar::Number) where {T,R,D} = scalar*ta
-# We need to be really careful about good error messages.
-# For example what happens if you try to multiply TensorApplication with a TensorMapping(wrong order)?
-
-# NOTE: TensorApplicationExpressions attempt to handle the situation when a TensorMapping
-# acts on a TensorApplication +- AbstractArray, such that the expression still can be
-# evaluated lazily per index.
-# TODO: Better naming of both struct and members
-# Since this is a lower layer which shouldnt be exposed, my opinion is that
-# we can afford to be quite verbose.
-struct TensorApplicationExpression{T,R,D} <: AbstractArray{T,R}
-	ta::TensorApplication{R,D}
-	o::AbstractArray{T,D}
-end
-Base.size(tae::TensorApplicationExpression) = size(tae.ta) #TODO: Not sure how to handle this
-Base.getindex(tae::TensorApplicationExpression, I::Vararg) = tae.ta[I...] + tae.o[I...]
-import Base.+
-import Base.-
-+(ta::TensorApplication{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = TensorApplicationExpression(ta,o)
-+(o::AbstractArray{T,D},ta::TensorApplication{T,R,D}) where {T,R,D} = ta + o
--(ta::TensorApplication{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = ta + -o
--(o::AbstractArray{T,D},ta::TensorApplication{T,R,D}) where {T,R,D} = -ta + o
-
-# NOTE: Another (quite neat) way to handle lazy evaluation of
-# TensorApplication + AbstractArray is by using broadcasting.
-# However, with the drafted implementation below a
-# TensorApplication+AbstractArray now results in a generic function and we would
-# then need to define TensorMapping*generic function which does not seem like a
-# good idea.
-# NOTE: Could one use MappedArrays.jl instead?
-#
-# # Lazy evaluations of expressions on TensorApplications
-# # TODO: Need to decide on some good naming here.
-# +(ta::TensorApplication,o::AbstractArray) = I -> ta[I] + o[I]
-# +(o::AbstractArray,ta::TensorApplication) = ta+o
-# *(scalar::Number,ta::TensorApplication) = I -> scalar*ta[I]
-# *(ta::TensorApplication,scalar::Number) = scalar*ta
-# -(ta::TensorApplication,o::AbstractArray) = ta + -o
-# -(o::AbstractArray + ta::TensorApplication) = -ta + o
-
-struct TensorMappingComposition{T,R,K,D} <: TensorMapping{T,R,D} where K<:typeof(R)
-	t1::TensorMapping{T,R,K}
-	t2::TensorMapping{T,K,D}
-end
-
-import Base.∘
-∘(s::TensorMapping{T,R,K}, t::TensorMapping{T,K,D}) where {T,R,K,D} = TensorMappingComposition(s,t)
-
-function range_size(tm::TensorMappingComposition{T,R,K,D}, domain_size::NTuple{D,Integer}) where {T,R,K,D}
-	range_size(tm.t1, domain_size(tm.t2, domain_size))
-end
-
-function domain_size(tm::TensorMappingComposition{T,R,K,D}, range_size::NTuple{R,Integer}) where {T,R,K,D}
-	domain_size(tm.t1, domain_size(tm.t2, range_size))
-end
-
-function apply(c::TensorMappingComposition{T,R,K,D}, v::AbstractArray{T,D}, I::Vararg) where {T,R,K,D}
-	apply(c.t1, TensorApplication(c.t2,v), I...)
-end
-
-function apply_transpose(c::TensorMappingComposition{T,R,K,D}, v::AbstractArray{T,D}, I::Vararg) where {T,R,K,D}
-	apply_transpose(c.t2, TensorApplication(c.t1',v), I...)
-end
-
-# Have i gone too crazy with the type parameters? Maybe they aren't all needed?
-
-
-export apply
-export apply_transpose
-export range_dim
-export domain_dim
-export range_size
-export →
-
-
-# # Automatic dimension expansion?
-# struct TensorOperator1dAd2d{T,I} <: TensorOperator{T,2}
-# 	t::TensorOperator{T,1}
-# end
-
-end #module