changeset   541:62d96e2cd165
branch      refactor/tensor_index_coupling
Make the coupling between all the LazyTensors code and the Index type much weaker to make the module more flexible
author      Jonatan Werpers <jonatan@werpers.com>
date        Thu, 26 Nov 2020 21:35:34 +0100
parents     013ca4892540
children    011ca1639153
files       src/LazyTensors/lazy_tensor_operations.jl test/testLazyTensors.jl
diffstat    2 files changed, 26 insertions(+), 34 deletions(-)
--- a/src/LazyTensors/lazy_tensor_operations.jl	Thu Nov 26 17:53:40 2020 +0100
+++ b/src/LazyTensors/lazy_tensor_operations.jl	Thu Nov 26 21:35:34 2020 +0100
@@ -16,8 +16,7 @@
 
 # TODO: Go through and remove unneccerary type parameters on functions
 
-Base.getindex(ta::LazyTensorMappingApplication{T,R,D}, I::Vararg{Index,R}) where {T,R,D} = apply(ta.t, ta.o, I...)
-Base.getindex(ta::LazyTensorMappingApplication{T,R,D}, I::Vararg{Int,R}) where {T,R,D} = apply(ta.t, ta.o, Index{Unknown}.(I)...)
+Base.getindex(ta::LazyTensorMappingApplication{T,R,D}, I::Vararg{Any,R}) where {T,R,D} = apply(ta.t, ta.o, I...)
 Base.size(ta::LazyTensorMappingApplication) = range_size(ta.t)
 # TODO: What else is needed to implement the AbstractArray interface?
 
@@ -50,8 +49,8 @@
 Base.adjoint(tm::TensorMapping) = LazyTensorMappingTranspose(tm)
 Base.adjoint(tmt::LazyTensorMappingTranspose) = tmt.tm
 
-apply(tmt::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{T,R}, I::Vararg{Index,D}) where {T,R,D} = apply_transpose(tmt.tm, v, I...)
-apply_transpose(tmt::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{T,D}, I::Vararg{Index,R}) where {T,R,D} = apply(tmt.tm, v, I...)
+apply(tmt::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{T,R}, I::Vararg{Any,D}) where {T,R,D} = apply_transpose(tmt.tm, v, I...)
+apply_transpose(tmt::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{T,D}, I::Vararg{Any,R}) where {T,R,D} = apply(tmt.tm, v, I...)
 
 range_size(tmt::LazyTensorMappingTranspose) = domain_size(tmt.tm)
 domain_size(tmt::LazyTensorMappingTranspose) = range_size(tmt.tm)
@@ -67,8 +66,8 @@
 end
 # TODO: Boundschecking in constructor.
 
-apply(tmBinOp::LazyTensorMappingBinaryOperation{:+,T,R,D}, v::AbstractArray{T,D}, I::Vararg{Index,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) + apply(tmBinOp.tm2, v, I...)
-apply(tmBinOp::LazyTensorMappingBinaryOperation{:-,T,R,D}, v::AbstractArray{T,D}, I::Vararg{Index,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) - apply(tmBinOp.tm2, v, I...)
+apply(tmBinOp::LazyTensorMappingBinaryOperation{:+,T,R,D}, v::AbstractArray{T,D}, I::Vararg{Any,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) + apply(tmBinOp.tm2, v, I...)
+apply(tmBinOp::LazyTensorMappingBinaryOperation{:-,T,R,D}, v::AbstractArray{T,D}, I::Vararg{Any,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) - apply(tmBinOp.tm2, v, I...)
 
 range_size(tmBinOp::LazyTensorMappingBinaryOperation{Op,T,R,D}) where {Op,T,R,D} = range_size(tmBinOp.tm1)
 domain_size(tmBinOp::LazyTensorMappingBinaryOperation{Op,T,R,D}) where {Op,T,R,D} = domain_size(tmBinOp.tm1)
@@ -132,7 +131,7 @@
 range_size(llm::LazyLinearMap) = size(llm.A)[[llm.range_indicies...]]
 domain_size(llm::LazyLinearMap) = size(llm.A)[[llm.domain_indicies...]]
 
-function apply(llm::LazyLinearMap{T,R,D}, v::AbstractArray{T,D}, I::Vararg{Index,R}) where {T,R,D}
+function apply(llm::LazyLinearMap{T,R,D}, v::AbstractArray{T,D}, I::Vararg{Any,R}) where {T,R,D}
     view_index = ntuple(i->:,ndims(llm.A))
     for i ∈ 1:R
         view_index = Base.setindex(view_index, Int(I[i]), llm.range_indicies[i])
@@ -141,7 +140,7 @@
     return sum(A_view.*v)
 end
 
-function apply_transpose(llm::LazyLinearMap{T,R,D}, v::AbstractArray{T,R}, I::Vararg{Index,D}) where {T,R,D}
+function apply_transpose(llm::LazyLinearMap{T,R,D}, v::AbstractArray{T,R}, I::Vararg{Any,D}) where {T,R,D}
     apply(LazyLinearMap(llm.A, llm.domain_indicies, llm.range_indicies), v, I...)
 end
 
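The practical effect of the Vararg{Any,R} signatures above is that TensorMapping implementations no longer have to mention the Index type at all; whatever index-like values the caller passes (plain Ints included) are forwarded untouched to apply. A minimal standalone sketch of the pattern follows; the names MyTensorMapping and Doubler are illustrative only and not part of the package.

    # Illustrative sketch, independent of the package: an apply method whose index
    # arguments are unconstrained, matching the Vararg{Any,R} signatures in the diff.
    abstract type MyTensorMapping{T,R,D} end

    struct Doubler{T} <: MyTensorMapping{T,1,1}
        size::NTuple{1,Int}
    end

    # No Index{<:Region} constraint: plain integers (or any index-like value) dispatch here.
    apply(m::Doubler{T}, v, I::Vararg{Any,1}) where {T} = 2*v[I...]

    v = [1.0, 2.0, 3.0]
    m = Doubler{Float64}((3,))
    @assert apply(m, v, 2) == 4.0        # a plain Int works
    @assert apply(m, v, Int8(3)) == 6.0  # so does any other integer-like index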
--- a/test/testLazyTensors.jl	Thu Nov 26 17:53:40 2020 +0100
+++ b/test/testLazyTensors.jl	Thu Nov 26 21:35:34 2020 +0100
@@ -8,10 +8,10 @@
 @testset "Generic Mapping methods" begin
     struct DummyMapping{T,R,D} <: TensorMapping{T,R,D} end
 
-    LazyTensors.apply(m::DummyMapping{T,R,D}, v, i::NTuple{R,Index{<:Region}}) where {T,R,D} = :apply
+    LazyTensors.apply(m::DummyMapping{T,R,D}, v, I::Vararg{Any,R}) where {T,R,D} = :apply
     @test range_dim(DummyMapping{Int,2,3}()) == 2
     @test domain_dim(DummyMapping{Int,2,3}()) == 3
-    @test apply(DummyMapping{Int,2,3}(), zeros(Int, (0,0,0)),(Index{Unknown}(0),Index{Unknown}(0))) == :apply
+    @test apply(DummyMapping{Int,2,3}(), zeros(Int, (0,0,0)),0,0) == :apply
     @test eltype(DummyMapping{Int,2,3}()) == Int
     @test eltype(DummyMapping{Float64,2,3}()) == Float64
 end
@@ -19,19 +19,18 @@
 @testset "Mapping transpose" begin
     struct DummyMapping{T,R,D} <: TensorMapping{T,R,D} end
 
-    LazyTensors.apply(m::DummyMapping{T,R,D}, v, I::Vararg{Index{<:Region},R}) where {T,R,D} = :apply
-    LazyTensors.apply_transpose(m::DummyMapping{T,R,D}, v, I::Vararg{Index{<:Region},D}) where {T,R,D} = :apply_transpose
+    LazyTensors.apply(m::DummyMapping{T,R,D}, v, I::Vararg{Any,R}) where {T,R,D} = :apply
+    LazyTensors.apply_transpose(m::DummyMapping{T,R,D}, v, I::Vararg{Any,D}) where {T,R,D} = :apply_transpose
 
     LazyTensors.range_size(m::DummyMapping{T,R,D}) where {T,R,D} = :range_size
     LazyTensors.domain_size(m::DummyMapping{T,R,D}) where {T,R,D} = :domain_size
 
     m = DummyMapping{Float64,2,3}()
-    I = Index{Unknown}(0)
     @test m' isa TensorMapping{Float64, 3,2}
     @test m'' == m
-    @test apply(m',zeros(Float64,(0,0)), I, I, I) == :apply_transpose
-    @test apply(m'',zeros(Float64,(0,0,0)), I, I) == :apply
-    @test apply_transpose(m', zeros(Float64,(0,0,0)), I, I) == :apply
+    @test apply(m',zeros(Float64,(0,0)), 0, 0, 0) == :apply_transpose
+    @test apply(m'',zeros(Float64,(0,0,0)), 0, 0) == :apply
+    @test apply_transpose(m', zeros(Float64,(0,0,0)), 0, 0) == :apply
 
     @test range_size(m') == :domain_size
     @test domain_size(m') == :range_size
@@ -42,7 +41,7 @@
         domain_size::NTuple{D,Int}
     end
 
-    LazyTensors.apply(m::SizeDoublingMapping{T,R,D}, v, i::Vararg{Index{<:Region},R}) where {T,R,D} = (:apply,v,i)
+    LazyTensors.apply(m::SizeDoublingMapping{T,R,D}, v, i::Vararg{Any,R}) where {T,R,D} = (:apply,v,i)
     LazyTensors.range_size(m::SizeDoublingMapping) = 2 .* m.domain_size
     LazyTensors.domain_size(m::SizeDoublingMapping) = m.domain_size
 
@@ -51,15 +50,11 @@
     v = [0,1,2]
     @test m*v isa AbstractVector{Int}
     @test size(m*v) == 2 .*size(v)
-    @test (m*v)[Index{Upper}(0)] == (:apply,v,(Index{Upper}(0),))
-    @test (m*v)[0] == (:apply,v,(Index{Unknown}(0),))
+    @test (m*v)[0] == (:apply,v,(0,))
     @test m*m*v isa AbstractVector{Int}
-    @test (m*m*v)[Index{Upper}(1)] == (:apply,m*v,(Index{Upper}(1),))
-    @test (m*m*v)[1] == (:apply,m*v,(Index{Unknown}(1),))
-    @test (m*m*v)[Index{Interior}(3)] == (:apply,m*v,(Index{Interior}(3),))
-    @test (m*m*v)[3] == (:apply,m*v,(Index{Unknown}(3),))
-    @test (m*m*v)[Index{Lower}(6)] == (:apply,m*v,(Index{Lower}(6),))
-    @test (m*m*v)[6] == (:apply,m*v,(Index{Unknown}(6),))
+    @test (m*m*v)[1] == (:apply,m*v,(1,))
+    @test (m*m*v)[3] == (:apply,m*v,(3,))
+    @test (m*m*v)[6] == (:apply,m*v,(6,))
     @test_broken BoundsError == (m*m*v)[0]
     @test_broken BoundsError == (m*m*v)[7]
     @test_throws MethodError m*m
@@ -70,16 +65,15 @@
 
     m = SizeDoublingMapping{Float64, 2, 2}((3,3))
     v = ones(3,3)
-    I = (Index{Lower}(1),Index{Interior}(2));
     @test size(m*v) == 2 .*size(v)
-    @test (m*v)[I] == (:apply,v,I)
+    @test (m*v)[1,2] == (:apply,v,(1,2))
 
     struct ScalingOperator{T,D} <: TensorMapping{T,D,D}
         λ::T
         size::NTuple{D,Int}
    end
 
-    LazyTensors.apply(m::ScalingOperator{T,D}, v, I::Vararg{Index,D}) where {T,D} = m.λ*v[I]
+    LazyTensors.apply(m::ScalingOperator{T,D}, v, I::Vararg{Any,D}) where {T,D} = m.λ*v[I...]
     LazyTensors.range_size(m::ScalingOperator) = m.size
     LazyTensors.domain_size(m::ScalingOperator) = m.size
 
@@ -91,8 +85,7 @@
     m = ScalingOperator{Int,2}(2,(2,2))
     v = [[1 2];[3 4]]
     @test m*v == [[2 4];[6 8]]
-    I = (Index{Upper}(2),Index{Lower}(1))
-    @test (m*v)[I] == 6
+    @test (m*v)[2,1] == 6
 end
 
 @testset "TensorMapping binary operations" begin
@@ -102,7 +95,7 @@
         domain_size::NTuple{D,Int}
     end
 
-    LazyTensors.apply(m::ScalarMapping{T,R,D}, v, I::Vararg{Index{<:Region}}) where {T,R,D} = m.λ*v[I...]
+    LazyTensors.apply(m::ScalarMapping{T,R,D}, v, I::Vararg{Any,R}) where {T,R,D} = m.λ*v[I...]
     LazyTensors.range_size(m::ScalarMapping) = m.domain_size
     LazyTensors.domain_size(m::ScalarMapping) = m.range_size
 
@@ -438,14 +431,14 @@
             size::NTuple{D,Int}
        end
 
-        LazyTensors.apply(m::ScalingOperator{T,D}, v, I::Vararg{Index,D}) where {T,D} = m.λ*v[I]
+        LazyTensors.apply(m::ScalingOperator{T,D}, v, I::Vararg{Any,D}) where {T,D} = m.λ*v[I...]
        LazyTensors.range_size(m::ScalingOperator) = m.size
        LazyTensors.domain_size(m::ScalingOperator) = m.size
 
        tm = InflatedTensorMapping(I(2,3),ScalingOperator(2.0, (3,2)),I(3,4))
        v = rand(domain_size(tm)...)
 
-        @inferred apply(tm,v,Index{Unknown}.((1,2,3,2,2,4))...)
+        @inferred apply(tm,v,1,2,3,2,2,4)
        @inferred (tm*v)[1,2,3,2,2,4]
    end
 end
@@ -523,7 +516,7 @@
            size::NTuple{D,Int}
        end
 
-        LazyTensors.apply(m::ScalingOperator{T,D}, v, I::Vararg{Index,D}) where {T,D} = m.λ*v[I]
+        LazyTensors.apply(m::ScalingOperator{T,D}, v, I::Vararg{Any,D}) where {T,D} = m.λ*v[I...]
        LazyTensors.range_size(m::ScalingOperator) = m.size
        LazyTensors.domain_size(m::ScalingOperator) = m.size
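On the call side, the single getindex method in the first hunk simply passes whatever indices it receives through to apply, which is why the tests above can index lazy results with plain integers. Below is a self-contained sketch of that forwarding; LazyApplication is a hypothetical stand-in for the package's LazyTensorMappingApplication, not its actual definition.

    # Stand-in sketch of the forwarding pattern: getindex hands its indices to apply
    # unchanged, so the caller decides what index type to use.
    struct LazyApplication{T,R}
        f   # stand-in for the wrapped TensorMapping
        v   # stand-in for the wrapped operand
    end

    apply(a::LazyApplication, I...) = a.f(a.v, I...)
    Base.getindex(a::LazyApplication{T,R}, I::Vararg{Any,R}) where {T,R} = apply(a, I...)

    a = LazyApplication{Float64,1}((v, i) -> 2v[i], [1.0, 2.0, 3.0])
    @assert a[3] == 6.0   # a plain Int is forwarded as-is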