changeset 550:dfcf0e506ba9 refactor/tensor_index_coupling
Merge
| author   | Jonatan Werpers <jonatan@werpers.com> |
|----------|---------------------------------------|
| date     | Sun, 29 Nov 2020 12:57:15 +0100 |
| parents  | ace46ca8513f (current diff) 9330338d6ab5 (diff) |
| children | 11637080e409 |
| files    | |
| diffstat | 3 files changed, 17 insertions(+), 20 deletions(-) |
```diff
--- a/src/LazyTensors/lazy_tensor_operations.jl	Fri Nov 27 17:53:26 2020 +0100
+++ b/src/LazyTensors/lazy_tensor_operations.jl	Sun Nov 29 12:57:15 2020 +0100
@@ -14,9 +14,7 @@
 # TODO: Do boundschecking on creation!
 export LazyTensorMappingApplication
 
-# TODO: Go through and remove unneccerary type parameters on functions
-
-Base.getindex(ta::LazyTensorMappingApplication{T,R,D}, I::Vararg{Any,R}) where {T,R,D} = apply(ta.t, ta.o, I...)
+Base.getindex(ta::LazyTensorMappingApplication{T,R}, I::Vararg{Any,R}) where {T,R} = apply(ta.t, ta.o, I...)
 Base.size(ta::LazyTensorMappingApplication) = range_size(ta.t)
 # TODO: What else is needed to implement the AbstractArray interface?
 
@@ -69,8 +67,8 @@
 apply(tmBinOp::LazyTensorMappingBinaryOperation{:+,T,R,D}, v::AbstractArray{T,D}, I::Vararg{Any,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) + apply(tmBinOp.tm2, v, I...)
 apply(tmBinOp::LazyTensorMappingBinaryOperation{:-,T,R,D}, v::AbstractArray{T,D}, I::Vararg{Any,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) - apply(tmBinOp.tm2, v, I...)
-range_size(tmBinOp::LazyTensorMappingBinaryOperation{Op,T,R,D}) where {Op,T,R,D} = range_size(tmBinOp.tm1)
-domain_size(tmBinOp::LazyTensorMappingBinaryOperation{Op,T,R,D}) where {Op,T,R,D} = domain_size(tmBinOp.tm1)
+range_size(tmBinOp::LazyTensorMappingBinaryOperation) = range_size(tmBinOp.tm1)
+domain_size(tmBinOp::LazyTensorMappingBinaryOperation) = domain_size(tmBinOp.tm1)
 
 Base.:+(tm1::TensorMapping{T,R,D}, tm2::TensorMapping{T,R,D}) where {T,R,D} = LazyTensorMappingBinaryOperation{:+,T,R,D}(tm1,tm2)
 Base.:-(tm1::TensorMapping{T,R,D}, tm2::TensorMapping{T,R,D}) where {T,R,D} = LazyTensorMappingBinaryOperation{:-,T,R,D}(tm1,tm2)
 
@@ -94,11 +92,11 @@
 range_size(tm::TensorMappingComposition) = range_size(tm.t1)
 domain_size(tm::TensorMappingComposition) = domain_size(tm.t2)
 
-function apply(c::TensorMappingComposition{T,R,K,D}, v::AbstractArray{T,D}, I::Vararg{S,R} where S) where {T,R,K,D}
+function apply(c::TensorMappingComposition{T,R,K,D}, v::AbstractArray{T,D}, I::Vararg{Any,R}) where {T,R,K,D}
     apply(c.t1, c.t2*v, I...)
 end
 
-function apply_transpose(c::TensorMappingComposition{T,R,K,D}, v::AbstractArray{T,R}, I::Vararg{S,D} where S) where {T,R,K,D}
+function apply_transpose(c::TensorMappingComposition{T,R,K,D}, v::AbstractArray{T,R}, I::Vararg{Any,D}) where {T,R,K,D}
     apply_transpose(c.t2, c.t1'*v, I...)
 end
```
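This file drops type parameters that a method never uses from its `where` clause: a partially specified type such as `LazyTensorMappingApplication{T,R}` still matches every `LazyTensorMappingApplication{T,R,D}`, so dispatch is unchanged while the signatures get shorter. Below is a minimal, self-contained sketch of that pattern; the `Wrapper`/`getfirst` names are made up for illustration and are not part of the repository.

```julia
# Illustrative sketch only: a struct with three type parameters, where a
# method constrains just the parameters it actually uses.
struct Wrapper{T,R,D}
    data::NTuple{R,Int}
end

# Constraining all three parameters works, but D never appears in the body:
#   getfirst(w::Wrapper{T,R,D}) where {T,R,D} = w.data[1]
# Leaving the unused parameter out gives the same dispatch behaviour,
# because Wrapper{T,R,D} <: Wrapper{T,R} for every D.
getfirst(w::Wrapper{T,R}) where {T,R} = w.data[1]

w = Wrapper{Float64,2,3}((10, 20))
@assert getfirst(w) == 10
```

The same reasoning lets `range_size` and `domain_size` take a completely unparameterized argument, as in the second hunk above.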
```diff
--- a/src/SbpOperators/laplace/secondderivative.jl	Fri Nov 27 17:53:26 2020 +0100
+++ b/src/SbpOperators/laplace/secondderivative.jl	Sun Nov 29 12:57:15 2020 +0100
@@ -21,24 +21,23 @@
 LazyTensors.domain_size(D2::SecondDerivative) = D2.size
 
 # Apply for different regions Lower/Interior/Upper or Unknown region
-function LazyTensors.apply(D2::SecondDerivative{T}, v::AbstractVector{T}, I::Index{Lower}) where T
-    return @inbounds D2.h_inv*D2.h_inv*apply_stencil(D2.closureStencils[Int(I)], v, Int(I))
+function LazyTensors.apply(D2::SecondDerivative{T}, v::AbstractVector{T}, i::Index{Lower}) where T
+    return @inbounds D2.h_inv*D2.h_inv*apply_stencil(D2.closureStencils[Int(i)], v, Int(i))
 end
 
-function LazyTensors.apply(D2::SecondDerivative{T}, v::AbstractVector{T}, I::Index{Interior}) where T
-    return @inbounds D2.h_inv*D2.h_inv*apply_stencil(D2.innerStencil, v, Int(I))
+function LazyTensors.apply(D2::SecondDerivative{T}, v::AbstractVector{T}, i::Index{Interior}) where T
+    return @inbounds D2.h_inv*D2.h_inv*apply_stencil(D2.innerStencil, v, Int(i))
 end
 
-function LazyTensors.apply(D2::SecondDerivative{T}, v::AbstractVector{T}, I::Index{Upper}) where T
+function LazyTensors.apply(D2::SecondDerivative{T}, v::AbstractVector{T}, i::Index{Upper}) where T
     N = length(v) # TODO: Use domain_size here instead? N = domain_size(D2,size(v))
-    return @inbounds D2.h_inv*D2.h_inv*apply_stencil_backwards(D2.closureStencils[N-Int(I)+1], v, Int(I))
+    return @inbounds D2.h_inv*D2.h_inv*apply_stencil_backwards(D2.closureStencils[N-Int(i)+1], v, Int(i))
 end
 
 function LazyTensors.apply(D2::SecondDerivative{T}, v::AbstractVector{T}, i) where T
     N = length(v) # TODO: Use domain_size here instead?
     r = getregion(i, closuresize(D2), N)
-    I = Index(i, r)
-    return LazyTensors.apply(D2, v, I)
+    return LazyTensors.apply(D2, v, Index(i, r))
 end
 
 closuresize(D2::SecondDerivative{T,N,M,K}) where {T<:Real,N,M,K} = M
```
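The second file only renames the index argument from `I` to the lowercase `i` (keeping the uppercase `I` free for the `Vararg` index tuples used elsewhere) and constructs the region-tagged `Index` inline instead of binding it to a temporary. The sketch below is a simplified stand-in for the repository's `Index`/`Region` machinery, just to illustrate how dispatching on the region tag selects the interior stencil versus the boundary closures; the field layout and the `describe` helper are assumptions, not the actual definitions.

```julia
# Illustrative sketch only (simplified from the real Index type in this repo):
# an integer index tagged with a region so dispatch can pick the right stencil.
abstract type Region end
struct Lower    <: Region end
struct Interior <: Region end
struct Upper    <: Region end

struct Index{R<:Region}
    i::Int
end
Base.Int(idx::Index) = idx.i

# Boundary closures and the interior become separate methods;
# the lowercase binding `i` mirrors the rename in the diff above.
describe(i::Index{Lower})    = "closure stencil at point $(Int(i))"
describe(i::Index{Interior}) = "interior stencil at point $(Int(i))"
describe(i::Index{Upper})    = "reversed closure stencil at point $(Int(i))"

@assert describe(Index{Interior}(5)) == "interior stencil at point 5"
```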
```diff
--- a/test/testLazyTensors.jl	Fri Nov 27 17:53:26 2020 +0100
+++ b/test/testLazyTensors.jl	Sun Nov 29 12:57:15 2020 +0100
@@ -19,11 +19,11 @@
 @testset "Mapping transpose" begin
     struct DummyMapping{T,R,D} <: TensorMapping{T,R,D} end
 
-    LazyTensors.apply(m::DummyMapping{T,R,D}, v, I::Vararg{Any,R}) where {T,R,D} = :apply
+    LazyTensors.apply(m::DummyMapping{T,R}, v, I::Vararg{Any,R}) where {T,R} = :apply
     LazyTensors.apply_transpose(m::DummyMapping{T,R,D}, v, I::Vararg{Any,D}) where {T,R,D} = :apply_transpose
 
-    LazyTensors.range_size(m::DummyMapping{T,R,D}) where {T,R,D} = :range_size
-    LazyTensors.domain_size(m::DummyMapping{T,R,D}) where {T,R,D} = :domain_size
+    LazyTensors.range_size(m::DummyMapping) = :range_size
+    LazyTensors.domain_size(m::DummyMapping) = :domain_size
 
     m = DummyMapping{Float64,2,3}()
     @test m' isa TensorMapping{Float64, 3,2}
@@ -41,7 +41,7 @@
         domain_size::NTuple{D,Int}
     end
 
-    LazyTensors.apply(m::SizeDoublingMapping{T,R,D}, v, i::Vararg{Any,R}) where {T,R,D} = (:apply,v,i)
+    LazyTensors.apply(m::SizeDoublingMapping{T,R}, v, i::Vararg{Any,R}) where {T,R} = (:apply,v,i)
     LazyTensors.range_size(m::SizeDoublingMapping) = 2 .* m.domain_size
     LazyTensors.domain_size(m::SizeDoublingMapping) = m.domain_size
 
@@ -95,7 +95,7 @@
         domain_size::NTuple{D,Int}
     end
 
-    LazyTensors.apply(m::ScalarMapping{T,R,D}, v, I::Vararg{Any,R}) where {T,R,D} = m.λ*v[I...]
+    LazyTensors.apply(m::ScalarMapping{T,R}, v, I::Vararg{Any,R}) where {T,R} = m.λ*v[I...]
     LazyTensors.range_size(m::ScalarMapping) = m.domain_size
     LazyTensors.domain_size(m::ScalarMapping) = m.range_size
```
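The test doubles are trimmed the same way. The following standalone sketch mirrors the test's `ScalarMapping` but does not load the package, so the `TensorMapping` and `apply` definitions here are simplified placeholders; it only shows how `Vararg{Any,R}` ties the number of index arguments to the range dimension `R`, which is why `T` and `R` are the only parameters the `where` clause still needs.

```julia
# Standalone sketch, not the package's definitions: a scalar-scaling mapping
# whose apply method takes exactly R index arguments.
abstract type TensorMapping{T,R,D} end

struct ScalarMapping{T,R,D} <: TensorMapping{T,R,D}
    λ::T
end

# R is bound both by the mapping argument and by the number of indices passed.
apply(m::ScalarMapping{T,R}, v, I::Vararg{Any,R}) where {T,R} = m.λ * v[I...]

m = ScalarMapping{Float64,2,2}(3.0)
v = ones(2, 2)
@assert apply(m, v, 1, 2) == 3.0   # exactly R = 2 indices are accepted
```

Calling `apply(m, v, 1)` would raise a `MethodError`, since no single `R` can satisfy both the mapping's type and a one-element index tuple.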