Mercurial > repos > public > sbplib_julia
comparison LazyTensors/src/lazy_operations.jl @ 237:1c6afdcfd657 boundary_conditions
Regretsies on the CartesianIndex stuff. Use Vararg instead
author | Jonatan Werpers <jonatan@werpers.com> |
---|---|
date | Wed, 26 Jun 2019 19:51:36 +0200 |
parents | 856caf960d89 |
children | d4cd4882ee9f |
comparison
equal
deleted
inserted
replaced
236:856caf960d89 | 237:1c6afdcfd657 |
---|---|
25 end | 25 end |
26 export LazyTensorMappingApplication | 26 export LazyTensorMappingApplication |
27 | 27 |
28 Base.:*(tm::TensorMapping{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = LazyTensorMappingApplication(tm,o) | 28 Base.:*(tm::TensorMapping{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = LazyTensorMappingApplication(tm,o) |
29 | 29 |
30 Base.getindex(ta::LazyTensorMappingApplication{T,R,D}, I::Vararg) where {T,R,D} = apply(ta.t, ta.o, I...) | 30 Base.getindex(ta::LazyTensorMappingApplication{T,R,D}, I::Vararg{Int,R}) where {T,R,D} = apply(ta.t, ta.o, I) |
31 Base.size(ta::LazyTensorMappingApplication{T,R,D}) where {T,R,D} = range_size(ta.t,size(ta.o)) | 31 Base.size(ta::LazyTensorMappingApplication{T,R,D}) where {T,R,D} = range_size(ta.t,size(ta.o)) |
32 # TODO: What else is needed to implement the AbstractArray interface? | 32 # TODO: What else is needed to implement the AbstractArray interface? |
33 | 33 |
34 # # We need the associativity to be a→b→c = a→(b→c), which is the case for '→' | 34 # # We need the associativity to be a→b→c = a→(b→c), which is the case for '→' |
35 Base.:*(args::Union{TensorMapping{T}, AbstractArray{T}}...) where T = foldr(*,args) | 35 Base.:*(args::Union{TensorMapping{T}, AbstractArray{T}}...) where T = foldr(*,args) |
67 # TODO: Make sure boundschecking is done properly and that the lengths of the vectors are equal | 67 # TODO: Make sure boundschecking is done properly and that the lengths of the vectors are equal |
68 # NOTE: Boundschecking in getindex functions now assumes that the | 68 # NOTE: Boundschecking in getindex functions now assumes that the |
69 # vectors in the LazyElementwiseOperation are the same size. If we remove the | 69 # vectors in the LazyElementwiseOperation are the same size. If we remove the |
70 # size assertion in the constructor we might have to handle | 70 # size assertion in the constructor we might have to handle |
71 # boundschecking differently. | 71 # boundschecking differently. |
72 Base.@propagate_inbounds @inline function Base.getindex(leo::LazyElementwiseOperation{T,D,:+}, I...) where {T,D} | 72 Base.@propagate_inbounds @inline function Base.getindex(leo::LazyElementwiseOperation{T,D,:+}, I::Vararg{Int,D}) where {T,D} |
73 @boundscheck if !checkbounds(Bool,leo.a,I...) | 73 @boundscheck if !checkbounds(Bool,leo.a,I...) |
74 throw(BoundsError([leo],[I...])) | 74 throw(BoundsError([leo],I...)) |
75 end | 75 end |
76 return leo.a[I...] + leo.b[I...] | 76 return leo.a[I...] + leo.b[I...] |
77 end | 77 end |
78 Base.@propagate_inbounds @inline function Base.getindex(leo::LazyElementwiseOperation{T,D,:-}, I...) where {T,D} | 78 Base.@propagate_inbounds @inline function Base.getindex(leo::LazyElementwiseOperation{T,D,:-}, I::Vararg{Int,D}) where {T,D} |
79 @boundscheck if !checkbounds(Bool,leo.a,I...) | 79 @boundscheck if !checkbounds(Bool,leo.a,I...) |
80 throw(BoundsError([leo],[I...])) | 80 throw(BoundsError([leo],I...)) |
81 end | 81 end |
82 return leo.a[I...] - leo.b[I...] | 82 return leo.a[I...] - leo.b[I...] |
83 end | 83 end |
84 Base.@propagate_inbounds @inline function Base.getindex(leo::LazyElementwiseOperation{T,D,:*}, I...) where {T,D} | 84 Base.@propagate_inbounds @inline function Base.getindex(leo::LazyElementwiseOperation{T,D,:*}, I::Vararg{Int,D}) where {T,D} |
85 @boundscheck if !checkbounds(Bool,leo.a,I...) | 85 @boundscheck if !checkbounds(Bool,leo.a,I...) |
86 throw(BoundsError([leo],[I...])) | 86 throw(BoundsError([leo],I...)) |
87 end | 87 end |
88 return leo.a[I...] * leo.b[I...] | 88 return leo.a[I...] * leo.b[I...] |
89 end | 89 end |
90 Base.@propagate_inbounds @inline function Base.getindex(leo::LazyElementwiseOperation{T,D,:/}, I...) where {T,D} | 90 Base.@propagate_inbounds @inline function Base.getindex(leo::LazyElementwiseOperation{T,D,:/}, I::Vararg{Int,D}) where {T,D} |
91 @boundscheck if !checkbounds(Bool,leo.a,I...) | 91 @boundscheck if !checkbounds(Bool,leo.a,I...) |
92 throw(BoundsError([leo],[I...])) | 92 throw(BoundsError([leo],I...)) |
93 end | 93 end |
94 return leo.a[I...] / leo.b[I...] | 94 return leo.a[I...] / leo.b[I...] |
95 end | 95 end |
96 | 96 |
97 # Define lazy operations for AbstractArrays. Operations constructs a LazyElementwiseOperation which | 97 # Define lazy operations for AbstractArrays. Operations constructs a LazyElementwiseOperation which |
134 | 134 |
135 # # TBD: Should this be implemented on a type by type basis or through a trait to provide earlier errors? | 135 # # TBD: Should this be implemented on a type by type basis or through a trait to provide earlier errors? |
136 Base.adjoint(t::TensorMapping) = LazyTensorMappingTranspose(t) | 136 Base.adjoint(t::TensorMapping) = LazyTensorMappingTranspose(t) |
137 Base.adjoint(t::LazyTensorMappingTranspose) = t.tm | 137 Base.adjoint(t::LazyTensorMappingTranspose) = t.tm |
138 | 138 |
139 apply(tm::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{T,R}, I::CartesianIndex{D}) where {T,R,D} = apply_transpose(tm.tm, v, I) | 139 apply(tm::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{T,R}, I::NTuple{D,Int}) where {T,R,D} = apply_transpose(tm.tm, v, I) |
140 apply_transpose(tm::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{T,D}, I::CartesianIndex{R}) where {T,R,D} = apply(tm.tm, v, I) | 140 apply_transpose(tm::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{T,D}, I::NTuple{R,Int}) where {T,R,D} = apply(tm.tm, v, I) |
141 | 141 |
142 range_size(tmt::LazyTensorMappingTranspose{T,R,D}, d_size::NTuple{R,Integer}) where {T,R,D} = domain_size(tmt.tm, d_size) | 142 range_size(tmt::LazyTensorMappingTranspose{T,R,D}, d_size::NTuple{R,Integer}) where {T,R,D} = domain_size(tmt.tm, d_size) |
143 domain_size(tmt::LazyTensorMappingTranspose{T,R,D}, r_size::NTuple{D,Integer}) where {T,R,D} = range_size(tmt.tm, r_size) | 143 domain_size(tmt::LazyTensorMappingTranspose{T,R,D}, r_size::NTuple{D,Integer}) where {T,R,D} = range_size(tmt.tm, r_size) |
144 | 144 |
145 | 145 |
152 @inline function LazyTensorMappingBinaryOperation{Op,T,R,D}(A::T1,B::T2) where {Op,T,R,D, T1<:TensorMapping{T,R,D},T2<:TensorMapping{T,R,D}} | 152 @inline function LazyTensorMappingBinaryOperation{Op,T,R,D}(A::T1,B::T2) where {Op,T,R,D, T1<:TensorMapping{T,R,D},T2<:TensorMapping{T,R,D}} |
153 return new{Op,T,R,D,T1,T2}(A,B) | 153 return new{Op,T,R,D,T1,T2}(A,B) |
154 end | 154 end |
155 end | 155 end |
156 | 156 |
157 apply(mb::LazyTensorMappingBinaryOperation{:+,T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {T,R,D} = apply(mb.A, v, I...) + apply(mb.B,v,I...) | 157 apply(mb::LazyTensorMappingBinaryOperation{:+,T,R,D}, v::AbstractArray{T,D}, I::NTuple{R,Int}) where {T,R,D} = apply(mb.A, v, I...) + apply(mb.B,v,I...) |
158 apply(mb::LazyTensorMappingBinaryOperation{:-,T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {T,R,D} = apply(mb.A, v, I...) - apply(mb.B,v,I...) | 158 apply(mb::LazyTensorMappingBinaryOperation{:-,T,R,D}, v::AbstractArray{T,D}, I::NTuple{R,Int}) where {T,R,D} = apply(mb.A, v, I...) - apply(mb.B,v,I...) |
159 | 159 |
160 range_size(mp::LazyTensorMappingBinaryOperation{Op,T,R,D}, domain_size::NTuple{D,Integer}) where {Op,T,R,D} = range_size(mp.A, domain_size) | 160 range_size(mp::LazyTensorMappingBinaryOperation{Op,T,R,D}, domain_size::NTuple{D,Integer}) where {Op,T,R,D} = range_size(mp.A, domain_size) |
161 domain_size(mp::LazyTensorMappingBinaryOperation{Op,T,R,D}, range_size::NTuple{R,Integer}) where {Op,T,R,D} = domain_size(mp.A, range_size) | 161 domain_size(mp::LazyTensorMappingBinaryOperation{Op,T,R,D}, range_size::NTuple{R,Integer}) where {Op,T,R,D} = domain_size(mp.A, range_size) |
162 | 162 |
163 Base.:+(A::TensorMapping{T,R,D}, B::TensorMapping{T,R,D}) where {T,R,D} = LazyTensorMappingBinaryOperation{:+,T,R,D}(A,B) | 163 Base.:+(A::TensorMapping{T,R,D}, B::TensorMapping{T,R,D}) where {T,R,D} = LazyTensorMappingBinaryOperation{:+,T,R,D}(A,B) |
178 | 178 |
179 # function domain_size(tm::LazyTensorMappingComposition{T,R,K,D}, range_size::NTuple{R,Integer}) where {T,R,K,D} | 179 # function domain_size(tm::LazyTensorMappingComposition{T,R,K,D}, range_size::NTuple{R,Integer}) where {T,R,K,D} |
180 # domain_size(tm.t1, domain_size(tm.t2, range_size)) | 180 # domain_size(tm.t1, domain_size(tm.t2, range_size)) |
181 # end | 181 # end |
182 | 182 |
183 # function apply(c::LazyTensorMappingComposition{T,R,K,D}, v::AbstractArray{T,D}, I::Vararg) where {T,R,K,D} | 183 # function apply(c::LazyTensorMappingComposition{T,R,K,D}, v::AbstractArray{T,D}, I::NTuple{R,Int}) where {T,R,K,D} |
184 # apply(c.t1, LazyTensorMappingApplication(c.t2,v), I...) | 184 # apply(c.t1, LazyTensorMappingApplication(c.t2,v), I...) |
185 # end | 185 # end |
186 | 186 |
187 # function apply_transpose(c::LazyTensorMappingComposition{T,R,K,D}, v::AbstractArray{T,D}, I::Vararg) where {T,R,K,D} | 187 # function apply_transpose(c::LazyTensorMappingComposition{T,R,K,D}, v::AbstractArray{T,D}, I::NTuple{D,Int}) where {T,R,K,D} |
188 # apply_transpose(c.t2, LazyTensorMappingApplication(c.t1',v), I...) | 188 # apply_transpose(c.t2, LazyTensorMappingApplication(c.t1',v), I...) |
189 # end | 189 # end |
190 | 190 |
191 # # Have i gone too crazy with the type parameters? Maybe they aren't all needed? | 191 # # Have i gone too crazy with the type parameters? Maybe they aren't all needed? |
192 | 192 |