comparison TensorMappings.jl @ 160:d33b13d2d92b boundary_conditions

Move things around in TensorMappings and improve the comments
author Jonatan Werpers <jonatan@werpers.com>
date Fri, 10 May 2019 16:08:31 +0200
parents b790082032da
children ea01b5550ff6
comparison
equal deleted inserted replaced
159:b790082032da 160:d33b13d2d92b
1 module TensorMappings 1 module TensorMappings
2 # Needs a better name ImplicitTensorMappings? Get rid of "Tensor" in the name_ 2 # Needs a better name ImplicitTensorMappings? Get rid of "Tensor" in the name_
3 3
4 abstract type TensorMapping{T,R,D} end 4 abstract type TensorMapping{T,R,D} end
5 abstract type TensorOperator{T,D} <: TensorMapping{T,D,D} end # Does this help?
6 5
7 range_dim(::TensorMapping{T,R,D}) where {T,R,D} = R 6 range_dim(::TensorMapping{T,R,D}) where {T,R,D} = R
8 domain_dim(::TensorMapping{T,R,D}) where {T,R,D} = D 7 domain_dim(::TensorMapping{T,R,D}) where {T,R,D} = D
8 # range_size(::TensorMapping{T,R,D}, domain_size::NTuple{D,Integer}) where {T,R,D}
9 # domain_size(::TensorMapping{T,R,D}, range_size::NTuple{R,Integer}) where {T,R,D}
9 10
10 range_size(::TensorOperator{T,D}, domain_size::NTuple{D,Integer}) where {T,D} = domain_size
11 domain_size(::TensorOperator{T,D}, range_size::NTuple{D,Integer}) where {T,D} = range_size
12 # More precise domain_size/range_size type?
13
14 # Should be implemented by a TensorMapping
15 # ========================================
16 # apply(t::TensorMapping{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {R,D,T} = 11 # apply(t::TensorMapping{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {R,D,T} =
17 # apply_transpose(t::TensorMapping{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {R,D,T} = 12 # apply_transpose(t::TensorMapping{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {R,D,T} =
18 # Does it make sense that apply should work for any size of v? And the application adapts? 13 # Implementing apply_transpose and domain_size is only needed if you want to take transposes of the TensorMapping.
19 # Think about boundschecking! 14 # TODO: Think about boundschecking!
20 15
21 # range_size(::TensorMapping{T,R,D}, domain_size::NTuple{D,Integer}) where {T,R,D} = 16 abstract type TensorOperator{T,D} <: TensorMapping{T,D,D} end
22 # More precise domain_size type? 17 domain_size(::TensorOperator{T,D}, range_size::NTuple{D,Integer}) where {T,D} = range_size
23 # domain_size(::TensorMapping{T,R,D}, range_size::NTuple{R,Integer}) where {T,R,D} = 18 range_size(::TensorOperator{T,D}, domain_size::NTuple{D,Integer}) where {T,D} = domain_size
24 19
25 # Implementing apply_transpose and domain_size is only needed if you want to take transposes of the TensorMapping.
26
27 # What does a TensorMapping apply() to?
28 # =====================================
29 # Is it too strict that TensorMappings apply to AbstractArrays? Maybe we don't need
30 # to know the operand's size. That could simplify the handling of the range_size...
31 # It would just fail if apply does something out of bounds..
32 # No i think knowing the size is a requirement. The TensorMapping must be able to do
33 # different things for different indices based, for example, on how close to the boundary we are.
34 20
35 21
36 # Allow using the ' operator: 22 # Allow using the ' operator:
37 struct TensorMappingTranspose{T,R,D} <: TensorMapping{T,D,R} 23 struct TensorMappingTranspose{T,R,D} <: TensorMapping{T,D,R}
38 tm::TensorMapping{T,R,D} 24 tm::TensorMapping{T,R,D}
39 end 25 end
40 26
41 Base.adjoint(t::TensorMapping) = TensorMappingTranspose(t) # Maybe this should be implemented on a type by type basis or through a trait to provide earlier errors. 27 Base.adjoint(t::TensorMapping) = TensorMappingTranspose(t)
28 # TBD: Should this be implemented on a type by type basis or through a trait to provide earlier errors?
42 Base.adjoint(t::TensorMappingTranspose) = t.tm 29 Base.adjoint(t::TensorMappingTranspose) = t.tm
43 30
44 apply(tm::TensorMappingTranspose{T,R,D}, v::AbstractArray{T,R}, I::Vararg) where {T,R,D} = apply_transpose(tm.tm, v, I...) 31 apply(tm::TensorMappingTranspose{T,R,D}, v::AbstractArray{T,R}, I::Vararg) where {T,R,D} = apply_transpose(tm.tm, v, I...)
45 apply_transpose(tm::TensorMappingTranspose{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {T,R,D} = apply(tm.tm, v, I...) 32 apply_transpose(tm::TensorMappingTranspose{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {T,R,D} = apply(tm.tm, v, I...)
46 33
52 t::TensorMapping{R,D} 39 t::TensorMapping{R,D}
53 o::AbstractArray{T,D} 40 o::AbstractArray{T,D}
54 end 41 end
55 42
56 Base.size(ta::TensorApplication) = range_size(ta.t,size(ta.o)) 43 Base.size(ta::TensorApplication) = range_size(ta.t,size(ta.o))
57 ## What else is needed so that we have a proper AbstractArray?
58
59 Base.getindex(tm::TensorApplication, I::Vararg) = apply(tm.t, tm.o, I...) 44 Base.getindex(tm::TensorApplication, I::Vararg) = apply(tm.t, tm.o, I...)
45 # TODO: What else is needed to implement the AbstractArray interface?
60 46
61 →(t::TensorMapping{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = TensorApplication(t,o) 47 →(t::TensorMapping{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = TensorApplication(t,o)
62 # Should we overload some other infix binary operator? 48 # Should we overload some other infix binary operator?
63 # * has the wrong parsing properties... a*b*c is parsed to (a*b)*c (through a*b*c = *(a,b,c)) 49 # We need the associativity to be a→b→c = a→(b→c), which is the case for '→'
64 # while a→b→c is parsed as a→(b→c)
65 # The associativity of the operators might be fixed somehow... (rfold/lfold?)
66 # ∘ also is an option but that has the same problem as * (but is not n-ary) (or is this best used for composition of Mappings?)
67 50
68 # If we want to use * it would be something like this:
69 import Base.* 51 import Base.*
70 *(args::Union{TensorMapping{T}, AbstractArray{T}}...) where T = foldr(*,args) 52 *(args::Union{TensorMapping{T}, AbstractArray{T}}...) where T = foldr(*,args)
71 *(t::TensorMapping{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = TensorApplication(t,o) 53 *(t::TensorMapping{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = TensorApplication(t,o)
72 # We need to be really careful about good error messages. 54 # We need to be really careful about good error messages.
73 # For example what happens if you try to multiply TensorApplication with a TensorMapping(wrong order)? 55 # For example what happens if you try to multiply TensorApplication with a TensorMapping(wrong order)?