Mercurial > repos > public > sbplib_julia
comparison TensorMappings.jl @ 176:24779d423243 boundary_conditions
Attempt to handle TensorMappings acting on expressions of TensorApplications and AbstractArrays
- Draft TensorApplicationExpression
- Add note on how broadcasting could be used for lazy evaluations of expressions
author | Vidar Stiernström <vidar.stiernstrom@it.uu.se> |
---|---|
date | Mon, 17 Jun 2019 09:02:19 +0200 |
parents | 766403c677b1 |
children | 64b9751b3cb2 |
comparison
equal
deleted
inserted
replaced
175:bcd2029c590d | 176:24779d423243 |
---|---|
# The transpose of a transpose is the original mapping.
Base.adjoint(t::TensorMappingTranspose) = t.tm

# Applying the transpose wrapper delegates to the wrapped mapping with the
# roles of apply/apply_transpose swapped.
apply(tm::TensorMappingTranspose{T,R,D}, v::AbstractArray{T,R}, I::Vararg) where {T,R,D} = apply_transpose(tm.tm, v, I...)
apply_transpose(tm::TensorMappingTranspose{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {T,R,D} = apply(tm.tm, v, I...)

# The transpose swaps range and domain, so its range size is the wrapped
# mapping's domain size and vice versa.
# NOTE(fix): the tuple parameters were previously named `domain_size` and
# `range_size`, shadowing the functions being called inside the bodies — the
# calls would have invoked the tuple argument and thrown a MethodError.
range_size(tmt::TensorMappingTranspose{T,R,D}, d_size::NTuple{D,Integer}) where {T,R,D} = domain_size(tmt.tm, d_size)
domain_size(tmt::TensorMappingTranspose{T,R,D}, r_size::NTuple{D,Integer}) where {T,R,D} = range_size(tmt.tm, r_size)
# Lazy application of a TensorMapping to an array operand: behaves as a
# rank-R AbstractArray whose entries are computed on demand via `apply`.
# NOTE(fix): the mapping field was previously typed `TensorMapping{R,D}`,
# dropping the element-type parameter T (parameter slots were misaligned).
# TODO: consider parametrizing the field types (e.g. TM<:TensorMapping{T,R,D})
# so the fields are concretely typed — abstract fields prevent specialization.
struct TensorApplication{T,R,D} <: AbstractArray{T,R}
    t::TensorMapping{T,R,D}  # the mapping being applied
    o::AbstractArray{T,D}    # the operand it is applied to
end
43 | 43 |
# AbstractArray interface: the size of the application is the mapping's range
# size for the operand's size, and indexing evaluates lazily via `apply`.
Base.size(ta::TensorApplication{T,R,D}) where {T,R,D} = range_size(ta.t, size(ta.o))
Base.getindex(ta::TensorApplication{T,R,D}, I::Vararg) where {T,R,D} = apply(ta.t, ta.o, I...)
# TODO: What else is needed to implement the AbstractArray interface?

import Base.*

# Lazy application operator. Associativity a→b→c = a→(b→c) matches the
# mathematical reading, which is why '→' was chosen.
→(tm::TensorMapping{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = TensorApplication(tm, o)
# Should we overload some other infix binary operator?
# We need the associativity to be a→b→c = a→(b→c), which is the case for '→'

# Right-fold so a chain tm1*tm2*v groups as tm1*(tm2*v).
# NOTE(review): when every vararg is an AbstractArray this method pirates
# Base's `*` on arrays — consider requiring at least one TensorMapping.
*(args::Union{TensorMapping{T}, AbstractArray{T}}...) where T = foldr(*, args)
*(tm::TensorMapping{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = TensorApplication(tm, o)
# NOTE(fix): `scalar` was previously unconstrained, so any left operand
# (including a TensorMapping) would dispatch here, and the body returned
# `scalar*ta.o`, silently discarding the mapping. Constrain to Number and
# scale the operand instead (assumes ta.t is linear — TODO confirm).
*(scalar::Number, ta::TensorApplication{T,R,D}) where {T,R,D} = TensorApplication(ta.t, scalar*ta.o)
*(ta::TensorApplication{T,R,D}, scalar::Number) where {T,R,D} = scalar*ta
# We need to be really careful about good error messages.
# For example what happens if you try to multiply TensorApplication with a TensorMapping(wrong order)?
57 | 57 |
# NOTE: TensorApplicationExpressions attempt to handle the situation when a TensorMapping
# acts on a TensorApplication +- AbstractArray, such that the expression still can be
# evaluated lazily per index.
# TODO: Better naming of both struct and members
# Since this is a lower layer which shouldn't be exposed, my opinion is that
# we can afford to be quite verbose.
# NOTE(fix): the field was previously typed `TensorApplication{R,D}` (dropping
# T), and the addend was rank D — but `getindex` indexes both fields with the
# same indices, so the addend must have the same rank R as `ta`.
struct TensorApplicationExpression{T,R,D} <: AbstractArray{T,R}
    ta::TensorApplication{T,R,D}  # lazy mapping application (rank R)
    o::AbstractArray{T,R}         # addend, indexed alongside ta
end
Base.size(tae::TensorApplicationExpression) = size(tae.ta) # TODO: Not sure how to handle this
# NOTE(fix): previously `apply(tae.ta, ta.o, I...) + o[I...]` — `ta` and `o`
# were undefined names, and `tae.ta` is an array-like object, not a mapping.
# Index both fields lazily instead.
Base.getindex(tae::TensorApplicationExpression, I::Vararg) = tae.ta[I...] + tae.o[I...]
import Base.+
import Base.-
+(ta::TensorApplication{T,R,D}, o::AbstractArray{T,R}) where {T,R,D} = TensorApplicationExpression(ta, o)
+(o::AbstractArray{T,R}, ta::TensorApplication{T,R,D}) where {T,R,D} = ta + o
-(ta::TensorApplication{T,R,D}, o::AbstractArray{T,R}) where {T,R,D} = ta + -o
# NOTE: `-ta` here falls back to Base's eager elementwise negation of an
# AbstractArray, so this case loses laziness — TODO confirm acceptable.
-(o::AbstractArray{T,R}, ta::TensorApplication{T,R,D}) where {T,R,D} = -ta + o
58 | 76 |
77 # NOTE: Another (quite neat) way to handle lazy evaluation of | |
78 # TensorApplication + AbstractArray is by using broadcasting. | |
79 # However, with the drafted implementation below a | |
80 # TensorApplication+AbstractArray now results in a generic function and we would | |
81 # then need to define TensorMapping*generic function which does not seem like a | |
82 # good idea. | |
83 # NOTE: Could one use MappedArrays.jl instead? | |
84 # | |
85 # # Lazy evaluations of expressions on TensorApplications | |
86 # # TODO: Need to decide on some good naming here. | |
87 # +(ta::TensorApplication,o::AbstractArray) = I -> ta[I] + o[I] | |
88 # +(o::AbstractArray,ta::TensorApplication) = ta+o | |
89 # *(scalar::Number,ta::TensorApplication) = I -> scalar*ta[I] | |
90 # *(ta::TensorApplication,scalar::Number) = scalar*ta | |
91 # -(ta::TensorApplication,o::AbstractArray) = ta + -o | |
92 # -(o::AbstractArray, ta::TensorApplication) = -ta + o |
59 | 93 |
# Composition s∘t: t is applied first, then s. K is the intermediate rank.
# NOTE(fix): the new-revision clause `where K<:typeof(R)` on the supertype was
# invalid Julia — the parameters R, K, D are integer ranks, not types, and a
# `where` clause cannot appear in a struct's supertype position. Removed.
struct TensorMappingComposition{T,R,K,D} <: TensorMapping{T,R,D}
    t1::TensorMapping{T,R,K}  # outer mapping (applied second)
    t2::TensorMapping{T,K,D}  # inner mapping (applied first)
end

import Base.∘
∘(s::TensorMapping{T,R,K}, t::TensorMapping{T,K,D}) where {T,R,K,D} = TensorMappingComposition(s, t)
67 | 101 |
# Size maps for a composition c = t1∘t2 (t2 applied first).
# NOTE(fix): the tuple parameters were previously named `domain_size` and
# `range_size`, shadowing the functions called in the bodies (runtime error).
# The composition order was also wrong: the range size of c from its domain
# size goes through t2 then t1 in the range direction, and the domain size of
# c from its range size goes through t1 then t2 in the domain direction.
function range_size(tm::TensorMappingComposition{T,R,K,D}, d_size::NTuple{D,Integer}) where {T,R,K,D}
    return range_size(tm.t1, range_size(tm.t2, d_size))
end

function domain_size(tm::TensorMappingComposition{T,R,K,D}, r_size::NTuple{R,Integer}) where {T,R,K,D}
    return domain_size(tm.t2, domain_size(tm.t1, r_size))
end
75 | 109 |
76 function apply(c::TensorMappingComposition{T,R,K,D}, v::AbstractArray{T,D}, I::Vararg) where {T,R,K,D} | 110 function apply(c::TensorMappingComposition{T,R,K,D}, v::AbstractArray{T,D}, I::Vararg) where {T,R,K,D} |
77 apply(c.t1, TensorApplication(c.t2,v), I...) | 111 apply(c.t1, TensorApplication(c.t2,v), I...) |