sbplib_julia — changeset 317:75c61d927153 (merge)

author:    Jonatan Werpers <jonatan@werpers.com>
date:      Wed, 09 Sep 2020 22:00:13 +0200
parents:   d244f2e5f822 (current diff), 7a7d9daa9eb7 (diff)
children:  0c8d4a734c4f
files:     Manifest.toml
diffstat:  13 files changed, 626 insertions(+), 235 deletions(-)
--- a/LazyTensors/src/lazy_tensor_operations.jl	Wed Sep 09 21:06:59 2020 +0200
+++ b/LazyTensors/src/lazy_tensor_operations.jl	Wed Sep 09 22:00:13 2020 +0200
@@ -14,8 +14,8 @@
 export LazyTensorMappingApplication
 
 Base.:*(tm::TensorMapping{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = LazyTensorMappingApplication(tm,o)
-Base.getindex(ta::LazyTensorMappingApplication{T,R,D}, I::Vararg{Index,R}) where {T,R,D} = apply(ta.t, ta.o, I)
-Base.getindex(ta::LazyTensorMappingApplication{T,R,D}, I::Vararg{Int,R}) where {T,R,D} = apply(ta.t, ta.o, Index{Unknown}.(I))
+Base.getindex(ta::LazyTensorMappingApplication{T,R,D}, I::Vararg{Index,R}) where {T,R,D} = apply(ta.t, ta.o, I...)
+Base.getindex(ta::LazyTensorMappingApplication{T,R,D}, I::Vararg{Int,R}) where {T,R,D} = apply(ta.t, ta.o, Index{Unknown}.(I)...)
 Base.size(ta::LazyTensorMappingApplication{T,R,D}) where {T,R,D} = range_size(ta.t,size(ta.o))
 # TODO: What else is needed to implement the AbstractArray interface?
 
@@ -44,8 +44,8 @@
 Base.adjoint(tm::TensorMapping) = LazyTensorMappingTranspose(tm)
 Base.adjoint(tmt::LazyTensorMappingTranspose) = tmt.tm
 
-apply(tmt::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{T,R}, I::NTuple{D,Index}) where {T,R,D} = apply_transpose(tmt.tm, v, I)
-apply_transpose(tmt::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{T,D}, I::NTuple{R,Index}) where {T,R,D} = apply(tmt.tm, v, I)
+apply(tmt::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{T,R}, I::Vararg{Index,D}) where {T,R,D} = apply_transpose(tmt.tm, v, I...)
+apply_transpose(tmt::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{T,D}, I::Vararg{Index,R}) where {T,R,D} = apply(tmt.tm, v, I...)
 
 range_size(tmt::LazyTensorMappingTranspose{T,R,D}, d_size::NTuple{R,Integer}) where {T,R,D} = domain_size(tmt.tm, d_size)
 domain_size(tmt::LazyTensorMappingTranspose{T,R,D}, r_size::NTuple{D,Integer}) where {T,R,D} = range_size(tmt.tm, r_size)
@@ -62,8 +62,8 @@
     end
 end
 
-apply(tmBinOp::LazyTensorMappingBinaryOperation{:+,T,R,D}, v::AbstractArray{T,D}, I::NTuple{R,Index}) where {T,R,D} = apply(tmBinOp.tm1, v, I) + apply(tmBinOp.tm2, v, I)
-apply(tmBinOp::LazyTensorMappingBinaryOperation{:-,T,R,D}, v::AbstractArray{T,D}, I::NTuple{R,Index}) where {T,R,D} = apply(tmBinOp.tm1, v, I) - apply(tmBinOp.tm2, v, I)
+apply(tmBinOp::LazyTensorMappingBinaryOperation{:+,T,R,D}, v::AbstractArray{T,D}, I::Vararg{Index,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) + apply(tmBinOp.tm2, v, I...)
+apply(tmBinOp::LazyTensorMappingBinaryOperation{:-,T,R,D}, v::AbstractArray{T,D}, I::Vararg{Index,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) - apply(tmBinOp.tm2, v, I...)
 
 range_size(tmBinOp::LazyTensorMappingBinaryOperation{Op,T,R,D}, domain_size::NTuple{D,Integer}) where {Op,T,R,D} = range_size(tmBinOp.tm1, domain_size)
 domain_size(tmBinOp::LazyTensorMappingBinaryOperation{Op,T,R,D}, range_size::NTuple{R,Integer}) where {Op,T,R,D} = domain_size(tmBinOp.tm2, range_size)
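The change above switches the index argument of `apply`/`getindex` from a single tuple (`I::NTuple{R,Index}`) to a splatted vararg (`I::Vararg{Index,R}`), so call sites now splat with `I...`. A minimal standalone sketch of the difference, using a made-up `ScalingMap` rather than the actual LazyTensors types:

# Simplified stand-ins, not the LazyTensors definitions.
struct ScalingMap
    a::Float64
end

# Old style: the index arrives as one tuple argument.
apply_tuple(m::ScalingMap, v::AbstractArray, I::NTuple{N,Int}) where N = m.a * v[I...]

# New style: each index is a separate positional argument, so callers splat with `I...`
# and 1D call sites can pass a single index without wrapping it in a 1-tuple.
apply_vararg(m::ScalingMap, v::AbstractArray, I::Vararg{Int,N}) where N = m.a * v[I...]

v = reshape(collect(1.0:6.0), 2, 3)
m = ScalingMap(2.0)
I = (2, 3)
@assert apply_tuple(m, v, I) == apply_vararg(m, v, I...) == 12.0

With the vararg form a one-dimensional mapping can dispatch directly on a single `Index` argument instead of unpacking a 1-tuple, which is what the secondderivative.jl changes later in this changeset rely on.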
--- a/Manifest.toml	Wed Sep 09 21:06:59 2020 +0200
+++ b/Manifest.toml	Wed Sep 09 22:00:13 2020 +0200
@@ -51,7 +51,7 @@
 version = "0.1.0"
 
 [[SbpOperators]]
-deps = ["RegionIndices"]
+deps = ["LazyTensors", "RegionIndices"]
 path = "SbpOperators"
 uuid = "204357d8-97fd-11e9-05e7-010897a14cd0"
 version = "0.1.0"
--- a/SbpOperators/Manifest.toml	Wed Sep 09 21:06:59 2020 +0200
+++ b/SbpOperators/Manifest.toml	Wed Sep 09 22:00:13 2020 +0200
@@ -1,5 +1,11 @@
 # This file is machine-generated - editing it directly is not advised
 
+[[LazyTensors]]
+deps = ["RegionIndices"]
+path = "../LazyTensors"
+uuid = "62fbed2c-918d-11e9-279b-eb3a325b37d3"
+version = "0.1.0"
+
 [[RegionIndices]]
 path = "../RegionIndices"
 uuid = "5d527584-97f1-11e9-084c-4540c7ecf219"
--- a/SbpOperators/Project.toml	Wed Sep 09 21:06:59 2020 +0200
+++ b/SbpOperators/Project.toml	Wed Sep 09 22:00:13 2020 +0200
@@ -4,10 +4,12 @@
 version = "0.1.0"
 
 [deps]
+LazyTensors = "62fbed2c-918d-11e9-279b-eb3a325b37d3"
 RegionIndices = "5d527584-97f1-11e9-084c-4540c7ecf219"
 
 [extras]
+Grids = "960fdf28-97ed-11e9-2a74-bd90bf2fab5a"
 Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
 
 [targets]
-test = ["Test"]
+test = ["Test", "Grids"]
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/SbpOperators/src/BoundaryValue.jl Wed Sep 09 22:00:13 2020 +0200 @@ -0,0 +1,127 @@ +""" + BoundaryValue{T,N,M,K} <: TensorMapping{T,2,1} + +Implements the boundary operator `e` as a TensorMapping +""" +struct BoundaryValue{T,N,M,K} <: TensorMapping{T,2,1} + eClosure::Stencil{T,M} + bId::CartesianBoundary +end +export BoundaryValue + +# TODO: This is obviouly strange. Is domain_size just discarded? Is there a way to avoid storing grid in BoundaryValue? +# Can we give special treatment to TensorMappings that go to a higher dim? +function LazyTensors.range_size(e::BoundaryValue{T}, domain_size::NTuple{1,Integer}) where T + if dim(e.bId) == 1 + return (UnknownDim, domain_size[1]) + elseif dim(e.bId) == 2 + return (domain_size[1], UnknownDim) + end +end +LazyTensors.domain_size(e::BoundaryValue{T}, range_size::NTuple{2,Integer}) where T = (range_size[3-dim(e.bId)],) +# TODO: Make a nicer solution for 3-dim(e.bId) + +# TODO: Make this independent of dimension +function LazyTensors.apply(e::BoundaryValue{T}, v::AbstractArray{T}, I::NTuple{2,Index}) where T + i = I[dim(e.bId)] + j = I[3-dim(e.bId)] + N_i = size(e.grid)[dim(e.bId)] + return apply_boundary_value(e.op, v[j], i, N_i, region(e.bId)) +end + +function LazyTensors.apply_transpose(e::BoundaryValue{T}, v::AbstractArray{T}, I::NTuple{1,Index}) where T + u = selectdim(v,3-dim(e.bId),Int(I[1])) + return apply_boundary_value_transpose(e.op, u, region(e.bId)) +end + +function apply_boundary_value_transpose(op::ConstantStencilOperator, v::AbstractVector, ::Type{Lower}) + @boundscheck if length(v) < closuresize(op) + throw(BoundsError()) + end + apply_stencil(op.eClosure,v,1) +end + +function apply_boundary_value_transpose(op::ConstantStencilOperator, v::AbstractVector, ::Type{Upper}) + @boundscheck if length(v) < closuresize(op) + throw(BoundsError()) + end + apply_stencil_backwards(op.eClosure,v,length(v)) +end +export apply_boundary_value_transpose + +function apply_boundary_value(op::ConstantStencilOperator, v::Number, i::Index, N::Integer, ::Type{Lower}) + @boundscheck if !(0<length(Int(i)) <= N) + throw(BoundsError()) + end + op.eClosure[Int(i)-1]*v +end + +function apply_boundary_value(op::ConstantStencilOperator, v::Number, i::Index, N::Integer, ::Type{Upper}) + @boundscheck if !(0<length(Int(i)) <= N) + throw(BoundsError()) + end + op.eClosure[N-Int(i)]*v +end +export apply_boundary_value + + +""" + BoundaryValue{T,N,M,K} <: TensorMapping{T,2,1} + +Implements the boundary operator `e` as a TensorMapping +""" +struct BoundaryValue{D,T,M,R} <: TensorMapping{T,D,1} + e:BoundaryOperator{T,M,R} + bId::CartesianBoundary +end + +function LazyTensors.apply_transpose(bv::BoundaryValue{T,M,Lower}, v::AbstractVector{T}, i::Index) where T + u = selectdim(v,3-dim(bv.bId),Int(I[1])) + return apply_transpose(bv.e, u, I) +end + + +""" + BoundaryOperator{T,N,R} <: TensorMapping{T,1,1} + +Implements the boundary operator `e` as a TensorMapping +""" +export BoundaryOperator +struct BoundaryOperator{T,M,R<:Region} <: TensorMapping{T,1,1} + closure::Stencil{T,M} +end + +function LazyTensors.range_size(e::BoundaryOperator, domain_size::NTuple{1,Integer}) + return UnknownDim +end + +LazyTensors.domain_size(e::BoundaryOperator{T}, range_size::NTuple{1,Integer}) where T = range_size + +function LazyTensors.apply_transpose(e::BoundaryOperator{T,M,Lower}, v::AbstractVector{T}, i::Index{Lower}) where T + @boundscheck if length(v) < closuresize(e) #TODO: Use domain_size here? 
+ throw(BoundsError()) + end + apply_stencil(e.closure,v,Int(i)) +end + +function LazyTensors.apply_transpose(e::BoundaryOperator{T,M,Upper}}, v::AbstractVector{T}, i::Index{Upper}) where T + @boundscheck if length(v) < closuresize(e) #TODO: Use domain_size here? + throw(BoundsError()) + end + apply_stencil_backwards(e.closure,v,Int(i)) +end + +function LazyTensors.apply_transpose(e::BoundaryOperator{T}, v::AbstractVector{T}, i::Index) where T + @boundscheck if length(v) < closuresize(e) #TODO: Use domain_size here? + throw(BoundsError()) + end + return eltype(v)(0) +end + +#TODO: Implement apply in a meaningful way. Should it return a vector or a single value (perferable?) Should fit into the +function LazyTensors.apply(e::BoundaryOperator, v::AbstractVector, i::Index) + @boundscheck if !(0<length(Int(i)) <= length(v)) + throw(BoundsError()) + end + return e.closure[Int(i)].*v +end
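The new BoundaryValue.jl above dispatches `apply_transpose` of `BoundaryOperator` on the region parameter of the incoming `Index` (`Lower`, `Upper`, with a fallback that returns zero elsewhere). A simplified, self-contained sketch of that region-tagged dispatch pattern; the `Region`/`Index` types below are stand-ins for the RegionIndices types, and the closure handling is illustrative only:

# Stand-in types; not the RegionIndices definitions.
abstract type Region end
struct Lower    <: Region end
struct Upper    <: Region end
struct Interior <: Region end

struct Index{R<:Region}
    i::Int
end
Base.Int(I::Index) = I.i

# A boundary restriction only touches one end of the vector, so each region
# gets its own method and the far region contributes zero.
boundary_restrict(v::AbstractVector, I::Index{Lower})    = v[Int(I)]            # lower closure
boundary_restrict(v::AbstractVector, I::Index{Upper})    = v[end - Int(I) + 1]  # mirrored at the upper end
boundary_restrict(v::AbstractVector, I::Index{Interior}) = zero(eltype(v))

v = [10.0, 20.0, 30.0, 40.0]
@assert boundary_restrict(v, Index{Lower}(1)) == 10.0
@assert boundary_restrict(v, Index{Upper}(1)) == 40.0
@assert boundary_restrict(v, Index{Interior}(2)) == 0.0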
--- a/SbpOperators/src/InverseQuadrature.jl Wed Sep 09 21:06:59 2020 +0200 +++ b/SbpOperators/src/InverseQuadrature.jl Wed Sep 09 22:00:13 2020 +0200 @@ -1,43 +1,44 @@ """ - Quadrature{Dim,T<:Real,N,M,K} <: TensorMapping{T,Dim,Dim} + InverseQuadrature{Dim,T<:Real,N,M,K} <: TensorMapping{T,Dim,Dim} Implements the inverse quadrature operator `Qi` of Dim dimension as a TensorOperator The multi-dimensional tensor operator consists of a tuple of 1D InverseDiagonalNorm tensor operators. """ -struct Quadrature{Dim,T<:Real,N,M} <: TensorOperator{T,Dim} +export InverseQuadrature +struct InverseQuadrature{Dim,T<:Real,N,M} <: TensorOperator{T,Dim} Hi::NTuple{Dim,InverseDiagonalNorm{T,N,M}} end -export Quadrature -LazyTensors.domain_size(Qi::Quadrature{Dim}, range_size::NTuple{Dim,Integer}) where Dim = range_size +LazyTensors.domain_size(Qi::InverseQuadrature{Dim}, range_size::NTuple{Dim,Integer}) where Dim = range_size -function LazyTensors.apply(Qi::Quadrature{Dim,T}, v::AbstractArray{T,Dim}, I::NTuple{Dim,Index}) where {T,Dim} +function LazyTensors.apply(Qi::InverseQuadrature{Dim,T}, v::AbstractArray{T,Dim}, I::NTuple{Dim,Index}) where {T,Dim} error("not implemented") end -LazyTensors.apply_transpose(Qi::Quadrature{Dim,T}, v::AbstractArray{T,2}, I::NTuple{2,Index}) where {Dim,T} = LazyTensors.apply(Q,v,I) +LazyTensors.apply_transpose(Qi::InverseQuadrature{Dim,T}, v::AbstractArray{T,2}, I::NTuple{2,Index}) where {Dim,T} = LazyTensors.apply(Q,v,I) -@inline function LazyTensors.apply(Qi::Quadrature{1,T}, v::AbstractVector{T}, I::NTuple{1,Index}) where T +@inline function LazyTensors.apply(Qi::InverseQuadrature{1,T}, v::AbstractVector{T}, I::NTuple{1,Index}) where T @inbounds q = apply(Qi.Hi[1], v , I[1]) return q end -@inline function LazyTensors.apply(Qi::Quadrature{2,T}, v::AbstractArray{T,2}, I::NTuple{2,Index}) where T - # Quadrature in x direction +@inline function LazyTensors.apply(Qi::InverseQuadrature{2,T}, v::AbstractArray{T,2}, I::NTuple{2,Index}) where T + # InverseQuadrature in x direction @inbounds vx = view(v, :, Int(I[2])) @inbounds qx_inv = apply(Qi.Hi[1], vx , I[1]) - # Quadrature in y-direction + # InverseQuadrature in y-direction @inbounds vy = view(v, Int(I[1]), :) @inbounds qy_inv = apply(Qi.Hi[2], vy, I[2]) return qx_inv*qy_inv end """ - Quadrature{Dim,T<:Real,N,M,K} <: TensorMapping{T,Dim,Dim} + InverseQuadrature{Dim,T<:Real,N,M,K} <: TensorMapping{T,Dim,Dim} Implements the quadrature operator `Hi` of Dim dimension as a TensorMapping """ +export InverseDiagonalNorm, closuresize struct InverseDiagonalNorm{T<:Real,N,M} <: TensorOperator{T,1} h_inv::T # The reciprocl grid spacing could be included in the stencil already. Preferable? closure::NTuple{M,T} @@ -48,7 +49,7 @@ return @inbounds apply(Hi, v, I[1]) end -LazyTensors.apply_transpose(Hi::Quadrature{Dim,T}, v::AbstractArray{T,2}, I::NTuple{2,Index}) where T = LazyTensors.apply(Hi,v,I) +LazyTensors.apply_transpose(Hi::InverseQuadrature{Dim,T}, v::AbstractArray{T,2}, I::NTuple{2,Index}) where T = LazyTensors.apply(Hi,v,I) @inline LazyTensors.apply(Hi::InverseDiagonalNorm, v::AbstractVector{T}, i::Index{Lower}) where T return @inbounds Hi.h_inv*Hi.closure[Int(i)]*v[Int(i)]
--- a/SbpOperators/src/Quadrature.jl Wed Sep 09 21:06:59 2020 +0200 +++ b/SbpOperators/src/Quadrature.jl Wed Sep 09 22:00:13 2020 +0200 @@ -6,10 +6,10 @@ The multi-dimensional tensor operator consists of a tuple of 1D DiagonalNorm H tensor operators. """ +export Quadrature struct Quadrature{Dim,T<:Real,N,M} <: TensorOperator{T,Dim} H::NTuple{Dim,DiagonalNorm{T,N,M}} end -export Quadrature LazyTensors.domain_size(Q::Quadrature{Dim}, range_size::NTuple{Dim,Integer}) where Dim = range_size @@ -39,6 +39,7 @@ Implements the diagnoal norm operator `H` of Dim dimension as a TensorMapping """ +export DiagonalNorm, closuresize, LazyTensors.apply struct DiagonalNorm{T<:Real,N,M} <: TensorOperator{T,1} h::T # The grid spacing could be included in the stencil already. Preferable? closure::NTuple{M,T} @@ -69,7 +70,6 @@ i = Index(Int(index), r) return LazyTensors.apply(H, v, i) end -export LazyTensors.apply function closuresize(H::DiagonalNorm{T<:Real,N,M}) where {T,N,M} return M
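Both `DiagonalNorm` and `InverseDiagonalNorm` act pointwise: the value at index `i` is scaled by the grid spacing (or its reciprocal) and by a closure weight near the boundaries, with weight one in the interior. A hedged sketch of that structure with made-up second-order weights, not values read from the operator files:

# Hypothetical simplified type, not the package's DiagonalNorm.
struct DiagonalNormSketch
    h::Float64
    closure::NTuple{1,Float64}
end

function apply_norm(H::DiagonalNormSketch, v::AbstractVector, i::Int)
    N = length(v)
    M = length(H.closure)
    if i <= M                    # lower closure
        w = H.closure[i]
    elseif i > N - M             # upper closure (mirrored)
        w = H.closure[N - i + 1]
    else                         # interior weight is 1
        w = 1.0
    end
    return H.h * w * v[i]
end

H = DiagonalNormSketch(0.1, (0.5,))
v = ones(6)
# Summing the scaled values reproduces the composite trapezoidal rule on [0, 0.5].
@assert sum(apply_norm(H, v, i) for i in 1:6) ≈ 0.5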
--- a/SbpOperators/src/SbpOperators.jl	Wed Sep 09 21:06:59 2020 +0200
+++ b/SbpOperators/src/SbpOperators.jl	Wed Sep 09 22:00:13 2020 +0200
@@ -1,10 +1,12 @@
 module SbpOperators
 using RegionIndices
+using LazyTensors
 
 include("stencil.jl")
 include("constantstenciloperator.jl")
 include("d2.jl")
 include("readoperator.jl")
-
+include("laplace/secondderivative.jl")
+include("laplace/laplace.jl")
 
 end # module
--- a/SbpOperators/src/constantstenciloperator.jl Wed Sep 09 21:06:59 2020 +0200 +++ b/SbpOperators/src/constantstenciloperator.jl Wed Sep 09 22:00:13 2020 +0200 @@ -46,36 +46,6 @@ export apply_inverse_quadrature -function apply_boundary_value_transpose(op::ConstantStencilOperator, v::AbstractVector, ::Type{Lower}) - @boundscheck if length(v) < closuresize(op) - throw(BoundsError()) - end - apply_stencil(op.eClosure,v,1) -end - -function apply_boundary_value_transpose(op::ConstantStencilOperator, v::AbstractVector, ::Type{Upper}) - @boundscheck if length(v) < closuresize(op) - throw(BoundsError()) - end - apply_stencil_backwards(op.eClosure,v,length(v)) -end -export apply_boundary_value_transpose - -function apply_boundary_value(op::ConstantStencilOperator, v::Number, i::Index, N::Integer, ::Type{Lower}) - @boundscheck if !(0<length(Int(i)) <= N) - throw(BoundsError()) - end - op.eClosure[Int(i)-1]*v -end - -function apply_boundary_value(op::ConstantStencilOperator, v::Number, i::Index, N::Integer, ::Type{Upper}) - @boundscheck if !(0<length(Int(i)) <= N) - throw(BoundsError()) - end - op.eClosure[N-Int(i)]*v -end -export apply_boundary_value - function apply_normal_derivative_transpose(op::ConstantStencilOperator, h_inv::Real, v::AbstractVector, ::Type{Lower}) @boundscheck if length(v) < closuresize(op) throw(BoundsError())
--- a/SbpOperators/src/laplace/laplace.jl Wed Sep 09 21:06:59 2020 +0200 +++ b/SbpOperators/src/laplace/laplace.jl Wed Sep 09 22:00:13 2020 +0200 @@ -1,3 +1,4 @@ +export Laplace """ Laplace{Dim,T<:Real,N,M,K} <: TensorOperator{T,Dim} @@ -5,167 +6,131 @@ The multi-dimensional tensor operator consists of a tuple of 1D SecondDerivative tensor operators. """ -struct Laplace{Dim,T<:Real,N,M,K} <: TensorOperator{T,Dim} - D2::NTuple(Dim,SecondDerivative{T,N,M,K}) +#export quadrature, inverse_quadrature, boundary_quadrature, boundary_value, normal_derivative +struct Laplace{Dim,T,N,M,K} <: TensorOperator{T,Dim} + D2::NTuple{Dim,SecondDerivative{T,N,M,K}} #TODO: Write a good constructor end -export Laplace -LazyTensors.domain_size(H::Laplace{Dim}, range_size::NTuple{Dim,Integer}) = range_size +LazyTensors.domain_size(H::Laplace{Dim}, range_size::NTuple{Dim,Integer}) where {Dim} = range_size -function LazyTensors.apply(L::Laplace{Dim,T}, v::AbstractArray{T,Dim}, I::NTuple{Dim,Index}) where {T,Dim} +function LazyTensors.apply(L::Laplace{Dim,T}, v::AbstractArray{T,Dim}, I::Vararg{Index,Dim}) where {T,Dim} error("not implemented") end -function LazyTensors.apply_transpose(L::Laplace{Dim,T}, v::AbstractArray{T,Dim}, I::NTuple{Dim,Index}) where {T,Dim} = LazyTensors.apply(L, v, I) - # u = L*v -function LazyTensors.apply(L::Laplace{1,T}, v::AbstractVector{T}, I::NTuple{1,Index}) where T - @inbounds u = apply(L.D2[1],v,I) +function LazyTensors.apply(L::Laplace{1,T}, v::AbstractVector{T}, I::Index) where T + @inbounds u = LazyTensors.apply(L.D2[1],v,I) return u end - -@inline function LazyTensors.apply(L::Laplace{2,T}, v::AbstractArray{T,2}, I::NTuple{2,Index}) where T +# TODO: Fix dispatch on tuples! +@inline function LazyTensors.apply(L::Laplace{2,T}, v::AbstractArray{T,2}, I::Index, J::Index) where T # 2nd x-derivative - @inbounds vx = view(v, :, Int(I[2])) - @inbounds uᵢ = apply(L.D2[1], vx , I[1]) + @inbounds vx = view(v, :, Int(J)) + @inbounds uᵢ = LazyTensors.apply(L.D2[1], vx , I) # 2nd y-derivative - @inbounds vy = view(v, Int(I[1]), :) - @inbounds uᵢ += apply(L.D2[2], vy , I[2]) + @inbounds vy = view(v, Int(I), :) + @inbounds uᵢ += LazyTensors.apply(L.D2[2], vy , J) return uᵢ end -quadrature(L::Laplace) = Quadrature(L.op, L.grid) -inverse_quadrature(L::Laplace) = InverseQuadrature(L.op, L.grid) -boundary_value(L::Laplace, bId::CartesianBoundary) = BoundaryValue(L.op, L.grid, bId) -normal_derivative(L::Laplace, bId::CartesianBoundary) = NormalDerivative(L.op, L.grid, bId) -boundary_quadrature(L::Laplace, bId::CartesianBoundary) = BoundaryQuadrature(L.op, L.grid, bId) -export quadrature - -""" - BoundaryValue{T,N,M,K} <: TensorMapping{T,2,1} - -Implements the boundary operator `e` as a TensorMapping -""" -struct BoundaryValue{T,N,M,K} <: TensorMapping{T,2,1} - op::D2{T,N,M,K} - grid::EquidistantGrid{2} - bId::CartesianBoundary -end -export BoundaryValue - -# TODO: This is obviouly strange. Is domain_size just discarded? Is there a way to avoid storing grid in BoundaryValue? -# Can we give special treatment to TensorMappings that go to a higher dim? 
-function LazyTensors.range_size(e::BoundaryValue{T}, domain_size::NTuple{1,Integer}) where T - if dim(e.bId) == 1 - return (UnknownDim, domain_size[1]) - elseif dim(e.bId) == 2 - return (domain_size[1], UnknownDim) - end -end -LazyTensors.domain_size(e::BoundaryValue{T}, range_size::NTuple{2,Integer}) where T = (range_size[3-dim(e.bId)],) -# TODO: Make a nicer solution for 3-dim(e.bId) - -# TODO: Make this independent of dimension -function LazyTensors.apply(e::BoundaryValue{T}, v::AbstractArray{T}, I::NTuple{2,Index}) where T - i = I[dim(e.bId)] - j = I[3-dim(e.bId)] - N_i = size(e.grid)[dim(e.bId)] - return apply_boundary_value(e.op, v[j], i, N_i, region(e.bId)) -end - -function LazyTensors.apply_transpose(e::BoundaryValue{T}, v::AbstractArray{T}, I::NTuple{1,Index}) where T - u = selectdim(v,3-dim(e.bId),Int(I[1])) - return apply_boundary_value_transpose(e.op, u, region(e.bId)) +@inline function LazyTensors.apply_transpose(L::Laplace{Dim,T}, v::AbstractArray{T,Dim}, I::Vararg{Index,Dim}) where {T,Dim} + return LazyTensors.apply(L, v, I) end -""" - NormalDerivative{T,N,M,K} <: TensorMapping{T,2,1} - -Implements the boundary operator `d` as a TensorMapping -""" -struct NormalDerivative{T,N,M,K} <: TensorMapping{T,2,1} - op::D2{T,N,M,K} - grid::EquidistantGrid{2} - bId::CartesianBoundary -end -export NormalDerivative - -# TODO: This is obviouly strange. Is domain_size just discarded? Is there a way to avoid storing grid in BoundaryValue? -# Can we give special treatment to TensorMappings that go to a higher dim? -function LazyTensors.range_size(e::NormalDerivative, domain_size::NTuple{1,Integer}) - if dim(e.bId) == 1 - return (UnknownDim, domain_size[1]) - elseif dim(e.bId) == 2 - return (domain_size[1], UnknownDim) - end -end -LazyTensors.domain_size(e::NormalDerivative, range_size::NTuple{2,Integer}) = (range_size[3-dim(e.bId)],) - -# TODO: Not type stable D:< -# TODO: Make this independent of dimension -function LazyTensors.apply(d::NormalDerivative{T}, v::AbstractArray{T}, I::NTuple{2,Index}) where T - i = I[dim(d.bId)] - j = I[3-dim(d.bId)] - N_i = size(d.grid)[dim(d.bId)] - h_inv = inverse_spacing(d.grid)[dim(d.bId)] - return apply_normal_derivative(d.op, h_inv, v[j], i, N_i, region(d.bId)) -end - -function LazyTensors.apply_transpose(d::NormalDerivative{T}, v::AbstractArray{T}, I::NTuple{1,Index}) where T - u = selectdim(v,3-dim(d.bId),Int(I[1])) - return apply_normal_derivative_transpose(d.op, inverse_spacing(d.grid)[dim(d.bId)], u, region(d.bId)) -end - -""" - BoundaryQuadrature{T,N,M,K} <: TensorOperator{T,1} - -Implements the boundary operator `q` as a TensorOperator -""" -struct BoundaryQuadrature{T,N,M,K} <: TensorOperator{T,1} - op::D2{T,N,M,K} - grid::EquidistantGrid{2} - bId::CartesianBoundary -end -export BoundaryQuadrature - -# TODO: Make this independent of dimension -function LazyTensors.apply(q::BoundaryQuadrature{T}, v::AbstractArray{T,1}, I::NTuple{1,Index}) where T - h = spacing(q.grid)[3-dim(q.bId)] - N = size(v) - return apply_quadrature(q.op, h, v[I[1]], I[1], N[1]) -end - -LazyTensors.apply_transpose(q::BoundaryQuadrature{T}, v::AbstractArray{T,1}, I::NTuple{1,Index}) where T = LazyTensors.apply(q,v,I) - - - - -struct Neumann{Bid<:BoundaryIdentifier} <: BoundaryCondition end - -function sat(L::Laplace{2,T}, bc::Neumann{Bid}, v::AbstractArray{T,2}, g::AbstractVector{T}, I::CartesianIndex{2}) where {T,Bid} - e = boundary_value(L, Bid()) - d = normal_derivative(L, Bid()) - Hᵧ = boundary_quadrature(L, Bid()) - H⁻¹ = inverse_quadrature(L) - return (-H⁻¹*e*Hᵧ*(d'*v - 
g))[I] -end - -struct Dirichlet{Bid<:BoundaryIdentifier} <: BoundaryCondition - tau::Float64 -end - -function sat(L::Laplace{2,T}, bc::Dirichlet{Bid}, v::AbstractArray{T,2}, g::AbstractVector{T}, i::CartesianIndex{2}) where {T,Bid} - e = boundary_value(L, Bid()) - d = normal_derivative(L, Bid()) - Hᵧ = boundary_quadrature(L, Bid()) - H⁻¹ = inverse_quadrature(L) - return (-H⁻¹*(tau/h*e + d)*Hᵧ*(e'*v - g))[I] - # Need to handle scalar multiplication and addition of TensorMapping -end +# quadrature(L::Laplace) = Quadrature(L.op, L.grid) +# inverse_quadrature(L::Laplace) = InverseQuadrature(L.op, L.grid) +# boundary_value(L::Laplace, bId::CartesianBoundary) = BoundaryValue(L.op, L.grid, bId) +# normal_derivative(L::Laplace, bId::CartesianBoundary) = NormalDerivative(L.op, L.grid, bId) +# boundary_quadrature(L::Laplace, bId::CartesianBoundary) = BoundaryQuadrature(L.op, L.grid, bId) +# export NormalDerivative +# """ +# NormalDerivative{T,N,M,K} <: TensorMapping{T,2,1} +# +# Implements the boundary operator `d` as a TensorMapping +# """ +# struct NormalDerivative{T,N,M,K} <: TensorMapping{T,2,1} +# op::D2{T,N,M,K} +# grid::EquidistantGrid{2} +# bId::CartesianBoundary +# end +# +# # TODO: This is obviouly strange. Is domain_size just discarded? Is there a way to avoid storing grid in BoundaryValue? +# # Can we give special treatment to TensorMappings that go to a higher dim? +# function LazyTensors.range_size(e::NormalDerivative, domain_size::NTuple{1,Integer}) +# if dim(e.bId) == 1 +# return (UnknownDim, domain_size[1]) +# elseif dim(e.bId) == 2 +# return (domain_size[1], UnknownDim) +# end +# end +# LazyTensors.domain_size(e::NormalDerivative, range_size::NTuple{2,Integer}) = (range_size[3-dim(e.bId)],) +# +# # TODO: Not type stable D:< +# # TODO: Make this independent of dimension +# function LazyTensors.apply(d::NormalDerivative{T}, v::AbstractArray{T}, I::NTuple{2,Index}) where T +# i = I[dim(d.bId)] +# j = I[3-dim(d.bId)] +# N_i = size(d.grid)[dim(d.bId)] +# h_inv = inverse_spacing(d.grid)[dim(d.bId)] +# return apply_normal_derivative(d.op, h_inv, v[j], i, N_i, region(d.bId)) +# end +# +# function LazyTensors.apply_transpose(d::NormalDerivative{T}, v::AbstractArray{T}, I::NTuple{1,Index}) where T +# u = selectdim(v,3-dim(d.bId),Int(I[1])) +# return apply_normal_derivative_transpose(d.op, inverse_spacing(d.grid)[dim(d.bId)], u, region(d.bId)) +# end +# +# """ +# BoundaryQuadrature{T,N,M,K} <: TensorOperator{T,1} +# +# Implements the boundary operator `q` as a TensorOperator +# """ +# export BoundaryQuadrature +# struct BoundaryQuadrature{T,N,M,K} <: TensorOperator{T,1} +# op::D2{T,N,M,K} +# grid::EquidistantGrid{2} +# bId::CartesianBoundary +# end +# +# +# # TODO: Make this independent of dimension +# function LazyTensors.apply(q::BoundaryQuadrature{T}, v::AbstractArray{T,1}, I::NTuple{1,Index}) where T +# h = spacing(q.grid)[3-dim(q.bId)] +# N = size(v) +# return apply_quadrature(q.op, h, v[I[1]], I[1], N[1]) +# end +# +# LazyTensors.apply_transpose(q::BoundaryQuadrature{T}, v::AbstractArray{T,1}, I::NTuple{1,Index}) where T = LazyTensors.apply(q,v,I) +# +# +# +# +# struct Neumann{Bid<:BoundaryIdentifier} <: BoundaryCondition end +# +# function sat(L::Laplace{2,T}, bc::Neumann{Bid}, v::AbstractArray{T,2}, g::AbstractVector{T}, I::CartesianIndex{2}) where {T,Bid} +# e = boundary_value(L, Bid()) +# d = normal_derivative(L, Bid()) +# Hᵧ = boundary_quadrature(L, Bid()) +# H⁻¹ = inverse_quadrature(L) +# return (-H⁻¹*e*Hᵧ*(d'*v - g))[I] +# end +# +# struct Dirichlet{Bid<:BoundaryIdentifier} <: 
BoundaryCondition +# tau::Float64 +# end +# +# function sat(L::Laplace{2,T}, bc::Dirichlet{Bid}, v::AbstractArray{T,2}, g::AbstractVector{T}, i::CartesianIndex{2}) where {T,Bid} +# e = boundary_value(L, Bid()) +# d = normal_derivative(L, Bid()) +# Hᵧ = boundary_quadrature(L, Bid()) +# H⁻¹ = inverse_quadrature(L) +# return (-H⁻¹*(tau/h*e + d)*Hᵧ*(e'*v - g))[I] +# # Need to handle scalar multiplication and addition of TensorMapping +# end # function apply(s::MyWaveEq{D}, v::AbstractArray{T,D}, i::CartesianIndex{D}) where D # return apply(s.L, v, i) +
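The rewritten `LazyTensors.apply` for `Laplace{2,T}` above takes the two indices as separate arguments, slices out the corresponding column and row with `view`, and adds the two one-dimensional second-derivative applications. A minimal sketch of that composition, using a plain interior central difference in place of `SecondDerivative`:

# second_diff is an interior-only stand-in for SecondDerivative, not the real stencil machinery.
function second_diff(v::AbstractVector, i::Int, h_inv::Float64)
    return h_inv^2 * (v[i-1] - 2v[i] + v[i+1])
end

function laplace_apply(v::AbstractMatrix, i::Int, j::Int, hx_inv::Float64, hy_inv::Float64)
    ux = second_diff(view(v, :, j), i, hx_inv)   # second derivative along the x-column
    uy = second_diff(view(v, i, :), j, hy_inv)   # second derivative along the y-row
    return ux + uy
end

# On v(x, y) = x^2 + y^2 the exact Laplacian is 4, and the central stencil is exact for quadratics.
x = 0:0.1:1; y = 0:0.1:2
v = [xi^2 + yj^2 for xi in x, yj in y]
@assert laplace_apply(v, 5, 7, 10.0, 10.0) ≈ 4.0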
--- a/SbpOperators/src/laplace/secondderivative.jl Wed Sep 09 21:06:59 2020 +0200 +++ b/SbpOperators/src/laplace/secondderivative.jl Wed Sep 09 22:00:13 2020 +0200 @@ -3,48 +3,49 @@ Implements the Laplace tensor operator `L` with constant grid spacing and coefficients in 1D dimension """ -struct SecondDerivative{T<:Real,N,M,K} <: TensorOperator{T,1} + +struct SecondDerivative{T,N,M,K} <: TensorOperator{T,1} h_inv::T # The grid spacing could be included in the stencil already. Preferable? innerStencil::Stencil{T,N} closureStencils::NTuple{M,Stencil{T,K}} parity::Parity #TODO: Write a nice constructor end - -@enum Parity begin - odd = -1 - even = 1 -end +export SecondDerivative LazyTensors.domain_size(D2::SecondDerivative, range_size::NTuple{1,Integer}) = range_size -@inline function LazyTensors.apply(D2::SecondDerivative{T}, v::AbstractVector{T}, I::NTuple{1,Index}) where T - return @inbounds apply(D2, v, I[1]) -end - -function LazyTensors.apply_transpose(D2::SecondDerivative{T}, v::AbstractVector{T}, I::NTuple{1,Index}) where T = LazyTensors.apply(D2, v, I) +#TODO: The 1D tensor mappings should not have to dispatch on 1D tuples if we write LazyTensor.apply for vararg right?!?! +# Currently have to index the Tuple{Index} in each method in order to call the stencil methods which is ugly. +# I thought I::Vararg{Index,R} fell back to just Index for R = 1 # Apply for different regions Lower/Interior/Upper or Unknown region -@inline function LazyTensors.apply(D2::SecondDerivative, v::AbstractVector, i::Index{Lower}) - return @inbounds D2.h_inv*D2.h_inv*apply_stencil(D2.closureStencils[Int(i)], v, Int(i)) +@inline function LazyTensors.apply(D2::SecondDerivative{T}, v::AbstractVector{T}, I::Index{Lower}) where T + return @inbounds D2.h_inv*D2.h_inv*apply_stencil(D2.closureStencils[Int(I)], v, Int(I)) end -@inline function LazyTensors.apply(D2::SecondDerivative, v::AbstractVector, i::Index{Interior}) - return @inbounds D2.h_inv*D2.h_inv*apply_stencil(D2.innerStencil, v, Int(i)) +@inline function LazyTensors.apply(D2::SecondDerivative{T}, v::AbstractVector{T}, I::Index{Interior}) where T + return @inbounds D2.h_inv*D2.h_inv*apply_stencil(D2.innerStencil, v, Int(I)) end -@inline function LazyTensors.apply(D2::SecondDerivative, v::AbstractVector, i::Index{Upper}) - N = length(v) # TODO: Use domain_size here instead? - return @inbounds D2.h_inv*D2.h_inv*Int(D2.parity)*apply_stencil_backwards(D2.closureStencils[N-Int(i)+1], v, Int(i)) +@inline function LazyTensors.apply(D2::SecondDerivative{T}, v::AbstractVector{T}, I::Index{Upper}) where T + N = length(v) # TODO: Use domain_size here instead? N = domain_size(D2,size(v)) + return @inbounds D2.h_inv*D2.h_inv*Int(D2.parity)*apply_stencil_backwards(D2.closureStencils[N-Int(I)+1], v, Int(I)) end -@inline function LazyTensors.apply(D2::SecondDerivative, v::AbstractVector, index::Index{Unknown}) +@inline function LazyTensors.apply(D2::SecondDerivative{T}, v::AbstractVector{T}, index::Index{Unknown}) where T N = length(v) # TODO: Use domain_size here instead? 
- r = getregion(Int(index), closuresize(L), N) - i = Index(Int(index), r) - return apply(D2, v, i) + r = getregion(Int(index), closuresize(D2), N) + I = Index(Int(index), r) + return LazyTensors.apply(D2, v, I) end -function closuresize(D2::SecondDerivative{T<:Real,N,M,K}) where {T,N,M,K} + +@inline function LazyTensors.apply_transpose(D2::SecondDerivative, v::AbstractVector, I::Index) + return LazyTensors.apply(D2, v, I) +end + + +function closuresize(D2::SecondDerivative{T,N,M,K}) where {T<:Real,N,M,K} return M end
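The `Index{Unknown}` method above resolves an untagged index into a region with `getregion` and then re-dispatches to the region-specific `apply`. A simplified stand-in for that classification step (the real `getregion` lives in RegionIndices):

# Hypothetical helper mirroring the classification idea; not the RegionIndices implementation.
function getregion_sketch(i::Int, closuresize::Int, N::Int)
    if i <= closuresize          # handled by a lower closure stencil
        return :Lower
    elseif i > N - closuresize   # handled by an upper closure stencil (applied backwards)
        return :Upper
    else                         # handled by the repeated inner stencil
        return :Interior
    end
end

@assert getregion_sketch(1, 4, 100)  == :Lower
@assert getregion_sketch(50, 4, 100) == :Interior
@assert getregion_sketch(99, 4, 100) == :Upper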
--- a/SbpOperators/test/runtests.jl Wed Sep 09 21:06:59 2020 +0200 +++ b/SbpOperators/test/runtests.jl Wed Sep 09 22:00:13 2020 +0200 @@ -1,23 +1,338 @@ +using Test using SbpOperators -using Test +using Grids +using RegionIndices +using LazyTensors + +# @testset "apply_quadrature" begin +# op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") +# h = 0.5 +# +# @test apply_quadrature(op, h, 1.0, 10, 100) == h +# +# N = 10 +# qc = op.quadratureClosure +# q = h.*(qc..., ones(N-2*closuresize(op))..., reverse(qc)...) +# @assert length(q) == N +# +# for i ∈ 1:N +# @test apply_quadrature(op, h, 1.0, i, N) == q[i] +# end +# +# v = [2.,3.,2.,4.,5.,4.,3.,4.,5.,4.5] +# for i ∈ 1:N +# @test apply_quadrature(op, h, v[i], i, N) == q[i]*v[i] +# end +# end + +@testset "SecondDerivative" begin + op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") + L = 3.5 + g = EquidistantGrid((101,), (0.0,), (L,)) + h_inv = inverse_spacing(g) + h = 1/h_inv[1]; + Dₓₓ = SecondDerivative(h_inv[1],op.innerStencil,op.closureStencils,op.parity) + + f0(x::Float64) = 1. + f1(x::Float64) = x + f2(x::Float64) = 1/2*x^2 + f3(x::Float64) = 1/6*x^3 + f4(x::Float64) = 1/24*x^4 + f5(x::Float64) = sin(x) + f5ₓₓ(x::Float64) = -f5(x) + + v0 = evalOn(g,f0) + v1 = evalOn(g,f1) + v2 = evalOn(g,f2) + v3 = evalOn(g,f3) + v4 = evalOn(g,f4) + v5 = evalOn(g,f5) -@testset "apply_quadrature" begin + @test Dₓₓ isa TensorOperator{T,1} where T + @test Dₓₓ' isa TensorMapping{T,1,1} where T + + # TODO: Should perhaps set tolerance level for isapporx instead? + # Are these tolerance levels resonable or should tests be constructed + # differently? + equalitytol = 0.5*1e-10 + accuracytol = 0.5*1e-3 + # 4th order interior stencil, 2nd order boundary stencil, + # implies that L*v should be exact for v - monomial up to order 3. + # Exact differentiation is measured point-wise. For other grid functions + # the error is measured in the l2-norm. + @test all(abs.(collect(Dₓₓ*v0)) .<= equalitytol) + @test all(abs.(collect(Dₓₓ*v1)) .<= equalitytol) + @test all(abs.((collect(Dₓₓ*v2) - v0)) .<= equalitytol) + @test all(abs.((collect(Dₓₓ*v3) - v1)) .<= equalitytol) + e4 = collect(Dₓₓ*v4) - v2 + e5 = collect(Dₓₓ*v5) + v5 + @test sqrt(h*sum(collect(e4.^2))) <= accuracytol + @test sqrt(h*sum(collect(e5.^2))) <= accuracytol +end + +@testset "Laplace2D" begin op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") - h = 0.5 + Lx = 1.5 + Ly = 3.2 + g = EquidistantGrid((102,131), (0.0, 0.0), (Lx,Ly)) + + h_inv = inverse_spacing(g) + h = spacing(g) + D_xx = SecondDerivative(h_inv[1],op.innerStencil,op.closureStencils,op.parity) + D_yy = SecondDerivative(h_inv[2],op.innerStencil,op.closureStencils,op.parity) + L = Laplace((D_xx,D_yy)) - @test apply_quadrature(op, h, 1.0, 10, 100) == h + f0(x::Float64,y::Float64) = 2. + f1(x::Float64,y::Float64) = x+y + f2(x::Float64,y::Float64) = 1/2*x^2 + 1/2*y^2 + f3(x::Float64,y::Float64) = 1/6*x^3 + 1/6*y^3 + f4(x::Float64,y::Float64) = 1/24*x^4 + 1/24*y^4 + f5(x::Float64,y::Float64) = sin(x) + cos(y) + f5ₓₓ(x::Float64,y::Float64) = -f5(x,y) + + v0 = evalOn(g,f0) + v1 = evalOn(g,f1) + v2 = evalOn(g,f2) + v3 = evalOn(g,f3) + v4 = evalOn(g,f4) + v5 = evalOn(g,f5) + v5ₓₓ = evalOn(g,f5ₓₓ) + + @test L isa TensorOperator{T,2} where T + @test L' isa TensorMapping{T,2,2} where T - N = 10 - qc = op.quadratureClosure - q = h.*(qc..., ones(N-2*closuresize(op))..., reverse(qc)...) 
- @assert length(q) == N - - for i ∈ 1:N - @test apply_quadrature(op, h, 1.0, i, N) == q[i] - end - - v = [2.,3.,2.,4.,5.,4.,3.,4.,5.,4.5] - for i ∈ 1:N - @test apply_quadrature(op, h, v[i], i, N) == q[i]*v[i] - end + # TODO: Should perhaps set tolerance level for isapporx instead? + # Are these tolerance levels resonable or should tests be constructed + # differently? + equalitytol = 0.5*1e-10 + accuracytol = 0.5*1e-3 + # 4th order interior stencil, 2nd order boundary stencil, + # implies that L*v should be exact for v - monomial up to order 3. + # Exact differentiation is measured point-wise. For other grid functions + # the error is measured in the H-norm. + @test all(abs.(collect(L*v0)) .<= equalitytol) + @test all(abs.(collect(L*v1)) .<= equalitytol) + @test all(collect(L*v2) .≈ v0) # Seems to be more accurate + @test all(abs.((collect(L*v3) - v1)) .<= equalitytol) + e4 = collect(L*v4) - v2 + e5 = collect(L*v5) - v5ₓₓ + @test sqrt(prod(h)*sum(collect(e4.^2))) <= accuracytol + @test sqrt(prod(h)*sum(collect(e5.^2))) <= accuracytol end +# +# @testset "Quadrature" begin +# op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") +# Lx = 2.3 +# Ly = 5.2 +# g = EquidistantGrid((77,66), (0.0, 0.0), (Lx,Ly)) +# H = Quadrature(op,g) +# v = ones(Float64, size(g)) +# +# @test H isa TensorOperator{T,2} where T +# @test H' isa TensorMapping{T,2,2} where T +# @test sum(collect(H*v)) ≈ (Lx*Ly) +# @test collect(H*v) == collect(H'*v) +# end +# +# @testset "InverseQuadrature" begin +# op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") +# Lx = 7.3 +# Ly = 8.2 +# g = EquidistantGrid((77,66), (0.0, 0.0), (Lx,Ly)) +# H = Quadrature(op,g) +# Hinv = InverseQuadrature(op,g) +# v = evalOn(g, (x,y)-> x^2 + (y-1)^2 + x*y) +# +# @test Hinv isa TensorOperator{T,2} where T +# @test Hinv' isa TensorMapping{T,2,2} where T +# @test collect(Hinv*H*v) ≈ v +# @test collect(Hinv*v) == collect(Hinv'*v) +# end +# +# @testset "BoundaryValue" begin +# op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") +# g = EquidistantGrid((4,5), (0.0, 0.0), (1.0,1.0)) +# +# e_w = BoundaryValue(op, g, CartesianBoundary{1,Lower}()) +# e_e = BoundaryValue(op, g, CartesianBoundary{1,Upper}()) +# e_s = BoundaryValue(op, g, CartesianBoundary{2,Lower}()) +# e_n = BoundaryValue(op, g, CartesianBoundary{2,Upper}()) +# +# v = zeros(Float64, 4, 5) +# v[:,5] = [1, 2, 3,4] +# v[:,4] = [1, 2, 3,4] +# v[:,3] = [4, 5, 6, 7] +# v[:,2] = [7, 8, 9, 10] +# v[:,1] = [10, 11, 12, 13] +# +# @test e_w isa TensorMapping{T,2,1} where T +# @test e_w' isa TensorMapping{T,1,2} where T +# +# @test domain_size(e_w, (3,2)) == (2,) +# @test domain_size(e_e, (3,2)) == (2,) +# @test domain_size(e_s, (3,2)) == (3,) +# @test domain_size(e_n, (3,2)) == (3,) +# +# @test size(e_w'*v) == (5,) +# @test size(e_e'*v) == (5,) +# @test size(e_s'*v) == (4,) +# @test size(e_n'*v) == (4,) +# +# @test collect(e_w'*v) == [10,7,4,1.0,1] +# @test collect(e_e'*v) == [13,10,7,4,4.0] +# @test collect(e_s'*v) == [10,11,12,13.0] +# @test collect(e_n'*v) == [1,2,3,4.0] +# +# g_x = [1,2,3,4.0] +# g_y = [5,4,3,2,1.0] +# +# G_w = zeros(Float64, (4,5)) +# G_w[1,:] = g_y +# +# G_e = zeros(Float64, (4,5)) +# G_e[4,:] = g_y +# +# G_s = zeros(Float64, (4,5)) +# G_s[:,1] = g_x +# +# G_n = zeros(Float64, (4,5)) +# G_n[:,5] = g_x +# +# @test size(e_w*g_y) == (UnknownDim,5) +# @test size(e_e*g_y) == (UnknownDim,5) +# @test size(e_s*g_x) == (4,UnknownDim) +# @test size(e_n*g_x) == (4,UnknownDim) +# +# # These 
tests should be moved to where they are possible (i.e we know what the grid should be) +# @test_broken collect(e_w*g_y) == G_w +# @test_broken collect(e_e*g_y) == G_e +# @test_broken collect(e_s*g_x) == G_s +# @test_broken collect(e_n*g_x) == G_n +# end +# +# @testset "NormalDerivative" begin +# op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") +# g = EquidistantGrid((5,6), (0.0, 0.0), (4.0,5.0)) +# +# d_w = NormalDerivative(op, g, CartesianBoundary{1,Lower}()) +# d_e = NormalDerivative(op, g, CartesianBoundary{1,Upper}()) +# d_s = NormalDerivative(op, g, CartesianBoundary{2,Lower}()) +# d_n = NormalDerivative(op, g, CartesianBoundary{2,Upper}()) +# +# +# v = evalOn(g, (x,y)-> x^2 + (y-1)^2 + x*y) +# v∂x = evalOn(g, (x,y)-> 2*x + y) +# v∂y = evalOn(g, (x,y)-> 2*(y-1) + x) +# +# @test d_w isa TensorMapping{T,2,1} where T +# @test d_w' isa TensorMapping{T,1,2} where T +# +# @test domain_size(d_w, (3,2)) == (2,) +# @test domain_size(d_e, (3,2)) == (2,) +# @test domain_size(d_s, (3,2)) == (3,) +# @test domain_size(d_n, (3,2)) == (3,) +# +# @test size(d_w'*v) == (6,) +# @test size(d_e'*v) == (6,) +# @test size(d_s'*v) == (5,) +# @test size(d_n'*v) == (5,) +# +# @test collect(d_w'*v) ≈ v∂x[1,:] +# @test collect(d_e'*v) ≈ v∂x[5,:] +# @test collect(d_s'*v) ≈ v∂y[:,1] +# @test collect(d_n'*v) ≈ v∂y[:,6] +# +# +# d_x_l = zeros(Float64, 5) +# d_x_u = zeros(Float64, 5) +# for i ∈ eachindex(d_x_l) +# d_x_l[i] = op.dClosure[i-1] +# d_x_u[i] = -op.dClosure[length(d_x_u)-i] +# end +# +# d_y_l = zeros(Float64, 6) +# d_y_u = zeros(Float64, 6) +# for i ∈ eachindex(d_y_l) +# d_y_l[i] = op.dClosure[i-1] +# d_y_u[i] = -op.dClosure[length(d_y_u)-i] +# end +# +# function prod_matrix(x,y) +# G = zeros(Float64, length(x), length(y)) +# for I ∈ CartesianIndices(G) +# G[I] = x[I[1]]*y[I[2]] +# end +# +# return G +# end +# +# g_x = [1,2,3,4.0,5] +# g_y = [5,4,3,2,1.0,11] +# +# G_w = prod_matrix(d_x_l, g_y) +# G_e = prod_matrix(d_x_u, g_y) +# G_s = prod_matrix(g_x, d_y_l) +# G_n = prod_matrix(g_x, d_y_u) +# +# +# @test size(d_w*g_y) == (UnknownDim,6) +# @test size(d_e*g_y) == (UnknownDim,6) +# @test size(d_s*g_x) == (5,UnknownDim) +# @test size(d_n*g_x) == (5,UnknownDim) +# +# # These tests should be moved to where they are possible (i.e we know what the grid should be) +# @test_broken collect(d_w*g_y) ≈ G_w +# @test_broken collect(d_e*g_y) ≈ G_e +# @test_broken collect(d_s*g_x) ≈ G_s +# @test_broken collect(d_n*g_x) ≈ G_n +# end +# +# @testset "BoundaryQuadrature" begin +# op = readOperator(sbp_operators_path()*"d2_4th.txt",sbp_operators_path()*"h_4th.txt") +# g = EquidistantGrid((10,11), (0.0, 0.0), (1.0,1.0)) +# +# H_w = BoundaryQuadrature(op, g, CartesianBoundary{1,Lower}()) +# H_e = BoundaryQuadrature(op, g, CartesianBoundary{1,Upper}()) +# H_s = BoundaryQuadrature(op, g, CartesianBoundary{2,Lower}()) +# H_n = BoundaryQuadrature(op, g, CartesianBoundary{2,Upper}()) +# +# v = evalOn(g, (x,y)-> x^2 + (y-1)^2 + x*y) +# +# function get_quadrature(N) +# qc = op.quadratureClosure +# q = (qc..., ones(N-2*closuresize(op))..., reverse(qc)...) 
+# @assert length(q) == N +# return q +# end +# +# v_w = v[1,:] +# v_e = v[10,:] +# v_s = v[:,1] +# v_n = v[:,11] +# +# q_x = spacing(g)[1].*get_quadrature(10) +# q_y = spacing(g)[2].*get_quadrature(11) +# +# @test H_w isa TensorOperator{T,1} where T +# +# @test domain_size(H_w, (3,)) == (3,) +# @test domain_size(H_n, (3,)) == (3,) +# +# @test range_size(H_w, (3,)) == (3,) +# @test range_size(H_n, (3,)) == (3,) +# +# @test size(H_w*v_w) == (11,) +# @test size(H_e*v_e) == (11,) +# @test size(H_s*v_s) == (10,) +# @test size(H_n*v_n) == (10,) +# +# @test collect(H_w*v_w) ≈ q_y.*v_w +# @test collect(H_e*v_e) ≈ q_y.*v_e +# @test collect(H_s*v_s) ≈ q_x.*v_s +# @test collect(H_n*v_n) ≈ q_x.*v_n +# +# @test collect(H_w'*v_w) == collect(H_w'*v_w) +# @test collect(H_e'*v_e) == collect(H_e'*v_e) +# @test collect(H_s'*v_s) == collect(H_s'*v_s) +# @test collect(H_n'*v_n) == collect(H_n'*v_n) +# end
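The new tests above check exact cases point-wise against a small tolerance and measure non-exact cases in a spacing-weighted discrete l2 norm, `sqrt(h*sum(e.^2))`. A small sketch of that error measure applied to a synthetic error vector (not one produced by an operator):

# Discrete l2 norm weighted by the grid spacing, as used in the test tolerances above.
discrete_l2(e::AbstractVector, h::Float64) = sqrt(h * sum(abs2, e))

h = 0.01
x = range(0.0, 1.0; step=h)
# A 2nd-order-accurate boundary closure gives errors on the order of h^2; this vector only
# illustrates how the norm is evaluated against the test tolerance of 0.5e-3.
e = fill(h^2, length(x))
@assert discrete_l2(e, h) <= 0.5e-3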
--- a/TODO.md	Wed Sep 09 21:06:59 2020 +0200
+++ b/TODO.md	Wed Sep 09 22:00:13 2020 +0200
@@ -10,8 +10,10 @@
  - [ ] Create a struct that bundles the necessary Tensor operators for solving the wave equation.
  - [ ] Use traits like IndexStyle, IndexLinear, IndexCartesian to differentiate TensorMappings that are flexible in size and those that are fixed in size
+ - [ ] Use traits for symmetric tensor mappings such that apply_transpoe = apply for all such mappings
  - [x] Move Laplace tensor operator to different package
  - [x] Remove grid as a property of the Laplace tensor operator
+ - [ ] Update how dependencies are handled for tests. This was updated in Julia v1.2 and would allow us to use test specific dev packages.
 
 ## Reasearch and thinking
  - [ ] Redo all Tensor applys to take Vararg instead of tuple of Index?
@@ -22,7 +24,7 @@
  - [x] Should there be some kind of collection struct for SBP operators (as TensorOperators), providing easy access to all parts (D2, e, d , -> YES! H.. H_gamma etc.)
  - [x] Is "missing" a good value for unknown dimension sizes (of `e*g` for example)
-
- - [] Add traits for symmetric tensor mappings such that apply_transpoe = apply for all such mappings
+ - [ ] Create a macro @lazy which replaces a binary op (+,-) by its lazy equivalent? Would be a neat way to indicate which evaluations are lazy without cluttering/confusing with special characters.
 
 # Wrap up task
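The TODO item about traits for symmetric tensor mappings (so that `apply_transpose` falls back to `apply`) could be realized with the usual Julia trait pattern. A hypothetical sketch; none of these names exist in the package:

# Hypothetical trait design, not part of sbplib_julia.
abstract type Symmetry end
struct IsSymmetric  <: Symmetry end
struct NotSymmetric <: Symmetry end

abstract type MappingSketch end
symmetry(::MappingSketch) = NotSymmetric()   # default: assume nothing

apply_sketch(m::MappingSketch, v, i) = error("apply_sketch not implemented for $(typeof(m))")

# Transpose application dispatches on the trait: symmetric mappings reuse apply_sketch.
apply_transpose_sketch(m::MappingSketch, v, i) = apply_transpose_sketch(symmetry(m), m, v, i)
apply_transpose_sketch(::IsSymmetric,  m, v, i) = apply_sketch(m, v, i)
apply_transpose_sketch(::NotSymmetric, m, v, i) = error("define apply_transpose_sketch for $(typeof(m))")

struct DiagonalScaling <: MappingSketch
    w::Vector{Float64}
end
symmetry(::DiagonalScaling) = IsSymmetric()
apply_sketch(m::DiagonalScaling, v, i) = m.w[i] * v[i]

m = DiagonalScaling([1.0, 2.0, 3.0])
v = [10.0, 10.0, 10.0]
@assert apply_transpose_sketch(m, v, 2) == apply_sketch(m, v, 2) == 20.0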