changeset 557:3c18a15934a7 feature/quadrature_as_outer_product

Merge in default
author Vidar Stiernström <vidar.stiernstrom@it.uu.se>
date Sun, 29 Nov 2020 21:52:44 +0100
parents dab9df9c4d66 37a81dad36b9
children 9b5710ae6587
files src/SbpOperators/quadrature/diagonal_quadrature.jl src/SbpOperators/quadrature/inverse_diagonal_quadrature.jl
diffstat 9 files changed, 80 insertions(+), 99 deletions(-)
line diff
--- a/TODO.md	Sun Nov 29 21:16:55 2020 +0100
+++ b/TODO.md	Sun Nov 29 21:52:44 2020 +0100
@@ -10,10 +10,10 @@
  - [ ] Create a struct that bundles the necessary Tensor operators for solving the wave equation.
  - [ ] Add a quick and simple way of running all tests for all subpackages.
 - [ ] Replace getindex hack for flattening tuples with flatten_tuple.
- - [ ] Fix indexing signatures. We should make sure we are not too specific. For the "inbetween" layers we don't know what type of index is coming so we should use `I...` instead of `I::Vararg{Int,R}` or probably better `I::Vararg{Any,R}`
  - [ ] Use `@inferred` in a lot of tests.
  - [ ] Make sure we are setting tolerances in tests in a consistent way
  - [ ] Add check for correct domain sizes to lazy tensor operations using SizeMismatch
+ - [ ] Write down a coding guideline or checklist for code conventions. For example i,j,... for indices and I for a multi-index (see the sketch after this diff)
 
 ## Repo
  - [ ] Add Vidar to the authors list
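
The new TODO item above refers to the convention already used informally in the code: lower-case i, j, ... for scalar indices and upper-case I for a multi-index. A minimal sketch of that convention in Julia (column_sum and value_at are hypothetical names, not part of the repository):

    # i, j: scalar indices; I: multi-index (splatted Vararg), per the convention above.
    function column_sum(A::AbstractMatrix, j)
        s = zero(eltype(A))
        for i in axes(A, 1)
            s += A[i, j]
        end
        return s
    end

    value_at(A::AbstractArray, I...) = A[I...]
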
--- a/src/LazyTensors/lazy_tensor_operations.jl	Sun Nov 29 21:16:55 2020 +0100
+++ b/src/LazyTensors/lazy_tensor_operations.jl	Sun Nov 29 21:52:44 2020 +0100
@@ -14,10 +14,7 @@
 # TODO: Do boundschecking on creation!
 export LazyTensorMappingApplication
 
-# TODO: Go through and remove unneccerary type parameters on functions
-
-Base.getindex(ta::LazyTensorMappingApplication{T,R,D}, I::Vararg{Index,R}) where {T,R,D} = apply(ta.t, ta.o, I...)
-Base.getindex(ta::LazyTensorMappingApplication{T,R,D}, I::Vararg{Int,R}) where {T,R,D} = apply(ta.t, ta.o, Index{Unknown}.(I)...)
+Base.getindex(ta::LazyTensorMappingApplication{T,R}, I::Vararg{Any,R}) where {T,R} = apply(ta.t, ta.o, I...)
 Base.size(ta::LazyTensorMappingApplication) = range_size(ta.t)
 # TODO: What else is needed to implement the AbstractArray interface?
 
@@ -50,8 +47,8 @@
 Base.adjoint(tm::TensorMapping) = LazyTensorMappingTranspose(tm)
 Base.adjoint(tmt::LazyTensorMappingTranspose) = tmt.tm
 
-apply(tmt::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{T,R}, I::Vararg{Index,D}) where {T,R,D} = apply_transpose(tmt.tm, v, I...)
-apply_transpose(tmt::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{T,D}, I::Vararg{Index,R}) where {T,R,D} = apply(tmt.tm, v, I...)
+apply(tmt::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{T,R}, I::Vararg{Any,D}) where {T,R,D} = apply_transpose(tmt.tm, v, I...)
+apply_transpose(tmt::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{T,D}, I::Vararg{Any,R}) where {T,R,D} = apply(tmt.tm, v, I...)
 
 range_size(tmt::LazyTensorMappingTranspose) = domain_size(tmt.tm)
 domain_size(tmt::LazyTensorMappingTranspose) = range_size(tmt.tm)
@@ -67,11 +64,11 @@
 end
 # TODO: Boundschecking in constructor.
 
-apply(tmBinOp::LazyTensorMappingBinaryOperation{:+,T,R,D}, v::AbstractArray{T,D}, I::Vararg{Index,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) + apply(tmBinOp.tm2, v, I...)
-apply(tmBinOp::LazyTensorMappingBinaryOperation{:-,T,R,D}, v::AbstractArray{T,D}, I::Vararg{Index,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) - apply(tmBinOp.tm2, v, I...)
+apply(tmBinOp::LazyTensorMappingBinaryOperation{:+,T,R,D}, v::AbstractArray{T,D}, I::Vararg{Any,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) + apply(tmBinOp.tm2, v, I...)
+apply(tmBinOp::LazyTensorMappingBinaryOperation{:-,T,R,D}, v::AbstractArray{T,D}, I::Vararg{Any,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) - apply(tmBinOp.tm2, v, I...)
 
-range_size(tmBinOp::LazyTensorMappingBinaryOperation{Op,T,R,D}) where {Op,T,R,D} = range_size(tmBinOp.tm1)
-domain_size(tmBinOp::LazyTensorMappingBinaryOperation{Op,T,R,D}) where {Op,T,R,D} = domain_size(tmBinOp.tm1)
+range_size(tmBinOp::LazyTensorMappingBinaryOperation) = range_size(tmBinOp.tm1)
+domain_size(tmBinOp::LazyTensorMappingBinaryOperation) = domain_size(tmBinOp.tm1)
 
 Base.:+(tm1::TensorMapping{T,R,D}, tm2::TensorMapping{T,R,D}) where {T,R,D} = LazyTensorMappingBinaryOperation{:+,T,R,D}(tm1,tm2)
 Base.:-(tm1::TensorMapping{T,R,D}, tm2::TensorMapping{T,R,D}) where {T,R,D} = LazyTensorMappingBinaryOperation{:-,T,R,D}(tm1,tm2)
@@ -95,11 +92,11 @@
 range_size(tm::TensorMappingComposition) = range_size(tm.t1)
 domain_size(tm::TensorMappingComposition) = domain_size(tm.t2)
 
-function apply(c::TensorMappingComposition{T,R,K,D}, v::AbstractArray{T,D}, I::Vararg{S,R} where S) where {T,R,K,D}
+function apply(c::TensorMappingComposition{T,R,K,D}, v::AbstractArray{T,D}, I::Vararg{Any,R}) where {T,R,K,D}
     apply(c.t1, c.t2*v, I...)
 end
 
-function apply_transpose(c::TensorMappingComposition{T,R,K,D}, v::AbstractArray{T,R}, I::Vararg{S,D} where S) where {T,R,K,D}
+function apply_transpose(c::TensorMappingComposition{T,R,K,D}, v::AbstractArray{T,R}, I::Vararg{Any,D}) where {T,R,K,D}
     apply_transpose(c.t2, c.t1'*v, I...)
 end
 
@@ -132,7 +129,7 @@
 range_size(llm::LazyLinearMap) = size(llm.A)[[llm.range_indicies...]]
 domain_size(llm::LazyLinearMap) = size(llm.A)[[llm.domain_indicies...]]
 
-function apply(llm::LazyLinearMap{T,R,D}, v::AbstractArray{T,D}, I::Vararg{Index,R}) where {T,R,D}
+function apply(llm::LazyLinearMap{T,R,D}, v::AbstractArray{T,D}, I::Vararg{Any,R}) where {T,R,D}
     view_index = ntuple(i->:,ndims(llm.A))
     for i ∈ 1:R
         view_index = Base.setindex(view_index, Int(I[i]), llm.range_indicies[i])
@@ -141,7 +138,7 @@
     return sum(A_view.*v)
 end
 
-function apply_transpose(llm::LazyLinearMap{T,R,D}, v::AbstractArray{T,R}, I::Vararg{Index,D}) where {T,R,D}
+function apply_transpose(llm::LazyLinearMap{T,R,D}, v::AbstractArray{T,R}, I::Vararg{Any,D}) where {T,R,D}
     apply(LazyLinearMap(llm.A, llm.domain_indicies, llm.range_indicies), v, I...)
 end
 
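
The net effect of the LazyTensors changes above is that index arguments are no longer constrained to `Index`: any index type flows through `getindex` and the intermediate `apply` layers via `Vararg{Any,R}`. A minimal sketch, assuming the LazyTensors module is in scope (`Scale` is a hypothetical operator modelled on the `ScalingOperator` used in the tests further down):

    # Hypothetical scaling operator; only the Vararg{Any,D} apply signature matters here.
    struct Scale{T,D} <: TensorMapping{T,D,D}
        λ::T
        size::NTuple{D,Int}
    end

    LazyTensors.apply(m::Scale{T,D}, v, I::Vararg{Any,D}) where {T,D} = m.λ*v[I...]
    LazyTensors.range_size(m::Scale) = m.size
    LazyTensors.domain_size(m::Scale) = m.size

    m = Scale(2.0, (3,))
    v = [1.0, 2.0, 3.0]
    (m*v)[2]    # plain Int index is forwarded unchanged to apply; == 4.0
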
--- a/src/RegionIndices/RegionIndices.jl	Sun Nov 29 21:16:55 2020 +0100
+++ b/src/RegionIndices/RegionIndices.jl	Sun Nov 29 21:52:44 2020 +0100
@@ -4,9 +4,8 @@
 struct Interior <: Region end
 struct Lower    <: Region end
 struct Upper    <: Region end
-struct Unknown  <: Region end
 
-export Region, Interior, Lower, Upper, Unknown
+export Region, Interior, Lower, Upper
 
 struct Index{R<:Region, T<:Integer}
     i::T
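
With `Unknown` removed, a plain integer index takes over its role: the operators classify such an index at apply time with `getregion` and wrap it in a region-typed `Index` before re-dispatching. A minimal sketch of that pattern, assuming RegionIndices (and `getregion`) are in scope; the sizes below are made up for illustration:

    # closure_size and N are arbitrary example values, not taken from any operator.
    closure_size = 2
    N = 10                              # grid size
    i = 9                               # plain Int index supplied by a caller
    r = getregion(i, closure_size, N)   # presumably Upper, since i lies in the last closure points
    I = Index(i, r)                     # region-typed index; dispatch now picks the Upper method
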
--- a/src/SbpOperators/constantstenciloperator.jl	Sun Nov 29 21:16:55 2020 +0100
+++ b/src/SbpOperators/constantstenciloperator.jl	Sun Nov 29 21:52:44 2020 +0100
@@ -14,7 +14,7 @@
     return @inbounds h_inv*h_inv*Int(op.parity)*apply_stencil_backwards(op.closureStencils[N-Int(i)+1], v, Int(i))
 end
 
-@inline function apply_2nd_derivative(op::ConstantStencilOperator, h_inv::Real, v::AbstractVector, index::Index{Unknown})
+@inline function apply_2nd_derivative(op::ConstantStencilOperator, h_inv::Real, v::AbstractVector, i)
     N = length(v)
-    r = getregion(Int(index), closuresize(op), N)
-    i = Index(Int(index), r)
+    r = getregion(i, closuresize(op), N)
+    i = Index(i, r)
@@ -26,9 +26,9 @@
 apply_quadrature(op::ConstantStencilOperator, h::Real, v::T, i::Index{Upper}, N::Integer) where T = v*h*op.quadratureClosure[N-Int(i)+1]
 apply_quadrature(op::ConstantStencilOperator, h::Real, v::T, i::Index{Interior}, N::Integer) where T = v*h
 
-function apply_quadrature(op::ConstantStencilOperator, h::Real, v::T, index::Index{Unknown}, N::Integer) where T
-    r = getregion(Int(index), closuresize(op), N)
-    i = Index(Int(index), r)
+function apply_quadrature(op::ConstantStencilOperator, h::Real, v::T, i, N::Integer) where T
+    r = getregion(i, closuresize(op), N)
+    i = Index(i, r)
     return apply_quadrature(op, h, v, i, N)
 end
 export apply_quadrature
@@ -38,9 +38,9 @@
 apply_inverse_quadrature(op::ConstantStencilOperator, h_inv::Real, v::T, i::Index{Upper}, N::Integer) where T = h_inv*v/op.quadratureClosure[N-Int(i)+1]
 apply_inverse_quadrature(op::ConstantStencilOperator, h_inv::Real, v::T, i::Index{Interior}, N::Integer) where T = v*h_inv
 
-function apply_inverse_quadrature(op::ConstantStencilOperator, h_inv::Real, v::T, index::Index{Unknown}, N::Integer) where T
-    r = getregion(Int(index), closuresize(op), N)
-    i = Index(Int(index), r)
+function apply_inverse_quadrature(op::ConstantStencilOperator, h_inv::Real, v::T, i, N::Integer) where T
+    r = getregion(i, closuresize(op), N)
+    i = Index(i, r)
     return apply_inverse_quadrature(op, h_inv, v, i, N)
 end
 
@@ -62,14 +62,14 @@
 
 export apply_normal_derivative_transpose
 
-function apply_normal_derivative(op::ConstantStencilOperator, h_inv::Real, v::Number, i::Index, N::Integer, ::Type{Lower})
+function apply_normal_derivative(op::ConstantStencilOperator, h_inv::Real, v::Number, i, N::Integer, ::Type{Lower})
     @boundscheck if !(0 < Int(i) <= N)
         throw(BoundsError())
     end
     h_inv*op.dClosure[Int(i)-1]*v
 end
 
-function apply_normal_derivative(op::ConstantStencilOperator, h_inv::Real, v::Number, i::Index, N::Integer, ::Type{Upper})
+function apply_normal_derivative(op::ConstantStencilOperator, h_inv::Real, v::Number, i, N::Integer, ::Type{Upper})
     @boundscheck if !(0 < Int(i) <= N)
         throw(BoundsError())
     end
--- a/src/SbpOperators/laplace/laplace.jl	Sun Nov 29 21:16:55 2020 +0100
+++ b/src/SbpOperators/laplace/laplace.jl	Sun Nov 29 21:52:44 2020 +0100
@@ -23,24 +23,24 @@
 LazyTensors.range_size(L::Laplace) = getindex.(range_size.(L.D2),1)
 LazyTensors.domain_size(L::Laplace) = getindex.(domain_size.(L.D2),1)
 
-function LazyTensors.apply(L::Laplace{Dim,T}, v::AbstractArray{T,Dim}, I::Vararg{Index,Dim}) where {T,Dim}
+function LazyTensors.apply(L::Laplace{Dim,T}, v::AbstractArray{T,Dim}, I::Vararg{Any,Dim}) where {T,Dim}
     error("not implemented")
 end
 
 # u = L*v
-function LazyTensors.apply(L::Laplace{1,T}, v::AbstractVector{T}, I::Index) where T
-    @inbounds u = LazyTensors.apply(L.D2[1],v,I)
+function LazyTensors.apply(L::Laplace{1,T}, v::AbstractVector{T}, i) where T
+    @inbounds u = LazyTensors.apply(L.D2[1],v,i)
     return u
 end
 
-function LazyTensors.apply(L::Laplace{2,T}, v::AbstractArray{T,2}, I::Index, J::Index) where T
+function LazyTensors.apply(L::Laplace{2,T}, v::AbstractArray{T,2}, i, j) where T
     # 2nd x-derivative
-    @inbounds vx = view(v, :, Int(J))
-    @inbounds uᵢ = LazyTensors.apply(L.D2[1], vx , I)
+    @inbounds vx = view(v, :, Int(j))
+    @inbounds uᵢ = LazyTensors.apply(L.D2[1], vx , i)
 
     # 2nd y-derivative
-    @inbounds vy = view(v, Int(I), :)
-    @inbounds uᵢ += LazyTensors.apply(L.D2[2], vy , J)
+    @inbounds vy = view(v, Int(i), :)
+    @inbounds uᵢ += LazyTensors.apply(L.D2[2], vy , j)
 
     return uᵢ
 end
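
The 2D `apply` above is dimension splitting: the Laplacian at (i, j) is the 1D second derivative along x of the column `v[:, j]` plus the 1D second derivative along y of the row `v[i, :]`. A standalone sketch of that structure with an ordinary central difference (`d2` and `laplacian_at` are illustrative only, not the repository's `SecondDerivative`):

    # Plain second-order central difference; valid at interior points only,
    # so this sketch ignores the SBP boundary closures.
    d2(v, i, h) = (v[i-1] - 2v[i] + v[i+1]) / h^2

    function laplacian_at(v::AbstractMatrix, i, j, h)
        vx = view(v, :, j)    # like `view(v, :, Int(j))` in the method above
        vy = view(v, i, :)    # like `view(v, Int(i), :)`
        return d2(vx, i, h) + d2(vy, j, h)
    end

    h = 1/9
    v = [sin(x) + cos(y) for x in 0:h:1, y in 0:h:1]
    laplacian_at(v, 5, 5, h)    # ≈ -(sin(x) + cos(y)) at that interior grid point
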
--- a/src/SbpOperators/laplace/secondderivative.jl	Sun Nov 29 21:16:55 2020 +0100
+++ b/src/SbpOperators/laplace/secondderivative.jl	Sun Nov 29 21:52:44 2020 +0100
@@ -20,29 +20,24 @@
 LazyTensors.range_size(D2::SecondDerivative) = D2.size
 LazyTensors.domain_size(D2::SecondDerivative) = D2.size
 
-#TODO: The 1D tensor mappings should not have to dispatch on 1D tuples if we write LazyTensor.apply for vararg right?!?!
-#      Currently have to index the Tuple{Index} in each method in order to call the stencil methods which is ugly.
-#      I thought I::Vararg{Index,R} fell back to just Index for R = 1
+# Apply for region-typed indices (Lower/Interior/Upper) or a plain integer index whose region is resolved at apply time
+function LazyTensors.apply(D2::SecondDerivative{T}, v::AbstractVector{T}, i::Index{Lower}) where T
+    return @inbounds D2.h_inv*D2.h_inv*apply_stencil(D2.closureStencils[Int(i)], v, Int(i))
+end
 
-# Apply for different regions Lower/Interior/Upper or Unknown region
-function LazyTensors.apply(D2::SecondDerivative{T}, v::AbstractVector{T}, I::Index{Lower}) where T
-    return @inbounds D2.h_inv*D2.h_inv*apply_stencil(D2.closureStencils[Int(I)], v, Int(I))
+function LazyTensors.apply(D2::SecondDerivative{T}, v::AbstractVector{T}, i::Index{Interior}) where T
+    return @inbounds D2.h_inv*D2.h_inv*apply_stencil(D2.innerStencil, v, Int(i))
 end
 
-function LazyTensors.apply(D2::SecondDerivative{T}, v::AbstractVector{T}, I::Index{Interior}) where T
-    return @inbounds D2.h_inv*D2.h_inv*apply_stencil(D2.innerStencil, v, Int(I))
+function LazyTensors.apply(D2::SecondDerivative{T}, v::AbstractVector{T}, i::Index{Upper}) where T
+    N = length(v) # TODO: Use domain_size here instead? N = domain_size(D2,size(v))
+    return @inbounds D2.h_inv*D2.h_inv*apply_stencil_backwards(D2.closureStencils[N-Int(i)+1], v, Int(i))
 end
 
-function LazyTensors.apply(D2::SecondDerivative{T}, v::AbstractVector{T}, I::Index{Upper}) where T
-    N = length(v) # TODO: Use domain_size here instead? N = domain_size(D2,size(v))
-    return @inbounds D2.h_inv*D2.h_inv*apply_stencil_backwards(D2.closureStencils[N-Int(I)+1], v, Int(I))
-end
-
-function LazyTensors.apply(D2::SecondDerivative{T}, v::AbstractVector{T}, index::Index{Unknown}) where T
+function LazyTensors.apply(D2::SecondDerivative{T}, v::AbstractVector{T}, i) where T
     N = length(v)  # TODO: Use domain_size here instead?
-    r = getregion(Int(index), closuresize(D2), N)
-    I = Index(Int(index), r)
-    return LazyTensors.apply(D2, v, I)
+    r = getregion(i, closuresize(D2), N)
+    return LazyTensors.apply(D2, v, Index(i, r))
 end
 
 closuresize(D2::SecondDerivative{T,N,M,K}) where {T<:Real,N,M,K} = M
--- a/src/SbpOperators/quadrature/diagonal_quadrature.jl	Sun Nov 29 21:16:55 2020 +0100
+++ b/src/SbpOperators/quadrature/diagonal_quadrature.jl	Sun Nov 29 21:52:44 2020 +0100
@@ -44,39 +44,38 @@
 LazyTensors.domain_size(H::DiagonalQuadrature) = H.size
 
 """
-    apply(H::DiagonalQuadrature{T}, v::AbstractVector{T}, I::Index) where T
+    apply(H::DiagonalQuadrature{T}, v::AbstractVector{T}, i) where T
 Implements the application `(H*v)[i]` for an `Index{R}` where `R` is one of the regions
-`Lower`,`Interior`,`Upper`,`Unknown`.
+`Lower`,`Interior`,`Upper`, or for a plain integer index whose region is resolved at apply time.
 """
-function LazyTensors.apply(H::DiagonalQuadrature{T}, v::AbstractVector{T}, I::Index{Lower}) where T
-    return @inbounds H.h*H.closure[Int(I)]*v[Int(I)]
+function LazyTensors.apply(H::DiagonalQuadrature{T}, v::AbstractVector{T}, i::Index{Lower}) where T
+    return @inbounds H.h*H.closure[Int(i)]*v[Int(i)]
 end
 
-function LazyTensors.apply(H::DiagonalQuadrature{T},v::AbstractVector{T}, I::Index{Upper}) where T
+function LazyTensors.apply(H::DiagonalQuadrature{T},v::AbstractVector{T}, i::Index{Upper}) where T
     N = length(v);
-    return @inbounds H.h*H.closure[N-Int(I)+1]*v[Int(I)]
+    return @inbounds H.h*H.closure[N-Int(i)+1]*v[Int(i)]
 end
 
-function LazyTensors.apply(H::DiagonalQuadrature{T}, v::AbstractVector{T}, I::Index{Interior}) where T
-    return @inbounds H.h*v[Int(I)]
+function LazyTensors.apply(H::DiagonalQuadrature{T}, v::AbstractVector{T}, i::Index{Interior}) where T
+    return @inbounds H.h*v[Int(i)]
 end
 
-function LazyTensors.apply(H::DiagonalQuadrature{T},  v::AbstractVector{T}, I::Index{Unknown}) where T
+function LazyTensors.apply(H::DiagonalQuadrature{T},  v::AbstractVector{T}, i) where T
     N = length(v);
-    r = getregion(Int(I), closure_size(H), N)
-    i = Index(Int(I), r)
-    return LazyTensors.apply(H, v, i)
+    r = getregion(i, closure_size(H), N)
+
+    return LazyTensors.apply(H, v, Index(i, r))
 end
 
 """
     apply_transpose(H::DiagonalQuadrature{T}, v::AbstractVector{T}, i) where T
 Implements the application `(H'*v)[i]`. The operator is self-adjoint.
 """
-LazyTensors.apply_transpose(H::DiagonalQuadrature, v::AbstractVector, I) = LazyTensors.apply(H,v,I)
+LazyTensors.apply_transpose(H::DiagonalQuadrature{T}, v::AbstractVector{T}, i) where T = LazyTensors.apply(H,v,i)
 
 """
     closure_size(H)
 Returns the size of the closure stencil of a DiagonalQuadrature `H`.
 """
 closure_size(H::DiagonalQuadrature{T,M}) where {T,M} = M
-export closure_size
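
A short usage sketch of the relaxed index argument for the quadrature (assuming an existing `DiagonalQuadrature` instance `H` and a grid function `v` of matching size; neither is constructed in this changeset):

    (H*v)[1]                                       # plain Int: getregion resolves the region (here the lower closure)
    LazyTensors.apply(H, v, Index{Interior}(5))    # region already known: direct dispatch, no getregion call
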
--- a/src/SbpOperators/quadrature/inverse_diagonal_quadrature.jl	Sun Nov 29 21:16:55 2020 +0100
+++ b/src/SbpOperators/quadrature/inverse_diagonal_quadrature.jl	Sun Nov 29 21:52:44 2020 +0100
@@ -51,18 +51,16 @@
     return @inbounds Hi.h_inv*v[Int(I)]
 end
 
-function LazyTensors.apply(Hi::InverseDiagonalQuadrature,  v::AbstractVector{T}, index::Index{Unknown}) where T
+function LazyTensors.apply(Hi::InverseDiagonalQuadrature{T},  v::AbstractVector{T}, i) where T
     N = length(v);
-    r = getregion(Int(index), closure_size(Hi), N)
-    i = Index(Int(index), r)
-    return LazyTensors.apply(Hi, v, i)
+    r = getregion(i, closure_size(Hi), N)
+    return LazyTensors.apply(Hi, v, Index(i, r))
 end
 
-LazyTensors.apply_transpose(Hi::InverseDiagonalQuadrature{T}, v::AbstractVector{T}, I::Index) where T = LazyTensors.apply(Hi,v,I)
+LazyTensors.apply_transpose(Hi::InverseDiagonalQuadrature{T}, v::AbstractVector{T}, i) where T = LazyTensors.apply(Hi,v,i)
 
 """
     closure_size(Hi)
 Returns the size of the closure stencil of an InverseDiagonalQuadrature `Hi`.
 """
 closure_size(Hi::InverseDiagonalQuadrature{T,M}) where {T,M} =  M
-export closure_size
--- a/test/testLazyTensors.jl	Sun Nov 29 21:16:55 2020 +0100
+++ b/test/testLazyTensors.jl	Sun Nov 29 21:52:44 2020 +0100
@@ -8,10 +8,10 @@
 
 @testset "Generic Mapping methods" begin
     struct DummyMapping{T,R,D} <: TensorMapping{T,R,D} end
-    LazyTensors.apply(m::DummyMapping{T,R,D}, v, i::NTuple{R,Index{<:Region}}) where {T,R,D} = :apply
+    LazyTensors.apply(m::DummyMapping{T,R,D}, v, I::Vararg{Any,R}) where {T,R,D} = :apply
     @test range_dim(DummyMapping{Int,2,3}()) == 2
     @test domain_dim(DummyMapping{Int,2,3}()) == 3
-    @test apply(DummyMapping{Int,2,3}(), zeros(Int, (0,0,0)),(Index{Unknown}(0),Index{Unknown}(0))) == :apply
+    @test apply(DummyMapping{Int,2,3}(), zeros(Int, (0,0,0)),0,0) == :apply
     @test eltype(DummyMapping{Int,2,3}()) == Int
     @test eltype(DummyMapping{Float64,2,3}()) == Float64
 end
@@ -19,19 +19,18 @@
 @testset "Mapping transpose" begin
     struct DummyMapping{T,R,D} <: TensorMapping{T,R,D} end
 
-    LazyTensors.apply(m::DummyMapping{T,R,D}, v, I::Vararg{Index{<:Region},R}) where {T,R,D} = :apply
-    LazyTensors.apply_transpose(m::DummyMapping{T,R,D}, v, I::Vararg{Index{<:Region},D}) where {T,R,D} = :apply_transpose
+    LazyTensors.apply(m::DummyMapping{T,R}, v, I::Vararg{Any,R}) where {T,R} = :apply
+    LazyTensors.apply_transpose(m::DummyMapping{T,R,D}, v, I::Vararg{Any,D}) where {T,R,D} = :apply_transpose
 
-    LazyTensors.range_size(m::DummyMapping{T,R,D}) where {T,R,D} = :range_size
-    LazyTensors.domain_size(m::DummyMapping{T,R,D}) where {T,R,D} = :domain_size
+    LazyTensors.range_size(m::DummyMapping) = :range_size
+    LazyTensors.domain_size(m::DummyMapping) = :domain_size
 
     m = DummyMapping{Float64,2,3}()
-    I = Index{Unknown}(0)
     @test m' isa TensorMapping{Float64, 3,2}
     @test m'' == m
-    @test apply(m',zeros(Float64,(0,0)), I, I, I) == :apply_transpose
-    @test apply(m'',zeros(Float64,(0,0,0)), I, I) == :apply
-    @test apply_transpose(m', zeros(Float64,(0,0,0)), I, I) == :apply
+    @test apply(m',zeros(Float64,(0,0)), 0, 0, 0) == :apply_transpose
+    @test apply(m'',zeros(Float64,(0,0,0)), 0, 0) == :apply
+    @test apply_transpose(m', zeros(Float64,(0,0,0)), 0, 0) == :apply
 
     @test range_size(m') == :domain_size
     @test domain_size(m') == :range_size
@@ -42,7 +41,7 @@
         domain_size::NTuple{D,Int}
     end
 
-    LazyTensors.apply(m::SizeDoublingMapping{T,R,D}, v, i::Vararg{Index{<:Region},R}) where {T,R,D} = (:apply,v,i)
+    LazyTensors.apply(m::SizeDoublingMapping{T,R}, v, i::Vararg{Any,R}) where {T,R} = (:apply,v,i)
     LazyTensors.range_size(m::SizeDoublingMapping) = 2 .* m.domain_size
     LazyTensors.domain_size(m::SizeDoublingMapping) = m.domain_size
 
@@ -51,15 +50,11 @@
     v = [0,1,2]
     @test m*v isa AbstractVector{Int}
     @test size(m*v) == 2 .*size(v)
-    @test (m*v)[Index{Upper}(0)] == (:apply,v,(Index{Upper}(0),))
-    @test (m*v)[0] == (:apply,v,(Index{Unknown}(0),))
+    @test (m*v)[0] == (:apply,v,(0,))
     @test m*m*v isa AbstractVector{Int}
-    @test (m*m*v)[Index{Upper}(1)] == (:apply,m*v,(Index{Upper}(1),))
-    @test (m*m*v)[1] == (:apply,m*v,(Index{Unknown}(1),))
-    @test (m*m*v)[Index{Interior}(3)] == (:apply,m*v,(Index{Interior}(3),))
-    @test (m*m*v)[3] == (:apply,m*v,(Index{Unknown}(3),))
-    @test (m*m*v)[Index{Lower}(6)] == (:apply,m*v,(Index{Lower}(6),))
-    @test (m*m*v)[6] == (:apply,m*v,(Index{Unknown}(6),))
+    @test (m*m*v)[1] == (:apply,m*v,(1,))
+    @test (m*m*v)[3] == (:apply,m*v,(3,))
+    @test (m*m*v)[6] == (:apply,m*v,(6,))
     @test_broken BoundsError == (m*m*v)[0]
     @test_broken BoundsError == (m*m*v)[7]
     @test_throws MethodError m*m
@@ -70,16 +65,15 @@
 
     m = SizeDoublingMapping{Float64, 2, 2}((3,3))
     v = ones(3,3)
-    I = (Index{Lower}(1),Index{Interior}(2));
     @test size(m*v) == 2 .*size(v)
-    @test (m*v)[I] == (:apply,v,I)
+    @test (m*v)[1,2] == (:apply,v,(1,2))
 
     struct ScalingOperator{T,D} <: TensorMapping{T,D,D}
         λ::T
         size::NTuple{D,Int}
     end
 
-    LazyTensors.apply(m::ScalingOperator{T,D}, v, I::Vararg{Index,D}) where {T,D} = m.λ*v[I]
+    LazyTensors.apply(m::ScalingOperator{T,D}, v, I::Vararg{Any,D}) where {T,D} = m.λ*v[I...]
     LazyTensors.range_size(m::ScalingOperator) = m.size
     LazyTensors.domain_size(m::ScalingOperator) = m.size
 
@@ -91,8 +85,7 @@
     m = ScalingOperator{Int,2}(2,(2,2))
     v = [[1 2];[3 4]]
     @test m*v == [[2 4];[6 8]]
-    I = (Index{Upper}(2),Index{Lower}(1))
-    @test (m*v)[I] == 6
+    @test (m*v)[2,1] == 6
 end
 
 @testset "TensorMapping binary operations" begin
@@ -102,7 +95,7 @@
         domain_size::NTuple{D,Int}
     end
 
-    LazyTensors.apply(m::ScalarMapping{T,R,D}, v, I::Vararg{Index{<:Region}}) where {T,R,D} = m.λ*v[I...]
+    LazyTensors.apply(m::ScalarMapping{T,R}, v, I::Vararg{Any,R}) where {T,R} = m.λ*v[I...]
     LazyTensors.range_size(m::ScalarMapping) = m.domain_size
     LazyTensors.domain_size(m::ScalarMapping) = m.range_size
 
@@ -438,14 +431,14 @@
                 size::NTuple{D,Int}
             end
 
-            LazyTensors.apply(m::ScalingOperator{T,D}, v, I::Vararg{Index,D}) where {T,D} = m.λ*v[I]
+            LazyTensors.apply(m::ScalingOperator{T,D}, v, I::Vararg{Any,D}) where {T,D} = m.λ*v[I...]
             LazyTensors.range_size(m::ScalingOperator) = m.size
             LazyTensors.domain_size(m::ScalingOperator) = m.size
 
             tm = InflatedTensorMapping(I(2,3),ScalingOperator(2.0, (3,2)),I(3,4))
             v = rand(domain_size(tm)...)
 
-            @inferred apply(tm,v,Index{Unknown}.((1,2,3,2,2,4))...)
+            @inferred apply(tm,v,1,2,3,2,2,4)
             @inferred (tm*v)[1,2,3,2,2,4]
         end
     end
@@ -523,7 +516,7 @@
         size::NTuple{D,Int}
     end
 
-    LazyTensors.apply(m::ScalingOperator{T,D}, v, I::Vararg{Index,D}) where {T,D} = m.λ*v[I]
+    LazyTensors.apply(m::ScalingOperator{T,D}, v, I::Vararg{Any,D}) where {T,D} = m.λ*v[I...]
     LazyTensors.range_size(m::ScalingOperator) = m.size
     LazyTensors.domain_size(m::ScalingOperator) = m.size