sbplib_julia: changeset 1207:f1c2a4fa0ee1 (branch: performance/get_region_type_inference)
Merge default
author   | Jonatan Werpers <jonatan@werpers.com>
date     | Fri, 03 Feb 2023 22:14:47 +0100
parents  | b41180efb6c2 (current diff) d13b73e9f65e (diff)
children | e679d4fab8ee
files    | src/LazyTensors/LazyTensors.jl, src/LazyTensors/lazy_tensor_operations.jl, src/SbpOperators/boundaryops/boundary_operator.jl, src/SbpOperators/volumeops/constant_interior_scaling_operator.jl, src/SbpOperators/volumeops/inference_trouble.txt, src/SbpOperators/volumeops/volume_operator.jl
diffstat | 58 files changed, 2729 insertions(+), 1877 deletions(-)
--- a/Manifest.toml Mon Feb 21 10:33:58 2022 +0100 +++ b/Manifest.toml Fri Feb 03 22:14:47 2023 +0100 @@ -1,25 +1,50 @@ # This file is machine-generated - editing it directly is not advised -julia_version = "1.7.0" +julia_version = "1.8.5" manifest_format = "2.0" +project_hash = "b024d6898b484706c36ee3b2a041918f3a9d2088" [[deps.Adapt]] deps = ["LinearAlgebra"] -git-tree-sha1 = "84918055d15b3114ede17ac6a7182f68870c16f7" +git-tree-sha1 = "0310e08cb19f5da31d08341c6120c047598f5b9c" uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" -version = "3.3.1" +version = "3.5.0" + +[[deps.ArrayInterface]] +deps = ["ArrayInterfaceCore", "Compat", "IfElse", "LinearAlgebra", "SnoopPrecompile", "Static"] +git-tree-sha1 = "dedc16cbdd1d32bead4617d27572f582216ccf23" +uuid = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9" +version = "6.0.25" + +[[deps.ArrayInterfaceCore]] +deps = ["LinearAlgebra", "SnoopPrecompile", "SparseArrays", "SuiteSparse"] +git-tree-sha1 = "e5f08b5689b1aad068e01751889f2f615c7db36d" +uuid = "30b0a656-2188-435a-8636-2ec0e6a096e2" +version = "0.1.29" [[deps.Artifacts]] uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" +[[deps.Compat]] +deps = ["Dates", "LinearAlgebra", "UUIDs"] +git-tree-sha1 = "61fdd77467a5c3ad071ef8277ac6bd6af7dd4c04" +uuid = "34da2185-b29b-5c13-b0c7-acf172513d20" +version = "4.6.0" + [[deps.CompilerSupportLibraries_jll]] deps = ["Artifacts", "Libdl"] uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae" +version = "1.0.1+0" [[deps.Dates]] deps = ["Printf"] uuid = "ade2ca70-3891-5945-98fb-dc099432e06a" +[[deps.IfElse]] +git-tree-sha1 = "debdd00ffef04665ccbb3e150747a77560e8fad1" +uuid = "615f187c-cbe4-4ef1-ba3b-2fcf58d6d173" +version = "0.1.1" + [[deps.Libdl]] uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb" @@ -29,27 +54,70 @@ [[deps.OffsetArrays]] deps = ["Adapt"] -git-tree-sha1 = "043017e0bdeff61cfbb7afeb558ab29536bbb5ed" +git-tree-sha1 = "82d7c9e310fe55aa54996e6f7f94674e2a38fcb4" uuid = "6fe1bfb0-de20-5000-8ca7-80f57d26f881" -version = "1.10.8" +version = "1.12.9" [[deps.OpenBLAS_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"] uuid = "4536629a-c528-5b80-bd46-f80d51c5b363" +version = "0.3.20+0" + +[[deps.Preferences]] +deps = ["TOML"] +git-tree-sha1 = "47e5f437cc0e7ef2ce8406ce1e7e24d44915f88d" +uuid = "21216c6a-2e73-6563-6e65-726566657250" +version = "1.3.0" [[deps.Printf]] deps = ["Unicode"] uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" +[[deps.Random]] +deps = ["SHA", "Serialization"] +uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" + +[[deps.SHA]] +uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce" +version = "0.7.0" + +[[deps.Serialization]] +uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b" + +[[deps.SnoopPrecompile]] +deps = ["Preferences"] +git-tree-sha1 = "e760a70afdcd461cf01a575947738d359234665c" +uuid = "66db9d55-30c0-4569-8b51-7e840670fc0c" +version = "1.0.3" + +[[deps.SparseArrays]] +deps = ["LinearAlgebra", "Random"] +uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" + +[[deps.Static]] +deps = ["IfElse"] +git-tree-sha1 = "c35b107b61e7f34fa3f124026f2a9be97dea9e1c" +uuid = "aedffcd0-7271-4cad-89d0-dc628f76c6d3" +version = "0.8.3" + +[[deps.SuiteSparse]] +deps = ["Libdl", "LinearAlgebra", "Serialization", "SparseArrays"] +uuid = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9" + [[deps.TOML]] deps = ["Dates"] uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76" +version = "1.0.0" [[deps.TiledIteration]] -deps = ["OffsetArrays"] -git-tree-sha1 = "5683455224ba92ef59db72d10690690f4a8dc297" +deps = ["ArrayInterface", "OffsetArrays"] +git-tree-sha1 = "1bf2bb587a7fc99fefac2ff076b18b500128e9c0" uuid = 
"06e1c1a7-607b-532d-9fad-de7d9aa2abac" -version = "0.3.1" +version = "0.4.2" + +[[deps.UUIDs]] +deps = ["Random", "SHA"] +uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4" [[deps.Unicode]] uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5" @@ -57,3 +125,4 @@ [[deps.libblastrampoline_jll]] deps = ["Artifacts", "Libdl", "OpenBLAS_jll"] uuid = "8e850b90-86db-534c-a0d3-1478176c7d93" +version = "5.1.1+0"
--- a/Notes.md Mon Feb 21 10:33:58 2022 +0100
+++ b/Notes.md Fri Feb 03 22:14:47 2023 +0100
@@ -132,27 +132,65 @@
   * When doing specialised computations for different parts of the range/domain?
   * More?
-  Maybe if we should have dynamic sizing it could be only for the range. `domain_size` would not be implemented. And the `range_size` would be a function of a vector that the TensorMapping is applied to.
+  Maybe if we should have dynamic sizing it could be only for the range. `domain_size` would not be implemented. And the `range_size` would be a function of a vector that the LazyTensor is applied to.
 
 ## Reasearch and thinking
- - [ ] Use a trait to indicate that a TensorMapping har the same range and domain?
- - [ ] Rename all the Tensor stuff to just LazyOperator, LazyApplication and so on?
+ - [ ] Use a trait to indicate that a LazyTensor has the same range and domain?
  - [ ] Check how the native julia doc generator works
    - [ ] Check if Vidars design docs fit in there
  - [ ] Create a macro @lazy which replaces a binary op (+,-) by its lazy equivalent? Would be a neat way to indicate which evaluations are lazy without cluttering/confusing with special characters.
- - [ ] Specificera operatorer i TOML eller något liknande?
-   H.. H_gamma etc.)
  - [ ] Dispatch on Lower() instead of the type Lower so `::Lower` instead of `::Type{Lower}` ??? Seems better unless there is some specific reason to use the type instead of the value.
  - [ ] How do we handle mixes of periodic and non-periodic grids? Seems it should be supported on the grid level and on the 1d operator level. Between there it should be transparent.
- - [ ] Can we have a trait to tell if a TensorMapping is transposable?
+ - [ ] Can we have a trait to tell if a LazyTensor is transposable?
  - [ ] Is it ok to have "Constructors" for abstract types which create subtypes? For example a Grids() functions that gives different kind of grids based on input?
+ - [ ] Figure out how to treat the borrowing parameters of operators. Include it in the struct? Expose via function dispatched on the operator type and grid?
+
+## Identifiers for regions
+The identifiers (`Upper`, `Lower`, `Interior`) used for region indices should probably be included in the grid module. This allows new grid types to come with their own regions.
 
 ## Regions and tensormappings
-- [ ] Use a trait to indicate if a TensorMapping uses indices with regions.
+- [ ] Use a trait to indicate if a LazyTensor uses indices with regions. The default should be that they do NOT.
 - [ ] What to name this trait? Can we call it IndexStyle but not export it to avoid conflicts with Base.IndexStyle?
- - [ ] Figure out repeated application of regioned TensorMappings. Maybe an instance of a tensor mapping needs to know the exact size of the range and domain for this to work?
+ - [ ] Figure out repeated application of regioned LazyTensors. Maybe an instance of a tensor mapping needs to know the exact size of the range and domain for this to work?
+
+### Ideas for information sharing functions
+```julia
+using StaticArrays
+
+function regions(op::SecondDerivativeVariable)
+    t = ntuple(i->(Interior(),),range_dim(op))
+    return Base.setindex(t, (Lower(), Interior(), Upper()), derivative_direction(op))
+end
+
+function regionsizes(op::SecondDerivativeVariable)
+    sz = tuple.(range_size(op))
+
+    cl = closuresize(op)
+    return Base.setindex(sz, (cl, n-2cl, cl), derivative_direction(op))
+end
+
+
+g = EquidistantGrid((11,9), (0.,0.), (10.,8.)) # h = 1
+c = evalOn(g, (x,y)->x+y)
+
+D₂ᶜ = SecondDerivativeVariable(g, c, interior_stencil, closure_stencils,1)
+@test regions(D₂ᶜ) == (
+    (Lower(), Interior(), Upper()),
+    (Interior(),),
+)
+@test regionsizes(D₂ᶜ) == ((1,9,1),(9,))
+
+
+D₂ᶜ = SecondDerivativeVariable(g, c, interior_stencil, closure_stencils,2)
+@test regions(D₂ᶜ) == (
+    (Interior(),),
+    (Lower(), Interior(), Upper()),
+)
+@test regionsizes(D₂ᶜ) == ((11,),(1,7,1))
+```
+
 ## Boundschecking and dimension checking
 Does it make sense to have boundschecking only in getindex methods?
@@ -260,16 +298,16 @@
 Vi kan ha tensor-operatorer som agerar på ett skalärt fält och ger ett vektorfält eller tensorfält.
 Vi kan också ha tensor-operatorer som agerar på ett vektorfält eller tensorfält och ger ett skalärt fält.
 
-TBD: Just nu gör `apply_transpose` antagandet att domän-typen är samma som range-typen. Det behöver vi på något sätt bryta. Ett alternativ är låta en TensorMapping ha `T_domain` och `T_range` istället för bara `T`. Känns dock lite grötigt. Ett annat alternativ skulle vara någon typ av trait för transpose? Den skulle kunna innehålla typen som transponatet agerar på? Vet inte om det fungerar dock.
+TBD: Just nu gör `apply_transpose` antagandet att domän-typen är samma som range-typen. Det behöver vi på något sätt bryta. Ett alternativ är låta en LazyTensor ha `T_domain` och `T_range` istället för bara `T`. Känns dock lite grötigt. Ett annat alternativ skulle vara någon typ av trait för transpose? Den skulle kunna innehålla typen som transponatet agerar på? Vet inte om det fungerar dock.
 
-TBD: Vad är målet med `T`-parametern för en TensorMapping? Om vi vill kunna applicera en difference operator på vad som helst kan man inte anta att en `TensorMapping{T}` bara agerar på instanser av `T`.
+TBD: Vad är målet med `T`-parametern för en LazyTensor? Om vi vill kunna applicera en difference operator på vad som helst kan man inte anta att en `LazyTensor{T}` bara agerar på instanser av `T`.
 
 Man kan implementera `∇` som en tensormapping som agerar på T och returnerar `StaticVector{N,T} where N`. (Man skulle eventuellt också kunna låta den agera på `StaticMatrix{N,T,D} where N` och returnera `StaticMatrix{M,T,D+1}`. Frågan är om man vinner något på det...)
 
-Skulle kunna ha en funktion `range_type(::TensorMapping, ::Type{domain_type})`
 
-Kanske kan man implementera `⋅(tm::TensorMapping{R,D}, v::AbstractArray{T,D})` där T är en AbstractArray, tm på något sätt har komponenter, lika många som T har element.
+Kanske kan man implementera `⋅(tm::LazyTensor{R,D}, v::AbstractArray{T,D})` där T är en AbstractArray, tm på något sätt har komponenter, lika många som T har element.
 
 ### Ratade alternativ:
@@ -301,7 +339,7 @@
 En viktig operation för vektor fält är att kunna få ut komponenter som grid-funktioner. Detta behöver antagligen kunna ske lazy.
 Det finns ett par olika lösningar:
 * Implementera en egen typ av view som tar hand om detta. Eller Accessors.jl?
-* Använda en TensorMapping
+* Använda en LazyTensor
 * Någon typ av lazy-broadcast
 * En lazy array som applicerar en funktion för varje element.
@@ -348,4 +386,4 @@
 ## Name of the `VolumeOperator` type for constant stencils
-It seems that the name is too general. The name of the method `volume_operator` makes sense. It should return different types of `TensorMapping` specialized for the grid. A suggetion for a better name is `ConstantStencilVolumeOperator`
+It seems that the name is too general. The name of the method `volume_operator` makes sense. It should return different types of `LazyTensor` specialized for the grid. A suggestion for a better name is `ConstantStencilVolumeOperator`
--- a/TODO.md Mon Feb 21 10:33:58 2022 +0100
+++ b/TODO.md Fri Feb 03 22:14:47 2023 +0100
@@ -1,20 +1,15 @@
 # TODO
 
-## Skämskudde
- - [ ] Ändra namn på variabler och funktioner så att det följer style-guide
- - [ ] Skriv tester
 
 ## Coding
+ - [ ] Ändra namn på variabler och funktioner så att det följer style-guide
 - [ ] Add new Laplace operator to DiffOps, probably named WaveEqOp(?!!?)
 - [ ] Create a struct that bundles the necessary Tensor operators for solving the wave equation.
- - [ ] Add a quick and simple way of running all tests for all subpackages.
 - [ ] Replace getindex hack for flattening tuples with flatten_tuple. (eg. `getindex.(range_size.(L.D2),1)`)
 - [ ] Use `@inferred` in a lot of tests.
+ - [ ] Replace `@inferred` tests with a benchmark suite that automatically tests for regressions.
 - [ ] Make sure we are setting tolerances in tests in a consistent way
- - [ ] Add check for correct domain sizes to lazy tensor operations using SizeMismatch
 - [ ] Write down some coding guideline or checklist for code conventions. For example i,j,... for indices and I for multi-index
- - [ ] Add boundschecking in TensorMappingApplication
- - [ ] Start renaming things in LazyTensors
 - [ ] Clean up RegionIndices
   1. [ ] Write tests for how things should work
   2. [ ] Update RegionIndices accordingly
@@ -23,11 +18,11 @@
 - [ ] Add possibility to create tensor mapping application with `()`, e.g `D1(v) <=> D1*v`?
 - [ ] Add custom pretty printing to LazyTensors/SbpOperators to enhance readability of e.g error messages. See (https://docs.julialang.org/en/v1/manual/types/#man-custom-pretty-printing)
+ - [ ] Samla noggrannhets- och SBP-ness-tester för alla operatorer på ett ställe
+ - [ ] Move export statements to top of each module
 
-## Repo
- - [ ] Rename repo to Sbplib.jl
 
-# Wrap up tasks
+ - [ ] Gå igenom alla typ parametrar och kolla om de är motiverade. Både i signaturer och typer, tex D i VariableSecondDerivative. Kan vi använda promote istället?
 - [ ] Kolla att vi har @inbounds och @propagate_inbounds på rätt ställen
 - [ ] Kolla att vi gör boundschecks överallt och att de är markerade med @boundscheck
 - [ ] Kolla att vi har @inline på rätt ställen
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/WORKFLOW.md Fri Feb 03 22:14:47 2023 +0100
@@ -0,0 +1,46 @@
+# Branch model
+
+The `default` branch contains stable code and the goal is that all tests should be passing on this branch. Versions of the code are marked with mercurial tags. Changes in the code are developed in named branches which are closed and merged into `default` when the code is ready. During development, merging `default` into the development branch is encouraged to avoid complicated conflicts.
+
+Branches are named using slash separated keywords. The first keyword describes the type of change being pursued in the branch. Important type keywords are
+ * feature
+ * bugfix
+ * refactor
+Further keywords may describe where, e.g. in what sub package, the change happens. The last keyword should describe the change.
+
+Some examples:
+ * refactor/grids: Branch to refactor the grids module
+ * bugfix/lazy_tensors/lazyfunctionarray: Branch to fix a bug in LazyFunctionArray
+
+## Merging a branch into `default`
+When the changes in a branch have been reviewed and deemed ready to merge, the branch is closed and then merged.
+
+Before merging a development branch, `default` should be merged into the development branch to make sure the whole state of the code is reviewed and tested before it ends up on `default`.
+
+With the development branch active, the following commands can be used to complete the merging of a development branch.
+```shell
+hg merge default
+hg commit --close-branch -m "Close before merge"
+hg update default
+hg merge development/branch
+hg commit -m "Merge development/branch"
+```
+
+# Review
+
+## Checklist for review
+* Push and pull new changes
+* Search and check TODOs
+* Search and check TBDs
+* Search and check REVIEWs
+* Review code
+* Review tests
+* Review docstrings
+* Render Documenter and check docstrings in browser
+* Run full tests
+
+# Special comments
+The following special comments are used:
+* `# TODO: `: Something that should be done at some point.
+* `# TBD: `: "To be determined", i.e. a decision that has to be made.
+* `# REVIEW: `: A review comment. Should only exist on development branches.
--- a/docs/Manifest.toml Mon Feb 21 10:33:58 2022 +0100 +++ b/docs/Manifest.toml Fri Feb 03 22:14:47 2023 +0100 @@ -1,7 +1,8 @@ # This file is machine-generated - editing it directly is not advised -julia_version = "1.7.1" +julia_version = "1.8.5" manifest_format = "2.0" +project_hash = "4f0756199bb5f6739a5f4697152617efc4e0705c" [[deps.ANSIColoredPrinters]] git-tree-sha1 = "574baf8110975760d391c710b6341da1afa48d8c" @@ -10,9 +11,21 @@ [[deps.Adapt]] deps = ["LinearAlgebra"] -git-tree-sha1 = "af92965fb30777147966f58acb05da51c5616b5f" +git-tree-sha1 = "0310e08cb19f5da31d08341c6120c047598f5b9c" uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" -version = "3.3.3" +version = "3.5.0" + +[[deps.ArrayInterface]] +deps = ["ArrayInterfaceCore", "Compat", "IfElse", "LinearAlgebra", "SnoopPrecompile", "Static"] +git-tree-sha1 = "dedc16cbdd1d32bead4617d27572f582216ccf23" +uuid = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9" +version = "6.0.25" + +[[deps.ArrayInterfaceCore]] +deps = ["LinearAlgebra", "SnoopPrecompile", "SparseArrays", "SuiteSparse"] +git-tree-sha1 = "e5f08b5689b1aad068e01751889f2f615c7db36d" +uuid = "30b0a656-2188-435a-8636-2ec0e6a096e2" +version = "0.1.29" [[deps.Artifacts]] uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" @@ -20,9 +33,16 @@ [[deps.Base64]] uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" +[[deps.Compat]] +deps = ["Dates", "LinearAlgebra", "UUIDs"] +git-tree-sha1 = "61fdd77467a5c3ad071ef8277ac6bd6af7dd4c04" +uuid = "34da2185-b29b-5c13-b0c7-acf172513d20" +version = "4.6.0" + [[deps.CompilerSupportLibraries_jll]] deps = ["Artifacts", "Libdl"] uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae" +version = "1.0.1+0" [[deps.Dates]] deps = ["Printf"] @@ -30,15 +50,15 @@ [[deps.DocStringExtensions]] deps = ["LibGit2"] -git-tree-sha1 = "b19534d1895d702889b219c382a6e18010797f0b" +git-tree-sha1 = "2fb1e02f2b635d0845df5d7c167fec4dd739b00d" uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" -version = "0.8.6" +version = "0.9.3" [[deps.Documenter]] deps = ["ANSIColoredPrinters", "Base64", "Dates", "DocStringExtensions", "IOCapture", "InteractiveUtils", "JSON", "LibGit2", "Logging", "Markdown", "REPL", "Test", "Unicode"] -git-tree-sha1 = "f425293f7e0acaf9144de6d731772de156676233" +git-tree-sha1 = "58fea7c536acd71f3eef6be3b21c0df5f3df88fd" uuid = "e30172f5-a6a5-5a46-863b-614d45cd2de4" -version = "0.27.10" +version = "0.27.24" [[deps.IOCapture]] deps = ["Logging", "Random"] @@ -46,15 +66,20 @@ uuid = "b5f81e59-6552-4d32-b1f0-c071b021bf89" version = "0.2.2" +[[deps.IfElse]] +git-tree-sha1 = "debdd00ffef04665ccbb3e150747a77560e8fad1" +uuid = "615f187c-cbe4-4ef1-ba3b-2fcf58d6d173" +version = "0.1.1" + [[deps.InteractiveUtils]] deps = ["Markdown"] uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240" [[deps.JSON]] deps = ["Dates", "Mmap", "Parsers", "Unicode"] -git-tree-sha1 = "8076680b162ada2a031f707ac7b4953e30667a37" +git-tree-sha1 = "3c837543ddb02250ef42f4738347454f95079d4e" uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" -version = "0.21.2" +version = "0.21.3" [[deps.LibGit2]] deps = ["Base64", "NetworkOptions", "Printf", "SHA"] @@ -79,22 +104,30 @@ [[deps.NetworkOptions]] uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908" +version = "1.2.0" [[deps.OffsetArrays]] deps = ["Adapt"] -git-tree-sha1 = "043017e0bdeff61cfbb7afeb558ab29536bbb5ed" +git-tree-sha1 = "82d7c9e310fe55aa54996e6f7f94674e2a38fcb4" uuid = "6fe1bfb0-de20-5000-8ca7-80f57d26f881" -version = "1.10.8" +version = "1.12.9" [[deps.OpenBLAS_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"] uuid = "4536629a-c528-5b80-bd46-f80d51c5b363" +version = 
"0.3.20+0" [[deps.Parsers]] -deps = ["Dates"] -git-tree-sha1 = "d7fa6237da8004be601e19bd6666083056649918" +deps = ["Dates", "SnoopPrecompile"] +git-tree-sha1 = "151d91d63d8d6c1a5789ecb7de51547e00480f1b" uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" -version = "2.1.3" +version = "2.5.4" + +[[deps.Preferences]] +deps = ["TOML"] +git-tree-sha1 = "47e5f437cc0e7ef2ce8406ce1e7e24d44915f88d" +uuid = "21216c6a-2e73-6563-6e65-726566657250" +version = "1.3.0" [[deps.Printf]] deps = ["Unicode"] @@ -110,6 +143,7 @@ [[deps.SHA]] uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce" +version = "0.7.0" [[deps.Sbplib]] deps = ["TOML", "TiledIteration"] @@ -120,22 +154,47 @@ [[deps.Serialization]] uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b" +[[deps.SnoopPrecompile]] +deps = ["Preferences"] +git-tree-sha1 = "e760a70afdcd461cf01a575947738d359234665c" +uuid = "66db9d55-30c0-4569-8b51-7e840670fc0c" +version = "1.0.3" + [[deps.Sockets]] uuid = "6462fe0b-24de-5631-8697-dd941f90decc" +[[deps.SparseArrays]] +deps = ["LinearAlgebra", "Random"] +uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" + +[[deps.Static]] +deps = ["IfElse"] +git-tree-sha1 = "c35b107b61e7f34fa3f124026f2a9be97dea9e1c" +uuid = "aedffcd0-7271-4cad-89d0-dc628f76c6d3" +version = "0.8.3" + +[[deps.SuiteSparse]] +deps = ["Libdl", "LinearAlgebra", "Serialization", "SparseArrays"] +uuid = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9" + [[deps.TOML]] deps = ["Dates"] uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76" +version = "1.0.0" [[deps.Test]] deps = ["InteractiveUtils", "Logging", "Random", "Serialization"] uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [[deps.TiledIteration]] -deps = ["OffsetArrays"] -git-tree-sha1 = "5683455224ba92ef59db72d10690690f4a8dc297" +deps = ["ArrayInterface", "OffsetArrays"] +git-tree-sha1 = "1bf2bb587a7fc99fefac2ff076b18b500128e9c0" uuid = "06e1c1a7-607b-532d-9fad-de7d9aa2abac" -version = "0.3.1" +version = "0.4.2" + +[[deps.UUIDs]] +deps = ["Random", "SHA"] +uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4" [[deps.Unicode]] uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5" @@ -143,3 +202,4 @@ [[deps.libblastrampoline_jll]] deps = ["Artifacts", "Libdl", "OpenBLAS_jll"] uuid = "8e850b90-86db-534c-a0d3-1478176c7d93" +version = "5.1.1+0"
--- a/src/Grids/AbstractGrid.jl Mon Feb 21 10:33:58 2022 +0100
+++ /dev/null Thu Jan 01 00:00:00 1970 +0000
@@ -1,24 +0,0 @@
-"""
-    AbstractGrid
-
-Should implement
-	dimension(grid::AbstractGrid)
-	points(grid::AbstractGrid)
-
-"""
-abstract type AbstractGrid end
-
-function dimension end
-function points end
-export dimension, points
-
-"""
-    evalOn(g::AbstractGrid, f::Function)
-
-Evaluate function f on the grid g
-"""
-function evalOn(g::AbstractGrid, f::Function)
-    F(x) = f(x...)
-    return F.(points(g))
-end
-export evalOn
--- a/src/Grids/EquidistantGrid.jl Mon Feb 21 10:33:58 2022 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,183 +0,0 @@ -export EquidistantGrid -export spacing -export inverse_spacing -export restrict -export boundary_identifiers -export boundary_grid -export refine -export coarsen - -""" - EquidistantGrid{Dim,T<:Real} <: AbstractGrid - -`Dim`-dimensional equidistant grid with coordinates of type `T`. -""" -struct EquidistantGrid{Dim,T<:Real} <: AbstractGrid - size::NTuple{Dim, Int} - limit_lower::NTuple{Dim, T} - limit_upper::NTuple{Dim, T} - - function EquidistantGrid{Dim,T}(size::NTuple{Dim, Int}, limit_lower::NTuple{Dim, T}, limit_upper::NTuple{Dim, T}) where {Dim,T} - if any(size .<= 0) - throw(DomainError("all components of size must be postive")) - end - if any(limit_upper.-limit_lower .<= 0) - throw(DomainError("all side lengths must be postive")) - end - return new{Dim,T}(size, limit_lower, limit_upper) - end -end - -""" - EquidistantGrid(size, limit_lower, limit_upper) - -Construct an equidistant grid with corners at the coordinates `limit_lower` and -`limit_upper`. - -The length of the domain sides are given by the components of -`limit_upper-limit_lower`. E.g for a 2D grid with `limit_lower=(-1,0)` and `limit_upper=(1,2)` the domain is defined -as `(-1,1)x(0,2)`. The side lengths of the grid are not allowed to be negative. - -The number of equidistantly spaced points in each coordinate direction are given -by the tuple `size`. -""" -function EquidistantGrid(size, limit_lower, limit_upper) - return EquidistantGrid{length(size), eltype(limit_lower)}(size, limit_lower, limit_upper) -end - -""" - EquidistantGrid{T}() - -Constructs a 0-dimensional grid. -""" -EquidistantGrid{T}() where T = EquidistantGrid{0,T}((),(),()) # Convenience constructor for 0-dim grid - -""" - EquidistantGrid(size::Int, limit_lower::T, limit_upper::T) - -Convenience constructor for 1D grids. -""" -function EquidistantGrid(size::Int, limit_lower::T, limit_upper::T) where T - return EquidistantGrid((size,),(limit_lower,),(limit_upper,)) -end - -Base.eltype(grid::EquidistantGrid{Dim,T}) where {Dim,T} = T - -Base.eachindex(grid::EquidistantGrid) = CartesianIndices(grid.size) - -Base.size(g::EquidistantGrid) = g.size - -""" - dimension(grid::EquidistantGrid) - -The dimension of the grid. -""" -dimension(grid::EquidistantGrid{Dim}) where Dim = Dim - -""" - spacing(grid::EquidistantGrid) - -The spacing between grid points. -""" -spacing(grid::EquidistantGrid) = (grid.limit_upper.-grid.limit_lower)./(grid.size.-1) - -""" - inverse_spacing(grid::EquidistantGrid) - -The reciprocal of the spacing between grid points. -""" -inverse_spacing(grid::EquidistantGrid) = 1 ./ spacing(grid) - -""" - points(grid::EquidistantGrid) - -The point of the grid as an array of tuples with the same dimension as the grid. -The points are stored as [(x1,y1), (x1,y2), … (x1,yn); - (x2,y1), (x2,y2), … (x2,yn); - ⋮ ⋮ ⋮ - (xm,y1), (xm,y2), … (xm,yn)] -""" -function points(grid::EquidistantGrid) - indices = Tuple.(CartesianIndices(grid.size)) - h = spacing(grid) - return broadcast(I -> grid.limit_lower .+ (I.-1).*h, indices) -end - -""" - restrict(::EquidistantGrid, dim) - -Pick out given dimensions from the grid and return a grid for them. 
-""" -function restrict(grid::EquidistantGrid, dim) - size = grid.size[dim] - limit_lower = grid.limit_lower[dim] - limit_upper = grid.limit_upper[dim] - - return EquidistantGrid(size, limit_lower, limit_upper) -end - -""" - boundary_identifiers(::EquidistantGrid) - -Returns a tuple containing the boundary identifiers for the grid, stored as - (CartesianBoundary(1,Lower), - CartesianBoundary(1,Upper), - CartesianBoundary(2,Lower), - ...) -""" -boundary_identifiers(g::EquidistantGrid) = (((ntuple(i->(CartesianBoundary{i,Lower}(),CartesianBoundary{i,Upper}()),dimension(g)))...)...,) - - -""" - boundary_grid(grid::EquidistantGrid,id::CartesianBoundary) - boundary_grid(::EquidistantGrid{1},::CartesianBoundary{1}) - -Creates the lower-dimensional restriciton of `grid` spanned by the dimensions -orthogonal to the boundary specified by `id`. The boundary grid of a 1-dimensional -grid is a zero-dimensional grid. -""" -function boundary_grid(grid::EquidistantGrid,id::CartesianBoundary) - dims = collect(1:dimension(grid)) - orth_dims = dims[dims .!= dim(id)] - if orth_dims == dims - throw(DomainError("boundary identifier not matching grid")) - end - return restrict(grid,orth_dims) -end -boundary_grid(::EquidistantGrid{1,T},::CartesianBoundary{1}) where T = EquidistantGrid{T}() - - -""" - refine(grid::EquidistantGrid, r::Int) - -Refines `grid` by a factor `r`. The factor is applied to the number of -intervals which is 1 less than the size of the grid. - -See also: [`coarsen`](@ref) -""" -function refine(grid::EquidistantGrid, r::Int) - sz = size(grid) - new_sz = (sz .- 1).*r .+ 1 - return EquidistantGrid{dimension(grid), eltype(grid)}(new_sz, grid.limit_lower, grid.limit_upper) -end - -""" - coarsen(grid::EquidistantGrid, r::Int) - -Coarsens `grid` by a factor `r`. The factor is applied to the number of -intervals which is 1 less than the size of the grid. If the number of -intervals are not divisible by `r` an error is raised. - -See also: [`refine`](@ref) -""" -function coarsen(grid::EquidistantGrid, r::Int) - sz = size(grid) - - if !all(n -> (n % r == 0), sz.-1) - throw(DomainError(r, "Size minus 1 must be divisible by the ratio.")) - end - - new_sz = (sz .- 1).÷r .+ 1 - - return EquidistantGrid{dimension(grid), eltype(grid)}(new_sz, grid.limit_lower, grid.limit_upper) -end
--- a/src/Grids/Grids.jl Mon Feb 21 10:33:58 2022 +0100
+++ b/src/Grids/Grids.jl Fri Feb 03 22:14:47 2023 +0100
@@ -2,18 +2,30 @@
 using Sbplib.RegionIndices
 
-export BoundaryIdentifier, CartesianBoundary
+# Grid
+export Grid
+export dims
+export points
+export evalOn
 
-abstract type BoundaryIdentifier end
-struct CartesianBoundary{Dim, R<:Region} <: BoundaryIdentifier end
-dim(::CartesianBoundary{Dim, R}) where {Dim, R} = Dim
-region(::CartesianBoundary{Dim, R}) where {Dim, R} = R()
+# BoundaryIdentifier
+export BoundaryIdentifier
+export CartesianBoundary
+export dim
+export region
 
-export dim, region
+# EquidistantGrid
+export EquidistantGrid
+export spacing
+export inverse_spacing
+export restrict
+export boundary_identifiers
+export boundary_grid
+export refine
+export coarsen
 
-include("AbstractGrid.jl")
-include("EquidistantGrid.jl")
-
-# TODO: Rename AbstractGrid to Grid and move definition here.
+include("grid.jl")
+include("boundary_identifier.jl")
+include("equidistant_grid.jl")
 
 end # module
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Grids/boundary_identifier.jl Fri Feb 03 22:14:47 2023 +0100
@@ -0,0 +1,6 @@
+
+abstract type BoundaryIdentifier end
+
+struct CartesianBoundary{Dim, R<:Region} <: BoundaryIdentifier end
+dim(::CartesianBoundary{Dim, R}) where {Dim, R} = Dim
+region(::CartesianBoundary{Dim, R}) where {Dim, R} = R()
\ No newline at end of file
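The new `boundary_identifier.jl` encodes a boundary purely at the type level. A short usage sketch, not part of the changeset, assuming the `Lower`/`Upper` region types exported by `Sbplib.RegionIndices`:

```julia
using Sbplib.Grids
using Sbplib.RegionIndices

# A boundary identifier carries all its information in the type parameters:
# the coordinate direction and the side (Lower or Upper) of that direction.
id = CartesianBoundary{1,Upper}()

dim(id)     # 1, the coordinate direction of the boundary
region(id)  # Upper(), which side of that direction
```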
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/Grids/equidistant_grid.jl Fri Feb 03 22:14:47 2023 +0100 @@ -0,0 +1,191 @@ + +""" + EquidistantGrid{Dim,T<:Real} <: Grid + +`Dim`-dimensional equidistant grid with coordinates of type `T`. +""" +struct EquidistantGrid{Dim,T<:Real} <: Grid + size::NTuple{Dim, Int} + limit_lower::NTuple{Dim, T} + limit_upper::NTuple{Dim, T} + + function EquidistantGrid{Dim,T}(size::NTuple{Dim, Int}, limit_lower::NTuple{Dim, T}, limit_upper::NTuple{Dim, T}) where {Dim,T} + if any(size .<= 0) + throw(DomainError("all components of size must be postive")) + end + if any(limit_upper.-limit_lower .<= 0) + throw(DomainError("all side lengths must be postive")) + end + return new{Dim,T}(size, limit_lower, limit_upper) + end +end + + +""" + EquidistantGrid(size, limit_lower, limit_upper) + +Construct an equidistant grid with corners at the coordinates `limit_lower` and +`limit_upper`. + +The length of the domain sides are given by the components of +`limit_upper-limit_lower`. E.g for a 2D grid with `limit_lower=(-1,0)` and `limit_upper=(1,2)` the domain is defined +as `(-1,1)x(0,2)`. The side lengths of the grid are not allowed to be negative. + +The number of equidistantly spaced points in each coordinate direction are given +by the tuple `size`. +""" +function EquidistantGrid(size, limit_lower, limit_upper) + return EquidistantGrid{length(size), eltype(limit_lower)}(size, limit_lower, limit_upper) +end + + +""" + EquidistantGrid{T}() + +Constructs a 0-dimensional grid. +""" +EquidistantGrid{T}() where T = EquidistantGrid{0,T}((),(),()) # Convenience constructor for 0-dim grid + + +""" + EquidistantGrid(size::Int, limit_lower::T, limit_upper::T) + +Convenience constructor for 1D grids. +""" +function EquidistantGrid(size::Int, limit_lower::T, limit_upper::T) where T + return EquidistantGrid((size,),(limit_lower,),(limit_upper,)) +end + +Base.eltype(grid::EquidistantGrid{Dim,T}) where {Dim,T} = T + +Base.eachindex(grid::EquidistantGrid) = CartesianIndices(grid.size) + +Base.size(g::EquidistantGrid) = g.size + +Base.ndims(::EquidistantGrid{Dim}) where Dim = Dim + + + + + +""" + spacing(grid::EquidistantGrid) + +The spacing between grid points. +""" +spacing(grid::EquidistantGrid) = (grid.limit_upper.-grid.limit_lower)./(grid.size.-1) + + +""" + inverse_spacing(grid::EquidistantGrid) + +The reciprocal of the spacing between grid points. +""" +inverse_spacing(grid::EquidistantGrid) = 1 ./ spacing(grid) + + +""" + points(grid::EquidistantGrid) + +The point of the grid as an array of tuples with the same dimension as the grid. +The points are stored as [(x1,y1), (x1,y2), … (x1,yn); + (x2,y1), (x2,y2), … (x2,yn); + ⋮ ⋮ ⋮ + (xm,y1), (xm,y2), … (xm,yn)] +""" +function points(grid::EquidistantGrid) + indices = Tuple.(CartesianIndices(grid.size)) + h = spacing(grid) + return broadcast(I -> grid.limit_lower .+ (I.-1).*h, indices) +end + + +""" + restrict(::EquidistantGrid, dim) + +Pick out given dimensions from the grid and return a grid for them. +""" +function restrict(grid::EquidistantGrid, dim) + size = grid.size[dim] + limit_lower = grid.limit_lower[dim] + limit_upper = grid.limit_upper[dim] + + return EquidistantGrid(size, limit_lower, limit_upper) +end + + +""" + orthogonal_dims(grid::EquidistantGrid,dim) + +Returns the dimensions of grid orthogonal to that of dim. 
+""" +function orthogonal_dims(grid::EquidistantGrid, dim) + orth_dims = filter(i -> i != dim, dims(grid)) + if orth_dims == dims(grid) + throw(DomainError(string("dimension ",string(dim)," not matching grid"))) + end + return orth_dims +end + + +""" + boundary_identifiers(::EquidistantGrid) + +Returns a tuple containing the boundary identifiers for the grid, stored as + (CartesianBoundary(1,Lower), + CartesianBoundary(1,Upper), + CartesianBoundary(2,Lower), + ...) +""" +boundary_identifiers(g::EquidistantGrid) = (((ntuple(i->(CartesianBoundary{i,Lower}(),CartesianBoundary{i,Upper}()),ndims(g)))...)...,) + + +""" + boundary_grid(grid::EquidistantGrid, id::CartesianBoundary) + +Creates the lower-dimensional restriciton of `grid` spanned by the dimensions +orthogonal to the boundary specified by `id`. The boundary grid of a 1-dimensional +grid is a zero-dimensional grid. +""" +function boundary_grid(grid::EquidistantGrid, id::CartesianBoundary) + orth_dims = orthogonal_dims(grid, dim(id)) + return restrict(grid, orth_dims) +end +boundary_grid(::EquidistantGrid{1,T},::CartesianBoundary{1}) where T = EquidistantGrid{T}() + + +""" + refine(grid::EquidistantGrid, r::Int) + +Refines `grid` by a factor `r`. The factor is applied to the number of +intervals which is 1 less than the size of the grid. + +See also: [`coarsen`](@ref) +""" +function refine(grid::EquidistantGrid, r::Int) + sz = size(grid) + new_sz = (sz .- 1).*r .+ 1 + return EquidistantGrid{ndims(grid), eltype(grid)}(new_sz, grid.limit_lower, grid.limit_upper) +end + + +""" + coarsen(grid::EquidistantGrid, r::Int) + +Coarsens `grid` by a factor `r`. The factor is applied to the number of +intervals which is 1 less than the size of the grid. If the number of +intervals are not divisible by `r` an error is raised. + +See also: [`refine`](@ref) +""" +function coarsen(grid::EquidistantGrid, r::Int) + sz = size(grid) + + if !all(n -> (n % r == 0), sz.-1) + throw(DomainError(r, "Size minus 1 must be divisible by the ratio.")) + end + + new_sz = (sz .- 1).÷r .+ 1 + + return EquidistantGrid{ndims(grid), eltype(grid)}(new_sz, grid.limit_lower, grid.limit_upper) +end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/Grids/grid.jl Fri Feb 03 22:14:47 2023 +0100
@@ -0,0 +1,27 @@
+"""
+    Grid
+
+Should implement
+	Base.ndims(grid::Grid)
+	points(grid::Grid)
+
+"""
+abstract type Grid end
+function points end
+
+"""
+    dims(grid::Grid)
+
+A range containing the dimensions of `grid`
+"""
+dims(grid::Grid) = 1:ndims(grid)
+
+"""
+    evalOn(grid::Grid, f::Function)
+
+Evaluate function `f` on `grid`
+"""
+function evalOn(grid::Grid, f::Function)
+    F(x) = f(x...)
+    return F.(points(grid))
+end
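The `Grid` abstract type above replaces `AbstractGrid`: a subtype only needs `Base.ndims` and `points`, after which `dims` and `evalOn` work through the fallbacks. A minimal hypothetical subtype, for illustration only (the type `NodeListGrid` is not part of the package):

```julia
using Sbplib.Grids
import Sbplib.Grids: points   # needed to add a method to the interface function

# Hypothetical 1D grid with explicitly stored node coordinates.
struct NodeListGrid <: Grid
    x::Vector{Float64}
end

Base.ndims(::NodeListGrid) = 1
points(g::NodeListGrid) = tuple.(g.x)   # points as 1-tuples, like EquidistantGrid

g = NodeListGrid([0.0, 0.1, 0.4, 1.0])
dims(g)              # 1:1, via the fallback dims(grid) = 1:ndims(grid)
evalOn(g, x -> x^2)  # applies the function to each (splatted) point tuple
```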
--- a/src/LazyTensors/LazyTensors.jl Mon Feb 21 10:33:58 2022 +0100
+++ b/src/LazyTensors/LazyTensors.jl Fri Feb 03 22:14:47 2023 +0100
@@ -1,7 +1,39 @@
 module LazyTensors
+
 using Sbplib.RegionIndices
-include("tensor_mapping.jl")
+
+export TensorApplication
+export TensorTranspose
+export TensorComposition
+export IdentityTensor
+export ScalingTensor
+export DiagonalTensor
+export DenseTensor
+export InflatedTensor
+export LazyOuterProduct
+export ⊗
+export DomainSizeMismatch
+export RangeSizeMismatch
+
+include("lazy_tensor.jl")
+include("tensor_types.jl")
 include("lazy_array.jl")
 include("lazy_tensor_operations.jl")
+include("tuple_manipulation.jl")
+
+# Applying lazy tensors to vectors
+Base.:*(a::LazyTensor, v::AbstractArray) = TensorApplication(a,v)
+Base.:*(a::LazyTensor, b::LazyTensor) = throw(MethodError(Base.:*,(a,b)))
+Base.:*(a::LazyTensor, args::Union{LazyTensor, AbstractArray}...) = foldr(*,(a,args...))
+
+# Addition and subtraction of lazy tensors
+Base.:+(s::LazyTensor, t::LazyTensor) = ElementwiseTensorOperation{:+}(s,t)
+Base.:-(s::LazyTensor, t::LazyTensor) = ElementwiseTensorOperation{:-}(s,t)
+
+# Composing lazy tensors
+Base.:∘(s::LazyTensor, t::LazyTensor) = TensorComposition(s,t)
+
+# Outer products of tensors
+⊗(a::LazyTensor, b::LazyTensor) = LazyOuterProduct(a,b)
 
 end # module
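The module now wires the arithmetic operators to the renamed types: `*` builds a `TensorApplication`, `+` and `-` an `ElementwiseTensorOperation`, `∘` a `TensorComposition`, and `⊗` a `LazyOuterProduct`. A usage sketch; the `ScalingTensor` and `IdentityTensor` constructors are assumed here, since `tensor_types.jl` is not shown in this diff:

```julia
using Sbplib.LazyTensors

A  = ScalingTensor(2.0, (5,))        # assumed constructor: scale length-5 vectors by 2
I5 = IdentityTensor{Float64}((5,))   # assumed to mirror the old IdentityMapping

v = ones(5)

w = A*v            # TensorApplication: nothing is computed yet
w[3]               # elements are evaluated on indexing

(A + I5)*v         # ElementwiseTensorOperation{:+}
(A ∘ I5)*v         # TensorComposition; composing with an identity just returns A
A ⊗ I5             # LazyOuterProduct, a LazyTensor acting on 5×5 arrays
2.0*A              # scalar * LazyTensor wraps A in a ScalingTensor composition
```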
--- a/src/LazyTensors/lazy_array.jl Mon Feb 21 10:33:58 2022 +0100 +++ b/src/LazyTensors/lazy_array.jl Fri Feb 03 22:14:47 2023 +0100 @@ -28,7 +28,7 @@ export LazyFunctionArray function LazyFunctionArray(f::F, size::NTuple{D,Int}) where {F<:Function,D} - T = typeof(f(ones(D)...)) + T = typeof(f(ones(Int, D)...)) return LazyFunctionArray{F,T,D}(f,size) end @@ -36,7 +36,7 @@ function Base.getindex(lfa::LazyFunctionArray{F,T,D}, I::Vararg{Int,D}) where {F,T,D} @boundscheck checkbounds(lfa, I...) - return lfa.f(I...) + return @inbounds lfa.f(I...) end @@ -61,25 +61,17 @@ end LazyElementwiseOperation{T,D,Op}(a::AbstractArray{T,D},b::T) where {T,D,Op} = LazyElementwiseOperation{T,D,Op}(a, LazyConstantArray(b, size(a))) LazyElementwiseOperation{T,D,Op}(a::T,b::AbstractArray{T,D}) where {T,D,Op} = LazyElementwiseOperation{T,D,Op}(LazyConstantArray(a, size(b)), b) -# TODO: Move Op to be the first parameter? Compare to Binary operations Base.size(v::LazyElementwiseOperation) = size(v.a) -evaluate(leo::LazyElementwiseOperation{T,D,:+}, I::Vararg{Int,D}) where {T,D} = leo.a[I...] + leo.b[I...] -evaluate(leo::LazyElementwiseOperation{T,D,:-}, I::Vararg{Int,D}) where {T,D} = leo.a[I...] - leo.b[I...] -evaluate(leo::LazyElementwiseOperation{T,D,:*}, I::Vararg{Int,D}) where {T,D} = leo.a[I...] * leo.b[I...] -evaluate(leo::LazyElementwiseOperation{T,D,:/}, I::Vararg{Int,D}) where {T,D} = leo.a[I...] / leo.b[I...] +Base.@propagate_inbounds evaluate(leo::LazyElementwiseOperation{T,D,:+}, I::Vararg{Int,D}) where {T,D} = leo.a[I...] + leo.b[I...] +Base.@propagate_inbounds evaluate(leo::LazyElementwiseOperation{T,D,:-}, I::Vararg{Int,D}) where {T,D} = leo.a[I...] - leo.b[I...] +Base.@propagate_inbounds evaluate(leo::LazyElementwiseOperation{T,D,:*}, I::Vararg{Int,D}) where {T,D} = leo.a[I...] * leo.b[I...] +Base.@propagate_inbounds evaluate(leo::LazyElementwiseOperation{T,D,:/}, I::Vararg{Int,D}) where {T,D} = leo.a[I...] / leo.b[I...] -# TODO: Make sure boundschecking is done properly and that the lenght of the vectors are equal -# NOTE: Boundschecking in getindex functions now assumes that the size of the -# vectors in the LazyElementwiseOperation are the same size. If we remove the -# size assertion in the constructor we might have to handle -# boundschecking differently. -Base.@propagate_inbounds @inline function Base.getindex(leo::LazyElementwiseOperation{T,D}, I::Vararg{Int,D}) where {T,D} - @boundscheck if !checkbounds(Bool, leo.a, I...) - throw(BoundsError([leo], I...)) - end - return evaluate(leo, I...) +function Base.getindex(leo::LazyElementwiseOperation{T,D}, I::Vararg{Int,D}) where {T,D} + @boundscheck checkbounds(leo.a, I...) + return @inbounds evaluate(leo, I...) end # Define lazy operations for AbstractArrays. Operations constructs a LazyElementwiseOperation which @@ -101,17 +93,22 @@ -# NOTE: Är det knas att vi har till exempel * istället för .* ?? -# Oklart om det ens går att lösa.. +# Overload +,-,*,/ for LazyArrays +# Element wise operation for `*` and `/` are not overloaded for due to conflicts with the behavior +# of regular `*` and `/` for AbstractArrays. Use tilde versions instead. 
Base.@propagate_inbounds Base.:+(a::LazyArray{T,D}, b::LazyArray{T,D}) where {T,D} = a +̃ b +Base.@propagate_inbounds Base.:-(a::LazyArray{T,D}, b::LazyArray{T,D}) where {T,D} = a -̃ b + Base.@propagate_inbounds Base.:+(a::LazyArray{T,D}, b::AbstractArray{T,D}) where {T,D} = a +̃ b -Base.@propagate_inbounds Base.:+(a::AbstractArray{T,D}, b::LazyArray{T,D}) where {T,D} = a +̃ b +Base.@propagate_inbounds Base.:-(a::LazyArray{T,D}, b::AbstractArray{T,D}) where {T,D} = a -̃ b -Base.@propagate_inbounds Base.:-(a::LazyArray{T,D}, b::LazyArray{T,D}) where {T,D} = a -̃ b -Base.@propagate_inbounds Base.:-(a::LazyArray{T,D}, b::AbstractArray{T,D}) where {T,D} = a -̃ b +Base.@propagate_inbounds Base.:+(a::AbstractArray{T,D}, b::LazyArray{T,D}) where {T,D} = a +̃ b Base.@propagate_inbounds Base.:-(a::AbstractArray{T,D}, b::LazyArray{T,D}) where {T,D} = a -̃ b -# Element wise operation for `*` and `\` are not overloaded due to conflicts with the behavior -# of regular `*` and `/` for AbstractArrays. Use tilde versions instead. +Base.@propagate_inbounds Base.:+(a::LazyArray{T,D}, b::T) where {T,D} = a +̃ b +Base.@propagate_inbounds Base.:-(a::LazyArray{T,D}, b::T) where {T,D} = a -̃ b + +Base.@propagate_inbounds Base.:+(a::T, b::LazyArray{T,D}) where {T,D} = a +̃ b +Base.@propagate_inbounds Base.:-(a::T, b::LazyArray{T,D}) where {T,D} = a -̃ b export +̃, -̃, *̃, /̃
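With these changes `+` and `-` work directly between `LazyArray`s and against scalars, while elementwise `*` and `/` still require the tilde operators to avoid clashing with the ordinary array meanings. A small sketch, assuming `LazyFunctionArray` and the tilde operators behave as defined in this file:

```julia
using Sbplib.LazyTensors

# Elements of a LazyFunctionArray are computed on demand from the index.
a = LazyFunctionArray((i,j) -> i + j, (3,3))
b = LazyFunctionArray((i,j) -> i*j,  (3,3))

c = a + b      # lazy elementwise sum (same as a +̃ b); nothing evaluated yet
c[2,3]         # == (2+3) + 2*3 = 11, computed on indexing

d = a *̃ b      # elementwise product must use the tilde form,
               # since plain * is reserved for tensor application
```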
--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/src/LazyTensors/lazy_tensor.jl Fri Feb 03 22:14:47 2023 +0100
@@ -0,0 +1,79 @@
+export LazyTensor
+export apply
+export apply_transpose
+export range_dim, domain_dim
+export range_size, domain_size
+
+"""
+    LazyTensor{T,R,D}
+
+Describes a mapping of a `D` dimension tensor to an `R` dimension tensor.
+The action of the mapping is implemented through the method
+```julia
+    apply(t::LazyTensor{T,R,D}, v::AbstractArray{<:Any,D}, I::Vararg) where {R,D,T}
+```
+
+The size of the range and domain that the operator works with should be returned by
+the functions
+```julia
+    range_size(::LazyTensor)
+    domain_size(::LazyTensor)
+```
+to allow querying for one or the other.
+
+Optionally the action of the transpose may be defined through
+```julia
+    apply_transpose(t::LazyTensor{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {R,D,T}
+```
+"""
+abstract type LazyTensor{T,R,D} end
+
+"""
+    apply(t::LazyTensor{T,R,D}, v::AbstractArray{<:Any,D}, I::Vararg) where {R,D,T}
+
+Return the result of the mapping for a given index.
+"""
+function apply end
+
+"""
+    apply_transpose(t::LazyTensor{T,R,D}, v::AbstractArray{<:Any,R}, I::Vararg) where {R,D,T}
+
+Return the result of the transposed mapping for a given index.
+"""
+function apply_transpose end
+
+"""
+    range_dim(::LazyTensor)
+Return the dimension of the range space of a given mapping
+"""
+range_dim(::LazyTensor{T,R,D}) where {T,R,D} = R
+
+"""
+    domain_dim(::LazyTensor)
+Return the dimension of the domain space of a given mapping
+"""
+domain_dim(::LazyTensor{T,R,D}) where {T,R,D} = D
+
+
+"""
+    range_size(M::LazyTensor)
+
+Return the range size for the mapping.
+"""
+function range_size end
+
+"""
+    domain_size(M::LazyTensor)
+
+Return the domain size for the mapping.
+"""
+function domain_size end
+
+
+"""
+    eltype(::LazyTensor{T})
+
+The type of elements the LazyTensor acts on.
+"""
+Base.eltype(::LazyTensor{T}) where T = T
+
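`lazy_tensor.jl` is the renamed interface file: a concrete `LazyTensor` supplies `apply`, `range_size` and `domain_size`, and everything else (application via `*`, transposes, compositions) builds on that. A hypothetical minimal implementation, for illustration only:

```julia
using Sbplib.LazyTensors
import Sbplib.LazyTensors: apply, range_size, domain_size

# Hypothetical lazy cumulative-sum operator on length-n vectors.
struct CumSumTensor{T} <: LazyTensor{T,1,1}
    n::Int
end

range_size(t::CumSumTensor)  = (t.n,)
domain_size(t::CumSumTensor) = (t.n,)

# apply returns the value of (C*v) at index i; nothing is precomputed or stored.
apply(t::CumSumTensor, v::AbstractArray{<:Any,1}, i) = sum(@view v[1:Int(i)])

C = CumSumTensor{Float64}(4)
v = [1., 2., 3., 4.]
(C*v)[3]                      # 6.0, evaluated lazily on indexing
range_dim(C), domain_dim(C)   # (1, 1)
```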
--- a/src/LazyTensors/lazy_tensor_operations.jl Mon Feb 21 10:33:58 2022 +0100 +++ b/src/LazyTensors/lazy_tensor_operations.jl Fri Feb 03 22:14:47 2023 +0100 @@ -1,202 +1,138 @@ """ - LazyTensorMappingApplication{T,R,D} <: LazyArray{T,R} + TensorApplication{T,R,D} <: LazyArray{T,R} -Struct for lazy application of a TensorMapping. Created using `*`. +Struct for lazy application of a LazyTensor. Created using `*`. -Allows the result of a `TensorMapping` applied to a vector to be treated as an `AbstractArray`. -With a mapping `m` and a vector `v` the LazyTensorMappingApplication object can be created by `m*v`. +Allows the result of a `LazyTensor` applied to a vector to be treated as an `AbstractArray`. +With a mapping `m` and a vector `v` the TensorApplication object can be created by `m*v`. The actual result will be calcualted when indexing into `m*v`. """ -struct LazyTensorMappingApplication{T,R,D, TM<:TensorMapping{T,R,D}, AA<:AbstractArray{T,D}} <: LazyArray{T,R} +struct TensorApplication{T,R,D, TM<:LazyTensor{<:Any,R,D}, AA<:AbstractArray{<:Any,D}} <: LazyArray{T,R} t::TM o::AA -end -# TODO: Do boundschecking on creation! -export LazyTensorMappingApplication - -Base.getindex(ta::LazyTensorMappingApplication{T,R}, I::Vararg{Any,R}) where {T,R} = apply(ta.t, ta.o, I...) -Base.size(ta::LazyTensorMappingApplication) = range_size(ta.t) -# TODO: What else is needed to implement the AbstractArray interface? -Base.:*(a::TensorMapping, v::AbstractArray) = LazyTensorMappingApplication(a,v) -Base.:*(a::TensorMapping, b::TensorMapping) = throw(MethodError(Base.:*,(a,b))) -Base.:*(a::TensorMapping, args::Union{TensorMapping, AbstractArray}...) = foldr(*,(a,args...)) + function TensorApplication(t::LazyTensor{<:Any,R,D}, o::AbstractArray{<:Any,D}) where {R,D} + @boundscheck check_domain_size(t, size(o)) + I = ntuple(i->1, range_dim(t)) + T = typeof(apply(t,o,I...)) + return new{T,R,D,typeof(t), typeof(o)}(t,o) + end +end -# # We need the associativity to be a→b→c = a→(b→c), which is the case for '→' -# # Should we overload some other infix binary opesrator? -# →(tm::TensorMapping{T,R,D}, o::AbstractArray{T,D}) where {T,R,D} = LazyTensorMappingApplication(tm,o) -# TODO: We need to be really careful about good error messages. -# For example what happens if you try to multiply LazyTensorMappingApplication with a TensorMapping(wrong order)? +function Base.getindex(ta::TensorApplication{T,R}, I::Vararg{Any,R}) where {T,R} + @boundscheck checkbounds(ta, Int.(I)...) + return @inbounds apply(ta.t, ta.o, I...) +end +Base.@propagate_inbounds Base.getindex(ta::TensorApplication{T,1} where T, I::CartesianIndex{1}) = ta[Tuple(I)...] # Would otherwise be caught in the previous method. +Base.size(ta::TensorApplication) = range_size(ta.t) + """ - LazyTensorMappingTranspose{T,R,D} <: TensorMapping{T,D,R} + TensorTranspose{T,R,D} <: LazyTensor{T,D,R} -Struct for lazy transpose of a TensorMapping. +Struct for lazy transpose of a LazyTensor. If a mapping implements the the `apply_transpose` method this allows working with -the transpose of mapping `m` by using `m'`. `m'` will work as a regular TensorMapping lazily calling +the transpose of mapping `m` by using `m'`. `m'` will work as a regular LazyTensor lazily calling the appropriate methods of `m`. 
""" -struct LazyTensorMappingTranspose{T,R,D, TM<:TensorMapping{T,R,D}} <: TensorMapping{T,D,R} +struct TensorTranspose{T,R,D, TM<:LazyTensor{T,R,D}} <: LazyTensor{T,D,R} tm::TM end -export LazyTensorMappingTranspose # # TBD: Should this be implemented on a type by type basis or through a trait to provide earlier errors? -# Jonatan 2020-09-25: Is the problem that you can take the transpose of any TensorMapping even if it doesn't implement `apply_transpose`? -Base.adjoint(tm::TensorMapping) = LazyTensorMappingTranspose(tm) -Base.adjoint(tmt::LazyTensorMappingTranspose) = tmt.tm +# Jonatan 2020-09-25: Is the problem that you can take the transpose of any LazyTensor even if it doesn't implement `apply_transpose`? +Base.adjoint(tm::LazyTensor) = TensorTranspose(tm) +Base.adjoint(tmt::TensorTranspose) = tmt.tm -apply(tmt::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{T,R}, I::Vararg{Any,D}) where {T,R,D} = apply_transpose(tmt.tm, v, I...) -apply_transpose(tmt::LazyTensorMappingTranspose{T,R,D}, v::AbstractArray{T,D}, I::Vararg{Any,R}) where {T,R,D} = apply(tmt.tm, v, I...) +apply(tmt::TensorTranspose{T,R,D}, v::AbstractArray{<:Any,R}, I::Vararg{Any,D}) where {T,R,D} = apply_transpose(tmt.tm, v, I...) +apply_transpose(tmt::TensorTranspose{T,R,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,R}) where {T,R,D} = apply(tmt.tm, v, I...) -range_size(tmt::LazyTensorMappingTranspose) = domain_size(tmt.tm) -domain_size(tmt::LazyTensorMappingTranspose) = range_size(tmt.tm) +range_size(tmt::TensorTranspose) = domain_size(tmt.tm) +domain_size(tmt::TensorTranspose) = range_size(tmt.tm) -struct LazyTensorMappingBinaryOperation{Op,T,R,D,T1<:TensorMapping{T,R,D},T2<:TensorMapping{T,R,D}} <: TensorMapping{T,D,R} +struct ElementwiseTensorOperation{Op,T,R,D,T1<:LazyTensor{T,R,D},T2<:LazyTensor{T,R,D}} <: LazyTensor{T,D,R} tm1::T1 tm2::T2 - @inline function LazyTensorMappingBinaryOperation{Op,T,R,D}(tm1::T1,tm2::T2) where {Op,T,R,D, T1<:TensorMapping{T,R,D},T2<:TensorMapping{T,R,D}} + function ElementwiseTensorOperation{Op,T,R,D}(tm1::T1,tm2::T2) where {Op,T,R,D, T1<:LazyTensor{T,R,D},T2<:LazyTensor{T,R,D}} + @boundscheck check_domain_size(tm2, domain_size(tm1)) + @boundscheck check_range_size(tm2, range_size(tm1)) return new{Op,T,R,D,T1,T2}(tm1,tm2) end end -# TODO: Boundschecking in constructor. -apply(tmBinOp::LazyTensorMappingBinaryOperation{:+,T,R,D}, v::AbstractArray{T,D}, I::Vararg{Any,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) + apply(tmBinOp.tm2, v, I...) -apply(tmBinOp::LazyTensorMappingBinaryOperation{:-,T,R,D}, v::AbstractArray{T,D}, I::Vararg{Any,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) - apply(tmBinOp.tm2, v, I...) +ElementwiseTensorOperation{Op}(s,t) where Op = ElementwiseTensorOperation{Op,eltype(s), range_dim(s), domain_dim(s)}(s,t) -range_size(tmBinOp::LazyTensorMappingBinaryOperation) = range_size(tmBinOp.tm1) -domain_size(tmBinOp::LazyTensorMappingBinaryOperation) = domain_size(tmBinOp.tm1) +apply(tmBinOp::ElementwiseTensorOperation{:+,T,R,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) + apply(tmBinOp.tm2, v, I...) +apply(tmBinOp::ElementwiseTensorOperation{:-,T,R,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,R}) where {T,R,D} = apply(tmBinOp.tm1, v, I...) - apply(tmBinOp.tm2, v, I...) 
-Base.:+(tm1::TensorMapping{T,R,D}, tm2::TensorMapping{T,R,D}) where {T,R,D} = LazyTensorMappingBinaryOperation{:+,T,R,D}(tm1,tm2) -Base.:-(tm1::TensorMapping{T,R,D}, tm2::TensorMapping{T,R,D}) where {T,R,D} = LazyTensorMappingBinaryOperation{:-,T,R,D}(tm1,tm2) +range_size(tmBinOp::ElementwiseTensorOperation) = range_size(tmBinOp.tm1) +domain_size(tmBinOp::ElementwiseTensorOperation) = domain_size(tmBinOp.tm1) + """ - TensorMappingComposition{T,R,K,D} + TensorComposition{T,R,K,D} -Lazily compose two `TensorMapping`s, so that they can be handled as a single `TensorMapping`. +Lazily compose two `LazyTensor`s, so that they can be handled as a single `LazyTensor`. """ -struct TensorMappingComposition{T,R,K,D, TM1<:TensorMapping{T,R,K}, TM2<:TensorMapping{T,K,D}} <: TensorMapping{T,R,D} +struct TensorComposition{T,R,K,D, TM1<:LazyTensor{T,R,K}, TM2<:LazyTensor{T,K,D}} <: LazyTensor{T,R,D} t1::TM1 t2::TM2 - @inline function TensorMappingComposition(t1::TensorMapping{T,R,K}, t2::TensorMapping{T,K,D}) where {T,R,K,D} + function TensorComposition(t1::LazyTensor{T,R,K}, t2::LazyTensor{T,K,D}) where {T,R,K,D} @boundscheck check_domain_size(t1, range_size(t2)) return new{T,R,K,D, typeof(t1), typeof(t2)}(t1,t2) end end -export TensorMappingComposition -range_size(tm::TensorMappingComposition) = range_size(tm.t1) -domain_size(tm::TensorMappingComposition) = domain_size(tm.t2) +range_size(tm::TensorComposition) = range_size(tm.t1) +domain_size(tm::TensorComposition) = domain_size(tm.t2) -function apply(c::TensorMappingComposition{T,R,K,D}, v::AbstractArray{T,D}, I::Vararg{Any,R}) where {T,R,K,D} +function apply(c::TensorComposition{T,R,K,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,R}) where {T,R,K,D} apply(c.t1, c.t2*v, I...) end -function apply_transpose(c::TensorMappingComposition{T,R,K,D}, v::AbstractArray{T,R}, I::Vararg{Any,D}) where {T,R,K,D} +function apply_transpose(c::TensorComposition{T,R,K,D}, v::AbstractArray{<:Any,R}, I::Vararg{Any,D}) where {T,R,K,D} apply_transpose(c.t2, c.t1'*v, I...) end -Base.@propagate_inbounds Base.:∘(s::TensorMapping, t::TensorMapping) = TensorMappingComposition(s,t) - """ - LazyLinearMap{T,R,D,...}(A, range_indicies, domain_indicies) - -TensorMapping defined by the AbstractArray A. `range_indicies` and `domain_indicies` define which indicies of A should -be considerd the range and domain of the TensorMapping. Each set of indices must be ordered in ascending order. - -For instance, if A is a m x n matrix, and range_size = (1,), domain_size = (2,), then the LazyLinearMap performs the -standard matrix-vector product on vectors of size n. 
-""" -struct LazyLinearMap{T,R,D, RD, AA<:AbstractArray{T,RD}} <: TensorMapping{T,R,D} - A::AA - range_indicies::NTuple{R,Int} - domain_indicies::NTuple{D,Int} - - function LazyLinearMap(A::AA, range_indicies::NTuple{R,Int}, domain_indicies::NTuple{D,Int}) where {T,R,D, RD, AA<:AbstractArray{T,RD}} - if !issorted(range_indicies) || !issorted(domain_indicies) - throw(DomainError("range_indicies and domain_indicies must be sorted in ascending order")) - end - - return new{T,R,D,RD,AA}(A,range_indicies,domain_indicies) - end -end -export LazyLinearMap - -range_size(llm::LazyLinearMap) = size(llm.A)[[llm.range_indicies...]] -domain_size(llm::LazyLinearMap) = size(llm.A)[[llm.domain_indicies...]] + TensorComposition(tm, tmi::IdentityTensor) + TensorComposition(tmi::IdentityTensor, tm) -function apply(llm::LazyLinearMap{T,R,D}, v::AbstractArray{T,D}, I::Vararg{Any,R}) where {T,R,D} - view_index = ntuple(i->:,ndims(llm.A)) - for i ∈ 1:R - view_index = Base.setindex(view_index, Int(I[i]), llm.range_indicies[i]) - end - A_view = @view llm.A[view_index...] - return sum(A_view.*v) -end - -function apply_transpose(llm::LazyLinearMap{T,R,D}, v::AbstractArray{T,R}, I::Vararg{Any,D}) where {T,R,D} - apply(LazyLinearMap(llm.A, llm.domain_indicies, llm.range_indicies), v, I...) -end - - -""" - IdentityMapping{T,D} <: TensorMapping{T,D,D} - -The lazy identity TensorMapping for a given size. Usefull for building up higher dimensional tensor mappings from lower -dimensional ones through outer products. Also used in the Implementation for InflatedTensorMapping. +Composes a `Tensormapping` `tm` with an `IdentityTensor` `tmi`, by returning `tm` """ -struct IdentityMapping{T,D} <: TensorMapping{T,D,D} - size::NTuple{D,Int} -end -export IdentityMapping - -IdentityMapping{T}(size::NTuple{D,Int}) where {T,D} = IdentityMapping{T,D}(size) -IdentityMapping{T}(size::Vararg{Int,D}) where {T,D} = IdentityMapping{T,D}(size) -IdentityMapping(size::Vararg{Int,D}) where D = IdentityMapping{Float64,D}(size) - -range_size(tmi::IdentityMapping) = tmi.size -domain_size(tmi::IdentityMapping) = tmi.size - -apply(tmi::IdentityMapping{T,D}, v::AbstractArray{T,D}, I::Vararg{Any,D}) where {T,D} = v[I...] -apply_transpose(tmi::IdentityMapping{T,D}, v::AbstractArray{T,D}, I::Vararg{Any,D}) where {T,D} = v[I...] - -""" - Base.:∘(tm, tmi) - Base.:∘(tmi, tm) - -Composes a `Tensormapping` `tm` with an `IdentityMapping` `tmi`, by returning `tm` -""" -@inline function Base.:∘(tm::TensorMapping{T,R,D}, tmi::IdentityMapping{T,D}) where {T,R,D} +function TensorComposition(tm::LazyTensor{T,R,D}, tmi::IdentityTensor{T,D}) where {T,R,D} @boundscheck check_domain_size(tm, range_size(tmi)) return tm end -@inline function Base.:∘(tmi::IdentityMapping{T,R}, tm::TensorMapping{T,R,D}) where {T,R,D} +function TensorComposition(tmi::IdentityTensor{T,R}, tm::LazyTensor{T,R,D}) where {T,R,D} @boundscheck check_domain_size(tmi, range_size(tm)) return tm end -# Specialization for the case where tm is an IdentityMapping. Required to resolve ambiguity. -@inline function Base.:∘(tm::IdentityMapping{T,D}, tmi::IdentityMapping{T,D}) where {T,D} +# Specialization for the case where tm is an IdentityTensor. Required to resolve ambiguity. 
+function TensorComposition(tm::IdentityTensor{T,D}, tmi::IdentityTensor{T,D}) where {T,D} @boundscheck check_domain_size(tm, range_size(tmi)) return tmi end +Base.:*(a::T, tm::LazyTensor{T}) where T = TensorComposition(ScalingTensor{T,range_dim(tm)}(a,range_size(tm)), tm) +Base.:*(tm::LazyTensor{T}, a::T) where T = a*tm """ - InflatedTensorMapping{T,R,D} <: TensorMapping{T,R,D} + InflatedTensor{T,R,D} <: LazyTensor{T,R,D} -An inflated `TensorMapping` with dimensions added before and afer its actual dimensions. +An inflated `LazyTensor` with dimensions added before and afer its actual dimensions. """ -struct InflatedTensorMapping{T,R,D,D_before,R_middle,D_middle,D_after, TM<:TensorMapping{T,R_middle,D_middle}} <: TensorMapping{T,R,D} - before::IdentityMapping{T,D_before} +struct InflatedTensor{T,R,D,D_before,R_middle,D_middle,D_after, TM<:LazyTensor{T,R_middle,D_middle}} <: LazyTensor{T,R,D} + before::IdentityTensor{T,D_before} tm::TM - after::IdentityMapping{T,D_after} + after::IdentityTensor{T,D_after} - function InflatedTensorMapping(before, tm::TensorMapping{T}, after) where T + function InflatedTensor(before, tm::LazyTensor{T}, after) where T R_before = range_dim(before) R_middle = range_dim(tm) R_after = range_dim(after) @@ -209,37 +145,37 @@ return new{T,R,D,D_before,R_middle,D_middle,D_after, typeof(tm)}(before, tm, after) end end -export InflatedTensorMapping + """ - InflatedTensorMapping(before, tm, after) - InflatedTensorMapping(before,tm) - InflatedTensorMapping(tm,after) + InflatedTensor(before, tm, after) + InflatedTensor(before,tm) + InflatedTensor(tm,after) -The outer product of `before`, `tm` and `after`, where `before` and `after` are `IdentityMapping`s. +The outer product of `before`, `tm` and `after`, where `before` and `after` are `IdentityTensor`s. -If one of `before` or `after` is left out, a 0-dimensional `IdentityMapping` is used as the default value. +If one of `before` or `after` is left out, a 0-dimensional `IdentityTensor` is used as the default value. -If `tm` already is an `InflatedTensorMapping`, `before` and `after` will be extended instead of -creating a nested `InflatedTensorMapping`. +If `tm` already is an `InflatedTensor`, `before` and `after` will be extended instead of +creating a nested `InflatedTensor`. 
""" -InflatedTensorMapping(::IdentityMapping, ::TensorMapping, ::IdentityMapping) +InflatedTensor(::IdentityTensor, ::LazyTensor, ::IdentityTensor) -function InflatedTensorMapping(before, itm::InflatedTensorMapping, after) - return InflatedTensorMapping( - IdentityMapping(before.size..., itm.before.size...), +function InflatedTensor(before, itm::InflatedTensor, after) + return InflatedTensor( + IdentityTensor(before.size..., itm.before.size...), itm.tm, - IdentityMapping(itm.after.size..., after.size...), + IdentityTensor(itm.after.size..., after.size...), ) end -InflatedTensorMapping(before::IdentityMapping, tm::TensorMapping{T}) where T = InflatedTensorMapping(before,tm,IdentityMapping{T}()) -InflatedTensorMapping(tm::TensorMapping{T}, after::IdentityMapping) where T = InflatedTensorMapping(IdentityMapping{T}(),tm,after) +InflatedTensor(before::IdentityTensor, tm::LazyTensor{T}) where T = InflatedTensor(before,tm,IdentityTensor{T}()) +InflatedTensor(tm::LazyTensor{T}, after::IdentityTensor) where T = InflatedTensor(IdentityTensor{T}(),tm,after) # Resolve ambiguity between the two previous methods -InflatedTensorMapping(I1::IdentityMapping{T}, I2::IdentityMapping{T}) where T = InflatedTensorMapping(I1,I2,IdentityMapping{T}()) +InflatedTensor(I1::IdentityTensor{T}, I2::IdentityTensor{T}) where T = InflatedTensor(I1,I2,IdentityTensor{T}()) -# TODO: Implement some pretty printing in terms of ⊗. E.g InflatedTensorMapping(I(3),B,I(2)) -> I(3)⊗B⊗I(2) +# TODO: Implement some pretty printing in terms of ⊗. E.g InflatedTensor(I(3),B,I(2)) -> I(3)⊗B⊗I(2) -function range_size(itm::InflatedTensorMapping) +function range_size(itm::InflatedTensor) return flatten_tuple( range_size(itm.before), range_size(itm.tm), @@ -247,7 +183,7 @@ ) end -function domain_size(itm::InflatedTensorMapping) +function domain_size(itm::InflatedTensor) return flatten_tuple( domain_size(itm.before), domain_size(itm.tm), @@ -255,7 +191,7 @@ ) end -function apply(itm::InflatedTensorMapping{T,R,D}, v::AbstractArray{T,D}, I::Vararg{Any,R}) where {T,R,D} +function apply(itm::InflatedTensor{T,R,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,R}) where {T,R,D} dim_before = range_dim(itm.before) dim_domain = domain_dim(itm.tm) dim_range = range_dim(itm.tm) @@ -267,7 +203,7 @@ return apply(itm.tm, v_inner, inner_index...) end -function apply_transpose(itm::InflatedTensorMapping{T,R,D}, v::AbstractArray{T,R}, I::Vararg{Any,D}) where {T,R,D} +function apply_transpose(itm::InflatedTensor{T,R,D}, v::AbstractArray{<:Any,R}, I::Vararg{Any,D}) where {T,R,D} dim_before = range_dim(itm.before) dim_domain = domain_dim(itm.tm) dim_range = range_dim(itm.tm) @@ -280,87 +216,10 @@ end -""" - split_index(::Val{dim_before}, ::Val{dim_view}, ::Val{dim_index}, ::Val{dim_after}, I...) - -Splits the multi-index `I` into two parts. One part which is expected to be -used as a view, and one which is expected to be used as an index. -Eg. -``` -split_index(Val(1),Val(3),Val(2),Val(1),(1,2,3,4)) -> (1,:,:,:,4), (2,3) -``` - -`dim_view` controls how many colons are in the view, and `dim_index` controls -how many elements are extracted from the middle. -`dim_before` and `dim_after` decides the length of the index parts before and after the colons in the view index. - -Arguments should satisfy `length(I) == dim_before+B_domain+dim_after`. 
- -The returned values satisfy - * `length(view_index) == dim_before + dim_view + dim_after` - * `length(I_middle) == dim_index` -""" -function split_index(::Val{dim_before}, ::Val{dim_view}, ::Val{dim_index}, ::Val{dim_after}, I...) where {dim_before,dim_view, dim_index,dim_after} - I_before, I_middle, I_after = split_tuple(I, Val(dim_before), Val(dim_index)) - - view_index = (I_before..., ntuple((i)->:, dim_view)..., I_after...) - - return view_index, I_middle -end - -# TODO: Can this be replaced by something more elegant while still being type stable? 2020-10-21 -# See: -# https://github.com/JuliaLang/julia/issues/34884 -# https://github.com/JuliaLang/julia/issues/30386 -""" - slice_tuple(t, Val(l), Val(u)) - -Get a slice of a tuple in a type stable way. -Equivalent to `t[l:u]` but type stable. -""" -function slice_tuple(t,::Val{L},::Val{U}) where {L,U} - return ntuple(i->t[i+L-1], U-L+1) -end - -""" - split_tuple(t::Tuple{...}, ::Val{M}) where {N,M} - -Split the tuple `t` into two parts. the first part is `M` long. -E.g -```julia -split_tuple((1,2,3,4),Val(3)) -> (1,2,3), (4,) -``` -""" -function split_tuple(t::NTuple{N,Any},::Val{M}) where {N,M} - return slice_tuple(t,Val(1), Val(M)), slice_tuple(t,Val(M+1), Val(N)) -end - -""" - split_tuple(t::Tuple{...},::Val{M},::Val{K}) where {N,M,K} - -Same as `split_tuple(t::NTuple{N},::Val{M})` but splits the tuple in three parts. With the first -two parts having lenght `M` and `K`. -""" -function split_tuple(t::NTuple{N,Any},::Val{M},::Val{K}) where {N,M,K} - p1, tail = split_tuple(t, Val(M)) - p2, p3 = split_tuple(tail, Val(K)) - return p1,p2,p3 -end - - -""" - flatten_tuple(t) - -Takes a nested tuple and flattens the whole structure -""" -flatten_tuple(t::NTuple{N, Number} where N) = t -flatten_tuple(t::Tuple) = ((flatten_tuple.(t)...)...,) # simplify? -flatten_tuple(ts::Vararg) = flatten_tuple(ts) - @doc raw""" LazyOuterProduct(tms...) -Creates a `TensorMappingComposition` for the outerproduct of `tms...`. +Creates a `TensorComposition` for the outerproduct of `tms...`. This is done by separating the outer product into regular products of outer products involving only identity mappings and one non-identity mapping. First let @@ -395,42 +254,78 @@ ``` """ function LazyOuterProduct end -export LazyOuterProduct -function LazyOuterProduct(tm1::TensorMapping{T}, tm2::TensorMapping{T}) where T - itm1 = InflatedTensorMapping(tm1, IdentityMapping{T}(range_size(tm2))) - itm2 = InflatedTensorMapping(IdentityMapping{T}(domain_size(tm1)),tm2) +function LazyOuterProduct(tm1::LazyTensor{T}, tm2::LazyTensor{T}) where T + itm1 = InflatedTensor(tm1, IdentityTensor{T}(range_size(tm2))) + itm2 = InflatedTensor(IdentityTensor{T}(domain_size(tm1)),tm2) return itm1∘itm2 end -LazyOuterProduct(t1::IdentityMapping{T}, t2::IdentityMapping{T}) where T = IdentityMapping{T}(t1.size...,t2.size...) -LazyOuterProduct(t1::TensorMapping, t2::IdentityMapping) = InflatedTensorMapping(t1, t2) -LazyOuterProduct(t1::IdentityMapping, t2::TensorMapping) = InflatedTensorMapping(t1, t2) +LazyOuterProduct(t1::IdentityTensor{T}, t2::IdentityTensor{T}) where T = IdentityTensor{T}(t1.size...,t2.size...) 
+LazyOuterProduct(t1::LazyTensor, t2::IdentityTensor) = InflatedTensor(t1, t2) +LazyOuterProduct(t1::IdentityTensor, t2::LazyTensor) = InflatedTensor(t1, t2) -LazyOuterProduct(tms::Vararg{TensorMapping}) = foldl(LazyOuterProduct, tms) +LazyOuterProduct(tms::Vararg{LazyTensor}) = foldl(LazyOuterProduct, tms) -⊗(a::TensorMapping, b::TensorMapping) = LazyOuterProduct(a,b) -export ⊗ -function check_domain_size(tm::TensorMapping, sz) +""" + inflate(tm::LazyTensor, sz, dir) + +Inflate `tm` such that it gets the size `sz` in all directions except `dir`. +Here `sz[dir]` is ignored and replaced with the range and domains size of +`tm`. + +An example of when this operation is useful is when extending a one +dimensional difference operator `D` to a 2D grid of a ceratin size. In that +case we could have + +```julia +Dx = inflate(D, (10,10), 1) +Dy = inflate(D, (10,10), 2) +``` +""" +function inflate(tm::LazyTensor, sz, dir) + Is = IdentityTensor{eltype(tm)}.(sz) + parts = Base.setindex(Is, tm, dir) + return foldl(⊗, parts) +end + +function check_domain_size(tm::LazyTensor, sz) if domain_size(tm) != sz - throw(SizeMismatch(tm,sz)) + throw(DomainSizeMismatch(tm,sz)) end end -struct SizeMismatch <: Exception - tm::TensorMapping +function check_range_size(tm::LazyTensor, sz) + if range_size(tm) != sz + throw(RangeSizeMismatch(tm,sz)) + end +end + +struct DomainSizeMismatch <: Exception + tm::LazyTensor sz end -export SizeMismatch + +function Base.showerror(io::IO, err::DomainSizeMismatch) + print(io, "DomainSizeMismatch: ") + print(io, "domain size $(domain_size(err.tm)) of LazyTensor not matching size $(err.sz)") +end + -function Base.showerror(io::IO, err::SizeMismatch) - print(io, "SizeMismatch: ") - print(io, "domain size $(domain_size(err.tm)) of TensorMapping not matching size $(err.sz)") +struct RangeSizeMismatch <: Exception + tm::LazyTensor + sz end +function Base.showerror(io::IO, err::RangeSizeMismatch) + print(io, "RangeSizeMismatch: ") + print(io, "range size $(range_size(err.tm)) of LazyTensor not matching size $(err.sz)") +end + + function apply_with_region(op, v, boundary_width::Integer, dim_size::Integer, i) if 0 < i <= boundary_width return LazyTensors.apply(op,v,Index(i,Lower)) @@ -455,3 +350,4 @@ error("Bounds error") # TODO: Make this more standard end end +
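For reference, a minimal sketch of how the new `inflate` composes with the pieces above. It assumes `ScalingTensor` and `inflate` are reachable through `Sbplib.LazyTensors` (the module's export list is not part of this hunk) and that indexing a lazy application evaluates it at a single point:

```julia
using Sbplib.LazyTensors

S  = LazyTensors.ScalingTensor(2.0, (10,))   # 1D operator scaling vectors of length 10
Sy = LazyTensors.inflate(S, (8, 10), 2)      # same operator acting along dimension 2 of an 8×10 grid function
v  = ones(8, 10)
(Sy*v)[3, 4]                                 # == 2.0, evaluated lazily at a single index
```

Internally `inflate` builds the outer product of an `IdentityTensor` and `S`, i.e. exactly the `InflatedTensor` construction documented above.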
--- a/src/LazyTensors/tensor_mapping.jl Mon Feb 21 10:33:58 2022 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,79 +0,0 @@ -""" - TensorMapping{T,R,D} - -Describes a mapping of a `D` dimension tensor to an `R` dimension tensor. -The action of the mapping is implemented through the method -```julia - apply(t::TensorMapping{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {R,D,T} -``` - -The size of the range and domain that the operator works with should be returned by -the functions -```julia - range_size(::TensorMapping) - domain_size(::TensorMapping) -``` -to allow querying for one or the other. - -Optionally the action of the transpose may be defined through -```julia - apply_transpose(t::TensorMapping{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {R,D,T} -``` -""" -abstract type TensorMapping{T,R,D} end -export TensorMapping - -""" - apply(t::TensorMapping{T,R,D}, v::AbstractArray{T,D}, I::Vararg) where {R,D,T} - -Return the result of the mapping for a given index. -""" -function apply end -export apply - -""" - apply_transpose(t::TensorMapping{T,R,D}, v::AbstractArray{T,R}, I::Vararg) where {R,D,T} - -Return the result of the transposed mapping for a given index. -""" -function apply_transpose end -export apply_transpose - -""" - range_dim(::TensorMapping) -Return the dimension of the range space of a given mapping -""" -range_dim(::TensorMapping{T,R,D}) where {T,R,D} = R - -""" - domain_dim(::TensorMapping) -Return the dimension of the domain space of a given mapping -""" -domain_dim(::TensorMapping{T,R,D}) where {T,R,D} = D - -export range_dim, domain_dim - -""" - range_size(M::TensorMapping) - -Return the range size for the mapping. -""" -function range_size end - -""" - domain_size(M::TensorMapping) - -Return the domain size for the mapping. -""" -function domain_size end - -export range_size, domain_size - -""" - eltype(::TensorMapping{T}) - -The type of elements the TensorMapping acts on. -""" -Base.eltype(::TensorMapping{T}) where T = T - -# TODO: Think about boundschecking!
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/LazyTensors/tensor_types.jl Fri Feb 03 22:14:47 2023 +0100 @@ -0,0 +1,94 @@ +""" + IdentityTensor{T,D} <: LazyTensor{T,D,D} + +The lazy identity LazyTensor for a given size. Usefull for building up higher dimensional tensor mappings from lower +dimensional ones through outer products. Also used in the Implementation for InflatedTensor. +""" +struct IdentityTensor{T,D} <: LazyTensor{T,D,D} + size::NTuple{D,Int} +end + +IdentityTensor{T}(size::NTuple{D,Int}) where {T,D} = IdentityTensor{T,D}(size) +IdentityTensor{T}(size::Vararg{Int,D}) where {T,D} = IdentityTensor{T,D}(size) +IdentityTensor(size::Vararg{Int,D}) where D = IdentityTensor{Float64,D}(size) + +range_size(tmi::IdentityTensor) = tmi.size +domain_size(tmi::IdentityTensor) = tmi.size + +apply(tmi::IdentityTensor{T,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,D}) where {T,D} = v[I...] +apply_transpose(tmi::IdentityTensor{T,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,D}) where {T,D} = v[I...] + + +""" + ScalingTensor{T,D} <: LazyTensor{T,D,D} + +A lazy tensor that scales its input with `λ`. +""" +struct ScalingTensor{T,D} <: LazyTensor{T,D,D} + λ::T + size::NTuple{D,Int} +end + +LazyTensors.apply(tm::ScalingTensor{T,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,D}) where {T,D} = tm.λ*v[I...] +LazyTensors.apply_transpose(tm::ScalingTensor{T,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,D}) where {T,D} = tm.λ*v[I...] + +LazyTensors.range_size(m::ScalingTensor) = m.size +LazyTensors.domain_size(m::ScalingTensor) = m.size + + +""" + DiagonalTensor{T,D,...} <: LazyTensor{T,D,D} + DiagonalTensor(a::AbstractArray) + +A lazy tensor with diagonal `a`. +""" +struct DiagonalTensor{T,D,AT<:AbstractArray{T,D}} <: LazyTensor{T,D,D} + diagonal::AT +end + +range_size(tm::DiagonalTensor) = size(tm.diagonal) +domain_size(tm::DiagonalTensor) = size(tm.diagonal) + + +LazyTensors.apply(tm::DiagonalTensor{T,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,D}) where {T,D} = tm.diagonal[I...]*v[I...] +LazyTensors.apply_transpose(tm::DiagonalTensor{T,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,D}) where {T,D} = tm.diagonal[I...]*v[I...] + + +""" + DenseTensor{T,R,D,...}(A, range_indicies, domain_indicies) + +LazyTensor defined by the AbstractArray A. `range_indicies` and `domain_indicies` define which indicies of A should +be considerd the range and domain of the LazyTensor. Each set of indices must be ordered in ascending order. + +For instance, if A is a m x n matrix, and range_size = (1,), domain_size = (2,), then the DenseTensor performs the +standard matrix-vector product on vectors of size n. 
+""" +struct DenseTensor{T,R,D, RD, AA<:AbstractArray{T,RD}} <: LazyTensor{T,R,D} + A::AA + range_indicies::NTuple{R,Int} + domain_indicies::NTuple{D,Int} + + function DenseTensor(A::AA, range_indicies::NTuple{R,Int}, domain_indicies::NTuple{D,Int}) where {T,R,D, RD, AA<:AbstractArray{T,RD}} + if !issorted(range_indicies) || !issorted(domain_indicies) + throw(DomainError("range_indicies and domain_indicies must be sorted in ascending order")) + end + + return new{T,R,D,RD,AA}(A,range_indicies,domain_indicies) + end +end + +range_size(llm::DenseTensor) = size(llm.A)[[llm.range_indicies...]] +domain_size(llm::DenseTensor) = size(llm.A)[[llm.domain_indicies...]] + +function apply(llm::DenseTensor{T,R,D}, v::AbstractArray{<:Any,D}, I::Vararg{Any,R}) where {T,R,D} + view_index = ntuple(i->:,ndims(llm.A)) + for i ∈ 1:R + view_index = Base.setindex(view_index, Int(I[i]), llm.range_indicies[i]) + end + A_view = @view llm.A[view_index...] + return sum(A_view.*v) +end + +function apply_transpose(llm::DenseTensor{T,R,D}, v::AbstractArray{<:Any,R}, I::Vararg{Any,D}) where {T,R,D} + apply(DenseTensor(llm.A, llm.domain_indicies, llm.range_indicies), v, I...) +end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/LazyTensors/tuple_manipulation.jl Fri Feb 03 22:14:47 2023 +0100 @@ -0,0 +1,76 @@ +""" + split_index(::Val{dim_before}, ::Val{dim_view}, ::Val{dim_index}, ::Val{dim_after}, I...) + +Splits the multi-index `I` into two parts. One part which is expected to be +used as a view, and one which is expected to be used as an index. +Eg. +``` +split_index(Val(1),Val(3),Val(2),Val(1),(1,2,3,4)) -> (1,:,:,:,4), (2,3) +``` + +`dim_view` controls how many colons are in the view, and `dim_index` controls +how many elements are extracted from the middle. +`dim_before` and `dim_after` decides the length of the index parts before and after the colons in the view index. + +Arguments should satisfy `length(I) == dim_before+B_domain+dim_after`. + +The returned values satisfy + * `length(view_index) == dim_before + dim_view + dim_after` + * `length(I_middle) == dim_index` +""" +function split_index(::Val{dim_before}, ::Val{dim_view}, ::Val{dim_index}, ::Val{dim_after}, I...) where {dim_before,dim_view, dim_index,dim_after} + I_before, I_middle, I_after = split_tuple(I, Val(dim_before), Val(dim_index)) + + view_index = (I_before..., ntuple((i)->:, dim_view)..., I_after...) + + return view_index, I_middle +end + +# TODO: Can this be replaced by something more elegant while still being type stable? 2020-10-21 +# See: +# https://github.com/JuliaLang/julia/issues/34884 +# https://github.com/JuliaLang/julia/issues/30386 +""" + slice_tuple(t, Val(l), Val(u)) + +Get a slice of a tuple in a type stable way. +Equivalent to `t[l:u]` but type stable. +""" +function slice_tuple(t,::Val{L},::Val{U}) where {L,U} + return ntuple(i->t[i+L-1], U-L+1) +end + +""" + split_tuple(t::Tuple{...}, ::Val{M}) where {N,M} + +Split the tuple `t` into two parts. the first part is `M` long. +E.g +```julia +split_tuple((1,2,3,4),Val(3)) -> (1,2,3), (4,) +``` +""" +function split_tuple(t::NTuple{N,Any},::Val{M}) where {N,M} + return slice_tuple(t,Val(1), Val(M)), slice_tuple(t,Val(M+1), Val(N)) +end + +""" + split_tuple(t::Tuple{...},::Val{M},::Val{K}) where {N,M,K} + +Same as `split_tuple(t::NTuple{N},::Val{M})` but splits the tuple in three parts. With the first +two parts having lenght `M` and `K`. +""" +function split_tuple(t::NTuple{N,Any},::Val{M},::Val{K}) where {N,M,K} + p1, tail = split_tuple(t, Val(M)) + p2, p3 = split_tuple(tail, Val(K)) + return p1,p2,p3 +end + + +""" + flatten_tuple(t) + +Takes a nested tuple and flattens the whole structure +""" +flatten_tuple(t::NTuple{N, Number} where N) = t +flatten_tuple(t::Tuple) = ((flatten_tuple.(t)...)...,) # simplify? +flatten_tuple(ts::Vararg) = flatten_tuple(ts)
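The tuple helpers appear to be internal to `Sbplib.LazyTensors`, so the calls below qualify them; a few illustrative invocations matching the docstrings:

```julia
import Sbplib.LazyTensors

LazyTensors.split_tuple((1,2,3,4), Val(3))            # -> ((1,2,3), (4,))
LazyTensors.split_tuple((1,2,3,4,5), Val(1), Val(2))  # -> ((1,), (2,3), (4,5))
LazyTensors.slice_tuple((1,2,3,4,5), Val(2), Val(4))  # -> (2,3,4)
LazyTensors.split_index(Val(1), Val(3), Val(2), Val(1), 1, 2, 3, 4)
# -> ((1, :, :, :, 4), (2, 3))
```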
--- a/src/SbpOperators/SbpOperators.jl Mon Feb 21 10:33:58 2022 +0100 +++ b/src/SbpOperators/SbpOperators.jl Fri Feb 03 22:14:47 2023 +0100 @@ -1,5 +1,25 @@ module SbpOperators +# Stencil set +export StencilSet +export read_stencil_set +export get_stencil_set +export parse_stencil +export parse_scalar +export parse_tuple +export sbp_operators_path + +# Operators +export boundary_quadrature +export boundary_restriction +export inner_product +export inverse_inner_product +export Laplace +export laplace +export normal_derivative +export first_derivative +export second_derivative + using Sbplib.RegionIndices using Sbplib.LazyTensors using Sbplib.Grids @@ -10,9 +30,10 @@ end include("stencil.jl") -include("readoperator.jl") +include("stencil_set.jl") include("volumeops/volume_operator.jl") include("volumeops/constant_interior_scaling_operator.jl") +include("volumeops/derivatives/first_derivative.jl") include("volumeops/derivatives/second_derivative.jl") include("volumeops/laplace/laplace.jl") include("volumeops/inner_products/inner_product.jl")
--- a/src/SbpOperators/boundaryops/boundary_operator.jl Mon Feb 21 10:33:58 2022 +0100 +++ b/src/SbpOperators/boundaryops/boundary_operator.jl Fri Feb 03 22:14:47 2023 +0100 @@ -1,48 +1,17 @@ """ - boundary_operator(grid,closure_stencil,boundary) - -Creates a boundary operator on a `Dim`-dimensional grid for the -specified `boundary`. The action of the operator is determined by `closure_stencil`. - -When `Dim=1`, the corresponding `BoundaryOperator` tensor mapping is returned. -When `Dim>1`, the `BoundaryOperator` `op` is inflated by the outer product -of `IdentityMappings` in orthogonal coordinate directions, e.g for `Dim=3`, -the boundary restriction operator in the y-direction direction is `Ix⊗op⊗Iz`. -""" -function boundary_operator(grid::EquidistantGrid, closure_stencil, boundary::CartesianBoundary) - #TODO:Check that dim(boundary) <= Dim? + BoundaryOperator{T,R,N} <: LazyTensor{T,0,1} - # Create 1D boundary operator - r = region(boundary) - d = dim(boundary) - op = BoundaryOperator(restrict(grid, d), closure_stencil, r) - - # Create 1D IdentityMappings for each coordinate direction - one_d_grids = restrict.(Ref(grid), Tuple(1:dimension(grid))) - Is = IdentityMapping{eltype(grid)}.(size.(one_d_grids)) - - # Formulate the correct outer product sequence of the identity mappings and - # the boundary operator - parts = Base.setindex(Is, op, d) - return foldl(⊗, parts) -end - -""" - BoundaryOperator{T,R,N} <: TensorMapping{T,0,1} - -Implements the boundary operator `op` for 1D as a `TensorMapping` +Implements the boundary operator `op` for 1D as a `LazyTensor` `op` is the restriction of a grid function to the boundary using some closure `Stencil{T,N}`. The boundary to restrict to is determined by `R`. `op'` is the prolongation of a zero dimensional array to the whole grid using the same closure stencil. """ -struct BoundaryOperator{T,R<:Region,N} <: TensorMapping{T,0,1} +struct BoundaryOperator{T,R<:Region,N} <: LazyTensor{T,0,1} stencil::Stencil{T,N} size::Int end -BoundaryOperator{R}(stencil::Stencil{T,N}, size::Int) where {T,R,N} = BoundaryOperator{T,R,N}(stencil, size) - """ BoundaryOperator(grid::EquidistantGrid{1}, closure_stencil, region) @@ -55,6 +24,7 @@ """ closure_size(::BoundaryOperator) + The size of the closure stencil. """ closure_size(::BoundaryOperator{T,R,N}) where {T,R,N} = N @@ -62,27 +32,27 @@ LazyTensors.range_size(op::BoundaryOperator) = () LazyTensors.domain_size(op::BoundaryOperator) = (op.size,) -function LazyTensors.apply(op::BoundaryOperator{T,Lower}, v::AbstractVector{T}) where T +function LazyTensors.apply(op::BoundaryOperator{<:Any,Lower}, v::AbstractVector) apply_stencil(op.stencil,v,1) end -function LazyTensors.apply(op::BoundaryOperator{T,Upper}, v::AbstractVector{T}) where T +function LazyTensors.apply(op::BoundaryOperator{<:Any,Upper}, v::AbstractVector) apply_stencil_backwards(op.stencil,v,op.size) end -function LazyTensors.apply_transpose(op::BoundaryOperator{T,Lower}, v::AbstractArray{T,0}, i::Index{Lower}) where T +function LazyTensors.apply_transpose(op::BoundaryOperator{<:Any,Lower}, v::AbstractArray{<:Any,0}, i::Index{Lower}) return op.stencil[Int(i)-1]*v[] end -function LazyTensors.apply_transpose(op::BoundaryOperator{T,Upper}, v::AbstractArray{T,0}, i::Index{Upper}) where T +function LazyTensors.apply_transpose(op::BoundaryOperator{<:Any,Upper}, v::AbstractArray{<:Any,0}, i::Index{Upper}) return op.stencil[op.size[1] - Int(i)]*v[] end # Catch all combinations of Lower, Upper and Interior not caught by the two previous methods. 
-function LazyTensors.apply_transpose(op::BoundaryOperator{T}, v::AbstractArray{T,0}, i::Index) where T - return zero(T) +function LazyTensors.apply_transpose(op::BoundaryOperator, v::AbstractArray{<:Any,0}, i::Index) + return zero(eltype(v)) end -function LazyTensors.apply_transpose(op::BoundaryOperator{T}, v::AbstractArray{T,0}, i) where T +function LazyTensors.apply_transpose(op::BoundaryOperator, v::AbstractArray{<:Any,0}, i) return LazyTensors.apply_transpose_with_region(op, v, closure_size(op), op.size[1], i) end
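For orientation, a 1D sketch of the refactored `BoundaryOperator`, following the same constructor call that appears in `inference_trouble.txt` further down in this changeset:

```julia
using Sbplib.Grids, Sbplib.RegionIndices, Sbplib.LazyTensors
import Sbplib.SbpOperators

g = EquidistantGrid(10, 0.0, 1.0)
e = SbpOperators.BoundaryOperator(g, SbpOperators.Stencil(1.0; center=1), Lower())
v = evalOn(g, x -> x^2 + 1)
LazyTensors.apply(e, v)    # == v[1], the restriction of v to the lower boundary
```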
--- a/src/SbpOperators/boundaryops/boundary_restriction.jl Mon Feb 21 10:33:58 2022 +0100 +++ b/src/SbpOperators/boundaryops/boundary_restriction.jl Fri Feb 03 22:14:47 2023 +0100 @@ -1,18 +1,25 @@ """ - boundary_restriction(grid::EquidistantGrid, closure_stencil::Stencil, boundary::CartesianBoundary) - boundary_restriction(grid::EquidistantGrid{1}, closure_stencil::Stencil, region::Region) + boundary_restriction(grid, closure_stencil::Stencil, boundary) -Creates the boundary restriction operator `e` as a `TensorMapping` +Creates boundary restriction operators `e` as `LazyTensor`s on `boundary` -`e` is the restriction of a grid function to the boundary specified by `boundary` or `region` using some `closure_stencil`. -`e'` is the prolongation of a grid function on the boundary to the whole grid using the same `closure_stencil`. +`e` is the restriction of a grid function to `boundary` using a `Stencil` `closure_stencil`. +`e'` is the prolongation of a grid function on `boundary` to the whole grid using the same `closure_stencil`. On a one-dimensional `grid`, `e` is a `BoundaryOperator`. On a multi-dimensional `grid`, `e` is the inflation of -a `BoundaryOperator`. Also see the documentation of `SbpOperators.boundary_operator(...)` for more details. +a `BoundaryOperator`. + +See also: [`BoundaryOperator`](@ref), [`LazyTensors.inflate`](@ref). """ -function boundary_restriction(grid::EquidistantGrid, closure_stencil, boundary::CartesianBoundary) +function boundary_restriction(grid, closure_stencil, boundary) converted_stencil = convert(Stencil{eltype(grid)}, closure_stencil) - return SbpOperators.boundary_operator(grid, converted_stencil, boundary) + + op = BoundaryOperator(restrict(grid, dim(boundary)), converted_stencil, region(boundary)) + return LazyTensors.inflate(op, size(grid), dim(boundary)) end -boundary_restriction(grid::EquidistantGrid{1}, closure_stencil, region::Region) = boundary_restriction(grid, closure_stencil, CartesianBoundary{1,typeof(region)}()) + +""" + boundary_restriction(grid, stencil_set, boundary) -export boundary_restriction +Creates a `boundary_restriction` operator on `grid` given a `stencil_set`. +""" +boundary_restriction(grid, stencil_set::StencilSet, boundary) = boundary_restriction(grid, parse_stencil(stencil_set["e"]["closure"]), boundary)
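A sketch of the new stencil-set entry point. It assumes the bundled `standard_diagonal.toml` carries an `order` key to filter on; the full file is not shown in this changeset:

```julia
using Sbplib.Grids, Sbplib.RegionIndices, Sbplib.SbpOperators

stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order = 4)
g   = EquidistantGrid((11, 12), (0.0, 0.0), (1.0, 1.0))
e_w = boundary_restriction(g, stencil_set, CartesianBoundary{1,Lower}())
v   = evalOn(g, (x,y) -> x + 2y)
(e_w*v)[3]    # value of v at the third grid point of the western boundary
```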
--- a/src/SbpOperators/boundaryops/normal_derivative.jl Mon Feb 21 10:33:58 2022 +0100 +++ b/src/SbpOperators/boundaryops/normal_derivative.jl Fri Feb 03 22:14:47 2023 +0100 @@ -1,18 +1,26 @@ """ - normal_derivative(grid::EquidistantGrid, closure_stencil::Stencil, boundary::CartesianBoundary) - normal_derivative(grid::EquidistantGrid{1}, closure_stencil::Stencil, region::Region) + normal_derivative(grid, closure_stencil::Stencil, boundary) -Creates the normal derivative boundary operator `d` as a `TensorMapping` +Creates the normal derivative boundary operator `d` as a `LazyTensor` -`d` is the normal derivative of a grid function at the boundary specified by `boundary` or `region` using some `closure_stencil`. +`d` computes the normal derivative of a grid function on `boundary` using a `Stencil` `closure_stencil`. `d'` is the prolongation of the normal derivative of a grid function to the whole grid using the same `closure_stencil`. On a one-dimensional `grid`, `d` is a `BoundaryOperator`. On a multi-dimensional `grid`, `d` is the inflation of -a `BoundaryOperator`. Also see the documentation of `SbpOperators.boundary_operator(...)` for more details. +a `BoundaryOperator`. + +See also: [`BoundaryOperator`](@ref), [`LazyTensors.inflate`](@ref). """ -function normal_derivative(grid::EquidistantGrid, closure_stencil, boundary::CartesianBoundary) +function normal_derivative(grid, closure_stencil, boundary) direction = dim(boundary) h_inv = inverse_spacing(grid)[direction] - return SbpOperators.boundary_operator(grid, scale(closure_stencil,h_inv), boundary) + + op = BoundaryOperator(restrict(grid, dim(boundary)), scale(closure_stencil,h_inv), region(boundary)) + return LazyTensors.inflate(op, size(grid), dim(boundary)) end -normal_derivative(grid::EquidistantGrid{1}, closure_stencil, region::Region) = normal_derivative(grid, closure_stencil, CartesianBoundary{1,typeof(region)}()) -export normal_derivative + +""" + normal_derivative(grid, stencil_set, boundary) + +Creates a `normal_derivative` operator on `grid` given a `stencil_set`. +""" +normal_derivative(grid, stencil_set::StencilSet, boundary) = normal_derivative(grid, parse_stencil(stencil_set["d1"]["closure"]), boundary)
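A quick numerical check of the raw-stencil form of `normal_derivative`, using the second order `d1` closure from `standard_diagonal.toml` directly. With the sign convention introduced in this changeset, v(x) = x gives -1 at the western boundary, i.e. the derivative along the outward normal:

```julia
using Sbplib.Grids, Sbplib.RegionIndices, Sbplib.LazyTensors, Sbplib.SbpOperators

g = EquidistantGrid(11, 0.0, 1.0)
closure = SbpOperators.Stencil(3/2, -2.0, 1/2; center = 1)    # d1 closure of the second order set
d_w = normal_derivative(g, closure, CartesianBoundary{1,Lower}())
v = evalOn(g, x -> x)
LazyTensors.apply(d_w, v)    # ≈ -1.0
```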
--- a/src/SbpOperators/operators/standard_diagonal.toml Mon Feb 21 10:33:58 2022 +0100 +++ b/src/SbpOperators/operators/standard_diagonal.toml Fri Feb 03 22:14:47 2023 +0100 @@ -31,7 +31,7 @@ ] e.closure = ["1"] -d1.closure = {s = ["-3/2", "2", "-1/2"], c = 1} +d1.closure = {s = ["3/2", "-2", "1/2"], c = 1} [[stencil_set]] @@ -57,4 +57,4 @@ ] e.closure = ["1"] -d1.closure = {s = ["-11/6", "3", "-3/2", "1/3"], c = 1} +d1.closure = {s = ["11/6", "-3", "3/2", "-1/3"], c = 1}
--- a/src/SbpOperators/readoperator.jl Mon Feb 21 10:33:58 2022 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,160 +0,0 @@ -using TOML - -export read_stencil_set -export get_stencil_set - -export parse_stencil -export parse_scalar -export parse_tuple - -export sbp_operators_path - - -""" - read_stencil_set(filename; filters) - -Picks out a stencil set from a TOML file based on some key-value -filters. If more than one set matches the filters an error is raised. The -returned stencil set contains parsed TOML intended for functions like -`parse_scalar` and `parse_stencil`. - -The stencil set is not parsed beyond the inital TOML parse. To get usable -stencils use the `parse_stencil` functions on the fields of the stencil set. - -The reason for this is that since stencil sets are intended to be very -general, and currently do not include any way to specify how to parse a given -section, the exact parsing is left to the user. - -For more information see [Operator file format](@ref) in the documentation. - -See also [`sbp_operators_path`](@ref), [`get_stencil_set`](@ref), [`parse_stencil`](@ref), [`parse_scalar`](@ref), [`parse_tuple`](@ref),. -""" -read_stencil_set(filename; filters...) = get_stencil_set(TOML.parsefile(filename); filters...) - -""" - get_stencil_set(parsed_toml; filters...) - -Picks out a stencil set from an already parsed TOML based on some key-value -filters. - -See also [`read_stencil_set`](@ref). -""" -function get_stencil_set(parsed_toml; filters...) - matches = findall(parsed_toml["stencil_set"]) do set - for (key, val) ∈ filters - if set[string(key)] != val - return false - end - end - - return true - end - - if length(matches) != 1 - throw(ArgumentError("filters must pick out a single stencil set")) - end - - i = matches[1] - return parsed_toml["stencil_set"][i] -end - -""" - parse_stencil(parsed_toml) - -Accepts parsed TOML and reads it as a stencil. - -See also [`read_stencil_set`](@ref), [`parse_scalar`](@ref), [`parse_tuple`](@ref). -""" -function parse_stencil(parsed_toml) - check_stencil_toml(parsed_toml) - - if parsed_toml isa Array - weights = parse_rational.(parsed_toml) - return CenteredStencil(weights...) - end - - weights = parse_rational.(parsed_toml["s"]) - return Stencil(weights..., center = parsed_toml["c"]) -end - -""" - parse_stencil(T, parsed_toml) - -Parses the input as a stencil with element type `T`. -""" -parse_stencil(T, parsed_toml) = Stencil{T}(parse_stencil(parsed_toml)) - -function check_stencil_toml(parsed_toml) - if !(parsed_toml isa Dict || parsed_toml isa Vector{String}) - throw(ArgumentError("the TOML for a stencil must be a vector of strings or a table.")) - end - - if parsed_toml isa Vector{String} - return - end - - if !(haskey(parsed_toml, "s") && haskey(parsed_toml, "c")) - throw(ArgumentError("the table form of a stencil must have fields `s` and `c`.")) - end - - if !(parsed_toml["s"] isa Vector{String}) - throw(ArgumentError("a stencil must be specified as a vector of strings.")) - end - - if !(parsed_toml["c"] isa Int) - throw(ArgumentError("the center of a stencil must be specified as an integer.")) - end -end - -""" - parse_scalar(parsed_toml) - -Parse a scalar, represented as a string or a number in the TOML, and return it as a `Rational` - -See also [`read_stencil_set`](@ref), [`parse_stencil`](@ref) [`parse_tuple`](@ref). 
-""" -function parse_scalar(parsed_toml) - try - return parse_rational(parsed_toml) - catch e - throw(ArgumentError("must be a number or a string representing a number.")) - end -end - -""" - parse_tuple(parsed_toml) - -Parse an array as a tuple of scalars. - -See also [`read_stencil_set`](@ref), [`parse_stencil`](@ref), [`parse_scalar`](@ref). -""" -function parse_tuple(parsed_toml) - if !(parsed_toml isa Array) - throw(ArgumentError("argument must be an array")) - end - return Tuple(parse_scalar.(parsed_toml)) -end - - -""" - parse_rational(parsed_toml) - -Parse a string or a number as a rational. -""" -function parse_rational(parsed_toml) - if parsed_toml isa String - expr = Meta.parse(replace(parsed_toml, "/"=>"//")) - return eval(:(Rational($expr))) - else - return Rational(parsed_toml) - end -end - -""" - sbp_operators_path() - -Calculate the path for the operators folder with included stencil sets. - -See also [`read_stencil_set`](@ref) -""" -sbp_operators_path() = (@__DIR__) * "/operators/"
--- a/src/SbpOperators/stencil.jl Mon Feb 21 10:33:58 2022 +0100 +++ b/src/SbpOperators/stencil.jl Fri Feb 03 22:14:47 2023 +0100 @@ -1,11 +1,12 @@ export CenteredStencil +export CenteredNestedStencil struct Stencil{T,N} - range::Tuple{Int,Int} + range::UnitRange{Int64} weights::NTuple{N,T} - function Stencil(range::Tuple{Int,Int},weights::NTuple{N,T}) where {T, N} - @assert range[2]-range[1]+1 == N + function Stencil(range::UnitRange,weights::NTuple{N,T}) where {T, N} + @assert length(range) == N new{T,N}(range,weights) end end @@ -15,27 +16,30 @@ Create a stencil with the given weights with element `center` as the center of the stencil. """ -function Stencil(weights::Vararg{T}; center::Int) where T # Type parameter T makes sure the weights are valid for the Stencil constuctors and throws an earlier, more readable, error +function Stencil(weights...; center::Int) + weights = promote(weights...) N = length(weights) - range = (1, N) .- center + range = (1:N) .- center return Stencil(range, weights) end -function Stencil{T}(s::Stencil) where T - return Stencil(s.range, T.(s.weights)) -end +Stencil{T,N}(s::Stencil{S,N}) where {T,S,N} = Stencil(s.range, T.(s.weights)) +Stencil{T}(s::Stencil) where T = Stencil{T,length(s)}(s) -Base.convert(::Type{Stencil{T}}, stencil) where T = Stencil{T}(stencil) +Base.convert(::Type{Stencil{T1,N}}, s::Stencil{T2,N}) where {T1,T2,N} = Stencil{T1,N}(s) +Base.convert(::Type{Stencil{T1}}, s::Stencil{T2,N}) where {T1,T2,N} = Stencil{T1,N}(s) -function CenteredStencil(weights::Vararg) +Base.promote_rule(::Type{Stencil{T1,N}}, ::Type{Stencil{T2,N}}) where {T1,T2,N} = Stencil{promote_type(T1,T2),N} + +function CenteredStencil(weights...) if iseven(length(weights)) throw(ArgumentError("a centered stencil must have an odd number of weights.")) end r = length(weights) ÷ 2 - return Stencil((-r, r), weights) + return Stencil(-r:r, weights) end @@ -48,7 +52,8 @@ return Stencil(s.range, a.*s.weights) end -Base.eltype(::Stencil{T}) where T = T +Base.eltype(::Stencil{T,N}) where {T,N} = T +Base.length(::Stencil{T,N}) where {T,N} = N function flip(s::Stencil) range = (-s.range[2], -s.range[1]) @@ -57,24 +62,103 @@ # Provides index into the Stencil based on offset for the root element @inline function Base.getindex(s::Stencil, i::Int) - @boundscheck if i < s.range[1] || s.range[2] < i + @boundscheck if i ∉ s.range return zero(eltype(s)) end return s.weights[1 + i - s.range[1]] end -Base.@propagate_inbounds @inline function apply_stencil(s::Stencil{T,N}, v::AbstractVector, i::Int) where {T,N} - w = s.weights[1]*v[i + s.range[1]] - @simd for k ∈ 2:N - w += s.weights[k]*v[i + s.range[1] + k-1] +Base.@propagate_inbounds @inline function apply_stencil(s::Stencil, v::AbstractVector, i::Int) + w = zero(promote_type(eltype(s),eltype(v))) + @simd for k ∈ 1:length(s) + w += s.weights[k]*v[i + s.range[k]] + end + + return w +end + +Base.@propagate_inbounds @inline function apply_stencil_backwards(s::Stencil, v::AbstractVector, i::Int) + w = zero(promote_type(eltype(s),eltype(v))) + @simd for k ∈ length(s):-1:1 + w += s.weights[k]*v[i - s.range[k]] end return w end -Base.@propagate_inbounds @inline function apply_stencil_backwards(s::Stencil{T,N}, v::AbstractVector, i::Int) where {T,N} - w = s.weights[N]*v[i - s.range[2]] - @simd for k ∈ N-1:-1:1 - w += s.weights[k]*v[i - s.range[1] - k + 1] - end - return w + +struct NestedStencil{T,N,M} + s::Stencil{Stencil{T,N},M} +end + +# Stencil input +NestedStencil(s::Vararg{Stencil}; center) = NestedStencil(Stencil(s... 
; center)) +CenteredNestedStencil(s::Vararg{Stencil}) = NestedStencil(CenteredStencil(s...)) + +# Tuple input +function NestedStencil(weights::Vararg{NTuple{N,Any}}; center) where N + inner_stencils = map(w -> Stencil(w...; center), weights) + return NestedStencil(Stencil(inner_stencils... ; center)) +end +function CenteredNestedStencil(weights::Vararg{NTuple{N,Any}}) where N + inner_stencils = map(w->CenteredStencil(w...), weights) + return CenteredNestedStencil(inner_stencils...) +end + + +# Conversion +function NestedStencil{T,N,M}(ns::NestedStencil{S,N,M}) where {T,S,N,M} + return NestedStencil(Stencil{Stencil{T}}(ns.s)) +end + +function NestedStencil{T}(ns::NestedStencil{S,N,M}) where {T,S,N,M} + NestedStencil{T,N,M}(ns) +end + +function Base.convert(::Type{NestedStencil{T,N,M}}, s::NestedStencil{S,N,M}) where {T,S,N,M} + return NestedStencil{T,N,M}(s) +end +Base.convert(::Type{NestedStencil{T}}, stencil) where T = NestedStencil{T}(stencil) + +function Base.promote_rule(::Type{NestedStencil{T,N,M}}, ::Type{NestedStencil{S,N,M}}) where {T,S,N,M} + return NestedStencil{promote_type(T,S),N,M} end + +Base.eltype(::NestedStencil{T}) where T = T + +function scale(ns::NestedStencil, a) + range = ns.s.range + weights = ns.s.weights + + return NestedStencil(Stencil(range, scale.(weights,a))) +end + +function flip(ns::NestedStencil) + s_flip = flip(ns.s) + return NestedStencil(Stencil(s_flip.range, flip.(s_flip.weights))) +end + +Base.getindex(ns::NestedStencil, i::Int) = ns.s[i] + +"Apply inner stencils to `c` and get a concrete stencil" +Base.@propagate_inbounds function apply_inner_stencils(ns::NestedStencil, c::AbstractVector, i::Int) + weights = apply_stencil.(ns.s.weights, Ref(c), i) + return Stencil(ns.s.range, weights) +end + +"Apply the whole nested stencil" +Base.@propagate_inbounds function apply_stencil(ns::NestedStencil, c::AbstractVector, v::AbstractVector, i::Int) + s = apply_inner_stencils(ns,c,i) + return apply_stencil(s, v, i) +end + +"Apply inner stencils backwards to `c` and get a concrete stencil" +Base.@propagate_inbounds @inline function apply_inner_stencils_backwards(ns::NestedStencil, c::AbstractVector, i::Int) + weights = apply_stencil_backwards.(ns.s.weights, Ref(c), i) + return Stencil(ns.s.range, weights) +end + +"Apply the whole nested stencil backwards" +Base.@propagate_inbounds @inline function apply_stencil_backwards(ns::NestedStencil, c::AbstractVector, v::AbstractVector, i::Int) + s = apply_inner_stencils_backwards(ns,c,i) + return apply_stencil_backwards(s, v, i) +end
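A quick check of the reworked `Stencil`. The names are imported explicitly since `Stencil` and `apply_stencil` do not appear in the export lists visible here:

```julia
import Sbplib.SbpOperators: Stencil, CenteredStencil, apply_stencil

s = Stencil(-1//2, 0//1, 1//2; center = 2)    # weights are promoted; the range becomes -1:1
v = [1.0, 2.0, 3.0, 4.0, 5.0]
apply_stencil(s, v, 3)                        # == 1.0, i.e. (v[4] - v[2])/2
CenteredStencil(1, -2, 1)                     # equivalent to Stencil(1, -2, 1; center = 2)
```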
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/SbpOperators/stencil_set.jl Fri Feb 03 22:14:47 2023 +0100 @@ -0,0 +1,164 @@ +using TOML + + +""" + StencilSet + +A `StencilSet` contains a set of associated stencils. The stencils +are are stored in a table, and can be accesed by indexing into the `StencilSet`. +""" +struct StencilSet + table +end +Base.getindex(set::StencilSet,I...) = set.table[I...] + + +""" + read_stencil_set(filename; filters) + +Creates a `StencilSet` from a TOML file based on some key-value +filters. If more than one set matches the filters an error is raised. The +table of the `StencilSet` is a parsed TOML intended for functions like +`parse_scalar` and `parse_stencil`. + +The `StencilSet` table is not parsed beyond the inital TOML parse. To get usable +stencils use the `parse_stencil` functions on the fields of the stencil set. + +The reason for this is that since stencil sets are intended to be very +general, and currently do not include any way to specify how to parse a given +section, the exact parsing is left to the user. + +For more information see [Operator file format](@ref) in the documentation. + +See also [`StencilSet`](@ref), [`sbp_operators_path`](@ref), [`get_stencil_set`](@ref), [`parse_stencil`](@ref), [`parse_scalar`](@ref), [`parse_tuple`](@ref). +""" +read_stencil_set(filename; filters...) = StencilSet(get_stencil_set(TOML.parsefile(filename); filters...)) + + +""" + get_stencil_set(parsed_toml; filters...) + +Picks out a stencil set from an already parsed TOML based on some key-value +filters. + +See also [`read_stencil_set`](@ref). +""" +function get_stencil_set(parsed_toml; filters...) + matches = findall(parsed_toml["stencil_set"]) do set + for (key, val) ∈ filters + if set[string(key)] != val + return false + end + end + + return true + end + + if length(matches) != 1 + throw(ArgumentError("filters must pick out a single stencil set")) + end + + i = matches[1] + return parsed_toml["stencil_set"][i] +end + +""" + parse_stencil(parsed_toml) + +Accepts parsed TOML and reads it as a stencil. + +See also [`read_stencil_set`](@ref), [`parse_scalar`](@ref), [`parse_tuple`](@ref). +""" +function parse_stencil(parsed_toml) + check_stencil_toml(parsed_toml) + + if parsed_toml isa Array + weights = parse_rational.(parsed_toml) + return CenteredStencil(weights...) + end + + weights = parse_rational.(parsed_toml["s"]) + return Stencil(weights..., center = parsed_toml["c"]) +end + +""" + parse_stencil(T, parsed_toml) + +Parses the input as a stencil with element type `T`. +""" +parse_stencil(T, parsed_toml) = Stencil{T}(parse_stencil(parsed_toml)) + +function check_stencil_toml(parsed_toml) + if !(parsed_toml isa Dict || parsed_toml isa Vector{String}) + throw(ArgumentError("the TOML for a stencil must be a vector of strings or a table.")) + end + + if parsed_toml isa Vector{String} + return + end + + if !(haskey(parsed_toml, "s") && haskey(parsed_toml, "c")) + throw(ArgumentError("the table form of a stencil must have fields `s` and `c`.")) + end + + if !(parsed_toml["s"] isa Vector{String}) + throw(ArgumentError("a stencil must be specified as a vector of strings.")) + end + + if !(parsed_toml["c"] isa Int) + throw(ArgumentError("the center of a stencil must be specified as an integer.")) + end +end + +""" + parse_scalar(parsed_toml) + +Parse a scalar, represented as a string or a number in the TOML, and return it as a `Rational` + +See also [`read_stencil_set`](@ref), [`parse_stencil`](@ref) [`parse_tuple`](@ref). 
+""" +function parse_scalar(parsed_toml) + try + return parse_rational(parsed_toml) + catch e + throw(ArgumentError("must be a number or a string representing a number.")) + end +end + +""" + parse_tuple(parsed_toml) + +Parse an array as a tuple of scalars. + +See also [`read_stencil_set`](@ref), [`parse_stencil`](@ref), [`parse_scalar`](@ref). +""" +function parse_tuple(parsed_toml) + if !(parsed_toml isa Array) + throw(ArgumentError("argument must be an array")) + end + return Tuple(parse_scalar.(parsed_toml)) +end + + +""" + parse_rational(parsed_toml) + +Parse a string or a number as a rational. +""" +function parse_rational(parsed_toml) + if parsed_toml isa String + expr = Meta.parse(replace(parsed_toml, "/"=>"//")) + return eval(:(Rational($expr))) + else + return Rational(parsed_toml) + end +end + +""" + sbp_operators_path() + +Calculate the path for the operators folder with included stencil sets. + +See also [`StencilSet`](@ref), [`read_stencil_set`](@ref). +""" +sbp_operators_path() = (@__DIR__) * "/operators/"
--- a/src/SbpOperators/volumeops/constant_interior_scaling_operator.jl Mon Feb 21 10:33:58 2022 +0100 +++ b/src/SbpOperators/volumeops/constant_interior_scaling_operator.jl Fri Feb 03 22:14:47 2023 +0100 @@ -1,10 +1,10 @@ """ - ConstantInteriorScalingOperator{T,N} <: TensorMapping{T,1,1} + ConstantInteriorScalingOperator{T,N} <: LazyTensor{T,1,1} A one-dimensional operator scaling a vector. The first and last `N` points are scaled with individual weights while all interior points are scaled the same. """ -struct ConstantInteriorScalingOperator{T,N} <: TensorMapping{T,1,1} +struct ConstantInteriorScalingOperator{T,N} <: LazyTensor{T,1,1} interior_weight::T closure_weights::NTuple{N,T} size::Int @@ -28,19 +28,19 @@ LazyTensors.domain_size(op::ConstantInteriorScalingOperator) = (op.size,) # TBD: @inbounds in apply methods? -function LazyTensors.apply(op::ConstantInteriorScalingOperator{T}, v::AbstractVector{T}, i::Index{Lower}) where T +function LazyTensors.apply(op::ConstantInteriorScalingOperator, v::AbstractVector, i::Index{Lower}) return op.closure_weights[Int(i)]*v[Int(i)] end -function LazyTensors.apply(op::ConstantInteriorScalingOperator{T}, v::AbstractVector{T}, i::Index{Interior}) where T +function LazyTensors.apply(op::ConstantInteriorScalingOperator, v::AbstractVector, i::Index{Interior}) return op.interior_weight*v[Int(i)] end -function LazyTensors.apply(op::ConstantInteriorScalingOperator{T}, v::AbstractVector{T}, i::Index{Upper}) where T +function LazyTensors.apply(op::ConstantInteriorScalingOperator, v::AbstractVector, i::Index{Upper}) return op.closure_weights[op.size[1]-Int(i)+1]*v[Int(i)] end -function LazyTensors.apply(op::ConstantInteriorScalingOperator{T}, v::AbstractVector{T}, i) where T +function LazyTensors.apply(op::ConstantInteriorScalingOperator, v::AbstractVector, i) return LazyTensors.apply_with_region(op, v, closure_size(op), op.size[1], i) end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/src/SbpOperators/volumeops/derivatives/first_derivative.jl Fri Feb 03 22:14:47 2023 +0100 @@ -0,0 +1,48 @@ +""" + first_derivative(grid::EquidistantGrid, inner_stencil, closure_stencils, direction) + +Creates the first-derivative operator `D1` as a `LazyTensor` + +`D1` approximates the first-derivative d/dξ on `grid` along the coordinate dimension specified by +`direction`, using the stencil `inner_stencil` in the interior and a set of stencils `closure_stencils` +for the points in the closure regions. + +On a one-dimensional `grid`, `D1` is a `VolumeOperator`. On a multi-dimensional `grid`, `D1` is the inflation of +a `VolumeOperator`. + +See also: [`VolumeOperator`](@ref), [`LazyTensors.inflate`](@ref). +""" +function first_derivative(grid::EquidistantGrid, inner_stencil, closure_stencils, direction) + h_inv = inverse_spacing(grid)[direction] + + D₁ = VolumeOperator(restrict(grid, direction), scale(inner_stencil,h_inv), scale.(closure_stencils,h_inv), odd) + return LazyTensors.inflate(D₁, size(grid), direction) +end + + +""" + first_derivative(grid, inner_stencil, closure_stencils) + +Creates a `first_derivative` operator on a 1D `grid` given `inner_stencil` and `closure_stencils`. +""" +first_derivative(grid::EquidistantGrid{1}, inner_stencil::Stencil, closure_stencils) = first_derivative(grid, inner_stencil, closure_stencils, 1) + + +""" + first_derivative(grid, stencil_set::StencilSet, direction) + +Creates a `first_derivative` operator on `grid` along coordinate dimension `direction` given a `stencil_set`. +""" +function first_derivative(grid::EquidistantGrid, stencil_set::StencilSet, direction) + inner_stencil = parse_stencil(stencil_set["D1"]["inner_stencil"]) + closure_stencils = parse_stencil.(stencil_set["D1"]["closure_stencils"]) + first_derivative(grid,inner_stencil,closure_stencils,direction); +end + + +""" + first_derivative(grid, stencil_set) + +Creates a `first_derivative` operator on a 1D `grid` given a `stencil_set`. +""" +first_derivative(grid::EquidistantGrid{1}, stencil_set::StencilSet) = first_derivative(grid, stencil_set, 1)
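A sketch of the new `first_derivative` on a 1D grid (the `order = 4` filter is again an assumption about the TOML contents):

```julia
using Sbplib.Grids, Sbplib.SbpOperators

stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order = 4)
g  = EquidistantGrid(101, 0.0, 1.0)
D₁ = first_derivative(g, stencil_set)
v  = evalOn(g, x -> sin(2π*x))
(D₁*v)[51]    # ≈ 2π*cos(2π*0.5) = -2π, up to the truncation error of the scheme
```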
--- a/src/SbpOperators/volumeops/derivatives/second_derivative.jl Mon Feb 21 10:33:58 2022 +0100 +++ b/src/SbpOperators/volumeops/derivatives/second_derivative.jl Fri Feb 03 22:14:47 2023 +0100 @@ -1,19 +1,48 @@ """ second_derivative(grid::EquidistantGrid, inner_stencil, closure_stencils, direction) -Creates the second-derivative operator `D2` as a `TensorMapping` +Creates the second-derivative operator `D2` as a `LazyTensor` `D2` approximates the second-derivative d²/dξ² on `grid` along the coordinate dimension specified by `direction`, using the stencil `inner_stencil` in the interior and a set of stencils `closure_stencils` for the points in the closure regions. -On a one-dimensional `grid`, `D2` is a `VolumeOperator`. On a multi-dimensional `grid`, `D2` is the outer product of the -one-dimensional operator with the `IdentityMapping`s in orthogonal coordinate dirrections. -Also see the documentation of `SbpOperators.volume_operator(...)` for more details. +On a one-dimensional `grid`, `D2` is a `VolumeOperator`. On a multi-dimensional `grid`, `D2` is the inflation of +a `VolumeOperator`. + +See also: [`VolumeOperator`](@ref), [`LazyTensors.inflate`](@ref). """ function second_derivative(grid::EquidistantGrid, inner_stencil, closure_stencils, direction) h_inv = inverse_spacing(grid)[direction] - return SbpOperators.volume_operator(grid, scale(inner_stencil,h_inv^2), scale.(closure_stencils,h_inv^2), even, direction) + + D₂ = VolumeOperator(restrict(grid, direction), scale(inner_stencil,h_inv^2), scale.(closure_stencils,h_inv^2), even) + return LazyTensors.inflate(D₂, size(grid), direction) end -second_derivative(grid::EquidistantGrid{1}, inner_stencil, closure_stencils) = second_derivative(grid,inner_stencil,closure_stencils,1) -export second_derivative + + +""" + second_derivative(grid, inner_stencil, closure_stencils) + +Creates a `second_derivative` operator on a 1D `grid` given `inner_stencil` and `closure_stencils`. +""" +second_derivative(grid::EquidistantGrid{1}, inner_stencil::Stencil, closure_stencils) = second_derivative(grid, inner_stencil, closure_stencils,1) + + +""" + second_derivative(grid, stencil_set, direction) + +Creates a `second_derivative` operator on `grid` along coordinate dimension `direction` given a `stencil_set`. +""" +function second_derivative(grid::EquidistantGrid, stencil_set::StencilSet, direction) + inner_stencil = parse_stencil(stencil_set["D2"]["inner_stencil"]) + closure_stencils = parse_stencil.(stencil_set["D2"]["closure_stencils"]) + second_derivative(grid,inner_stencil,closure_stencils,direction); +end + + +""" + second_derivative(grid, stencil_set) + +Creates a `second_derivative` operator on a 1D `grid` given a `stencil_set`. +""" +second_derivative(grid::EquidistantGrid{1}, stencil_set::StencilSet) = second_derivative(grid, stencil_set, 1) \ No newline at end of file
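The same pattern for `second_derivative`, here on a 2D grid to show the `direction` argument:

```julia
using Sbplib.Grids, Sbplib.SbpOperators

stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order = 2)
g   = EquidistantGrid((21, 31), (0.0, 0.0), (1.0, 1.0))
D2x = second_derivative(g, stencil_set, 1)    # acts along the first coordinate direction
D2y = second_derivative(g, stencil_set, 2)    # acts along the second coordinate direction
```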
--- a/src/SbpOperators/volumeops/inference_trouble.txt Mon Feb 21 10:33:58 2022 +0100 +++ b/src/SbpOperators/volumeops/inference_trouble.txt Fri Feb 03 22:14:47 2023 +0100 @@ -39,15 +39,9 @@ g = EquidistantGrid(10,0., 1.) v = evalOn(g, (x)->x^2+1) H = inner_product(g, 1., [1/2]) - V = SbpOperators.volume_operator(g, Stencil(1.,center=1), (Stencil(1/2,center=1),), SbpOperators.even,1) - b = SbpOperators.boundary_operator(g, Stencil(1/2,center=1), CartesianBoundary{1,Lower}()) + V = SbpOperators.VolumeOperator(g, Stencil(1.,center=1), (Stencil(1/2,center=1),), SbpOperators.even) + b = SbpOperators.BoundaryOperator(g, Stencil(1/2,center=1), Lower()) end @code_warntype LazyTensors.apply(H, H*v, 2) @code_warntype LazyTensors.apply(V, V*v, 2) -@code_warntype LazyTensors.apply(b, b*v, 2) - - - -begin -end
--- a/src/SbpOperators/volumeops/inner_products/inner_product.jl Mon Feb 21 10:33:58 2022 +0100 +++ b/src/SbpOperators/volumeops/inner_products/inner_product.jl Fri Feb 03 22:14:47 2023 +0100 @@ -1,7 +1,7 @@ """ inner_product(grid::EquidistantGrid, interior_weight, closure_weights) -Creates the discrete inner product operator `H` as a `TensorMapping` on an +Creates the discrete inner product operator `H` as a `LazyTensor` on an equidistant grid, defined as `(u,v) = u'Hv` for grid functions `u,v`. `inner_product` creates `H` on `grid` using the `interior_weight` for the @@ -10,19 +10,20 @@ On a 1-dimensional grid, `H` is a `ConstantInteriorScalingOperator`. On a N-dimensional grid, `H` is the outer product of the 1-dimensional inner -product operators for each coordinate direction. Also see the documentation of -On a 0-dimensional grid, `H` is a 0-dimensional `IdentityMapping`. +product operators for each coordinate direction. On a 0-dimensional grid, +`H` is a 0-dimensional `IdentityTensor`. + +See also: [`ConstantInteriorScalingOperator`](@ref). """ function inner_product(grid::EquidistantGrid, interior_weight, closure_weights) Hs = () - for i ∈ 1:dimension(grid) + for i ∈ dims(grid) Hs = (Hs..., inner_product(restrict(grid, i), interior_weight, closure_weights)) end return foldl(⊗, Hs) end -export inner_product function inner_product(grid::EquidistantGrid{1}, interior_weight, closure_weights) h = spacing(grid)[1] @@ -31,4 +32,15 @@ return H end -inner_product(grid::EquidistantGrid{0}, interior_weight, closure_weights) = IdentityMapping{eltype(grid)}() +inner_product(grid::EquidistantGrid{0}, interior_weight, closure_weights) = IdentityTensor{eltype(grid)}() + +""" + inner_product(grid, stencil_set) + +Creates a `inner_product` operator on `grid` given a `stencil_set`. +""" +function inner_product(grid, stencil_set::StencilSet) + inner_stencil = parse_scalar(stencil_set["H"]["inner"]) + closure_stencils = parse_tuple(stencil_set["H"]["closure"]) + return inner_product(grid, inner_stencil, closure_stencils) +end
--- a/src/SbpOperators/volumeops/inner_products/inverse_inner_product.jl Mon Feb 21 10:33:58 2022 +0100 +++ b/src/SbpOperators/volumeops/inner_products/inverse_inner_product.jl Fri Feb 03 22:14:47 2023 +0100 @@ -1,19 +1,21 @@ """ inverse_inner_product(grid::EquidistantGrid, interior_weight, closure_weights) -Constructs the inverse inner product operator `H⁻¹` as a `TensorMapping` using +Constructs the inverse inner product operator `H⁻¹` as a `LazyTensor` using the weights of `H`, `interior_weight`, `closure_weights`. `H⁻¹` is inverse of the inner product operator `H`. On a 1-dimensional grid, `H⁻¹` is a `ConstantInteriorScalingOperator`. On an N-dimensional grid, `H⁻¹` is the outer product of the 1-dimensional inverse inner product operators for each coordinate direction. On a 0-dimensional -`grid`, `H⁻¹` is a 0-dimensional `IdentityMapping`. +`grid`, `H⁻¹` is a 0-dimensional `IdentityTensor`. + +See also: [`ConstantInteriorScalingOperator`](@ref). """ function inverse_inner_product(grid::EquidistantGrid, interior_weight, closure_weights) H⁻¹s = () - for i ∈ 1:dimension(grid) + for i ∈ dims(grid) H⁻¹s = (H⁻¹s..., inverse_inner_product(restrict(grid, i), interior_weight, closure_weights)) end @@ -25,6 +27,16 @@ H⁻¹ = SbpOperators.ConstantInteriorScalingOperator(grid, h⁻¹*1/interior_weight, h⁻¹./closure_weights) return H⁻¹ end -export inverse_inner_product + +inverse_inner_product(grid::EquidistantGrid{0}, interior_weight, closure_weights) = IdentityTensor{eltype(grid)}() + +""" + inverse_inner_product(grid, stencil_set) -inverse_inner_product(grid::EquidistantGrid{0}, interior_weight, closure_weights) = IdentityMapping{eltype(grid)}() +Creates a `inverse_inner_product` operator on `grid` given a `stencil_set`. +""" +function inverse_inner_product(grid, stencil_set::StencilSet) + inner_stencil = parse_scalar(stencil_set["H"]["inner"]) + closure_stencils = parse_tuple(stencil_set["H"]["closure"]) + return inverse_inner_product(grid, inner_stencil, closure_stencils) +end
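A sketch exercising `inner_product` and `inverse_inner_product` together; both operators are diagonal, so they invert each other pointwise (the `order` filter key is assumed):

```julia
using Sbplib.Grids, Sbplib.SbpOperators, Sbplib.LazyTensors

stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order = 2)
g   = EquidistantGrid(20, 0.0, 1.0)
H   = inner_product(g, stencil_set)
H⁻¹ = inverse_inner_product(g, stencil_set)
v   = evalOn(g, x -> x^2 + 1)
(H⁻¹*(H*v))[3]    # ≈ v[3]
```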
--- a/src/SbpOperators/volumeops/laplace/laplace.jl Mon Feb 21 10:33:58 2022 +0100 +++ b/src/SbpOperators/volumeops/laplace/laplace.jl Fri Feb 03 22:14:47 2023 +0100 @@ -1,7 +1,40 @@ """ - laplace(grid::EquidistantGrid{Dim}, inner_stencil, closure_stencils) + Laplace{T, Dim, TM} <: LazyTensor{T, Dim, Dim} + +Implements the Laplace operator, approximating ∑d²/xᵢ² , i = 1,...,`Dim` as a +`LazyTensor`. Additionally `Laplace` stores the `StencilSet` +used to construct the `LazyTensor`. +""" +struct Laplace{T, Dim, TM<:LazyTensor{T, Dim, Dim}} <: LazyTensor{T, Dim, Dim} + D::TM # Difference operator + stencil_set::StencilSet # Stencil set of the operator +end + +""" + Laplace(grid::EquidistantGrid, stencil_set) + +Creates the `Laplace` operator `Δ` on `grid` given a `stencil_set`. -Creates the Laplace operator operator `Δ` as a `TensorMapping` +See also [`laplace`](@ref). +""" +function Laplace(grid::EquidistantGrid, stencil_set::StencilSet) + inner_stencil = parse_stencil(stencil_set["D2"]["inner_stencil"]) + closure_stencils = parse_stencil.(stencil_set["D2"]["closure_stencils"]) + Δ = laplace(grid, inner_stencil,closure_stencils) + return Laplace(Δ,stencil_set) +end + +LazyTensors.range_size(L::Laplace) = LazyTensors.range_size(L.D) +LazyTensors.domain_size(L::Laplace) = LazyTensors.domain_size(L.D) +LazyTensors.apply(L::Laplace, v::AbstractArray, I...) = LazyTensors.apply(L.D,v,I...) + +# TODO: Implement pretty printing of Laplace once pretty printing of LazyTensors is implemented. +# Base.show(io::IO, L::Laplace) = ... + +""" + laplace(grid::EquidistantGrid, inner_stencil, closure_stencils) + +Creates the Laplace operator `Δ` as a `LazyTensor` `Δ` approximates the Laplace operator ∑d²/xᵢ² , i = 1,...,`Dim` on `grid`, using the stencil `inner_stencil` in the interior and a set of stencils `closure_stencils` @@ -10,12 +43,13 @@ On a one-dimensional `grid`, `Δ` is equivalent to `second_derivative`. On a multi-dimensional `grid`, `Δ` is the sum of multi-dimensional `second_derivative`s where the sum is carried out lazily. + +See also: [`second_derivative`](@ref). """ function laplace(grid::EquidistantGrid, inner_stencil, closure_stencils) Δ = second_derivative(grid, inner_stencil, closure_stencils, 1) - for d = 2:dimension(grid) + for d = 2:ndims(grid) Δ += second_derivative(grid, inner_stencil, closure_stencils, d) end return Δ end -export laplace
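A sketch of the new `Laplace` type on a 2D grid (same assumption about the `order` key):

```julia
using Sbplib.Grids, Sbplib.SbpOperators

stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order = 4)
g = EquidistantGrid((51, 51), (0.0, 0.0), (1.0, 1.0))
Δ = Laplace(g, stencil_set)
v = evalOn(g, (x,y) -> sin(x) + cos(y))
(Δ*v)[25, 25]    # ≈ -(sin(x) + cos(y)) at that interior grid point
```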
--- a/src/SbpOperators/volumeops/volume_operator.jl Mon Feb 21 10:33:58 2022 +0100 +++ b/src/SbpOperators/volumeops/volume_operator.jl Fri Feb 03 22:14:47 2023 +0100 @@ -1,33 +1,9 @@ """ - volume_operator(grid, inner_stencil, closure_stencils, parity, direction) - -Creates a volume operator on a `Dim`-dimensional grid acting along the -specified coordinate `direction`. The action of the operator is determined by -the stencils `inner_stencil` and `closure_stencils`. When `Dim=1`, the -corresponding `VolumeOperator` tensor mapping is returned. When `Dim>1`, the -returned operator is the appropriate outer product of a one-dimensional -operators and `IdentityMapping`s, e.g for `Dim=3` the volume operator in the -y-direction is `I⊗op⊗I`. -""" -function volume_operator(grid::EquidistantGrid, inner_stencil, closure_stencils, parity, direction) - #TODO: Check that direction <= Dim? + VolumeOperator{T,N,M,K} <: LazyTensor{T,1,1} - # Create 1D volume operator in along coordinate direction - op = VolumeOperator(restrict(grid, direction), inner_stencil, closure_stencils, parity) - # Create 1D IdentityMappings for each coordinate direction - one_d_grids = restrict.(Ref(grid), Tuple(1:dimension(grid))) - Is = IdentityMapping{eltype(grid)}.(size.(one_d_grids)) - # Formulate the correct outer product sequence of the identity mappings and - # the volume operator - parts = Base.setindex(Is, op, direction) - return foldl(⊗, parts) -end - -""" - VolumeOperator{T,N,M,K} <: TensorOperator{T,1} Implements a one-dimensional constant coefficients volume operator """ -struct VolumeOperator{T,N,M,K} <: TensorMapping{T,1,1} +struct VolumeOperator{T,N,M,K} <: LazyTensor{T,1,1} inner_stencil::Stencil{T,N} closure_stencils::NTuple{M,Stencil{T,K}} size::NTuple{1,Int} @@ -43,18 +19,19 @@ LazyTensors.range_size(op::VolumeOperator) = op.size LazyTensors.domain_size(op::VolumeOperator) = op.size -function LazyTensors.apply(op::VolumeOperator{T}, v::AbstractVector{T}, i::Index{Lower}) where T +function LazyTensors.apply(op::VolumeOperator, v::AbstractVector, i::Index{Lower}) return @inbounds apply_stencil(op.closure_stencils[Int(i)], v, Int(i)) end -function LazyTensors.apply(op::VolumeOperator{T}, v::AbstractVector{T}, i::Index{Interior}) where T +function LazyTensors.apply(op::VolumeOperator, v::AbstractVector, i::Index{Interior}) return apply_stencil(op.inner_stencil, v, Int(i)) end -function LazyTensors.apply(op::VolumeOperator{T}, v::AbstractVector{T}, i::Index{Upper}) where T +function LazyTensors.apply(op::VolumeOperator, v::AbstractVector, i::Index{Upper}) return @inbounds Int(op.parity)*apply_stencil_backwards(op.closure_stencils[op.size[1]-Int(i)+1], v, Int(i)) end -function LazyTensors.apply(op::VolumeOperator{T}, v::AbstractVector{T}, i) where T +function LazyTensors.apply(op::VolumeOperator, v::AbstractVector, i) return LazyTensors.apply_with_region(op, v, closure_size(op), op.size[1], i) end +# TODO: Move this to LazyTensors when we have the region communication down.
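For orientation, a direct 1D construction of the refactored `VolumeOperator`, mirroring the call in `inference_trouble.txt` above:

```julia
using Sbplib.Grids, Sbplib.RegionIndices, Sbplib.LazyTensors
import Sbplib.SbpOperators

g = EquidistantGrid(10, 0.0, 1.0)
V = SbpOperators.VolumeOperator(g, SbpOperators.Stencil(1.0; center=1),
                                (SbpOperators.Stencil(0.5; center=1),), SbpOperators.even)
v = evalOn(g, x -> x^2 + 1)
LazyTensors.apply(V, v, 2)                  # interior point: 1.0*v[2]
LazyTensors.apply(V, v, Index(1, Lower))    # closure point: 0.5*v[1]
```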
--- a/test/DiffOps/DiffOps_test.jl Mon Feb 21 10:33:58 2022 +0100 +++ b/test/DiffOps/DiffOps_test.jl Fri Feb 03 22:14:47 2023 +0100 @@ -22,8 +22,8 @@ # v[:,2] = [7, 8, 9, 10] # v[:,1] = [10, 11, 12, 13] # -# @test e_w isa TensorMapping{T,2,1} where T -# @test e_w' isa TensorMapping{T,1,2} where T +# @test e_w isa LazyTensor{T,2,1} where T +# @test e_w' isa LazyTensor{T,1,2} where T # # @test domain_size(e_w, (3,2)) == (2,) # @test domain_size(e_e, (3,2)) == (2,) @@ -81,8 +81,8 @@ # v∂x = evalOn(g, (x,y)-> 2*x + y) # v∂y = evalOn(g, (x,y)-> 2*(y-1) + x) # -# @test d_w isa TensorMapping{T,2,1} where T -# @test d_w' isa TensorMapping{T,1,2} where T +# @test d_w isa LazyTensor{T,2,1} where T +# @test d_w' isa LazyTensor{T,1,2} where T # # @test domain_size(d_w, (3,2)) == (2,) # @test domain_size(d_e, (3,2)) == (2,)
--- a/test/Grids/EquidistantGrid_test.jl Mon Feb 21 10:33:58 2022 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,123 +0,0 @@ -using Sbplib.Grids -using Test -using Sbplib.RegionIndices - - -@testset "EquidistantGrid" begin - @test EquidistantGrid(4,0.0,1.0) isa EquidistantGrid - @test EquidistantGrid(4,0.0,8.0) isa EquidistantGrid - # constuctor - @test_throws DomainError EquidistantGrid(0,0.0,1.0) - @test_throws DomainError EquidistantGrid(1,1.0,1.0) - @test_throws DomainError EquidistantGrid(1,1.0,-1.0) - @test EquidistantGrid(4,0.0,1.0) == EquidistantGrid((4,),(0.0,),(1.0,)) - - @testset "Base" begin - @test eltype(EquidistantGrid(4,0.0,1.0)) == Float64 - @test eltype(EquidistantGrid((4,3),(0,0),(1,3))) == Int - @test size(EquidistantGrid(4,0.0,1.0)) == (4,) - @test size(EquidistantGrid((5,3), (0.0,0.0), (2.0,1.0))) == (5,3) - end - - # dimension - @test dimension(EquidistantGrid(4,0.0,1.0)) == 1 - @test dimension(EquidistantGrid((5,3), (0.0,0.0), (2.0,1.0))) == 2 - - # spacing - @test [spacing(EquidistantGrid(4,0.0,1.0))...] ≈ [(1. /3,)...] atol=5e-13 - @test [spacing(EquidistantGrid((5,3), (0.0,-1.0), (2.0,1.0)))...] ≈ [(0.5, 1.)...] atol=5e-13 - - # inverse_spacing - @test [inverse_spacing(EquidistantGrid(4,0.0,1.0))...] ≈ [(3.,)...] atol=5e-13 - @test [inverse_spacing(EquidistantGrid((5,3), (0.0,-1.0), (2.0,1.0)))...] ≈ [(2, 1.)...] atol=5e-13 - - # points - g = EquidistantGrid((5,3), (-1.0,0.0), (0.0,7.11)) - gp = points(g); - p = [(-1.,0.) (-1.,7.11/2) (-1.,7.11); - (-0.75,0.) (-0.75,7.11/2) (-0.75,7.11); - (-0.5,0.) (-0.5,7.11/2) (-0.5,7.11); - (-0.25,0.) (-0.25,7.11/2) (-0.25,7.11); - (0.,0.) (0.,7.11/2) (0.,7.11)] - for i ∈ eachindex(gp) - @test [gp[i]...] ≈ [p[i]...] atol=5e-13 - end - - # restrict - g = EquidistantGrid((5,3), (0.0,0.0), (2.0,1.0)) - @test restrict(g, 1) == EquidistantGrid(5,0.0,2.0) - @test restrict(g, 2) == EquidistantGrid(3,0.0,1.0) - - g = EquidistantGrid((2,5,3), (0.0,0.0,0.0), (2.0,1.0,3.0)) - @test restrict(g, 1) == EquidistantGrid(2,0.0,2.0) - @test restrict(g, 2) == EquidistantGrid(5,0.0,1.0) - @test restrict(g, 3) == EquidistantGrid(3,0.0,3.0) - @test restrict(g, 1:2) == EquidistantGrid((2,5),(0.0,0.0),(2.0,1.0)) - @test restrict(g, 2:3) == EquidistantGrid((5,3),(0.0,0.0),(1.0,3.0)) - @test restrict(g, [1,3]) == EquidistantGrid((2,3),(0.0,0.0),(2.0,3.0)) - @test restrict(g, [2,1]) == EquidistantGrid((5,2),(0.0,0.0),(1.0,2.0)) - - @testset "boundary_identifiers" begin - g = EquidistantGrid((2,5,3), (0.0,0.0,0.0), (2.0,1.0,3.0)) - bids = (CartesianBoundary{1,Lower}(),CartesianBoundary{1,Upper}(), - CartesianBoundary{2,Lower}(),CartesianBoundary{2,Upper}(), - CartesianBoundary{3,Lower}(),CartesianBoundary{3,Upper}()) - @test boundary_identifiers(g) == bids - @inferred boundary_identifiers(g) - end - - @testset "boundary_grid" begin - @testset "1D" begin - g = EquidistantGrid(5,0.0,2.0) - (id_l, id_r) = boundary_identifiers(g) - @test boundary_grid(g,id_l) == EquidistantGrid{Float64}() - @test boundary_grid(g,id_r) == EquidistantGrid{Float64}() - @test_throws DomainError boundary_grid(g,CartesianBoundary{2,Lower}()) - @test_throws DomainError boundary_grid(g,CartesianBoundary{0,Lower}()) - end - @testset "2D" begin - g = EquidistantGrid((5,3),(0.0,0.0),(1.0,3.0)) - (id_w, id_e, id_s, id_n) = boundary_identifiers(g) - @test boundary_grid(g,id_w) == restrict(g,2) - @test boundary_grid(g,id_e) == restrict(g,2) - @test boundary_grid(g,id_s) == restrict(g,1) - @test boundary_grid(g,id_n) == restrict(g,1) - @test_throws DomainError 
boundary_grid(g,CartesianBoundary{4,Lower}()) - end - @testset "3D" begin - g = EquidistantGrid((2,5,3), (0.0,0.0,0.0), (2.0,1.0,3.0)) - (id_w, id_e, - id_s, id_n, - id_t, id_b) = boundary_identifiers(g) - @test boundary_grid(g,id_w) == restrict(g,[2,3]) - @test boundary_grid(g,id_e) == restrict(g,[2,3]) - @test boundary_grid(g,id_s) == restrict(g,[1,3]) - @test boundary_grid(g,id_n) == restrict(g,[1,3]) - @test boundary_grid(g,id_t) == restrict(g,[1,2]) - @test boundary_grid(g,id_b) == restrict(g,[1,2]) - @test_throws DomainError boundary_grid(g,CartesianBoundary{4,Lower}()) - end - end - - @testset "refine" begin - @test refine(EquidistantGrid{Float64}(), 1) == EquidistantGrid{Float64}() - @test refine(EquidistantGrid{Float64}(), 2) == EquidistantGrid{Float64}() - - g = EquidistantGrid((10,5),(0.,1.),(2.,3.)) - @test refine(g, 1) == g - @test refine(g, 2) == EquidistantGrid((19,9),(0.,1.),(2.,3.)) - @test refine(g, 3) == EquidistantGrid((28,13),(0.,1.),(2.,3.)) - end - - @testset "coarsen" begin - @test coarsen(EquidistantGrid{Float64}(), 1) == EquidistantGrid{Float64}() - @test coarsen(EquidistantGrid{Float64}(), 2) == EquidistantGrid{Float64}() - - g = EquidistantGrid((7,13),(0.,1.),(2.,3.)) - @test coarsen(g, 1) == g - @test coarsen(g, 2) == EquidistantGrid((4,7),(0.,1.),(2.,3.)) - @test coarsen(g, 3) == EquidistantGrid((3,5),(0.,1.),(2.,3.)) - - @test_throws DomainError(4, "Size minus 1 must be divisible by the ratio.") coarsen(g, 4) == EquidistantGrid((3,5),(0.,1.),(2.,3.)) - end -end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/Grids/equidistant_grid_test.jl Fri Feb 03 22:14:47 2023 +0100 @@ -0,0 +1,125 @@ +using Sbplib.Grids +using Test +using Sbplib.RegionIndices + + +@testset "EquidistantGrid" begin + @test EquidistantGrid(4,0.0,1.0) isa EquidistantGrid + @test EquidistantGrid(4,0.0,8.0) isa EquidistantGrid + # constuctor + @test_throws DomainError EquidistantGrid(0,0.0,1.0) + @test_throws DomainError EquidistantGrid(1,1.0,1.0) + @test_throws DomainError EquidistantGrid(1,1.0,-1.0) + @test EquidistantGrid(4,0.0,1.0) == EquidistantGrid((4,),(0.0,),(1.0,)) + + @testset "Base" begin + @test eltype(EquidistantGrid(4,0.0,1.0)) == Float64 + @test eltype(EquidistantGrid((4,3),(0,0),(1,3))) == Int + @test size(EquidistantGrid(4,0.0,1.0)) == (4,) + @test size(EquidistantGrid((5,3), (0.0,0.0), (2.0,1.0))) == (5,3) + @test ndims(EquidistantGrid(4,0.0,1.0)) == 1 + @test ndims(EquidistantGrid((5,3), (0.0,0.0), (2.0,1.0))) == 2 + end + + @testset "spacing" begin + @test [spacing(EquidistantGrid(4,0.0,1.0))...] ≈ [(1. /3,)...] atol=5e-13 + @test [spacing(EquidistantGrid((5,3), (0.0,-1.0), (2.0,1.0)))...] ≈ [(0.5, 1.)...] atol=5e-13 + end + + @testset "inverse_spacing" begin + @test [inverse_spacing(EquidistantGrid(4,0.0,1.0))...] ≈ [(3.,)...] atol=5e-13 + @test [inverse_spacing(EquidistantGrid((5,3), (0.0,-1.0), (2.0,1.0)))...] ≈ [(2, 1.)...] atol=5e-13 + end + + @testset "points" begin + g = EquidistantGrid((5,3), (-1.0,0.0), (0.0,7.11)) + gp = points(g); + p = [(-1.,0.) (-1.,7.11/2) (-1.,7.11); + (-0.75,0.) (-0.75,7.11/2) (-0.75,7.11); + (-0.5,0.) (-0.5,7.11/2) (-0.5,7.11); + (-0.25,0.) (-0.25,7.11/2) (-0.25,7.11); + (0.,0.) (0.,7.11/2) (0.,7.11)] + for i ∈ eachindex(gp) + @test [gp[i]...] ≈ [p[i]...] atol=5e-13 + end + end + + @testset "restrict" begin + g = EquidistantGrid((5,3), (0.0,0.0), (2.0,1.0)) + @test restrict(g, 1) == EquidistantGrid(5,0.0,2.0) + @test restrict(g, 2) == EquidistantGrid(3,0.0,1.0) + + g = EquidistantGrid((2,5,3), (0.0,0.0,0.0), (2.0,1.0,3.0)) + @test restrict(g, 1) == EquidistantGrid(2,0.0,2.0) + @test restrict(g, 2) == EquidistantGrid(5,0.0,1.0) + @test restrict(g, 3) == EquidistantGrid(3,0.0,3.0) + @test restrict(g, 1:2) == EquidistantGrid((2,5),(0.0,0.0),(2.0,1.0)) + @test restrict(g, 2:3) == EquidistantGrid((5,3),(0.0,0.0),(1.0,3.0)) + @test restrict(g, [1,3]) == EquidistantGrid((2,3),(0.0,0.0),(2.0,3.0)) + @test restrict(g, [2,1]) == EquidistantGrid((5,2),(0.0,0.0),(1.0,2.0)) + end + + @testset "boundary_identifiers" begin + g = EquidistantGrid((2,5,3), (0.0,0.0,0.0), (2.0,1.0,3.0)) + bids = (CartesianBoundary{1,Lower}(),CartesianBoundary{1,Upper}(), + CartesianBoundary{2,Lower}(),CartesianBoundary{2,Upper}(), + CartesianBoundary{3,Lower}(),CartesianBoundary{3,Upper}()) + @test boundary_identifiers(g) == bids + @inferred boundary_identifiers(g) + end + + @testset "boundary_grid" begin + @testset "1D" begin + g = EquidistantGrid(5,0.0,2.0) + (id_l, id_r) = boundary_identifiers(g) + @test boundary_grid(g,id_l) == EquidistantGrid{Float64}() + @test boundary_grid(g,id_r) == EquidistantGrid{Float64}() + @test_throws DomainError boundary_grid(g,CartesianBoundary{2,Lower}()) + @test_throws DomainError boundary_grid(g,CartesianBoundary{0,Lower}()) + end + @testset "2D" begin + g = EquidistantGrid((5,3),(0.0,0.0),(1.0,3.0)) + (id_w, id_e, id_s, id_n) = boundary_identifiers(g) + @test boundary_grid(g,id_w) == restrict(g,2) + @test boundary_grid(g,id_e) == restrict(g,2) + @test boundary_grid(g,id_s) == restrict(g,1) + @test boundary_grid(g,id_n) 
== restrict(g,1) + @test_throws DomainError boundary_grid(g,CartesianBoundary{4,Lower}()) + end + @testset "3D" begin + g = EquidistantGrid((2,5,3), (0.0,0.0,0.0), (2.0,1.0,3.0)) + (id_w, id_e, + id_s, id_n, + id_t, id_b) = boundary_identifiers(g) + @test boundary_grid(g,id_w) == restrict(g,[2,3]) + @test boundary_grid(g,id_e) == restrict(g,[2,3]) + @test boundary_grid(g,id_s) == restrict(g,[1,3]) + @test boundary_grid(g,id_n) == restrict(g,[1,3]) + @test boundary_grid(g,id_t) == restrict(g,[1,2]) + @test boundary_grid(g,id_b) == restrict(g,[1,2]) + @test_throws DomainError boundary_grid(g,CartesianBoundary{4,Lower}()) + end + end + + @testset "refine" begin + @test refine(EquidistantGrid{Float64}(), 1) == EquidistantGrid{Float64}() + @test refine(EquidistantGrid{Float64}(), 2) == EquidistantGrid{Float64}() + + g = EquidistantGrid((10,5),(0.,1.),(2.,3.)) + @test refine(g, 1) == g + @test refine(g, 2) == EquidistantGrid((19,9),(0.,1.),(2.,3.)) + @test refine(g, 3) == EquidistantGrid((28,13),(0.,1.),(2.,3.)) + end + + @testset "coarsen" begin + @test coarsen(EquidistantGrid{Float64}(), 1) == EquidistantGrid{Float64}() + @test coarsen(EquidistantGrid{Float64}(), 2) == EquidistantGrid{Float64}() + + g = EquidistantGrid((7,13),(0.,1.),(2.,3.)) + @test coarsen(g, 1) == g + @test coarsen(g, 2) == EquidistantGrid((4,7),(0.,1.),(2.,3.)) + @test coarsen(g, 3) == EquidistantGrid((3,5),(0.,1.),(2.,3.)) + + @test_throws DomainError(4, "Size minus 1 must be divisible by the ratio.") coarsen(g, 4) == EquidistantGrid((3,5),(0.,1.),(2.,3.)) + end +end
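The deleted and added files above are the same test suite under a new lowercase filename; the substantive changes are that `dimension(grid)` is replaced by `Base.ndims` and the loose tests are grouped into `@testset` blocks. A short sketch of the grid API exactly as these tests exercise it:

```julia
using Sbplib.Grids

g = EquidistantGrid((2,5,3), (0.0,0.0,0.0), (2.0,1.0,3.0))

ndims(g)                   # 3, replacing the old dimension(g)
spacing(g)                 # grid spacing in each coordinate direction
restrict(g, [1,3])         # 2D grid spanned by the first and third directions
boundary_identifiers(g)    # the six CartesianBoundary identifiers
refine(restrict(g, 2), 2)  # refine the 1D restriction by halving the spacing
coarsen(restrict(g, 2), 2) # inverse of refine, when the sizes allow it
```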
--- a/test/LazyTensors/lazy_array_test.jl Mon Feb 21 10:33:58 2022 +0100 +++ b/test/LazyTensors/lazy_array_test.jl Fri Feb 03 22:14:47 2023 +0100 @@ -59,8 +59,6 @@ end @test_throws BoundsError (v1 +̃ v2)[4] v2 = [1., 2, 3, 4] - # Test that size of arrays is asserted when not specified inbounds - # TODO: Replace these errors with SizeMismatch @test_throws DimensionMismatch v1 +̃ v2 # Test operations on LazyArray @@ -69,15 +67,20 @@ @test isa(v1 + v2, LazyArray) @test isa(v2 + v1, LazyArray) @test isa(v1 - v2, LazyArray) - @test isa(v2 - v1, LazyArray) + @test isa(v2 - v1, LazyArray) + @test isa(v1 + s, LazyArray) + @test isa(s + v1, LazyArray) + @test isa(v1 - s, LazyArray) + @test isa(s - v1, LazyArray) for i ∈ eachindex(v2) @test (v1 + v2)[i] == (v2 + v1)[i] == r_add_v[i] @test (v1 - v2)[i] == -(v2 - v1)[i] == r_sub_v[i] + @test (v1 + s)[i] == (s + v1)[i] == r_add_s[i] + @test (v1 - s)[i] == -(s - v1)[i] == r_sub_s[i] end + @test_throws BoundsError (v1 + v2)[4] v2 = [1., 2, 3, 4] - # Test that size of arrays is asserted when not specified inbounds - # TODO: Replace these errors with SizeMismatch @test_throws DimensionMismatch v1 + v2 end @@ -99,4 +102,7 @@ @test_throws BoundsError LazyFunctionArray((i,j)->i*j, (3,2))[4,2] @test_throws BoundsError LazyFunctionArray((i,j)->i*j, (3,2))[2,3] + # Test that the constructor works with a restrictive function + f(x::Vararg{Int}) = sum(x) + @test LazyFunctionArray(f,(3,4)) isa LazyFunctionArray end
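The updated tests add scalar variants of the lazy element-wise operations and check that `LazyFunctionArray` accepts a function with a restrictive signature. A small sketch of the lazy-array behaviour these tests pin down; per the new assertions, the plain `+`/`-` scalar forms apply to `LazyArray` operands, while the `+̃` operator below works on ordinary arrays:

```julia
using Sbplib.LazyTensors

u = [1.0, 2.0, 3.0]
w = [4.0, 5.0, 6.0]

lazy_sum = u +̃ w             # a LazyArray; nothing is evaluated yet
lazy_sum[2]                   # == 7.0, computed on demand

# LazyFunctionArray wraps an index function as a read-only lazy array.
f = LazyFunctionArray((i, j) -> i*j, (3, 2))
f[3, 2]                       # == 6
```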
--- a/test/LazyTensors/lazy_tensor_operations_test.jl Mon Feb 21 10:33:58 2022 +0100 +++ b/test/LazyTensors/lazy_tensor_operations_test.jl Fri Feb 03 22:14:47 2023 +0100 @@ -4,17 +4,28 @@ using Tullio -@testset "Mapping transpose" begin - struct DummyMapping{T,R,D} <: TensorMapping{T,R,D} end +struct DummyMapping{T,R,D} <: LazyTensor{T,R,D} end - LazyTensors.apply(m::DummyMapping{T,R}, v, I::Vararg{Any,R}) where {T,R} = :apply - LazyTensors.apply_transpose(m::DummyMapping{T,R,D}, v, I::Vararg{Any,D}) where {T,R,D} = :apply_transpose +LazyTensors.apply(m::DummyMapping{T,R}, v, I::Vararg{Any,R}) where {T,R} = :apply +LazyTensors.apply_transpose(m::DummyMapping{T,R,D}, v, I::Vararg{Any,D}) where {T,R,D} = :apply_transpose + +LazyTensors.range_size(m::DummyMapping) = :range_size +LazyTensors.domain_size(m::DummyMapping) = :domain_size + - LazyTensors.range_size(m::DummyMapping) = :range_size - LazyTensors.domain_size(m::DummyMapping) = :domain_size +struct SizeDoublingMapping{T,R,D} <: LazyTensor{T,R,D} + domain_size::NTuple{D,Int} +end +LazyTensors.apply(m::SizeDoublingMapping{T,R}, v, i::Vararg{Any,R}) where {T,R} = (:apply,v,i) +LazyTensors.range_size(m::SizeDoublingMapping) = 2 .* m.domain_size +LazyTensors.domain_size(m::SizeDoublingMapping) = m.domain_size + + + +@testset "Mapping transpose" begin m = DummyMapping{Float64,2,3}() - @test m' isa TensorMapping{Float64, 3,2} + @test m' isa LazyTensor{Float64, 3,2} @test m'' == m @test apply(m',zeros(Float64,(0,0)), 0, 0, 0) == :apply_transpose @test apply(m'',zeros(Float64,(0,0,0)), 0, 0) == :apply @@ -24,71 +35,103 @@ @test domain_size(m') == :range_size end + @testset "TensorApplication" begin - struct SizeDoublingMapping{T,R,D} <: TensorMapping{T,R,D} - domain_size::NTuple{D,Int} - end - - LazyTensors.apply(m::SizeDoublingMapping{T,R}, v, i::Vararg{Any,R}) where {T,R} = (:apply,v,i) - LazyTensors.range_size(m::SizeDoublingMapping) = 2 .* m.domain_size - LazyTensors.domain_size(m::SizeDoublingMapping) = m.domain_size - - m = SizeDoublingMapping{Int, 1, 1}((3,)) + mm = SizeDoublingMapping{Int, 1, 1}((6,)) v = [0,1,2] - @test m*v isa AbstractVector{Int} @test size(m*v) == 2 .*size(v) - @test (m*v)[0] == (:apply,v,(0,)) - @test m*m*v isa AbstractVector{Int} - @test (m*m*v)[1] == (:apply,m*v,(1,)) - @test (m*m*v)[3] == (:apply,m*v,(3,)) - @test (m*m*v)[6] == (:apply,m*v,(6,)) - @test_broken BoundsError == (m*m*v)[0] - @test_broken BoundsError == (m*m*v)[7] + @test (m*v)[1] == (:apply,v,(1,)) + @test (mm*m*v)[1] == (:apply,m*v,(1,)) + @test (mm*m*v)[3] == (:apply,m*v,(3,)) + @test (mm*m*v)[6] == (:apply,m*v,(6,)) @test_throws MethodError m*m - m = SizeDoublingMapping{Int, 2, 1}((3,)) - @test_throws MethodError m*ones(Int,2,2) - @test_throws MethodError m*m*v + @test (m*v)[CartesianIndex(2)] == (:apply,v,(2,)) + @test (mm*m*v)[CartesianIndex(2)] == (:apply,m*v,(2,)) m = SizeDoublingMapping{Float64, 2, 2}((3,3)) + mm = SizeDoublingMapping{Float64, 2, 2}((6,6)) v = ones(3,3) @test size(m*v) == 2 .*size(v) @test (m*v)[1,2] == (:apply,v,(1,2)) - struct ScalingOperator{T,D} <: TensorMapping{T,D,D} - λ::T - size::NTuple{D,Int} - end + @test (m*v)[CartesianIndex(2,3)] == (:apply,v,(2,3)) + @test (mm*m*v)[CartesianIndex(4,3)] == (:apply,m*v,(4,3)) - LazyTensors.apply(m::ScalingOperator{T,D}, v, I::Vararg{Any,D}) where {T,D} = m.λ*v[I...] 
- LazyTensors.range_size(m::ScalingOperator) = m.size - LazyTensors.domain_size(m::ScalingOperator) = m.size - - m = ScalingOperator{Int,1}(2,(3,)) + m = ScalingTensor(2,(3,)) v = [1,2,3] @test m*v isa AbstractVector @test m*v == [2,4,6] - m = ScalingOperator{Int,2}(2,(2,2)) + m = ScalingTensor(2,(2,2)) v = [[1 2];[3 4]] @test m*v == [[2 4];[6 8]] @test (m*v)[2,1] == 6 + + @testset "Error on index out of bounds" begin + m = SizeDoublingMapping{Int, 1, 1}((3,)) + v = [0,1,2] + + @test_throws BoundsError (m*v)[0] + @test_throws BoundsError (m*v)[7] + end + + @testset "Error on unmatched dimensions" begin + v = [0,1,2] + m = SizeDoublingMapping{Int, 2, 1}((3,)) + @test_throws MethodError m*ones(Int,2,2) + @test_throws MethodError m*m*v + end + + @testset "Error on unmatched sizes" begin + @test_throws DomainSizeMismatch ScalingTensor(2,(2,))*ones(3) + @test_throws DomainSizeMismatch ScalingTensor(2,(2,))*ScalingTensor(2,(3,))*ones(3) + end + + + @testset "Type calculation" begin + m = ScalingTensor(2,(3,)) + v = [1.,2.,3.] + @test m*v isa AbstractVector{Float64} + @test m*v == [2.,4.,6.] + @inferred m*v + @inferred (m*v)[1] + + m = ScalingTensor(2,(2,2)) + v = [[1. 2.];[3. 4.]] + @test m*v == [[2. 4.];[6. 8.]] + @test (m*v)[2,1] == 6. + @inferred m*v + @inferred (m*v)[1] + + m = ScalingTensor(2. +2. *im,(3,)) + v = [1.,2.,3.] + @test m*v isa AbstractVector{ComplexF64} + @test m*v == [2. + 2. *im, 4. + 4. *im, 6. + 6. *im] + @inferred m*v + @inferred (m*v)[1] + + m = ScalingTensor(1,(3,)) + v = [2. + 2. *im, 4. + 4. *im, 6. + 6. *im] + @test m*v isa AbstractVector{ComplexF64} + @test m*v == [2. + 2. *im, 4. + 4. *im, 6. + 6. *im] + @inferred m*v + @inferred (m*v)[1] + + m = ScalingTensor(2., (3,)) + v = [[1,2,3], [3,2,1],[1,3,1]] + @test m*v isa AbstractVector{Vector{Float64}} + @test m*v == [[2.,4.,6.], [6.,4.,2.],[2.,6.,2.]] + @inferred m*v + @inferred (m*v)[1] + end end -@testset "TensorMapping binary operations" begin - struct ScalarMapping{T,R,D} <: TensorMapping{T,R,D} - λ::T - range_size::NTuple{R,Int} - domain_size::NTuple{D,Int} - end - LazyTensors.apply(m::ScalarMapping{T,R}, v, I::Vararg{Any,R}) where {T,R} = m.λ*v[I...] 
- LazyTensors.range_size(m::ScalarMapping) = m.domain_size - LazyTensors.domain_size(m::ScalarMapping) = m.range_size - - A = ScalarMapping{Float64,1,1}(2.0, (3,), (3,)) - B = ScalarMapping{Float64,1,1}(3.0, (3,), (3,)) +@testset "LazyTensor binary operations" begin + A = ScalingTensor(2.0, (3,)) + B = ScalingTensor(3.0, (3,)) v = [1.1,1.2,1.3] for i ∈ eachindex(v) @@ -99,22 +142,34 @@ @test ((A-B)*v)[i] == 2*v[i] - 3*v[i] end + @test range_size(A+B) == range_size(A) == range_size(B) @test domain_size(A+B) == domain_size(A) == domain_size(B) + + @test ((A+B)*ComplexF64[1.1,1.2,1.3])[3] isa ComplexF64 + + @testset "Error on unmatched sizes" begin + @test_throws Union{DomainSizeMismatch, RangeSizeMismatch} ScalingTensor(2.0, (3,)) + ScalingTensor(2.0, (4,)) + + @test_throws DomainSizeMismatch ScalingTensor(2.0, (4,)) + SizeDoublingMapping{Float64,1,1}((2,)) + @test_throws DomainSizeMismatch SizeDoublingMapping{Float64,1,1}((2,)) + ScalingTensor(2.0, (4,)) + @test_throws RangeSizeMismatch ScalingTensor(2.0, (2,)) + SizeDoublingMapping{Float64,1,1}((2,)) + @test_throws RangeSizeMismatch SizeDoublingMapping{Float64,1,1}((2,)) + ScalingTensor(2.0, (2,)) + end end -@testset "TensorMappingComposition" begin +@testset "TensorComposition" begin A = rand(2,3) B = rand(3,4) - à = LazyLinearMap(A, (1,), (2,)) - B̃ = LazyLinearMap(B, (1,), (2,)) + à = DenseTensor(A, (1,), (2,)) + B̃ = DenseTensor(B, (1,), (2,)) - @test Ã∘B̃ isa TensorMappingComposition + @test Ã∘B̃ isa TensorComposition @test range_size(Ã∘B̃) == (2,) @test domain_size(Ã∘B̃) == (4,) - @test_throws SizeMismatch B̃∘à + @test_throws DomainSizeMismatch B̃∘à # @test @inbounds B̃∘à # Should not error even though dimensions don't match. (Since ]test runs with forced boundschecking this is currently not testable 2020-10-16) @@ -123,210 +178,125 @@ v = rand(2) @test (Ã∘B̃)'*v ≈ B'*A'*v rtol=1e-14 -end -@testset "LazyLinearMap" begin - # Test a standard matrix-vector product - # mapping vectors of size 4 to vectors of size 3. - A = rand(3,4) - à = LazyLinearMap(A, (1,), (2,)) - v = rand(4) - w = rand(3) - - @test à isa LazyLinearMap{T,1,1} where T - @test à isa TensorMapping{T,1,1} where T - @test range_size(Ã) == (3,) - @test domain_size(Ã) == (4,) - - @test Ã*ones(4) ≈ A*ones(4) atol=5e-13 - @test Ã*v ≈ A*v atol=5e-13 - @test Ã'*w ≈ A'*w - - A = rand(2,3,4) - @test_throws DomainError LazyLinearMap(A, (3,1), (2,)) + @test (Ã∘B̃*ComplexF64[1.,2.,3.,4.])[1] isa ComplexF64 + @test ((Ã∘B̃)'*ComplexF64[1.,2.])[1] isa ComplexF64 - # Test more exotic mappings - B = rand(3,4,2) - # Map vectors of size 2 to matrices of size (3,4) - B̃ = LazyLinearMap(B, (1,2), (3,)) - v = rand(2) - - @test range_size(B̃) == (3,4) - @test domain_size(B̃) == (2,) - @test B̃ isa TensorMapping{T,2,1} where T - @test B̃*ones(2) ≈ B[:,:,1] + B[:,:,2] atol=5e-13 - @test B̃*v ≈ B[:,:,1]*v[1] + B[:,:,2]*v[2] atol=5e-13 - - # Map matrices of size (3,2) to vectors of size 4 - B̃ = LazyLinearMap(B, (2,), (1,3)) - v = rand(3,2) - - @test range_size(B̃) == (4,) - @test domain_size(B̃) == (3,2) - @test B̃ isa TensorMapping{T,1,2} where T - @test B̃*ones(3,2) ≈ B[1,:,1] + B[2,:,1] + B[3,:,1] + - B[1,:,2] + B[2,:,2] + B[3,:,2] atol=5e-13 - @test B̃*v ≈ B[1,:,1]*v[1,1] + B[2,:,1]*v[2,1] + B[3,:,1]*v[3,1] + - B[1,:,2]v[1,2] + B[2,:,2]*v[2,2] + B[3,:,2]*v[3,2] atol=5e-13 - - - # TODO: - # @inferred (B̃*v)[2] + a = 2. 
+ v = rand(3) + @test a*à isa TensorComposition + @test a*à == Ã*a + @test range_size(a*Ã) == range_size(Ã) + @test domain_size(a*Ã) == domain_size(Ã) + @test a*Ã*v == a.*A*v end -@testset "IdentityMapping" begin - @test IdentityMapping{Float64}((4,5)) isa IdentityMapping{T,2} where T - @test IdentityMapping{Float64}((4,5)) isa TensorMapping{T,2,2} where T - @test IdentityMapping{Float64}((4,5)) == IdentityMapping{Float64}(4,5) - - @test IdentityMapping(3,2) isa IdentityMapping{Float64,2} - - for sz ∈ [(4,5),(3,),(5,6,4)] - I = IdentityMapping{Float64}(sz) - v = rand(sz...) - @test I*v == v - @test I'*v == v - - @test range_size(I) == sz - @test domain_size(I) == sz - end - - I = IdentityMapping{Float64}((4,5)) - v = rand(4,5) - @inferred (I*v)[3,2] - @inferred (I'*v)[3,2] - @inferred range_size(I) - - @inferred range_dim(I) - @inferred domain_dim(I) - - à = rand(4,2) - A = LazyLinearMap(Ã,(1,),(2,)) - I1 = IdentityMapping{Float64}(2) - I2 = IdentityMapping{Float64}(4) - @test A∘I1 == A - @test I2∘A == A - @test I1∘I1 == I1 - @test_throws SizeMismatch I1∘A - @test_throws SizeMismatch A∘I2 - @test_throws SizeMismatch I1∘I2 -end - -@testset "InflatedTensorMapping" begin - I(sz...) = IdentityMapping(sz...) +@testset "InflatedTensor" begin + I(sz...) = IdentityTensor(sz...) à = rand(4,2) B̃ = rand(4,2,3) C̃ = rand(4,2,3) - A = LazyLinearMap(Ã,(1,),(2,)) - B = LazyLinearMap(B̃,(1,2),(3,)) - C = LazyLinearMap(C̃,(1,),(2,3)) + A = DenseTensor(Ã,(1,),(2,)) + B = DenseTensor(B̃,(1,2),(3,)) + C = DenseTensor(C̃,(1,),(2,3)) @testset "Constructors" begin - @test InflatedTensorMapping(I(3,2), A, I(4)) isa TensorMapping{Float64, 4, 4} - @test InflatedTensorMapping(I(3,2), B, I(4)) isa TensorMapping{Float64, 5, 4} - @test InflatedTensorMapping(I(3), C, I(2,3)) isa TensorMapping{Float64, 4, 5} - @test InflatedTensorMapping(C, I(2,3)) isa TensorMapping{Float64, 3, 4} - @test InflatedTensorMapping(I(3), C) isa TensorMapping{Float64, 2, 3} - @test InflatedTensorMapping(I(3), I(2,3)) isa TensorMapping{Float64, 3, 3} + @test InflatedTensor(I(3,2), A, I(4)) isa LazyTensor{Float64, 4, 4} + @test InflatedTensor(I(3,2), B, I(4)) isa LazyTensor{Float64, 5, 4} + @test InflatedTensor(I(3), C, I(2,3)) isa LazyTensor{Float64, 4, 5} + @test InflatedTensor(C, I(2,3)) isa LazyTensor{Float64, 3, 4} + @test InflatedTensor(I(3), C) isa LazyTensor{Float64, 2, 3} + @test InflatedTensor(I(3), I(2,3)) isa LazyTensor{Float64, 3, 3} end @testset "Range and domain size" begin - @test range_size(InflatedTensorMapping(I(3,2), A, I(4))) == (3,2,4,4) - @test domain_size(InflatedTensorMapping(I(3,2), A, I(4))) == (3,2,2,4) + @test range_size(InflatedTensor(I(3,2), A, I(4))) == (3,2,4,4) + @test domain_size(InflatedTensor(I(3,2), A, I(4))) == (3,2,2,4) - @test range_size(InflatedTensorMapping(I(3,2), B, I(4))) == (3,2,4,2,4) - @test domain_size(InflatedTensorMapping(I(3,2), B, I(4))) == (3,2,3,4) + @test range_size(InflatedTensor(I(3,2), B, I(4))) == (3,2,4,2,4) + @test domain_size(InflatedTensor(I(3,2), B, I(4))) == (3,2,3,4) - @test range_size(InflatedTensorMapping(I(3), C, I(2,3))) == (3,4,2,3) - @test domain_size(InflatedTensorMapping(I(3), C, I(2,3))) == (3,2,3,2,3) + @test range_size(InflatedTensor(I(3), C, I(2,3))) == (3,4,2,3) + @test domain_size(InflatedTensor(I(3), C, I(2,3))) == (3,2,3,2,3) - @inferred range_size(InflatedTensorMapping(I(3,2), A, I(4))) == (3,2,4,4) - @inferred domain_size(InflatedTensorMapping(I(3,2), A, I(4))) == (3,2,2,4) + @inferred range_size(InflatedTensor(I(3,2), A, I(4))) == (3,2,4,4) + @inferred 
domain_size(InflatedTensor(I(3,2), A, I(4))) == (3,2,2,4) end @testset "Application" begin # Testing regular application and transposed application with inflation "before", "after" and "before and after". # The inflated tensor mappings are chosen to preserve, reduce and increase the dimension of the result compared to the input. - tests = [ + cases = [ ( - InflatedTensorMapping(I(3,2), A, I(4)), + InflatedTensor(I(3,2), A, I(4)), (v-> @tullio res[a,b,c,d] := Ã[c,i]*v[a,b,i,d]), # Expected result of apply (v-> @tullio res[a,b,c,d] := Ã[i,c]*v[a,b,i,d]), # Expected result of apply_transpose ), ( - InflatedTensorMapping(I(3,2), B, I(4)), + InflatedTensor(I(3,2), B, I(4)), (v-> @tullio res[a,b,c,d,e] := B̃[c,d,i]*v[a,b,i,e]), (v-> @tullio res[a,b,c,d] := B̃[i,j,c]*v[a,b,i,j,d]), ), ( - InflatedTensorMapping(I(3,2), C, I(4)), + InflatedTensor(I(3,2), C, I(4)), (v-> @tullio res[a,b,c,d] := C̃[c,i,j]*v[a,b,i,j,d]), (v-> @tullio res[a,b,c,d,e] := C̃[i,c,d]*v[a,b,i,e]), ), ( - InflatedTensorMapping(I(3,2), A), + InflatedTensor(I(3,2), A), (v-> @tullio res[a,b,c] := Ã[c,i]*v[a,b,i]), (v-> @tullio res[a,b,c] := Ã[i,c]*v[a,b,i]), ), ( - InflatedTensorMapping(I(3,2), B), + InflatedTensor(I(3,2), B), (v-> @tullio res[a,b,c,d] := B̃[c,d,i]*v[a,b,i]), (v-> @tullio res[a,b,c] := B̃[i,j,c]*v[a,b,i,j]), ), ( - InflatedTensorMapping(I(3,2), C), + InflatedTensor(I(3,2), C), (v-> @tullio res[a,b,c] := C̃[c,i,j]*v[a,b,i,j]), (v-> @tullio res[a,b,c,d] := C̃[i,c,d]*v[a,b,i]), ), ( - InflatedTensorMapping(A,I(4)), + InflatedTensor(A,I(4)), (v-> @tullio res[a,b] := Ã[a,i]*v[i,b]), (v-> @tullio res[a,b] := Ã[i,a]*v[i,b]), ), ( - InflatedTensorMapping(B,I(4)), + InflatedTensor(B,I(4)), (v-> @tullio res[a,b,c] := B̃[a,b,i]*v[i,c]), (v-> @tullio res[a,b] := B̃[i,j,a]*v[i,j,b]), ), ( - InflatedTensorMapping(C,I(4)), + InflatedTensor(C,I(4)), (v-> @tullio res[a,b] := C̃[a,i,j]*v[i,j,b]), (v-> @tullio res[a,b,c] := C̃[i,a,b]*v[i,c]), ), ] - @testset "apply" begin - for i ∈ 1:length(tests) - tm = tests[i][1] - v = rand(domain_size(tm)...) - true_value = tests[i][2](v) - @test tm*v ≈ true_value rtol=1e-14 - end + @testset "$tm" for (tm, true_apply, true_apply_transpose) ∈ cases + v = rand(domain_size(tm)...) + @test tm*v ≈ true_apply(v) rtol=1e-14 + + v = rand(range_size(tm)...) + @test tm'*v ≈ true_apply_transpose(v) rtol=1e-14 end - @testset "apply_transpose" begin - for i ∈ 1:length(tests) - tm = tests[i][1] - v = rand(range_size(tm)...) - true_value = tests[i][3](v) - @test tm'*v ≈ true_value rtol=1e-14 - end + @testset "application to other type" begin + tm = InflatedTensor(I(3,2), A, I(4)) + + v = rand(ComplexF64, domain_size(tm)...) + @test (tm*v)[1,2,3,1] isa ComplexF64 + + v = rand(ComplexF64, domain_size(tm')...) + @test (tm'*v)[1,2,2,1] isa ComplexF64 end @testset "Inference of application" begin - struct ScalingOperator{T,D} <: TensorMapping{T,D,D} - λ::T - size::NTuple{D,Int} - end - - LazyTensors.apply(m::ScalingOperator{T,D}, v, I::Vararg{Any,D}) where {T,D} = m.λ*v[I...] - LazyTensors.range_size(m::ScalingOperator) = m.size - LazyTensors.domain_size(m::ScalingOperator) = m.size - - tm = InflatedTensorMapping(I(2,3),ScalingOperator(2.0, (3,2)),I(3,4)) + tm = InflatedTensor(I(2,3),ScalingTensor(2.0, (3,2)),I(3,4)) v = rand(domain_size(tm)...) 
@inferred apply(tm,v,1,2,3,2,2,4) @@ -334,94 +304,24 @@ end end - @testset "InflatedTensorMapping of InflatedTensorMapping" begin - A = ScalingOperator(2.0,(2,3)) - itm = InflatedTensorMapping(I(3,2), A, I(4)) - @test InflatedTensorMapping(I(4), itm, I(2)) == InflatedTensorMapping(I(4,3,2), A, I(4,2)) - @test InflatedTensorMapping(itm, I(2)) == InflatedTensorMapping(I(3,2), A, I(4,2)) - @test InflatedTensorMapping(I(4), itm) == InflatedTensorMapping(I(4,3,2), A, I(4)) + @testset "InflatedTensor of InflatedTensor" begin + A = ScalingTensor(2.0,(2,3)) + itm = InflatedTensor(I(3,2), A, I(4)) + @test InflatedTensor(I(4), itm, I(2)) == InflatedTensor(I(4,3,2), A, I(4,2)) + @test InflatedTensor(itm, I(2)) == InflatedTensor(I(3,2), A, I(4,2)) + @test InflatedTensor(I(4), itm) == InflatedTensor(I(4,3,2), A, I(4)) - @test InflatedTensorMapping(I(2), I(2), I(2)) isa InflatedTensorMapping # The constructor should always return its type. + @test InflatedTensor(I(2), I(2), I(2)) isa InflatedTensor # The constructor should always return its type. end end -@testset "split_index" begin - @test LazyTensors.split_index(Val(2),Val(1),Val(2),Val(2),1,2,3,4,5,6) == ((1,2,:,5,6),(3,4)) - @test LazyTensors.split_index(Val(2),Val(3),Val(2),Val(2),1,2,3,4,5,6) == ((1,2,:,:,:,5,6),(3,4)) - @test LazyTensors.split_index(Val(3),Val(1),Val(1),Val(2),1,2,3,4,5,6) == ((1,2,3,:,5,6),(4,)) - @test LazyTensors.split_index(Val(3),Val(2),Val(1),Val(2),1,2,3,4,5,6) == ((1,2,3,:,:,5,6),(4,)) - @test LazyTensors.split_index(Val(1),Val(1),Val(2),Val(3),1,2,3,4,5,6) == ((1,:,4,5,6),(2,3)) - @test LazyTensors.split_index(Val(1),Val(2),Val(2),Val(3),1,2,3,4,5,6) == ((1,:,:,4,5,6),(2,3)) - - @test LazyTensors.split_index(Val(0),Val(1),Val(3),Val(3),1,2,3,4,5,6) == ((:,4,5,6),(1,2,3)) - @test LazyTensors.split_index(Val(3),Val(1),Val(3),Val(0),1,2,3,4,5,6) == ((1,2,3,:),(4,5,6)) - - @inferred LazyTensors.split_index(Val(2),Val(3),Val(2),Val(2),1,2,3,2,2,4) -end - -@testset "slice_tuple" begin - @test LazyTensors.slice_tuple((1,2,3),Val(1), Val(3)) == (1,2,3) - @test LazyTensors.slice_tuple((1,2,3,4,5,6),Val(2), Val(5)) == (2,3,4,5) - @test LazyTensors.slice_tuple((1,2,3,4,5,6),Val(1), Val(3)) == (1,2,3) - @test LazyTensors.slice_tuple((1,2,3,4,5,6),Val(4), Val(6)) == (4,5,6) -end - -@testset "split_tuple" begin - @testset "2 parts" begin - @test LazyTensors.split_tuple((),Val(0)) == ((),()) - @test LazyTensors.split_tuple((1,),Val(0)) == ((),(1,)) - @test LazyTensors.split_tuple((1,),Val(1)) == ((1,),()) - - @test LazyTensors.split_tuple((1,2,3,4),Val(0)) == ((),(1,2,3,4)) - @test LazyTensors.split_tuple((1,2,3,4),Val(1)) == ((1,),(2,3,4)) - @test LazyTensors.split_tuple((1,2,3,4),Val(2)) == ((1,2),(3,4)) - @test LazyTensors.split_tuple((1,2,3,4),Val(3)) == ((1,2,3),(4,)) - @test LazyTensors.split_tuple((1,2,3,4),Val(4)) == ((1,2,3,4),()) - - @test LazyTensors.split_tuple((1,2,true,4),Val(3)) == ((1,2,true),(4,)) - - @inferred LazyTensors.split_tuple((1,2,3,4),Val(3)) - @inferred LazyTensors.split_tuple((1,2,true,4),Val(3)) - end - - @testset "3 parts" begin - @test LazyTensors.split_tuple((),Val(0),Val(0)) == ((),(),()) - @test LazyTensors.split_tuple((1,2,3),Val(1), Val(1)) == ((1,),(2,),(3,)) - @test LazyTensors.split_tuple((1,true,3),Val(1), Val(1)) == ((1,),(true,),(3,)) - - @test LazyTensors.split_tuple((1,2,3,4,5,6),Val(1),Val(2)) == ((1,),(2,3),(4,5,6)) - @test LazyTensors.split_tuple((1,2,3,4,5,6),Val(3),Val(2)) == ((1,2,3),(4,5),(6,)) - - @inferred LazyTensors.split_tuple((1,2,3,4,5,6),Val(3),Val(2)) - @inferred 
LazyTensors.split_tuple((1,true,3),Val(1), Val(1)) - end -end - -@testset "flatten_tuple" begin - @test LazyTensors.flatten_tuple((1,)) == (1,) - @test LazyTensors.flatten_tuple((1,2,3,4,5,6)) == (1,2,3,4,5,6) - @test LazyTensors.flatten_tuple((1,2,(3,4),5,6)) == (1,2,3,4,5,6) - @test LazyTensors.flatten_tuple((1,2,(3,(4,5)),6)) == (1,2,3,4,5,6) - @test LazyTensors.flatten_tuple(((1,2),(3,4),(5,),6)) == (1,2,3,4,5,6) -end - - @testset "LazyOuterProduct" begin - struct ScalingOperator{T,D} <: TensorMapping{T,D,D} - λ::T - size::NTuple{D,Int} - end - - LazyTensors.apply(m::ScalingOperator{T,D}, v, I::Vararg{Any,D}) where {T,D} = m.λ*v[I...] - LazyTensors.range_size(m::ScalingOperator) = m.size - LazyTensors.domain_size(m::ScalingOperator) = m.size - - A = ScalingOperator(2.0, (5,)) - B = ScalingOperator(3.0, (3,)) - C = ScalingOperator(5.0, (3,2)) + A = ScalingTensor(2.0, (5,)) + B = ScalingTensor(3.0, (3,)) + C = ScalingTensor(5.0, (3,2)) AB = LazyOuterProduct(A,B) - @test AB isa TensorMapping{T,2,2} where T + @test AB isa LazyTensor{T,2,2} where T @test range_size(AB) == (5,3) @test domain_size(AB) == (5,3) @@ -430,7 +330,7 @@ ABC = LazyOuterProduct(A,B,C) - @test ABC isa TensorMapping{T,4,4} where T + @test ABC isa LazyTensor{T,4,4} where T @test range_size(ABC) == (5,3,3,2) @test domain_size(ABC) == (5,3,3,2) @@ -443,8 +343,8 @@ v₁ = rand(2,4,3) v₂ = rand(4,3,2) - à = LazyLinearMap(A,(1,),(2,)) - B̃ = LazyLinearMap(B,(1,),(2,3)) + à = DenseTensor(A,(1,),(2,)) + B̃ = DenseTensor(B,(1,),(2,3)) ÃB̃ = LazyOuterProduct(Ã,B̃) @tullio ABv[i,k] := A[i,j]*B[k,l,m]*v₁[j,l,m] @@ -455,15 +355,28 @@ @test B̃Ã*v₂ ≈ BAv @testset "Indentity mapping arguments" begin - @test LazyOuterProduct(IdentityMapping(3,2), IdentityMapping(1,2)) == IdentityMapping(3,2,1,2) + @test LazyOuterProduct(IdentityTensor(3,2), IdentityTensor(1,2)) == IdentityTensor(3,2,1,2) + + à = DenseTensor(A,(1,),(2,)) + @test LazyOuterProduct(IdentityTensor(3,2), Ã) == InflatedTensor(IdentityTensor(3,2),Ã) + @test LazyOuterProduct(Ã, IdentityTensor(3,2)) == InflatedTensor(Ã,IdentityTensor(3,2)) - à = LazyLinearMap(A,(1,),(2,)) - @test LazyOuterProduct(IdentityMapping(3,2), Ã) == InflatedTensorMapping(IdentityMapping(3,2),Ã) - @test LazyOuterProduct(Ã, IdentityMapping(3,2)) == InflatedTensorMapping(Ã,IdentityMapping(3,2)) + I1 = IdentityTensor(3,2) + I2 = IdentityTensor(4) + @test I1⊗Ã⊗I2 == InflatedTensor(I1, Ã, I2) + end +end - I1 = IdentityMapping(3,2) - I2 = IdentityMapping(4) - @test I1⊗Ã⊗I2 == InflatedTensorMapping(I1, Ã, I2) - end +@testset "inflate" begin + I = LazyTensors.inflate(IdentityTensor(),(3,4,5,6), 2) + @test I isa LazyTensor{Float64, 3,3} + @test range_size(I) == (3,5,6) + @test domain_size(I) == (3,5,6) + @test LazyTensors.inflate(ScalingTensor(1., (4,)),(3,4,5,6), 1) == InflatedTensor(IdentityTensor{Float64}(),ScalingTensor(1., (4,)),IdentityTensor(4,5,6)) + @test LazyTensors.inflate(ScalingTensor(2., (1,)),(3,4,5,6), 2) == InflatedTensor(IdentityTensor(3),ScalingTensor(2., (1,)),IdentityTensor(5,6)) + @test LazyTensors.inflate(ScalingTensor(3., (6,)),(3,4,5,6), 4) == InflatedTensor(IdentityTensor(3,4,5),ScalingTensor(3., (6,)),IdentityTensor{Float64}()) + + @test_throws BoundsError LazyTensors.inflate(ScalingTensor(1., (4,)),(3,4,5,6), 0) + @test_throws BoundsError LazyTensors.inflate(ScalingTensor(1., (4,)),(3,4,5,6), 5) end
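The large rewrite above mostly tracks the renames of this changeset: `TensorMapping` → `LazyTensor`, `LazyLinearMap` → `DenseTensor`, `IdentityMapping` → `IdentityTensor`, `InflatedTensorMapping` → `InflatedTensor`, `SizeMismatch` → `DomainSizeMismatch`/`RangeSizeMismatch`, and the ad-hoc `ScalingOperator`/`ScalarMapping` test types replaced by the shared `ScalingTensor`. A short sketch of the composition and inflation API as exercised by these tests:

```julia
using Sbplib.LazyTensors

A = rand(2,3)
B = rand(3,4)
Ã = DenseTensor(A, (1,), (2,))        # maps length-3 vectors to length-2 vectors
B̃ = DenseTensor(B, (1,), (2,))        # maps length-4 vectors to length-3 vectors

C = Ã∘B̃                               # TensorComposition; domain_size (4,), range_size (2,)
v = rand(4)
C*v ≈ A*(B*v)                          # lazy composition matches the dense product

2.0*Ã isa TensorComposition            # scalar*tensor is represented as a composition

I1 = IdentityTensor(3,2)
I2 = IdentityTensor(4)
I1⊗Ã⊗I2 == InflatedTensor(I1, Ã, I2)   # outer products with identities inflate the tensor
```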
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/LazyTensors/lazy_tensor_test.jl Fri Feb 03 22:14:47 2023 +0100 @@ -0,0 +1,12 @@ +using Test +using Sbplib.LazyTensors + +@testset "Generic Mapping methods" begin + struct DummyMapping{T,R,D} <: LazyTensor{T,R,D} end + LazyTensors.apply(m::DummyMapping{T,R,D}, v, I::Vararg{Any,R}) where {T,R,D} = :apply + @test range_dim(DummyMapping{Int,2,3}()) == 2 + @test domain_dim(DummyMapping{Int,2,3}()) == 3 + @test apply(DummyMapping{Int,2,3}(), zeros(Int, (0,0,0)),0,0) == :apply + @test eltype(DummyMapping{Int,2,3}()) == Int + @test eltype(DummyMapping{Float64,2,3}()) == Float64 +end
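The generic-methods test above only defines `apply`; a complete custom `LazyTensor` also provides `range_size` and `domain_size`. A minimal sketch mirroring the `ScalingTensor`-style mappings defined elsewhere in these tests (the type name here is illustrative):

```julia
using Sbplib.LazyTensors

# Minimal LazyTensor subtype: scales its input by λ.
struct MyScaling{T,D} <: LazyTensor{T,D,D}
    λ::T
    size::NTuple{D,Int}
end

LazyTensors.apply(m::MyScaling{T,D}, v, I::Vararg{Any,D}) where {T,D} = m.λ*v[I...]
LazyTensors.range_size(m::MyScaling) = m.size
LazyTensors.domain_size(m::MyScaling) = m.size

m = MyScaling(2.0, (3,))
(m*[1.0, 2.0, 3.0])[2]    # == 4.0, evaluated lazily
```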
--- a/test/LazyTensors/tensor_mapping_test.jl Mon Feb 21 10:33:58 2022 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,12 +0,0 @@ -using Test -using Sbplib.LazyTensors - -@testset "Generic Mapping methods" begin - struct DummyMapping{T,R,D} <: TensorMapping{T,R,D} end - LazyTensors.apply(m::DummyMapping{T,R,D}, v, I::Vararg{Any,R}) where {T,R,D} = :apply - @test range_dim(DummyMapping{Int,2,3}()) == 2 - @test domain_dim(DummyMapping{Int,2,3}()) == 3 - @test apply(DummyMapping{Int,2,3}(), zeros(Int, (0,0,0)),0,0) == :apply - @test eltype(DummyMapping{Int,2,3}()) == Int - @test eltype(DummyMapping{Float64,2,3}()) == Float64 -end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/LazyTensors/tensor_types_test.jl Fri Feb 03 22:14:47 2023 +0100 @@ -0,0 +1,151 @@ +using Test +using Sbplib.LazyTensors +using BenchmarkTools + +@testset "IdentityTensor" begin + @test IdentityTensor{Float64}((4,5)) isa IdentityTensor{T,2} where T + @test IdentityTensor{Float64}((4,5)) isa LazyTensor{T,2,2} where T + @test IdentityTensor{Float64}((4,5)) == IdentityTensor{Float64}(4,5) + + @test IdentityTensor(3,2) isa IdentityTensor{Float64,2} + + for sz ∈ [(4,5),(3,),(5,6,4)] + I = IdentityTensor{Float64}(sz) + v = rand(sz...) + @test I*v == v + @test I'*v == v + + v = rand(ComplexF64,sz...) + @test I*v == v + @test I'*v == v + + @test range_size(I) == sz + @test domain_size(I) == sz + end + + I = IdentityTensor{Float64}((4,5)) + v = rand(4,5) + @inferred (I*v)[3,2] + @inferred (I'*v)[3,2] + @inferred range_size(I) + + @inferred range_dim(I) + @inferred domain_dim(I) + + à = rand(4,2) + A = DenseTensor(Ã,(1,),(2,)) + I1 = IdentityTensor{Float64}(2) + I2 = IdentityTensor{Float64}(4) + @test A∘I1 == A + @test I2∘A == A + @test I1∘I1 == I1 + @test_throws DomainSizeMismatch I1∘A + @test_throws DomainSizeMismatch A∘I2 + @test_throws DomainSizeMismatch I1∘I2 +end + + +@testset "ScalingTensor" begin + st = ScalingTensor(2.,(3,4)) + @test st isa LazyTensor{Float64, 2, 2} + @test range_size(st) == (3,4) + @test domain_size(st) == (3,4) + + v = rand(3,4) + @test st*v == 2.0 .* v + @test st'*v == 2.0 .* v + + @inferred (st*v)[2,2] + @inferred (st'*v)[2,2] +end + +@testset "DiagonalTensor" begin + @test DiagonalTensor([1,2,3,4]) isa LazyTensor{Int,1,1} + @test DiagonalTensor([1 2 3; 4 5 6]) isa LazyTensor{Int,2,2} + @test DiagonalTensor([1. 2. 3.; 4. 5. 6.]) isa LazyTensor{Float64,2,2} + + @test range_size(DiagonalTensor([1,2,3,4])) == (4,) + @test domain_size(DiagonalTensor([1,2,3,4])) == (4,) + + @test range_size(DiagonalTensor([1 2 3; 4 5 6])) == (2,3) + @test domain_size(DiagonalTensor([1 2 3; 4 5 6])) == (2,3) + + @testset "apply size=$sz" for sz ∈ [(4,),(3,2),(3,4,2)] + diag = rand(sz...) + tm = DiagonalTensor(diag) + + v = rand(sz...) + + @test tm*v == diag.*v + @test tm'*v == diag.*v + end + + + @testset "allocations size=$sz" for sz ∈ [(4,),(3,2),(3,4,2)] + diag = rand(sz...) + tm = DiagonalTensor(diag) + + v = rand(sz...) + + @test tm*v == diag.*v + @test tm'*v == diag.*v + end + + sz = (3,2) + diag = rand(sz...) + tm = DiagonalTensor(diag) + + v = rand(sz...) + LazyTensors.apply(tm,v, 2,1) + @test (@ballocated LazyTensors.apply($tm,$v, 2,1)) == 0 +end + + +@testset "DenseTensor" begin + # Test a standard matrix-vector product + # mapping vectors of size 4 to vectors of size 3. 
+ A = rand(3,4) + à = DenseTensor(A, (1,), (2,)) + v = rand(4) + w = rand(3) + + @test à isa DenseTensor{T,1,1} where T + @test à isa LazyTensor{T,1,1} where T + @test range_size(Ã) == (3,) + @test domain_size(Ã) == (4,) + + @test Ã*ones(4) ≈ A*ones(4) atol=5e-13 + @test Ã*v ≈ A*v atol=5e-13 + @test Ã'*w ≈ A'*w + + A = rand(2,3,4) + @test_throws DomainError DenseTensor(A, (3,1), (2,)) + + # Test more exotic mappings + B = rand(3,4,2) + # Map vectors of size 2 to matrices of size (3,4) + B̃ = DenseTensor(B, (1,2), (3,)) + v = rand(2) + + @test range_size(B̃) == (3,4) + @test domain_size(B̃) == (2,) + @test B̃ isa LazyTensor{T,2,1} where T + @test B̃*ones(2) ≈ B[:,:,1] + B[:,:,2] atol=5e-13 + @test B̃*v ≈ B[:,:,1]*v[1] + B[:,:,2]*v[2] atol=5e-13 + + # Map matrices of size (3,2) to vectors of size 4 + B̃ = DenseTensor(B, (2,), (1,3)) + v = rand(3,2) + + @test range_size(B̃) == (4,) + @test domain_size(B̃) == (3,2) + @test B̃ isa LazyTensor{T,1,2} where T + @test B̃*ones(3,2) ≈ B[1,:,1] + B[2,:,1] + B[3,:,1] + + B[1,:,2] + B[2,:,2] + B[3,:,2] atol=5e-13 + @test B̃*v ≈ B[1,:,1]*v[1,1] + B[2,:,1]*v[2,1] + B[3,:,1]*v[3,1] + + B[1,:,2]v[1,2] + B[2,:,2]*v[2,2] + B[3,:,2]*v[3,2] atol=5e-13 + + + # TODO: + # @inferred (B̃*v)[2] +end
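The new `tensor_types_test.jl` collects the concrete tensor types under their new names (`IdentityMapping` → `IdentityTensor`, `LazyLinearMap` → `DenseTensor`) and adds `ScalingTensor` and `DiagonalTensor`. In brief, as exercised by the tests:

```julia
using Sbplib.LazyTensors

Id = IdentityTensor{Float64}(4,5)               # identity on arrays of size (4,5)
S  = ScalingTensor(2.0, (3,4))                  # multiplies every element by 2
D  = DiagonalTensor([1.0 2.0 3.0; 4.0 5.0 6.0]) # element-wise scaling by a stored array

v = rand(4,5)
Id*v == v                                 # true

w = rand(3,4)
S*w == 2.0 .* w                           # true

u = rand(2,3)
D*u == [1.0 2.0 3.0; 4.0 5.0 6.0] .* u    # true
```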
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/LazyTensors/tuple_manipulation_test.jl Fri Feb 03 22:14:47 2023 +0100 @@ -0,0 +1,62 @@ +using Test +using Sbplib.LazyTensors + +@testset "split_index" begin + @test LazyTensors.split_index(Val(2),Val(1),Val(2),Val(2),1,2,3,4,5,6) == ((1,2,:,5,6),(3,4)) + @test LazyTensors.split_index(Val(2),Val(3),Val(2),Val(2),1,2,3,4,5,6) == ((1,2,:,:,:,5,6),(3,4)) + @test LazyTensors.split_index(Val(3),Val(1),Val(1),Val(2),1,2,3,4,5,6) == ((1,2,3,:,5,6),(4,)) + @test LazyTensors.split_index(Val(3),Val(2),Val(1),Val(2),1,2,3,4,5,6) == ((1,2,3,:,:,5,6),(4,)) + @test LazyTensors.split_index(Val(1),Val(1),Val(2),Val(3),1,2,3,4,5,6) == ((1,:,4,5,6),(2,3)) + @test LazyTensors.split_index(Val(1),Val(2),Val(2),Val(3),1,2,3,4,5,6) == ((1,:,:,4,5,6),(2,3)) + + @test LazyTensors.split_index(Val(0),Val(1),Val(3),Val(3),1,2,3,4,5,6) == ((:,4,5,6),(1,2,3)) + @test LazyTensors.split_index(Val(3),Val(1),Val(3),Val(0),1,2,3,4,5,6) == ((1,2,3,:),(4,5,6)) + + @inferred LazyTensors.split_index(Val(2),Val(3),Val(2),Val(2),1,2,3,2,2,4) +end + +@testset "slice_tuple" begin + @test LazyTensors.slice_tuple((1,2,3),Val(1), Val(3)) == (1,2,3) + @test LazyTensors.slice_tuple((1,2,3,4,5,6),Val(2), Val(5)) == (2,3,4,5) + @test LazyTensors.slice_tuple((1,2,3,4,5,6),Val(1), Val(3)) == (1,2,3) + @test LazyTensors.slice_tuple((1,2,3,4,5,6),Val(4), Val(6)) == (4,5,6) +end + +@testset "split_tuple" begin + @testset "2 parts" begin + @test LazyTensors.split_tuple((),Val(0)) == ((),()) + @test LazyTensors.split_tuple((1,),Val(0)) == ((),(1,)) + @test LazyTensors.split_tuple((1,),Val(1)) == ((1,),()) + + @test LazyTensors.split_tuple((1,2,3,4),Val(0)) == ((),(1,2,3,4)) + @test LazyTensors.split_tuple((1,2,3,4),Val(1)) == ((1,),(2,3,4)) + @test LazyTensors.split_tuple((1,2,3,4),Val(2)) == ((1,2),(3,4)) + @test LazyTensors.split_tuple((1,2,3,4),Val(3)) == ((1,2,3),(4,)) + @test LazyTensors.split_tuple((1,2,3,4),Val(4)) == ((1,2,3,4),()) + + @test LazyTensors.split_tuple((1,2,true,4),Val(3)) == ((1,2,true),(4,)) + + @inferred LazyTensors.split_tuple((1,2,3,4),Val(3)) + @inferred LazyTensors.split_tuple((1,2,true,4),Val(3)) + end + + @testset "3 parts" begin + @test LazyTensors.split_tuple((),Val(0),Val(0)) == ((),(),()) + @test LazyTensors.split_tuple((1,2,3),Val(1), Val(1)) == ((1,),(2,),(3,)) + @test LazyTensors.split_tuple((1,true,3),Val(1), Val(1)) == ((1,),(true,),(3,)) + + @test LazyTensors.split_tuple((1,2,3,4,5,6),Val(1),Val(2)) == ((1,),(2,3),(4,5,6)) + @test LazyTensors.split_tuple((1,2,3,4,5,6),Val(3),Val(2)) == ((1,2,3),(4,5),(6,)) + + @inferred LazyTensors.split_tuple((1,2,3,4,5,6),Val(3),Val(2)) + @inferred LazyTensors.split_tuple((1,true,3),Val(1), Val(1)) + end +end + +@testset "flatten_tuple" begin + @test LazyTensors.flatten_tuple((1,)) == (1,) + @test LazyTensors.flatten_tuple((1,2,3,4,5,6)) == (1,2,3,4,5,6) + @test LazyTensors.flatten_tuple((1,2,(3,4),5,6)) == (1,2,3,4,5,6) + @test LazyTensors.flatten_tuple((1,2,(3,(4,5)),6)) == (1,2,3,4,5,6) + @test LazyTensors.flatten_tuple(((1,2),(3,4),(5,),6)) == (1,2,3,4,5,6) +end
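These tuple helpers are internal to `LazyTensors`, but their behaviour is fixed by the tests above; for quick reference, with the expected results copied from the assertions:

```julia
using Sbplib.LazyTensors

LazyTensors.split_tuple((1,2,3,4), Val(2))             # ((1,2), (3,4))
LazyTensors.split_tuple((1,2,3,4,5,6), Val(1), Val(2)) # ((1,), (2,3), (4,5,6))
LazyTensors.slice_tuple((1,2,3,4,5,6), Val(2), Val(5)) # (2,3,4,5)
LazyTensors.flatten_tuple((1,2,(3,(4,5)),6))           # (1,2,3,4,5,6)
```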
--- a/test/Manifest.toml Mon Feb 21 10:33:58 2022 +0100 +++ b/test/Manifest.toml Fri Feb 03 22:14:47 2023 +0100 @@ -1,10 +1,12 @@ # This file is machine-generated - editing it directly is not advised -julia_version = "1.7.0" +julia_version = "1.8.5" manifest_format = "2.0" +project_hash = "23260eda65ade7d11fffed313a68520d0bc053fc" [[deps.ArgTools]] uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f" +version = "1.1.1" [[deps.Artifacts]] uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" @@ -12,27 +14,34 @@ [[deps.Base64]] uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" +[[deps.BenchmarkTools]] +deps = ["JSON", "Logging", "Printf", "Profile", "Statistics", "UUIDs"] +git-tree-sha1 = "d9a9701b899b30332bbcb3e1679c41cce81fb0e8" +uuid = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" +version = "1.3.2" + [[deps.ChainRulesCore]] deps = ["Compat", "LinearAlgebra", "SparseArrays"] -git-tree-sha1 = "4c26b4e9e91ca528ea212927326ece5918a04b47" +git-tree-sha1 = "c6d890a52d2c4d55d326439580c3b8d0875a77d9" uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" -version = "1.11.2" +version = "1.15.7" [[deps.ChangesOfVariables]] deps = ["ChainRulesCore", "LinearAlgebra", "Test"] -git-tree-sha1 = "bf98fa45a0a4cee295de98d4c1462be26345b9a1" +git-tree-sha1 = "844b061c104c408b24537482469400af6075aae4" uuid = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0" -version = "0.1.2" +version = "0.1.5" [[deps.Compat]] -deps = ["Base64", "Dates", "DelimitedFiles", "Distributed", "InteractiveUtils", "LibGit2", "Libdl", "LinearAlgebra", "Markdown", "Mmap", "Pkg", "Printf", "REPL", "Random", "SHA", "Serialization", "SharedArrays", "Sockets", "SparseArrays", "Statistics", "Test", "UUIDs", "Unicode"] -git-tree-sha1 = "44c37b4636bc54afac5c574d2d02b625349d6582" +deps = ["Dates", "LinearAlgebra", "UUIDs"] +git-tree-sha1 = "61fdd77467a5c3ad071ef8277ac6bd6af7dd4c04" uuid = "34da2185-b29b-5c13-b0c7-acf172513d20" -version = "3.41.0" +version = "4.6.0" [[deps.CompilerSupportLibraries_jll]] deps = ["Artifacts", "Libdl"] uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae" +version = "1.0.1+0" [[deps.Dates]] deps = ["Printf"] @@ -43,15 +52,11 @@ uuid = "ab62b9b5-e342-54a8-a765-a90f495de1a6" version = "1.2.0" -[[deps.DelimitedFiles]] -deps = ["Mmap"] -uuid = "8bb1440f-4735-579b-a4ab-409b98df4dab" - [[deps.DiffRules]] -deps = ["LogExpFunctions", "NaNMath", "Random", "SpecialFunctions"] -git-tree-sha1 = "9bc5dac3c8b6706b58ad5ce24cffd9861f07c94f" +deps = ["IrrationalConstants", "LogExpFunctions", "NaNMath", "Random", "SpecialFunctions"] +git-tree-sha1 = "c5b6685d53f933c11404a3ae9822afe30d522494" uuid = "b552c78f-8df3-52c6-915a-8e097449b14b" -version = "1.9.0" +version = "1.12.2" [[deps.Distributed]] deps = ["Random", "Serialization", "Sockets"] @@ -59,13 +64,17 @@ [[deps.DocStringExtensions]] deps = ["LibGit2"] -git-tree-sha1 = "b19534d1895d702889b219c382a6e18010797f0b" +git-tree-sha1 = "2fb1e02f2b635d0845df5d7c167fec4dd739b00d" uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" -version = "0.8.6" +version = "0.9.3" [[deps.Downloads]] -deps = ["ArgTools", "LibCURL", "NetworkOptions"] +deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"] uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6" +version = "1.6.0" + +[[deps.FileWatching]] +uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee" [[deps.Glob]] git-tree-sha1 = "4df9f7e06108728ebf00a0a11edee4b29a482bb2" @@ -78,9 +87,9 @@ [[deps.InverseFunctions]] deps = ["Test"] -git-tree-sha1 = "a7254c0acd8e62f1ac75ad24d5db43f5f19f3c65" +git-tree-sha1 = "49510dfcb407e572524ba94aeae2fced1f3feb0f" uuid = "3587e190-3f89-42d0-90ee-14403ec27112" -version = 
"0.1.2" +version = "0.1.8" [[deps.IrrationalConstants]] git-tree-sha1 = "7fd44fd4ff43fc60815f8e764c0f352b83c49151" @@ -89,17 +98,25 @@ [[deps.JLLWrappers]] deps = ["Preferences"] -git-tree-sha1 = "642a199af8b68253517b80bd3bfd17eb4e84df6e" +git-tree-sha1 = "abc9885a7ca2052a736a600f7fa66209f96506e1" uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210" -version = "1.3.0" +version = "1.4.1" + +[[deps.JSON]] +deps = ["Dates", "Mmap", "Parsers", "Unicode"] +git-tree-sha1 = "3c837543ddb02250ef42f4738347454f95079d4e" +uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" +version = "0.21.3" [[deps.LibCURL]] deps = ["LibCURL_jll", "MozillaCACerts_jll"] uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21" +version = "0.6.3" [[deps.LibCURL_jll]] deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"] uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0" +version = "7.84.0+0" [[deps.LibGit2]] deps = ["Base64", "NetworkOptions", "Printf", "SHA"] @@ -108,6 +125,7 @@ [[deps.LibSSH2_jll]] deps = ["Artifacts", "Libdl", "MbedTLS_jll"] uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8" +version = "1.10.2+0" [[deps.Libdl]] uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb" @@ -118,9 +136,9 @@ [[deps.LogExpFunctions]] deps = ["ChainRulesCore", "ChangesOfVariables", "DocStringExtensions", "InverseFunctions", "IrrationalConstants", "LinearAlgebra"] -git-tree-sha1 = "e5718a00af0ab9756305a0392832c8952c7426c1" +git-tree-sha1 = "45b288af6956e67e621c5cbb2d75a261ab58300b" uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688" -version = "0.3.6" +version = "0.3.20" [[deps.Logging]] uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" @@ -132,28 +150,34 @@ [[deps.MbedTLS_jll]] deps = ["Artifacts", "Libdl"] uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1" +version = "2.28.0+0" [[deps.Mmap]] uuid = "a63ad114-7e13-5084-954f-fe012c677804" [[deps.MozillaCACerts_jll]] uuid = "14a3606d-f60d-562e-9121-12d972cd8159" +version = "2022.2.1" [[deps.NaNMath]] -git-tree-sha1 = "f755f36b19a5116bb580de457cda0c140153f283" +deps = ["OpenLibm_jll"] +git-tree-sha1 = "a7c3d1da1189a1c2fe843a3bfa04d18d20eb3211" uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3" -version = "0.3.6" +version = "1.0.1" [[deps.NetworkOptions]] uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908" +version = "1.2.0" [[deps.OpenBLAS_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"] uuid = "4536629a-c528-5b80-bd46-f80d51c5b363" +version = "0.3.20+0" [[deps.OpenLibm_jll]] deps = ["Artifacts", "Libdl"] uuid = "05823500-19ac-5b8b-9628-191a04bc5112" +version = "0.8.1+0" [[deps.OpenSpecFun_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"] @@ -161,20 +185,31 @@ uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e" version = "0.5.5+0" +[[deps.Parsers]] +deps = ["Dates", "SnoopPrecompile"] +git-tree-sha1 = "151d91d63d8d6c1a5789ecb7de51547e00480f1b" +uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" +version = "2.5.4" + [[deps.Pkg]] deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"] uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" +version = "1.8.0" [[deps.Preferences]] deps = ["TOML"] -git-tree-sha1 = "00cfd92944ca9c760982747e9a1d0d5d86ab1e5a" +git-tree-sha1 = "47e5f437cc0e7ef2ce8406ce1e7e24d44915f88d" uuid = "21216c6a-2e73-6563-6e65-726566657250" -version = "1.2.2" +version = "1.3.0" [[deps.Printf]] deps = ["Unicode"] uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" +[[deps.Profile]] +deps = ["Printf"] +uuid = "9abbd945-dff8-562f-b5e8-e1ebf5ef1b79" + 
[[deps.REPL]] deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"] uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb" @@ -185,19 +220,22 @@ [[deps.Requires]] deps = ["UUIDs"] -git-tree-sha1 = "8f82019e525f4d5c669692772a6f4b0a58b06a6a" +git-tree-sha1 = "838a3a4188e2ded87a4f9f184b4b0d78a1e91cb7" uuid = "ae029012-a4dd-5104-9daa-d747884805df" -version = "1.2.0" +version = "1.3.0" [[deps.SHA]] uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce" +version = "0.7.0" [[deps.Serialization]] uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b" -[[deps.SharedArrays]] -deps = ["Distributed", "Mmap", "Random", "Serialization"] -uuid = "1a1011a3-84de-559e-8e89-a11a2f7dc383" +[[deps.SnoopPrecompile]] +deps = ["Preferences"] +git-tree-sha1 = "e760a70afdcd461cf01a575947738d359234665c" +uuid = "66db9d55-30c0-4569-8b51-7e840670fc0c" +version = "1.0.3" [[deps.Sockets]] uuid = "6462fe0b-24de-5631-8697-dd941f90decc" @@ -208,9 +246,9 @@ [[deps.SpecialFunctions]] deps = ["ChainRulesCore", "IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"] -git-tree-sha1 = "e08890d19787ec25029113e88c34ec20cac1c91e" +git-tree-sha1 = "d75bda01f8c31ebb72df80a46c88b25d1c79c56d" uuid = "276daf66-3868-5448-9aa4-cd146d93841b" -version = "2.0.0" +version = "2.1.7" [[deps.Statistics]] deps = ["LinearAlgebra", "SparseArrays"] @@ -219,10 +257,12 @@ [[deps.TOML]] deps = ["Dates"] uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76" +version = "1.0.0" [[deps.Tar]] deps = ["ArgTools", "SHA"] uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e" +version = "1.10.1" [[deps.Test]] deps = ["InteractiveUtils", "Logging", "Random", "Serialization"] @@ -236,9 +276,9 @@ [[deps.Tullio]] deps = ["ChainRulesCore", "DiffRules", "LinearAlgebra", "Requires"] -git-tree-sha1 = "0288b7a395fc412952baf756fac94e4f28bfec65" +git-tree-sha1 = "7871a39eac745697ee512a87eeff06a048a7905b" uuid = "bc48ee85-29a4-5162-ae0b-a64e1601d4bc" -version = "0.3.2" +version = "0.3.5" [[deps.UUIDs]] deps = ["Random", "SHA"] @@ -250,15 +290,19 @@ [[deps.Zlib_jll]] deps = ["Libdl"] uuid = "83775a58-1f1d-513f-b197-d71354ab007a" +version = "1.2.12+3" [[deps.libblastrampoline_jll]] deps = ["Artifacts", "Libdl", "OpenBLAS_jll"] uuid = "8e850b90-86db-534c-a0d3-1478176c7d93" +version = "5.1.1+0" [[deps.nghttp2_jll]] deps = ["Artifacts", "Libdl"] uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d" +version = "1.48.0+0" [[deps.p7zip_jll]] deps = ["Artifacts", "Libdl"] uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0" +version = "17.4.0+0"
--- a/test/Project.toml Mon Feb 21 10:33:58 2022 +0100 +++ b/test/Project.toml Fri Feb 03 22:14:47 2023 +0100 @@ -1,4 +1,5 @@ [deps] +BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf" Glob = "c27321d9-0574-5035-807b-f59d2c89b15c" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" TOML = "fa267f1f-6049-4f14-aa54-33bafae1ed76"
--- a/test/SbpOperators/boundaryops/boundary_operator_test.jl Mon Feb 21 10:33:58 2022 +0100 +++ b/test/SbpOperators/boundaryops/boundary_operator_test.jl Fri Feb 03 22:14:47 2023 +0100 @@ -6,126 +6,58 @@ using Sbplib.RegionIndices import Sbplib.SbpOperators.Stencil import Sbplib.SbpOperators.BoundaryOperator -import Sbplib.SbpOperators.boundary_operator + @testset "BoundaryOperator" begin - closure_stencil = Stencil((0,2), (2.,1.,3.)) + closure_stencil = Stencil(2.,1.,3.; center = 1) g_1D = EquidistantGrid(11, 0.0, 1.0) g_2D = EquidistantGrid((11,15), (0.0, 0.0), (1.0,1.0)) @testset "Constructors" begin - @testset "1D" begin - op_l = BoundaryOperator{Lower}(closure_stencil,size(g_1D)[1]) - @test op_l == BoundaryOperator(g_1D,closure_stencil,Lower()) - @test op_l == boundary_operator(g_1D,closure_stencil,CartesianBoundary{1,Lower}()) - @test op_l isa TensorMapping{T,0,1} where T - - op_r = BoundaryOperator{Upper}(closure_stencil,size(g_1D)[1]) - @test op_r == BoundaryOperator(g_1D,closure_stencil,Upper()) - @test op_r == boundary_operator(g_1D,closure_stencil,CartesianBoundary{1,Upper}()) - @test op_r isa TensorMapping{T,0,1} where T - end - - @testset "2D" begin - e_w = boundary_operator(g_2D,closure_stencil,CartesianBoundary{1,Upper}()) - @test e_w isa InflatedTensorMapping - @test e_w isa TensorMapping{T,1,2} where T - end + @test BoundaryOperator(g_1D, closure_stencil, Lower()) isa LazyTensor{T,0,1} where T + @test BoundaryOperator(g_1D, closure_stencil, Upper()) isa LazyTensor{T,0,1} where T end - op_l = boundary_operator(g_1D, closure_stencil, CartesianBoundary{1,Lower}()) - op_r = boundary_operator(g_1D, closure_stencil, CartesianBoundary{1,Upper}()) - - op_w = boundary_operator(g_2D, closure_stencil, CartesianBoundary{1,Lower}()) - op_e = boundary_operator(g_2D, closure_stencil, CartesianBoundary{1,Upper}()) - op_s = boundary_operator(g_2D, closure_stencil, CartesianBoundary{2,Lower}()) - op_n = boundary_operator(g_2D, closure_stencil, CartesianBoundary{2,Upper}()) + op_l = BoundaryOperator(g_1D, closure_stencil, Lower()) + op_r = BoundaryOperator(g_1D, closure_stencil, Upper()) @testset "Sizes" begin - @testset "1D" begin - @test domain_size(op_l) == (11,) - @test domain_size(op_r) == (11,) - - @test range_size(op_l) == () - @test range_size(op_r) == () - end + @test domain_size(op_l) == (11,) + @test domain_size(op_r) == (11,) - @testset "2D" begin - @test domain_size(op_w) == (11,15) - @test domain_size(op_e) == (11,15) - @test domain_size(op_s) == (11,15) - @test domain_size(op_n) == (11,15) - - @test range_size(op_w) == (15,) - @test range_size(op_e) == (15,) - @test range_size(op_s) == (11,) - @test range_size(op_n) == (11,) - end + @test range_size(op_l) == () + @test range_size(op_r) == () end @testset "Application" begin - @testset "1D" begin - v = evalOn(g_1D,x->1+x^2) - u = fill(3.124) - @test (op_l*v)[] == 2*v[1] + v[2] + 3*v[3] - @test (op_r*v)[] == 2*v[end] + v[end-1] + 3*v[end-2] - @test (op_r*v)[1] == 2*v[end] + v[end-1] + 3*v[end-2] - @test op_l'*u == [2*u[]; u[]; 3*u[]; zeros(8)] - @test op_r'*u == [zeros(8); 3*u[]; u[]; 2*u[]] - end + v = evalOn(g_1D,x->1+x^2) + u = fill(3.124) + @test (op_l*v)[] == 2*v[1] + v[2] + 3*v[3] + @test (op_r*v)[] == 2*v[end] + v[end-1] + 3*v[end-2] + @test (op_r*v)[1] == 2*v[end] + v[end-1] + 3*v[end-2] + @test op_l'*u == [2*u[]; u[]; 3*u[]; zeros(8)] + @test op_r'*u == [zeros(8); 3*u[]; u[]; 2*u[]] - @testset "2D" begin - v = rand(size(g_2D)...) 
- u = fill(3.124) - @test op_w*v ≈ 2*v[1,:] + v[2,:] + 3*v[3,:] rtol = 1e-14 - @test op_e*v ≈ 2*v[end,:] + v[end-1,:] + 3*v[end-2,:] rtol = 1e-14 - @test op_s*v ≈ 2*v[:,1] + v[:,2] + 3*v[:,3] rtol = 1e-14 - @test op_n*v ≈ 2*v[:,end] + v[:,end-1] + 3*v[:,end-2] rtol = 1e-14 - - - g_x = rand(size(g_2D)[1]) - g_y = rand(size(g_2D)[2]) - - G_w = zeros(Float64, size(g_2D)...) - G_w[1,:] = 2*g_y - G_w[2,:] = g_y - G_w[3,:] = 3*g_y + v = evalOn(g_1D, x->1. +x*im) + @test (op_l*v)[] isa ComplexF64 - G_e = zeros(Float64, size(g_2D)...) - G_e[end,:] = 2*g_y - G_e[end-1,:] = g_y - G_e[end-2,:] = 3*g_y - - G_s = zeros(Float64, size(g_2D)...) - G_s[:,1] = 2*g_x - G_s[:,2] = g_x - G_s[:,3] = 3*g_x - - G_n = zeros(Float64, size(g_2D)...) - G_n[:,end] = 2*g_x - G_n[:,end-1] = g_x - G_n[:,end-2] = 3*g_x + u = fill(1. +im) + @test (op_l'*u)[1] isa ComplexF64 + @test (op_l'*u)[5] isa ComplexF64 + @test (op_l'*u)[11] isa ComplexF64 - @test op_w'*g_y == G_w - @test op_e'*g_y == G_e - @test op_s'*g_x == G_s - @test op_n'*g_x == G_n - end + u = fill(3.124) + @test (op_l'*u)[Index(1,Lower)] == 2*u[] + @test (op_l'*u)[Index(2,Lower)] == u[] + @test (op_l'*u)[Index(6,Interior)] == 0 + @test (op_l'*u)[Index(10,Upper)] == 0 + @test (op_l'*u)[Index(11,Upper)] == 0 - @testset "Regions" begin - u = fill(3.124) - @test (op_l'*u)[Index(1,Lower)] == 2*u[] - @test (op_l'*u)[Index(2,Lower)] == u[] - @test (op_l'*u)[Index(6,Interior)] == 0 - @test (op_l'*u)[Index(10,Upper)] == 0 - @test (op_l'*u)[Index(11,Upper)] == 0 - - @test (op_r'*u)[Index(1,Lower)] == 0 - @test (op_r'*u)[Index(2,Lower)] == 0 - @test (op_r'*u)[Index(6,Interior)] == 0 - @test (op_r'*u)[Index(10,Upper)] == u[] - @test (op_r'*u)[Index(11,Upper)] == 2*u[] - end + @test (op_r'*u)[Index(1,Lower)] == 0 + @test (op_r'*u)[Index(2,Lower)] == 0 + @test (op_r'*u)[Index(6,Interior)] == 0 + @test (op_r'*u)[Index(10,Upper)] == u[] + @test (op_r'*u)[Index(11,Upper)] == 2*u[] end @testset "Inferred" begin
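The rewritten `BoundaryOperator` tests drop the removed `boundary_operator` factory and keep only the one-dimensional operator; the multi-dimensional cases are exercised elsewhere via inflation. A usage sketch taken from the assertions above:

```julia
using Sbplib.Grids
using Sbplib.LazyTensors
using Sbplib.RegionIndices
using Sbplib.SbpOperators: BoundaryOperator, Stencil

g = EquidistantGrid(11, 0.0, 1.0)
closure = Stencil(2., 1., 3.; center = 1)

e_l = BoundaryOperator(g, closure, Lower())   # LazyTensor{T,0,1}: grid function -> boundary value
v = evalOn(g, x -> 1 + x^2)
(e_l*v)[]          # == 2*v[1] + v[2] + 3*v[3], a zero-dimensional result

u = fill(3.124)
e_l'*u             # == [2*u[]; u[]; 3*u[]; zeros(8)], spreading the value back onto the grid
```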
--- a/test/SbpOperators/boundaryops/boundary_restriction_test.jl Mon Feb 21 10:33:58 2022 +0100 +++ b/test/SbpOperators/boundaryops/boundary_restriction_test.jl Fri Feb 03 22:14:47 2023 +0100 @@ -2,10 +2,9 @@ using Sbplib.SbpOperators using Sbplib.Grids +using Sbplib.LazyTensors using Sbplib.RegionIndices -using Sbplib.LazyTensors - -import Sbplib.SbpOperators.BoundaryOperator +using Sbplib.SbpOperators: BoundaryOperator, Stencil @testset "boundary_restriction" begin stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order = 4) @@ -15,31 +14,30 @@ @testset "boundary_restriction" begin @testset "1D" begin - e_l = boundary_restriction(g_1D,e_closure,Lower()) - @test e_l == boundary_restriction(g_1D,e_closure,CartesianBoundary{1,Lower}()) + e_l = boundary_restriction(g_1D,e_closure,CartesianBoundary{1,Lower}()) + @test e_l == boundary_restriction(g_1D,stencil_set,CartesianBoundary{1,Lower}()) @test e_l == BoundaryOperator(g_1D,Stencil{Float64}(e_closure),Lower()) @test e_l isa BoundaryOperator{T,Lower} where T - @test e_l isa TensorMapping{T,0,1} where T + @test e_l isa LazyTensor{T,0,1} where T - e_r = boundary_restriction(g_1D,e_closure,Upper()) - @test e_r == boundary_restriction(g_1D,e_closure,CartesianBoundary{1,Upper}()) + e_r = boundary_restriction(g_1D,e_closure,CartesianBoundary{1,Upper}()) + @test e_r == boundary_restriction(g_1D,stencil_set,CartesianBoundary{1,Upper}()) @test e_r == BoundaryOperator(g_1D,Stencil{Float64}(e_closure),Upper()) @test e_r isa BoundaryOperator{T,Upper} where T - @test e_r isa TensorMapping{T,0,1} where T + @test e_r isa LazyTensor{T,0,1} where T end @testset "2D" begin e_w = boundary_restriction(g_2D,e_closure,CartesianBoundary{1,Upper}()) - @test e_w isa InflatedTensorMapping - @test e_w isa TensorMapping{T,1,2} where T + @test e_w == boundary_restriction(g_2D,stencil_set,CartesianBoundary{1,Upper}()) + @test e_w isa InflatedTensor + @test e_w isa LazyTensor{T,1,2} where T end end @testset "Application" begin @testset "1D" begin - e_l = boundary_restriction(g_1D, e_closure, CartesianBoundary{1,Lower}()) - e_r = boundary_restriction(g_1D, e_closure, CartesianBoundary{1,Upper}()) - + e_l, e_r = boundary_restriction.(Ref(g_1D), Ref(e_closure), boundary_identifiers(g_1D)) v = evalOn(g_1D,x->1+x^2) u = fill(3.124) @@ -49,11 +47,7 @@ end @testset "2D" begin - e_w = boundary_restriction(g_2D, e_closure, CartesianBoundary{1,Lower}()) - e_e = boundary_restriction(g_2D, e_closure, CartesianBoundary{1,Upper}()) - e_s = boundary_restriction(g_2D, e_closure, CartesianBoundary{2,Lower}()) - e_n = boundary_restriction(g_2D, e_closure, CartesianBoundary{2,Upper}()) - + e_w, e_e, e_s, e_n = boundary_restriction.(Ref(g_2D), Ref(e_closure), boundary_identifiers(g_2D)) v = rand(11, 15) u = fill(3.124)
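The broadcasting idiom introduced above builds one restriction operator per boundary in a single call. A minimal sketch using the same functions and the same grid size and order as the tests:

using Sbplib.SbpOperators
using Sbplib.Grids

stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order = 4)
e_closure   = parse_stencil(stencil_set["e"]["closure"])
g_2D        = EquidistantGrid((11,15), (0.0, 0.0), (1.0, 1.0))

# boundary_identifiers(g_2D) yields the four boundaries of the 2D grid;
# broadcasting over them gives the west, east, south and north restrictions.
e_w, e_e, e_s, e_n = boundary_restriction.(Ref(g_2D), Ref(e_closure), boundary_identifiers(g_2D))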
--- a/test/SbpOperators/boundaryops/normal_derivative_test.jl Mon Feb 21 10:33:58 2022 +0100 +++ b/test/SbpOperators/boundaryops/normal_derivative_test.jl Fri Feb 03 22:14:47 2023 +0100 @@ -2,9 +2,8 @@ using Sbplib.SbpOperators using Sbplib.Grids +using Sbplib.LazyTensors using Sbplib.RegionIndices -using Sbplib.LazyTensors - import Sbplib.SbpOperators.BoundaryOperator @testset "normal_derivative" begin @@ -14,22 +13,23 @@ stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order=4) d_closure = parse_stencil(stencil_set["d1"]["closure"]) @testset "1D" begin - d_l = normal_derivative(g_1D, d_closure, Lower()) - @test d_l == normal_derivative(g_1D, d_closure, CartesianBoundary{1,Lower}()) + d_l = normal_derivative(g_1D, d_closure, CartesianBoundary{1,Lower}()) + @test d_l == normal_derivative(g_1D, stencil_set, CartesianBoundary{1,Lower}()) @test d_l isa BoundaryOperator{T,Lower} where T - @test d_l isa TensorMapping{T,0,1} where T + @test d_l isa LazyTensor{T,0,1} where T end @testset "2D" begin d_w = normal_derivative(g_2D, d_closure, CartesianBoundary{1,Lower}()) d_n = normal_derivative(g_2D, d_closure, CartesianBoundary{2,Upper}()) - Ix = IdentityMapping{Float64}((size(g_2D)[1],)) - Iy = IdentityMapping{Float64}((size(g_2D)[2],)) - d_l = normal_derivative(restrict(g_2D,1),d_closure,Lower()) - d_r = normal_derivative(restrict(g_2D,2),d_closure,Upper()) + Ix = IdentityTensor{Float64}((size(g_2D)[1],)) + Iy = IdentityTensor{Float64}((size(g_2D)[2],)) + d_l = normal_derivative(restrict(g_2D,1),d_closure,CartesianBoundary{1,Lower}()) + d_r = normal_derivative(restrict(g_2D,2),d_closure,CartesianBoundary{1,Upper}()) + @test d_w == normal_derivative(g_2D, stencil_set, CartesianBoundary{1,Lower}()) @test d_w == d_l⊗Iy @test d_n == Ix⊗d_r - @test d_w isa TensorMapping{T,1,2} where T - @test d_n isa TensorMapping{T,1,2} where T + @test d_w isa LazyTensor{T,1,2} where T + @test d_n isa LazyTensor{T,1,2} where T end end @testset "Accuracy" begin @@ -40,29 +40,23 @@ @testset "2nd order" begin stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order=2) d_closure = parse_stencil(stencil_set["d1"]["closure"]) - d_w = normal_derivative(g_2D, d_closure, CartesianBoundary{1,Lower}()) - d_e = normal_derivative(g_2D, d_closure, CartesianBoundary{1,Upper}()) - d_s = normal_derivative(g_2D, d_closure, CartesianBoundary{2,Lower}()) - d_n = normal_derivative(g_2D, d_closure, CartesianBoundary{2,Upper}()) + d_w, d_e, d_s, d_n = normal_derivative.(Ref(g_2D), Ref(d_closure), boundary_identifiers(g_2D)) - @test d_w*v ≈ v∂x[1,:] atol = 1e-13 - @test d_e*v ≈ -v∂x[end,:] atol = 1e-13 - @test d_s*v ≈ v∂y[:,1] atol = 1e-13 - @test d_n*v ≈ -v∂y[:,end] atol = 1e-13 + @test d_w*v ≈ -v∂x[1,:] atol = 1e-13 + @test d_e*v ≈ v∂x[end,:] atol = 1e-13 + @test d_s*v ≈ -v∂y[:,1] atol = 1e-13 + @test d_n*v ≈ v∂y[:,end] atol = 1e-13 end @testset "4th order" begin - stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order=2) + stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order=4) d_closure = parse_stencil(stencil_set["d1"]["closure"]) - d_w = normal_derivative(g_2D, d_closure, CartesianBoundary{1,Lower}()) - d_e = normal_derivative(g_2D, d_closure, CartesianBoundary{1,Upper}()) - d_s = normal_derivative(g_2D, d_closure, CartesianBoundary{2,Lower}()) - d_n = normal_derivative(g_2D, d_closure, CartesianBoundary{2,Upper}()) - - @test d_w*v ≈ v∂x[1,:] atol = 1e-13 - @test d_e*v ≈ -v∂x[end,:] atol = 1e-13 - @test d_s*v ≈ v∂y[:,1] 
atol = 1e-13 - @test d_n*v ≈ -v∂y[:,end] atol = 1e-13 + d_w, d_e, d_s, d_n = normal_derivative.(Ref(g_2D), Ref(d_closure), boundary_identifiers(g_2D)) + + @test d_w*v ≈ -v∂x[1,:] atol = 1e-13 + @test d_e*v ≈ v∂x[end,:] atol = 1e-13 + @test d_s*v ≈ -v∂y[:,1] atol = 1e-13 + @test d_n*v ≈ v∂y[:,end] atol = 1e-13 end end end
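Note the sign change in the accuracy tests above: the operators now approximate the derivative in the outward normal direction, so at a lower (west/south) boundary the result is minus the coordinate derivative. A minimal sketch of the construction, assuming the shipped standard_diagonal.toml stencil set; the grid and test function are illustrative:

using Sbplib.SbpOperators
using Sbplib.Grids

stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order = 4)
d_closure   = parse_stencil(stencil_set["d1"]["closure"])
g_2D        = EquidistantGrid((20,20), (0.0, 0.0), (1.0, 1.0))

d_w, d_e, d_s, d_n = normal_derivative.(Ref(g_2D), Ref(d_closure), boundary_identifiers(g_2D))

v = evalOn(g_2D, (x,y) -> sin(x) + cos(y))
# d_w*v approximates -∂v/∂x on the west boundary (outward normal along -x),
# while d_e*v approximates +∂v/∂x on the east boundary, to the operator's order.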
--- a/test/SbpOperators/readoperator_test.jl Mon Feb 21 10:33:58 2022 +0100 +++ /dev/null Thu Jan 01 00:00:00 1970 +0000 @@ -1,172 +0,0 @@ -using Test - -using TOML -using Sbplib.SbpOperators - -import Sbplib.SbpOperators.Stencil - -@testset "readoperator" begin - toml_str = """ - [meta] - authors = "Ken Mattson" - description = "Standard operators for equidistant grids" - type = "equidistant" - cite = "A paper a long time ago in a galaxy far far away." - - [[stencil_set]] - - order = 2 - test = 2 - - H.inner = ["1"] - H.closure = ["1/2"] - - D1.inner_stencil = ["-1/2", "0", "1/2"] - D1.closure_stencils = [ - {s = ["-1", "1"], c = 1}, - ] - - D2.inner_stencil = ["1", "-2", "1"] - D2.closure_stencils = [ - {s = ["1", "-2", "1"], c = 1}, - ] - - e.closure = ["1"] - d1.closure = {s = ["-3/2", "2", "-1/2"], c = 1} - - [[stencil_set]] - - order = 4 - test = 1 - H.inner = ["1"] - H.closure = ["17/48", "59/48", "43/48", "49/48"] - - D2.inner_stencil = ["-1/12","4/3","-5/2","4/3","-1/12"] - D2.closure_stencils = [ - {s = [ "2", "-5", "4", "-1", "0", "0"], c = 1}, - {s = [ "1", "-2", "1", "0", "0", "0"], c = 2}, - {s = [ "-4/43", "59/43", "-110/43", "59/43", "-4/43", "0"], c = 3}, - {s = [ "-1/49", "0", "59/49", "-118/49", "64/49", "-4/49"], c = 4}, - ] - - e.closure = ["1"] - d1.closure = {s = ["-11/6", "3", "-3/2", "1/3"], c = 1} - - [[stencil_set]] - order = 4 - test = 2 - - H.closure = ["-1/49", "0", "59/49", "-118/49", "64/49", "-4/49"] - """ - - parsed_toml = TOML.parse(toml_str) - - @testset "get_stencil_set" begin - @test get_stencil_set(parsed_toml; order = 2) isa Dict - @test get_stencil_set(parsed_toml; order = 2) == parsed_toml["stencil_set"][1] - @test get_stencil_set(parsed_toml; test = 1) == parsed_toml["stencil_set"][2] - @test get_stencil_set(parsed_toml; order = 4, test = 2) == parsed_toml["stencil_set"][3] - - @test_throws ArgumentError get_stencil_set(parsed_toml; test = 2) - @test_throws ArgumentError get_stencil_set(parsed_toml; order = 4) - end - - @testset "parse_stencil" begin - toml = """ - s1 = ["-1/12","4/3","-5/2","4/3","-1/12"] - s2 = {s = ["2", "-5", "4", "-1", "0", "0"], c = 1} - s3 = {s = ["1", "-2", "1", "0", "0", "0"], c = 2} - s4 = "not a stencil" - s5 = [-1, 4, 3] - s6 = {k = ["1", "-2", "1", "0", "0", "0"], c = 2} - s7 = {s = [-1, 4, 3], c = 2} - s8 = {s = ["1", "-2", "1", "0", "0", "0"], c = [2,2]} - """ - - @test parse_stencil(TOML.parse(toml)["s1"]) == CenteredStencil(-1//12, 4//3, -5//2, 4//3, -1//12) - @test parse_stencil(TOML.parse(toml)["s2"]) == Stencil(2//1, -5//1, 4//1, -1//1, 0//1, 0//1; center=1) - @test parse_stencil(TOML.parse(toml)["s3"]) == Stencil(1//1, -2//1, 1//1, 0//1, 0//1, 0//1; center=2) - - @test_throws ArgumentError parse_stencil(TOML.parse(toml)["s4"]) - @test_throws ArgumentError parse_stencil(TOML.parse(toml)["s5"]) - @test_throws ArgumentError parse_stencil(TOML.parse(toml)["s6"]) - @test_throws ArgumentError parse_stencil(TOML.parse(toml)["s7"]) - @test_throws ArgumentError parse_stencil(TOML.parse(toml)["s8"]) - - stencil_set = get_stencil_set(parsed_toml; order = 4, test = 1) - - @test parse_stencil.(stencil_set["D2"]["closure_stencils"]) == [ - Stencil( 2//1, -5//1, 4//1, -1//1, 0//1, 0//1; center=1), - Stencil( 1//1, -2//1, 1//1, 0//1, 0//1, 0//1; center=2), - Stencil(-4//43, 59//43, -110//43, 59//43, -4//43, 0//1; center=3), - Stencil(-1//49, 0//1, 59//49, -118//49, 64//49, -4//49; center=4), - ] - - - @test parse_stencil(Float64, TOML.parse(toml)["s1"]) == CenteredStencil(-1/12, 4/3, -5/2, 4/3, -1/12) - @test 
parse_stencil(Float64, TOML.parse(toml)["s2"]) == Stencil(2/1, -5/1, 4/1, -1/1, 0/1, 0/1; center=1) - @test parse_stencil(Float64, TOML.parse(toml)["s3"]) == Stencil(1/1, -2/1, 1/1, 0/1, 0/1, 0/1; center=2) - end - - @testset "parse_scalar" begin - toml = TOML.parse(""" - a1 = 1 - a2 = 1.5 - a3 = 1.0 - a4 = 10 - a5 = "1/2" - a6 = "1.5" - - e1 = [1,2,3] - e2 = "a string value" - """) - - @test parse_scalar(toml["a1"]) == 1//1 - @test parse_scalar(toml["a2"]) == 3//2 - @test parse_scalar(toml["a3"]) == 1//1 - @test parse_scalar(toml["a4"]) == 10//1 - @test parse_scalar(toml["a5"]) == 1//2 - @test parse_scalar(toml["a6"]) == 3//2 - - @test_throws ArgumentError parse_scalar(toml["e1"]) - @test_throws ArgumentError parse_scalar(toml["e2"]) - end - - @testset "parse_tuple" begin - toml = TOML.parse(""" - t1 = [1,3,4] - t2 = ["1/2","3/4","2/1"] - - e1 = "not a tuple" - e2.a="1" - e3 = 1 - e4 = ["1/2","3/4","not a number"] - """) - - @test parse_tuple(toml["t1"]) == (1//1,3//1,4//1) - @test parse_tuple(toml["t2"]) == (1//2,3//4,2//1) - - @test_throws ArgumentError parse_tuple(toml["e1"]) - @test_throws ArgumentError parse_tuple(toml["e2"]) - @test_throws ArgumentError parse_tuple(toml["e3"]) - @test_throws ArgumentError parse_tuple(toml["e4"]) - end -end - -@testset "parse_rational" begin - @test SbpOperators.parse_rational("1") isa Rational - @test SbpOperators.parse_rational("1") == 1//1 - @test SbpOperators.parse_rational("1/2") isa Rational - @test SbpOperators.parse_rational("1/2") == 1//2 - @test SbpOperators.parse_rational("37/13") isa Rational - @test SbpOperators.parse_rational("37/13") == 37//13 - - @test SbpOperators.parse_rational(0.5) isa Rational - @test SbpOperators.parse_rational(0.5) == 1//2 - - @test SbpOperators.parse_rational("0.5") isa Rational - @test SbpOperators.parse_rational("0.5") == 1//2 - - @test SbpOperators.parse_rational(2) isa Rational - @test SbpOperators.parse_rational(2) == 2//1 -end
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/SbpOperators/stencil_set_test.jl Fri Feb 03 22:14:47 2023 +0100 @@ -0,0 +1,172 @@ +using Test + +using TOML +using Sbplib.SbpOperators + +import Sbplib.SbpOperators.Stencil + +@testset "readoperator" begin + toml_str = """ + [meta] + authors = "Ken Mattson" + description = "Standard operators for equidistant grids" + type = "equidistant" + cite = "A paper a long time ago in a galaxy far far away." + + [[stencil_set]] + + order = 2 + test = 2 + + H.inner = ["1"] + H.closure = ["1/2"] + + D1.inner_stencil = ["-1/2", "0", "1/2"] + D1.closure_stencils = [ + {s = ["-1", "1"], c = 1}, + ] + + D2.inner_stencil = ["1", "-2", "1"] + D2.closure_stencils = [ + {s = ["1", "-2", "1"], c = 1}, + ] + + e.closure = ["1"] + d1.closure = {s = ["-3/2", "2", "-1/2"], c = 1} + + [[stencil_set]] + + order = 4 + test = 1 + H.inner = ["1"] + H.closure = ["17/48", "59/48", "43/48", "49/48"] + + D2.inner_stencil = ["-1/12","4/3","-5/2","4/3","-1/12"] + D2.closure_stencils = [ + {s = [ "2", "-5", "4", "-1", "0", "0"], c = 1}, + {s = [ "1", "-2", "1", "0", "0", "0"], c = 2}, + {s = [ "-4/43", "59/43", "-110/43", "59/43", "-4/43", "0"], c = 3}, + {s = [ "-1/49", "0", "59/49", "-118/49", "64/49", "-4/49"], c = 4}, + ] + + e.closure = ["1"] + d1.closure = {s = ["-11/6", "3", "-3/2", "1/3"], c = 1} + + [[stencil_set]] + order = 4 + test = 2 + + H.closure = ["-1/49", "0", "59/49", "-118/49", "64/49", "-4/49"] + """ + + parsed_toml = TOML.parse(toml_str) + + @testset "get_stencil_set" begin + @test get_stencil_set(parsed_toml; order = 2) isa Dict + @test get_stencil_set(parsed_toml; order = 2) == parsed_toml["stencil_set"][1] + @test get_stencil_set(parsed_toml; test = 1) == parsed_toml["stencil_set"][2] + @test get_stencil_set(parsed_toml; order = 4, test = 2) == parsed_toml["stencil_set"][3] + + @test_throws ArgumentError get_stencil_set(parsed_toml; test = 2) + @test_throws ArgumentError get_stencil_set(parsed_toml; order = 4) + end + + @testset "parse_stencil" begin + toml = """ + s1 = ["-1/12","4/3","-5/2","4/3","-1/12"] + s2 = {s = ["2", "-5", "4", "-1", "0", "0"], c = 1} + s3 = {s = ["1", "-2", "1", "0", "0", "0"], c = 2} + s4 = "not a stencil" + s5 = [-1, 4, 3] + s6 = {k = ["1", "-2", "1", "0", "0", "0"], c = 2} + s7 = {s = [-1, 4, 3], c = 2} + s8 = {s = ["1", "-2", "1", "0", "0", "0"], c = [2,2]} + """ + + @test parse_stencil(TOML.parse(toml)["s1"]) == CenteredStencil(-1//12, 4//3, -5//2, 4//3, -1//12) + @test parse_stencil(TOML.parse(toml)["s2"]) == Stencil(2//1, -5//1, 4//1, -1//1, 0//1, 0//1; center=1) + @test parse_stencil(TOML.parse(toml)["s3"]) == Stencil(1//1, -2//1, 1//1, 0//1, 0//1, 0//1; center=2) + + @test_throws ArgumentError parse_stencil(TOML.parse(toml)["s4"]) + @test_throws ArgumentError parse_stencil(TOML.parse(toml)["s5"]) + @test_throws ArgumentError parse_stencil(TOML.parse(toml)["s6"]) + @test_throws ArgumentError parse_stencil(TOML.parse(toml)["s7"]) + @test_throws ArgumentError parse_stencil(TOML.parse(toml)["s8"]) + + stencil_set = get_stencil_set(parsed_toml; order = 4, test = 1) + + @test parse_stencil.(stencil_set["D2"]["closure_stencils"]) == [ + Stencil( 2//1, -5//1, 4//1, -1//1, 0//1, 0//1; center=1), + Stencil( 1//1, -2//1, 1//1, 0//1, 0//1, 0//1; center=2), + Stencil(-4//43, 59//43, -110//43, 59//43, -4//43, 0//1; center=3), + Stencil(-1//49, 0//1, 59//49, -118//49, 64//49, -4//49; center=4), + ] + + + @test parse_stencil(Float64, TOML.parse(toml)["s1"]) == CenteredStencil(-1/12, 4/3, -5/2, 4/3, -1/12) + @test 
parse_stencil(Float64, TOML.parse(toml)["s2"]) == Stencil(2/1, -5/1, 4/1, -1/1, 0/1, 0/1; center=1) + @test parse_stencil(Float64, TOML.parse(toml)["s3"]) == Stencil(1/1, -2/1, 1/1, 0/1, 0/1, 0/1; center=2) + end + + @testset "parse_scalar" begin + toml = TOML.parse(""" + a1 = 1 + a2 = 1.5 + a3 = 1.0 + a4 = 10 + a5 = "1/2" + a6 = "1.5" + + e1 = [1,2,3] + e2 = "a string value" + """) + + @test parse_scalar(toml["a1"]) == 1//1 + @test parse_scalar(toml["a2"]) == 3//2 + @test parse_scalar(toml["a3"]) == 1//1 + @test parse_scalar(toml["a4"]) == 10//1 + @test parse_scalar(toml["a5"]) == 1//2 + @test parse_scalar(toml["a6"]) == 3//2 + + @test_throws ArgumentError parse_scalar(toml["e1"]) + @test_throws ArgumentError parse_scalar(toml["e2"]) + end + + @testset "parse_tuple" begin + toml = TOML.parse(""" + t1 = [1,3,4] + t2 = ["1/2","3/4","2/1"] + + e1 = "not a tuple" + e2.a="1" + e3 = 1 + e4 = ["1/2","3/4","not a number"] + """) + + @test parse_tuple(toml["t1"]) == (1//1,3//1,4//1) + @test parse_tuple(toml["t2"]) == (1//2,3//4,2//1) + + @test_throws ArgumentError parse_tuple(toml["e1"]) + @test_throws ArgumentError parse_tuple(toml["e2"]) + @test_throws ArgumentError parse_tuple(toml["e3"]) + @test_throws ArgumentError parse_tuple(toml["e4"]) + end +end + +@testset "parse_rational" begin + @test SbpOperators.parse_rational("1") isa Rational + @test SbpOperators.parse_rational("1") == 1//1 + @test SbpOperators.parse_rational("1/2") isa Rational + @test SbpOperators.parse_rational("1/2") == 1//2 + @test SbpOperators.parse_rational("37/13") isa Rational + @test SbpOperators.parse_rational("37/13") == 37//13 + + @test SbpOperators.parse_rational(0.5) isa Rational + @test SbpOperators.parse_rational(0.5) == 1//2 + + @test SbpOperators.parse_rational("0.5") isa Rational + @test SbpOperators.parse_rational("0.5") == 1//2 + + @test SbpOperators.parse_rational(2) isa Rational + @test SbpOperators.parse_rational(2) == 2//1 +end
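For reference, the parsing helpers tested above are typically used end to end on the operator file shipped with the package. A short sketch; the keys mirror those exercised in the tests:

using Sbplib.SbpOperators

stencil_set      = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order = 4)
inner_stencil    = parse_stencil(stencil_set["D2"]["inner_stencil"])      # rational weights
closure_stencils = parse_stencil.(stencil_set["D2"]["closure_stencils"])  # one Stencil per closure row
quadrature       = parse_tuple(stencil_set["H"]["closure"])               # tuple of rationals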
--- a/test/SbpOperators/stencil_test.jl Mon Feb 21 10:33:58 2022 +0100 +++ b/test/SbpOperators/stencil_test.jl Fri Feb 03 22:14:47 2023 +0100 @@ -1,19 +1,25 @@ using Test using Sbplib.SbpOperators import Sbplib.SbpOperators.Stencil +import Sbplib.SbpOperators.NestedStencil +import Sbplib.SbpOperators.scale @testset "Stencil" begin - s = Stencil((-2,2), (1.,2.,2.,3.,4.)) + s = Stencil(-2:2, (1.,2.,2.,3.,4.)) @test s isa Stencil{Float64, 5} @test eltype(s) == Float64 - @test SbpOperators.scale(s, 2) == Stencil((-2,2), (2.,4.,4.,6.,8.)) + + @test length(s) == 5 + @test length(Stencil(-1:2, (1,2,3,4))) == 4 + + @test SbpOperators.scale(s, 2) == Stencil(-2:2, (2.,4.,4.,6.,8.)) - @test Stencil(1,2,3,4; center=1) == Stencil((0, 3),(1,2,3,4)) - @test Stencil(1,2,3,4; center=2) == Stencil((-1, 2),(1,2,3,4)) - @test Stencil(1,2,3,4; center=4) == Stencil((-3, 0),(1,2,3,4)) + @test Stencil(1,2,3,4; center=1) == Stencil(0:3,(1,2,3,4)) + @test Stencil(1,2,3,4; center=2) == Stencil(-1:2,(1,2,3,4)) + @test Stencil(1,2,3,4; center=4) == Stencil(-3:0,(1,2,3,4)) - @test CenteredStencil(1,2,3,4,5) == Stencil((-2, 2), (1,2,3,4,5)) + @test CenteredStencil(1,2,3,4,5) == Stencil(-2:2, (1,2,3,4,5)) @test_throws ArgumentError CenteredStencil(1,2,3,4) # Changing the type of the weights @@ -24,8 +30,145 @@ @testset "convert" begin @test convert(Stencil{Float64}, Stencil(1,2,3,4,5; center=2)) == Stencil(1.,2.,3.,4.,5.; center=2) - @test convert(Stencil{Float64}, CenteredStencil(1,2,3,4,5)) == CenteredStencil(1.,2.,3.,4.,5.) - @test convert(Stencil{Int}, Stencil(1.,2.,3.,4.,5.; center=2)) == Stencil(1,2,3,4,5; center=2) - @test convert(Stencil{Rational}, Stencil(1.,2.,3.,4.,5.; center=2)) == Stencil(1//1,2//1,3//1,4//1,5//1; center=2) + @test convert(Stencil{Float64,5}, CenteredStencil(1,2,3,4,5)) == CenteredStencil(1.,2.,3.,4.,5.) + @test convert(Stencil{Int,5}, Stencil(1.,2.,3.,4.,5.; center=2)) == Stencil(1,2,3,4,5; center=2) + @test convert(Stencil{Rational,5}, Stencil(1.,2.,3.,4.,5.; center=2)) == Stencil(1//1,2//1,3//1,4//1,5//1; center=2) + end + + @testset "promotion of weights" begin + @test Stencil(1.,2; center = 1) isa Stencil{Float64, 2} + @test Stencil(1,2//2; center = 1) isa Stencil{Rational{Int64}, 2} + end + + @testset "promotion" begin + @test promote(Stencil(1,1;center=1), Stencil(2.,2.;center=2)) == (Stencil(1.,1.;center=1), Stencil(2.,2.;center=2)) + end + + @testset "type stability" begin + s_int = CenteredStencil(1,2,3) + s_float = CenteredStencil(1.,2.,3.) 
+ v_int = rand(1:10,10); + v_float = rand(10); + + @inferred SbpOperators.apply_stencil(s_int, v_int, 2) + @inferred SbpOperators.apply_stencil(s_float, v_float, 2) + @inferred SbpOperators.apply_stencil(s_int, v_float, 2) + @inferred SbpOperators.apply_stencil(s_float, v_int, 2) + + @inferred SbpOperators.apply_stencil_backwards(s_int, v_int, 5) + @inferred SbpOperators.apply_stencil_backwards(s_float, v_float, 5) + @inferred SbpOperators.apply_stencil_backwards(s_int, v_float, 5) + @inferred SbpOperators.apply_stencil_backwards(s_float, v_int, 5) end end + +@testset "NestedStencil" begin + + @testset "Constructors" begin + s1 = CenteredStencil(-1, 1, 0) + s2 = CenteredStencil(-1, 0, 1) + s3 = CenteredStencil( 0,-1, 1) + + ns = NestedStencil(CenteredStencil(s1,s2,s3)) + @test ns isa NestedStencil{Int,3} + + @test CenteredNestedStencil(s1,s2,s3) == ns + + @test NestedStencil(s1,s2,s3, center = 2) == ns + @test NestedStencil(s1,s2,s3, center = 1) == NestedStencil(Stencil(s1,s2,s3, center=1)) + + @test NestedStencil((-1,1,0),(-1,0,1),(0,-1,1), center=2) == ns + @test CenteredNestedStencil((-1,1,0),(-1,0,1),(0,-1,1)) == ns + @test NestedStencil((-1,1,0),(-1,0,1),(0,-1,1), center=1) == NestedStencil(Stencil( + Stencil(-1, 1, 0; center=1), + Stencil(-1, 0, 1; center=1), + Stencil( 0,-1, 1; center=1); + center=1 + )) + + @testset "Error handling" begin + end + end + + @testset "scale" begin + ns = NestedStencil((-1,1,0),(-1,0,1),(0,-1,1), center=2) + @test SbpOperators.scale(ns, 2) == NestedStencil((-2,2,0),(-2,0,2),(0,-2,2), center=2) + end + + @testset "conversion" begin + ns = NestedStencil((-1,1,0),(-1,0,1),(0,-1,1), center=2) + @test NestedStencil{Float64}(ns) == NestedStencil((-1.,1.,0.),(-1.,0.,1.),(0.,-1.,1.), center=2) + @test NestedStencil{Rational}(ns) == NestedStencil((-1//1,1//1,0//1),(-1//1,0//1,1//1),(0//1,-1//1,1//1), center=2) + + @test convert(NestedStencil{Float64}, ns) == NestedStencil((-1.,1.,0.),(-1.,0.,1.),(0.,-1.,1.), center=2) + @test convert(NestedStencil{Rational}, ns) == NestedStencil((-1//1,1//1,0//1),(-1//1,0//1,1//1),(0//1,-1//1,1//1), center=2) + end + + @testset "promotion of weights" begin + @test NestedStencil((-1,1,0),(-1.,0.,1.),(0,-1,1), center=2) isa NestedStencil{Float64,3,3} + @test NestedStencil((-1,1,0),(-1,0,1),(0//1,-1,1), center=2) isa NestedStencil{Rational{Int64},3,3} + end + + @testset "promotion" begin + promote( + CenteredNestedStencil((-1,1,0),(-1,0,1),(0,-1,1)), + CenteredNestedStencil((-1.,1.,0.),(-1.,0.,1.),(0.,-1.,1.)) + ) == ( + CenteredNestedStencil((-1.,1.,0.),(-1.,0.,1.),(0.,-1.,1.)), + CenteredNestedStencil((-1.,1.,0.),(-1.,0.,1.),(0.,-1.,1.)) + ) + end + + @testset "apply" begin + c = [ 1, 3, 6, 10, 15, 21, 28, 36, 45, 55] + v = [ 2, 3, 5, 7, 11, 13, 17, 19, 23, 29] + + # Centered + ns = NestedStencil((-1,1,0),(-1,0,1),(0,-2,2), center=2) + @test SbpOperators.apply_inner_stencils(ns, c, 4) == Stencil(4,9,10; center=2) + @test SbpOperators.apply_inner_stencils_backwards(ns, c, 4) == Stencil(-5,-9,-8; center=2) + + @test SbpOperators.apply_stencil(ns, c, v, 4) == 4*5 + 9*7 + 10*11 + @test SbpOperators.apply_stencil_backwards(ns, c, v, 4) == -8*5 - 9*7 - 5*11 + + # Non-centered + ns = NestedStencil((-1,1,0),(-1,0,1),(0,-1,1), center=1) + @test SbpOperators.apply_inner_stencils(ns, c, 4) == Stencil(5,11,6; center=1) + @test SbpOperators.apply_inner_stencils_backwards(ns, c, 4) == Stencil(-4,-7,-3; center=1) + + @test SbpOperators.apply_stencil(ns, c, v, 4) == 5*7 + 11*11 + 6*13 + @test SbpOperators.apply_stencil_backwards(ns, c, v, 4) == 
-3*3 - 7*5 - 4*7 + end + + @testset "type stability" begin + s_int = CenteredNestedStencil((1,2,3),(1,2,3),(1,2,3)) + s_float = CenteredNestedStencil((1.,2.,3.),(1.,2.,3.),(1.,2.,3.)) + + v_int = rand(1:10,10); + v_float = rand(10); + + c_int = rand(1:10,10); + c_float = rand(10); + + @inferred SbpOperators.apply_stencil(s_int, c_int, v_int, 2) + @inferred SbpOperators.apply_stencil(s_float, c_int, v_float, 2) + @inferred SbpOperators.apply_stencil(s_int, c_int, v_float, 2) + @inferred SbpOperators.apply_stencil(s_float, c_int, v_int, 2) + + @inferred SbpOperators.apply_stencil(s_int, c_float, v_int, 2) + @inferred SbpOperators.apply_stencil(s_float, c_float, v_float, 2) + @inferred SbpOperators.apply_stencil(s_int, c_float, v_float, 2) + @inferred SbpOperators.apply_stencil(s_float, c_float, v_int, 2) + + @inferred SbpOperators.apply_stencil_backwards(s_int, c_int, v_int, 2) + @inferred SbpOperators.apply_stencil_backwards(s_float, c_int, v_float, 2) + @inferred SbpOperators.apply_stencil_backwards(s_int, c_int, v_float, 2) + @inferred SbpOperators.apply_stencil_backwards(s_float, c_int, v_int, 2) + + @inferred SbpOperators.apply_stencil_backwards(s_int, c_float, v_int, 2) + @inferred SbpOperators.apply_stencil_backwards(s_float, c_float, v_float, 2) + @inferred SbpOperators.apply_stencil_backwards(s_int, c_float, v_float, 2) + @inferred SbpOperators.apply_stencil_backwards(s_float, c_float, v_int, 2) + end + +end
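The apply functions checked for type stability above are plain weighted sums over the stencil's index offsets. A small sketch using the internal name exactly as it is imported in the test:

using Sbplib.SbpOperators
import Sbplib.SbpOperators: apply_stencil

s = CenteredStencil(1., 2., 1.)     # weights at offsets -1, 0, +1
v = [1.0, 4.0, 9.0, 16.0, 25.0]

# apply_stencil(s, v, i) forms the sum of w_j * v[i + j] over the offsets j,
# so at i = 3 this is 1*v[2] + 2*v[3] + 1*v[4] == 38.0.
apply_stencil(s, v, 3)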
--- a/test/SbpOperators/volumeops/constant_interior_scaling_operator_test.jl Mon Feb 21 10:33:58 2022 +0100 +++ b/test/SbpOperators/volumeops/constant_interior_scaling_operator_test.jl Fri Feb 03 22:14:47 2023 +0100 @@ -24,6 +24,9 @@ @test a*v == [.1,.2,.5,.5,.5,.2,.1] @test a'*v == [.1,.2,.5,.5,.5,.2,.1] + @test (a*rand(ComplexF64, domain_size(a)... ))[1] isa ComplexF64 + @test (a'*rand(ComplexF64, domain_size(a')...))[1] isa ComplexF64 + @test range_size(a) == (7,) @test domain_size(a) == (7,)
--- /dev/null Thu Jan 01 00:00:00 1970 +0000 +++ b/test/SbpOperators/volumeops/derivatives/first_derivative_test.jl Fri Feb 03 22:14:47 2023 +0100 @@ -0,0 +1,97 @@ +using Test + + +using Sbplib.SbpOperators +using Sbplib.Grids +using Sbplib.LazyTensors + +using Sbplib.SbpOperators: closure_size, Stencil, VolumeOperator + +""" + monomial(x,k) + +Evaluates ``x^k/k!`` with the convention that it is ``0`` for all ``k<0``. +Has the property that ``d/dx monomial(x,k) = monomial(x,k-1)`` +""" +function monomial(x,k) + if k < 0 + return zero(x) + end + x^k/factorial(k) +end + +@testset "first_derivative" begin + @testset "Constructors" begin + stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order=2) + + g₁ = EquidistantGrid(11, 0., 1.) + g₂ = EquidistantGrid((11,14), (0.,1.), (1.,3.)) + + @test first_derivative(g₁, stencil_set, 1) isa LazyTensor{Float64,1,1} + @test first_derivative(g₂, stencil_set, 2) isa LazyTensor{Float64,2,2} + @test first_derivative(g₁, stencil_set, 1) == first_derivative(g₁, stencil_set) + + interior_stencil = CenteredStencil(-1,0,1) + closure_stencils = [Stencil(-1,1, center=1)] + + @test first_derivative(g₁, interior_stencil, closure_stencils, 1) isa LazyTensor{Float64,1,1} + @test first_derivative(g₁, interior_stencil, closure_stencils, 1) isa VolumeOperator + @test first_derivative(g₁, interior_stencil, closure_stencils, 1) == first_derivative(g₁, interior_stencil, closure_stencils) + @test first_derivative(g₂, interior_stencil, closure_stencils, 2) isa LazyTensor{Float64,2,2} + end + + @testset "Accuracy conditions" begin + N = 20 + g = EquidistantGrid(N, 0//1,2//1) + @testset for order ∈ [2,4] + stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order) + D₁ = first_derivative(g, stencil_set, 1) + + @testset "boundary x^$k" for k ∈ 0:order÷2 + v = evalOn(g, x->monomial(x,k)) + + @testset for i ∈ 1:closure_size(D₁) + x, = points(g)[i] + @test (D₁*v)[i] == monomial(x,k-1) + end + + @testset for i ∈ (N-closure_size(D₁)+1):N + x, = points(g)[i] + @test (D₁*v)[i] == monomial(x,k-1) + end + end + + @testset "interior x^$k" for k ∈ 0:order + v = evalOn(g, x->monomial(x,k)) + + x, = points(g)[10] + @test (D₁*v)[10] == monomial(x,k-1) + end + end + end + + @testset "Accuracy on function" begin + # 1D + g = EquidistantGrid(30, 0.,1.) + v = evalOn(g, x->exp(x)) + @testset for (order, tol) ∈ [(2, 6e-3),(4, 2e-4)] + stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order) + D₁ = first_derivative(g, stencil_set, 1) + + @test D₁*v ≈ v rtol=tol + end + + # 2D + g = EquidistantGrid((30,60), (0.,0.),(1.,2.)) + v = evalOn(g, (x,y)->exp(0.8x+1.2*y)) + @testset for (order, tol) ∈ [(2, 6e-3),(4, 3e-4)] + stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order) + Dx = first_derivative(g, stencil_set, 1) + Dy = first_derivative(g, stencil_set, 2) + + @test Dx*v ≈ 0.8v rtol=tol + @test Dy*v ≈ 1.2v rtol=tol + end + end +end +
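The monomial helper makes the accuracy conditions read off directly: since d/dx (x^k/k!) = x^(k-1)/(k-1)!, an operator that is exact to degree k must map the sampled monomial(·,k) to monomial(·,k-1). A small worked check of the helper itself (re-stated here as a one-liner for the sketch):

monomial(x,k) = k < 0 ? zero(x) : x^k/factorial(k)

monomial(2.0, 3)    # 2^3/3! == 8/6 ≈ 1.333
monomial(2.0, 2)    # 2^2/2! == 2.0, i.e. d/dx of x^3/3! evaluated at x = 2
monomial(2.0, -1)   # 0.0 by convention, so constants differentiate to zero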
--- a/test/SbpOperators/volumeops/derivatives/second_derivative_test.jl Mon Feb 21 10:33:58 2022 +0100 +++ b/test/SbpOperators/volumeops/derivatives/second_derivative_test.jl Fri Feb 03 22:14:47 2023 +0100 @@ -6,8 +6,11 @@ import Sbplib.SbpOperators.VolumeOperator +# TODO: Refactor these tests to look more like the tests in first_derivative_test.jl. + @testset "SecondDerivative" begin - stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order=4) + operator_path = sbp_operators_path()*"standard_diagonal.toml" + stencil_set = read_stencil_set(operator_path; order=4) inner_stencil = parse_stencil(stencil_set["D2"]["inner_stencil"]) closure_stencils = parse_stencil.(stencil_set["D2"]["closure_stencils"]) Lx = 3.5 @@ -17,16 +20,19 @@ @testset "Constructors" begin @testset "1D" begin - Dₓₓ = second_derivative(g_1D,inner_stencil,closure_stencils) - @test Dₓₓ == second_derivative(g_1D,inner_stencil,closure_stencils,1) + Dₓₓ = second_derivative(g_1D,inner_stencil,closure_stencils,1) + @test Dₓₓ == second_derivative(g_1D,inner_stencil,closure_stencils) + @test Dₓₓ == second_derivative(g_1D,stencil_set,1) + @test Dₓₓ == second_derivative(g_1D,stencil_set) @test Dₓₓ isa VolumeOperator end @testset "2D" begin Dₓₓ = second_derivative(g_2D,inner_stencil,closure_stencils,1) - D2 = second_derivative(g_1D,inner_stencil,closure_stencils) - I = IdentityMapping{Float64}(size(g_2D)[2]) + D2 = second_derivative(g_1D,inner_stencil,closure_stencils,1) + I = IdentityTensor{Float64}(size(g_2D)[2]) @test Dₓₓ == D2⊗I - @test Dₓₓ isa TensorMapping{T,2,2} where T + @test Dₓₓ == second_derivative(g_2D,stencil_set,1) + @test Dₓₓ isa LazyTensor{T,2,2} where T end end @@ -47,10 +53,8 @@ # 2nd order interior stencil, 1st order boundary stencil, # implies that L*v should be exact for monomials up to order 2. @testset "2nd order" begin - stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order=2) - inner_stencil = parse_stencil(stencil_set["D2"]["inner_stencil"]) - closure_stencils = parse_stencil.(stencil_set["D2"]["closure_stencils"]) - Dₓₓ = second_derivative(g_1D,inner_stencil,closure_stencils) + stencil_set = read_stencil_set(operator_path; order=2) + Dₓₓ = second_derivative(g_1D,stencil_set) @test Dₓₓ*monomials[1] ≈ zeros(Float64,size(g_1D)...) atol = 5e-10 @test Dₓₓ*monomials[2] ≈ zeros(Float64,size(g_1D)...) atol = 5e-10 @test Dₓₓ*monomials[3] ≈ monomials[1] atol = 5e-10 @@ -60,10 +64,8 @@ # 4th order interior stencil, 2nd order boundary stencil, # implies that L*v should be exact for monomials up to order 3. @testset "4th order" begin - stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order=4) - inner_stencil = parse_stencil(stencil_set["D2"]["inner_stencil"]) - closure_stencils = parse_stencil.(stencil_set["D2"]["closure_stencils"]) - Dₓₓ = second_derivative(g_1D,inner_stencil,closure_stencils) + stencil_set = read_stencil_set(operator_path; order=4) + Dₓₓ = second_derivative(g_1D,stencil_set) # NOTE: high tolerances for checking the "exact" differentiation # due to accumulation of round-off errors/cancellation errors? @test Dₓₓ*monomials[1] ≈ zeros(Float64,size(g_1D)...) atol = 5e-10 @@ -88,10 +90,8 @@ # 2nd order interior stencil, 1st order boundary stencil, # implies that L*v should be exact for binomials up to order 2. 
@testset "2nd order" begin - stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order=2) - inner_stencil = parse_stencil(stencil_set["D2"]["inner_stencil"]) - closure_stencils = parse_stencil.(stencil_set["D2"]["closure_stencils"]) - Dyy = second_derivative(g_2D,inner_stencil,closure_stencils,2) + stencil_set = read_stencil_set(operator_path; order=2) + Dyy = second_derivative(g_2D,stencil_set,2) @test Dyy*binomials[1] ≈ zeros(Float64,size(g_2D)...) atol = 5e-9 @test Dyy*binomials[2] ≈ zeros(Float64,size(g_2D)...) atol = 5e-9 @test Dyy*binomials[3] ≈ evalOn(g_2D,(x,y)->1.) atol = 5e-9 @@ -101,10 +101,8 @@ # 4th order interior stencil, 2nd order boundary stencil, # implies that L*v should be exact for binomials up to order 3. @testset "4th order" begin - stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order=4) - inner_stencil = parse_stencil(stencil_set["D2"]["inner_stencil"]) - closure_stencils = parse_stencil.(stencil_set["D2"]["closure_stencils"]) - Dyy = second_derivative(g_2D,inner_stencil,closure_stencils,2) + stencil_set = read_stencil_set(operator_path; order=4) + Dyy = second_derivative(g_2D,stencil_set,2) # NOTE: high tolerances for checking the "exact" differentiation # due to accumulation of round-off errors/cancellation errors? @test Dyy*binomials[1] ≈ zeros(Float64,size(g_2D)...) atol = 5e-9
--- a/test/SbpOperators/volumeops/inner_products/inner_product_test.jl Mon Feb 21 10:33:58 2022 +0100 +++ b/test/SbpOperators/volumeops/inner_products/inner_product_test.jl Fri Feb 03 22:14:47 2023 +0100 @@ -4,6 +4,7 @@ using Sbplib.Grids using Sbplib.LazyTensors +import Sbplib.SbpOperators.ConstantInteriorScalingOperator @testset "Diagonal-stencil inner_product" begin Lx = π/2. @@ -19,20 +20,23 @@ quadrature_closure = parse_tuple(stencil_set["H"]["closure"]) @testset "0D" begin H = inner_product(EquidistantGrid{Float64}(), quadrature_interior, quadrature_closure) - @test H == IdentityMapping{Float64}() - @test H isa TensorMapping{T,0,0} where T + @test H == inner_product(EquidistantGrid{Float64}(), stencil_set) + @test H == IdentityTensor{Float64}() + @test H isa LazyTensor{T,0,0} where T end @testset "1D" begin H = inner_product(g_1D, quadrature_interior, quadrature_closure) - @test H == inner_product(g_1D, quadrature_interior, quadrature_closure) - @test H isa TensorMapping{T,1,1} where T + @test H == inner_product(g_1D, stencil_set) + @test H isa ConstantInteriorScalingOperator + @test H isa LazyTensor{T,1,1} where T end @testset "2D" begin H = inner_product(g_2D, quadrature_interior, quadrature_closure) H_x = inner_product(restrict(g_2D,1), quadrature_interior, quadrature_closure) H_y = inner_product(restrict(g_2D,2), quadrature_interior, quadrature_closure) + @test H == inner_product(g_2D, stencil_set) @test H == H_x⊗H_y - @test H isa TensorMapping{T,2,2} where T + @test H isa LazyTensor{T,2,2} where T end end
--- a/test/SbpOperators/volumeops/inner_products/inverse_inner_product_test.jl Mon Feb 21 10:33:58 2022 +0100 +++ b/test/SbpOperators/volumeops/inner_products/inverse_inner_product_test.jl Fri Feb 03 22:14:47 2023 +0100 @@ -4,7 +4,7 @@ using Sbplib.Grids using Sbplib.LazyTensors -import Sbplib.SbpOperators.Stencil +import Sbplib.SbpOperators.ConstantInteriorScalingOperator @testset "Diagonal-stencil inverse_inner_product" begin Lx = π/2. @@ -17,19 +17,23 @@ quadrature_closure = parse_tuple(stencil_set["H"]["closure"]) @testset "0D" begin Hi = inverse_inner_product(EquidistantGrid{Float64}(), quadrature_interior, quadrature_closure) - @test Hi == IdentityMapping{Float64}() - @test Hi isa TensorMapping{T,0,0} where T + @test Hi == inverse_inner_product(EquidistantGrid{Float64}(), stencil_set) + @test Hi == IdentityTensor{Float64}() + @test Hi isa LazyTensor{T,0,0} where T end @testset "1D" begin Hi = inverse_inner_product(g_1D, quadrature_interior, quadrature_closure) - @test Hi isa TensorMapping{T,1,1} where T + @test Hi == inverse_inner_product(g_1D, stencil_set) + @test Hi isa ConstantInteriorScalingOperator + @test Hi isa LazyTensor{T,1,1} where T end @testset "2D" begin Hi = inverse_inner_product(g_2D, quadrature_interior, quadrature_closure) Hi_x = inverse_inner_product(restrict(g_2D,1), quadrature_interior, quadrature_closure) Hi_y = inverse_inner_product(restrict(g_2D,2), quadrature_interior, quadrature_closure) + @test Hi == inverse_inner_product(g_2D, stencil_set) @test Hi == Hi_x⊗Hi_y - @test Hi isa TensorMapping{T,2,2} where T + @test Hi isa LazyTensor{T,2,2} where T end end
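Both quadrature operators above factor over dimensions. A minimal sketch mirroring the 2D tests (the grid extents are illustrative):

using Sbplib.SbpOperators
using Sbplib.Grids
using Sbplib.LazyTensors

stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order = 4)
g_2D = EquidistantGrid((77,66), (0.0, 0.0), (π/2, π/2))

H  = inner_product(g_2D, stencil_set)
Hx = inner_product(restrict(g_2D, 1), stencil_set)
Hy = inner_product(restrict(g_2D, 2), stencil_set)
# H == Hx⊗Hy, and inverse_inner_product(g_2D, stencil_set) factors the same way,
# as asserted in the tests above.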
--- a/test/SbpOperators/volumeops/laplace/laplace_test.jl Mon Feb 21 10:33:58 2022 +0100 +++ b/test/SbpOperators/volumeops/laplace/laplace_test.jl Fri Feb 03 22:14:47 2023 +0100 @@ -4,25 +4,25 @@ using Sbplib.Grids using Sbplib.LazyTensors +# Default stencils (4th order) +operator_path = sbp_operators_path()*"standard_diagonal.toml" +stencil_set = read_stencil_set(operator_path; order=4) +inner_stencil = parse_stencil(stencil_set["D2"]["inner_stencil"]) +closure_stencils = parse_stencil.(stencil_set["D2"]["closure_stencils"]) +g_1D = EquidistantGrid(101, 0.0, 1.) +g_3D = EquidistantGrid((51,101,52), (0.0, -1.0, 0.0), (1., 1., 1.)) + @testset "Laplace" begin - g_1D = EquidistantGrid(101, 0.0, 1.) - g_3D = EquidistantGrid((51,101,52), (0.0, -1.0, 0.0), (1., 1., 1.)) @testset "Constructors" begin - stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order=4) - inner_stencil = parse_stencil(stencil_set["D2"]["inner_stencil"]) - closure_stencils = parse_stencil.(stencil_set["D2"]["closure_stencils"]) @testset "1D" begin - L = laplace(g_1D, inner_stencil, closure_stencils) - @test L == second_derivative(g_1D, inner_stencil, closure_stencils) - @test L isa TensorMapping{T,1,1} where T + Δ = laplace(g_1D, inner_stencil, closure_stencils) + @test Laplace(g_1D, stencil_set) == Laplace(Δ, stencil_set) + @test Laplace(g_1D, stencil_set) isa LazyTensor{T,1,1} where T end @testset "3D" begin - L = laplace(g_3D, inner_stencil, closure_stencils) - @test L isa TensorMapping{T,3,3} where T - Dxx = second_derivative(g_3D, inner_stencil, closure_stencils, 1) - Dyy = second_derivative(g_3D, inner_stencil, closure_stencils, 2) - Dzz = second_derivative(g_3D, inner_stencil, closure_stencils, 3) - @test L == Dxx + Dyy + Dzz + Δ = laplace(g_3D, inner_stencil, closure_stencils) + @test Laplace(g_3D, stencil_set) == Laplace(Δ,stencil_set) + @test Laplace(g_3D, stencil_set) isa LazyTensor{T,3,3} where T end end @@ -42,30 +42,44 @@ # 2nd order interior stencil, 1st order boundary stencil, # implies that L*v should be exact for binomials up to order 2. @testset "2nd order" begin - stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order=2) - inner_stencil = parse_stencil(stencil_set["D2"]["inner_stencil"]) - closure_stencils = parse_stencil.(stencil_set["D2"]["closure_stencils"]) - L = laplace(g_3D, inner_stencil, closure_stencils) - @test L*polynomials[1] ≈ zeros(Float64, size(g_3D)...) atol = 5e-9 - @test L*polynomials[2] ≈ zeros(Float64, size(g_3D)...) atol = 5e-9 - @test L*polynomials[3] ≈ polynomials[1] atol = 5e-9 - @test L*v ≈ Δv rtol = 5e-2 norm = l2 + stencil_set = read_stencil_set(operator_path; order=2) + Δ = Laplace(g_3D, stencil_set) + @test Δ*polynomials[1] ≈ zeros(Float64, size(g_3D)...) atol = 5e-9 + @test Δ*polynomials[2] ≈ zeros(Float64, size(g_3D)...) atol = 5e-9 + @test Δ*polynomials[3] ≈ polynomials[1] atol = 5e-9 + @test Δ*v ≈ Δv rtol = 5e-2 norm = l2 end # 4th order interior stencil, 2nd order boundary stencil, # implies that L*v should be exact for binomials up to order 3. 
@testset "4th order" begin - stencil_set = read_stencil_set(sbp_operators_path()*"standard_diagonal.toml"; order=4) - inner_stencil = parse_stencil(stencil_set["D2"]["inner_stencil"]) - closure_stencils = parse_stencil.(stencil_set["D2"]["closure_stencils"]) - L = laplace(g_3D, inner_stencil, closure_stencils) + stencil_set = read_stencil_set(operator_path; order=4) + Δ = Laplace(g_3D, stencil_set) # NOTE: high tolerances for checking the "exact" differentiation # due to accumulation of round-off errors/cancellation errors? - @test L*polynomials[1] ≈ zeros(Float64, size(g_3D)...) atol = 5e-9 - @test L*polynomials[2] ≈ zeros(Float64, size(g_3D)...) atol = 5e-9 - @test L*polynomials[3] ≈ polynomials[1] atol = 5e-9 - @test L*polynomials[4] ≈ polynomials[2] atol = 5e-9 - @test L*v ≈ Δv rtol = 5e-4 norm = l2 + @test Δ*polynomials[1] ≈ zeros(Float64, size(g_3D)...) atol = 5e-9 + @test Δ*polynomials[2] ≈ zeros(Float64, size(g_3D)...) atol = 5e-9 + @test Δ*polynomials[3] ≈ polynomials[1] atol = 5e-9 + @test Δ*polynomials[4] ≈ polynomials[2] atol = 5e-9 + @test Δ*v ≈ Δv rtol = 5e-4 norm = l2 end end end + +@testset "laplace" begin + @testset "1D" begin + Δ = laplace(g_1D, inner_stencil, closure_stencils) + @test Δ == second_derivative(g_1D, inner_stencil, closure_stencils, 1) + @test Δ isa LazyTensor{T,1,1} where T + end + @testset "3D" begin + Δ = laplace(g_3D, inner_stencil, closure_stencils) + @test Δ isa LazyTensor{T,3,3} where T + Dxx = second_derivative(g_3D, inner_stencil, closure_stencils, 1) + Dyy = second_derivative(g_3D, inner_stencil, closure_stencils, 2) + Dzz = second_derivative(g_3D, inner_stencil, closure_stencils, 3) + @test Δ == Dxx + Dyy + Dzz + @test Δ isa LazyTensor{T,3,3} where T + end +end +
--- a/test/SbpOperators/volumeops/volume_operator_test.jl Mon Feb 21 10:33:58 2022 +0100 +++ b/test/SbpOperators/volumeops/volume_operator_test.jl Fri Feb 03 22:14:47 2023 +0100 @@ -7,118 +7,76 @@ import Sbplib.SbpOperators.Stencil import Sbplib.SbpOperators.VolumeOperator -import Sbplib.SbpOperators.volume_operator import Sbplib.SbpOperators.odd import Sbplib.SbpOperators.even + @testset "VolumeOperator" begin inner_stencil = CenteredStencil(1/4, 2/4, 1/4) - closure_stencils = (Stencil(1/2, 1/2; center=1), Stencil(0.,1.; center=2)) - g_1D = EquidistantGrid(11,0.,1.) - g_2D = EquidistantGrid((11,12),(0.,0.),(1.,1.)) - g_3D = EquidistantGrid((11,12,10),(0.,0.,0.),(1.,1.,1.)) + closure_stencils = (Stencil(1/2, 1/2; center=1), Stencil(2.,1.; center=2)) + g = EquidistantGrid(11,0.,1.) + @testset "Constructors" begin - @testset "1D" begin - op = VolumeOperator(inner_stencil,closure_stencils,(11,),even) - @test op == VolumeOperator(g_1D,inner_stencil,closure_stencils,even) - @test op == volume_operator(g_1D,inner_stencil,closure_stencils,even,1) - @test op isa TensorMapping{T,1,1} where T - end - @testset "2D" begin - op_x = volume_operator(g_2D,inner_stencil,closure_stencils,even,1) - op_y = volume_operator(g_2D,inner_stencil,closure_stencils,even,2) - Ix = IdentityMapping{Float64}((11,)) - Iy = IdentityMapping{Float64}((12,)) - @test op_x == VolumeOperator(inner_stencil,closure_stencils,(11,),even)⊗Iy - @test op_y == Ix⊗VolumeOperator(inner_stencil,closure_stencils,(12,),even) - @test op_x isa TensorMapping{T,2,2} where T - @test op_y isa TensorMapping{T,2,2} where T - end - @testset "3D" begin - op_x = volume_operator(g_3D,inner_stencil,closure_stencils,even,1) - op_y = volume_operator(g_3D,inner_stencil,closure_stencils,even,2) - op_z = volume_operator(g_3D,inner_stencil,closure_stencils,even,3) - Ix = IdentityMapping{Float64}((11,)) - Iy = IdentityMapping{Float64}((12,)) - Iz = IdentityMapping{Float64}((10,)) - @test op_x == VolumeOperator(inner_stencil,closure_stencils,(11,),even)⊗Iy⊗Iz - @test op_y == Ix⊗VolumeOperator(inner_stencil,closure_stencils,(12,),even)⊗Iz - @test op_z == Ix⊗Iy⊗VolumeOperator(inner_stencil,closure_stencils,(10,),even) - @test op_x isa TensorMapping{T,3,3} where T - @test op_y isa TensorMapping{T,3,3} where T - @test op_z isa TensorMapping{T,3,3} where T - end + op = VolumeOperator(inner_stencil,closure_stencils,(11,),even) + @test op == VolumeOperator(g,inner_stencil,closure_stencils,even) + @test op isa LazyTensor{T,1,1} where T end @testset "Sizes" begin - @testset "1D" begin - op = volume_operator(g_1D,inner_stencil,closure_stencils,even,1) - @test range_size(op) == domain_size(op) == size(g_1D) - end - - @testset "2D" begin - op_x = volume_operator(g_2D,inner_stencil,closure_stencils,even,1) - op_y = volume_operator(g_2D,inner_stencil,closure_stencils,even,2) - @test range_size(op_y) == domain_size(op_y) == - range_size(op_x) == domain_size(op_x) == size(g_2D) - end - @testset "3D" begin - op_x = volume_operator(g_3D,inner_stencil,closure_stencils,even,1) - op_y = volume_operator(g_3D,inner_stencil,closure_stencils,even,2) - op_z = volume_operator(g_3D,inner_stencil,closure_stencils,even,3) - @test range_size(op_z) == domain_size(op_z) == - range_size(op_y) == domain_size(op_y) == - range_size(op_x) == domain_size(op_x) == size(g_3D) - end + op = VolumeOperator(g,inner_stencil,closure_stencils,even) + @test range_size(op) == domain_size(op) == size(g) end - op_x = volume_operator(g_2D,inner_stencil,closure_stencils,even,1) - op_y = 
volume_operator(g_2D,inner_stencil,closure_stencils,odd,2) - v = zeros(size(g_2D)) - Nx = size(g_2D)[1] - Ny = size(g_2D)[2] - for i = 1:Nx - v[i,:] .= i + + op_even = VolumeOperator(g, inner_stencil, closure_stencils, even) + op_odd = VolumeOperator(g, inner_stencil, closure_stencils, odd) + + N = size(g)[1] + v = rand(N) + + r_even = copy(v) + r_odd = copy(v) + + r_even[1] = (v[1] + v[2])/2 + r_odd[1] = (v[1] + v[2])/2 + + r_even[2] = 2v[1] + v[2] + r_odd[2] = 2v[1] + v[2] + + for i ∈ 3:N-2 + r_even[i] = (v[i-1] + 2v[i] + v[i+1])/4 + r_odd[i] = (v[i-1] + 2v[i] + v[i+1])/4 end - rx = copy(v) - rx[1,:] .= 1.5 - rx[Nx,:] .= (2*Nx-1)/2 - ry = copy(v) - ry[:,Ny-1:Ny] = -v[:,Ny-1:Ny] + + r_even[N-1] = v[N-1] + 2v[N] + r_odd[N-1] = -v[N-1] - 2v[N] + + r_even[N] = (v[N-1] + v[N])/2 + r_odd[N] = -(v[N-1] + v[N])/2 + @testset "Application" begin - @test op_x*v ≈ rx rtol = 1e-14 - @test op_y*v ≈ ry rtol = 1e-14 + @test op_even*v ≈ r_even + @test op_odd*v ≈ r_odd + + @test (op_even*rand(ComplexF64,size(g)))[2] isa ComplexF64 end @testset "Regions" begin - @test (op_x*v)[Index(1,Lower),Index(3,Interior)] ≈ rx[1,3] rtol = 1e-14 - @test (op_x*v)[Index(2,Lower),Index(3,Interior)] ≈ rx[2,3] rtol = 1e-14 - @test (op_x*v)[Index(6,Interior),Index(3,Interior)] ≈ rx[6,3] rtol = 1e-14 - @test (op_x*v)[Index(10,Upper),Index(3,Interior)] ≈ rx[10,3] rtol = 1e-14 - @test (op_x*v)[Index(11,Upper),Index(3,Interior)] ≈ rx[11,3] rtol = 1e-14 - - @test_throws BoundsError (op_x*v)[Index(3,Lower),Index(3,Interior)] - @test_throws BoundsError (op_x*v)[Index(9,Upper),Index(3,Interior)] + @test (op_even*v)[Index(1,Lower)] ≈ r_even[1] + @test (op_even*v)[Index(2,Lower)] ≈ r_even[2] + @test (op_even*v)[Index(6,Interior)] ≈ r_even[6] + @test (op_even*v)[Index(10,Upper)] ≈ r_even[10] + @test (op_even*v)[Index(11,Upper)] ≈ r_even[11] - @test (op_y*v)[Index(3,Interior),Index(1,Lower)] ≈ ry[3,1] rtol = 1e-14 - @test (op_y*v)[Index(3,Interior),Index(2,Lower)] ≈ ry[3,2] rtol = 1e-14 - @test (op_y*v)[Index(3,Interior),Index(6,Interior)] ≈ ry[3,6] rtol = 1e-14 - @test (op_y*v)[Index(3,Interior),Index(11,Upper)] ≈ ry[3,11] rtol = 1e-14 - @test (op_y*v)[Index(3,Interior),Index(12,Upper)] ≈ ry[3,12] rtol = 1e-14 - - @test_throws BoundsError (op_y*v)[Index(3,Interior),Index(10,Upper)] - @test_throws BoundsError (op_y*v)[Index(3,Interior),Index(3,Lower)] + @test_throws BoundsError (op_even*v)[Index(3,Lower)] + @test_throws BoundsError (op_even*v)[Index(9,Upper)] end @testset "Inferred" begin - @test_skip @inferred apply(op_x, v,1,1) - @inferred apply(op_x, v, Index(1,Lower),Index(1,Lower)) - @inferred apply(op_x, v, Index(6,Interior),Index(1,Lower)) - @inferred apply(op_x, v, Index(11,Upper),Index(1,Lower)) - @test_skip @inferred apply(op_y, v,1,1) - @inferred apply(op_y, v, Index(1,Lower),Index(1,Lower)) - @inferred apply(op_y, v, Index(1,Lower),Index(6,Interior)) - @inferred apply(op_y, v, Index(1,Lower),Index(11,Upper)) + @inferred apply(op_even, v, 1) + @inferred apply(op_even, v, Index(1,Lower)) + @inferred apply(op_even, v, Index(6,Interior)) + @inferred apply(op_even, v, Index(11,Upper)) end end
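The parity argument (even/odd) tested above only affects the upper closure: with odd, the closure rows at the upper boundary change sign. A minimal sketch using the same internal names and stencils as the test:

using Sbplib.Grids
using Sbplib.SbpOperators
import Sbplib.SbpOperators.Stencil
import Sbplib.SbpOperators.VolumeOperator
import Sbplib.SbpOperators.odd
import Sbplib.SbpOperators.even

inner_stencil    = CenteredStencil(1/4, 2/4, 1/4)
closure_stencils = (Stencil(1/2, 1/2; center=1), Stencil(2., 1.; center=2))
g = EquidistantGrid(11, 0., 1.)

op_even = VolumeOperator(g, inner_stencil, closure_stencils, even)
op_odd  = VolumeOperator(g, inner_stencil, closure_stencils, odd)

v = rand(11)
# Interior rows are identical for both parities, while the upper closure flips
# sign: (op_even*v)[11] == (v[10] + v[11])/2 and (op_odd*v)[11] == -(v[10] + v[11])/2.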