From 8b55ce47fc83a41175e9e37c1230992f1e0ebfa5 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 5 Oct 2022 16:46:28 +1100 Subject: [PATCH 01/95] Added some packages, Manifest.toml is required. - GridapDistributed is tracking #p4est-migration - GridapP4est is tracking #generate_mesh_hierarchy --- Manifest.toml | 653 ++++++++++++++++++++++++++++++++++++++++++++++++++ Project.toml | 7 + 2 files changed, 660 insertions(+) create mode 100644 Manifest.toml diff --git a/Manifest.toml b/Manifest.toml new file mode 100644 index 00000000..e706583f --- /dev/null +++ b/Manifest.toml @@ -0,0 +1,653 @@ +# This file is machine-generated - editing it directly is not advised + +julia_version = "1.8.1" +manifest_format = "2.0" +project_hash = "4c2033753bdd94d2611b146aec7777ca1783803f" + +[[deps.AbstractFFTs]] +deps = ["ChainRulesCore", "LinearAlgebra"] +git-tree-sha1 = "69f7020bd72f069c219b5e8c236c1fa90d2cb409" +uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c" +version = "1.2.1" + +[[deps.AbstractTrees]] +git-tree-sha1 = "5c0b629df8a5566a06f5fef5100b53ea56e465a0" +uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" +version = "0.4.2" + +[[deps.ArgCheck]] +git-tree-sha1 = "a3a402a35a2f7e0b87828ccabbd5ebfbebe356b4" +uuid = "dce04be8-c92d-5529-be00-80e4d2c0e197" +version = "2.3.0" + +[[deps.ArgParse]] +deps = ["Logging", "TextWrap"] +git-tree-sha1 = "3102bce13da501c9104df33549f511cd25264d7d" +uuid = "c7e460c6-2fb9-53a9-8c5b-16f535851c63" +version = "1.1.4" + +[[deps.ArgTools]] +uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f" +version = "1.1.1" + +[[deps.ArrayInterfaceCore]] +deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"] +git-tree-sha1 = "5bb0f8292405a516880a3809954cb832ae7a31c5" +uuid = "30b0a656-2188-435a-8636-2ec0e6a096e2" +version = "0.1.20" + +[[deps.ArrayLayouts]] +deps = ["FillArrays", "LinearAlgebra", "SparseArrays"] +git-tree-sha1 = "56c347caf09ad8acb3e261fe75f8e09652b7b05b" +uuid = "4c555306-a7a7-4459-81d9-ec55ddd5c99a" +version = "0.7.10" + +[[deps.Artifacts]] +uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" + +[[deps.BSON]] +git-tree-sha1 = "306bb5574b0c1c56d7e1207581516c557d105cad" +uuid = "fbb218c0-5317-5bc6-957e-2ee96dd4b1f0" +version = "0.3.5" + +[[deps.Base64]] +uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" + +[[deps.BlockArrays]] +deps = ["ArrayLayouts", "FillArrays", "LinearAlgebra"] +git-tree-sha1 = "21490270d1fcf2efa9ddb2126d6958e9b72a4db0" +uuid = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" +version = "0.16.11" + +[[deps.CEnum]] +git-tree-sha1 = "eb4cb44a499229b3b8426dcfb5dd85333951ff90" +uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82" +version = "0.4.2" + +[[deps.ChainRulesCore]] +deps = ["Compat", "LinearAlgebra", "SparseArrays"] +git-tree-sha1 = "e7ff6cadf743c098e08fca25c91103ee4303c9bb" +uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" +version = "1.15.6" + +[[deps.ChangesOfVariables]] +deps = ["ChainRulesCore", "LinearAlgebra", "Test"] +git-tree-sha1 = "38f7a08f19d8810338d4f5085211c7dfa5d5bdd8" +uuid = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0" +version = "0.1.4" + +[[deps.CodecZlib]] +deps = ["TranscodingStreams", "Zlib_jll"] +git-tree-sha1 = "ded953804d019afa9a3f98981d99b33e3db7b6da" +uuid = "944b1d66-785c-5afd-91f1-9de20f533193" +version = "0.7.0" + +[[deps.Combinatorics]] +git-tree-sha1 = "08c8b6831dc00bfea825826be0bc8336fc369860" +uuid = "861a8166-3701-5b0c-9a16-15d98fcdc6aa" +version = "1.0.2" + +[[deps.CommonSubexpressions]] +deps = ["MacroTools", "Test"] +git-tree-sha1 = "7b8a93dba8af7e3b42fecabf646260105ac373f7" +uuid = "bbf7d656-a473-5ed7-a52c-81e309532950" +version = "0.3.0" + 
+[[deps.Compat]] +deps = ["Dates", "LinearAlgebra", "UUIDs"] +git-tree-sha1 = "5856d3031cdb1f3b2b6340dfdc66b6d9a149a374" +uuid = "34da2185-b29b-5c13-b0c7-acf172513d20" +version = "4.2.0" + +[[deps.CompilerSupportLibraries_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae" +version = "0.5.2+0" + +[[deps.ConstructionBase]] +deps = ["LinearAlgebra"] +git-tree-sha1 = "fb21ddd70a051d882a1686a5a550990bbe371a95" +uuid = "187b0558-2788-49d3-abe0-74a17ed4e7c9" +version = "1.4.1" + +[[deps.DataStructures]] +deps = ["Compat", "InteractiveUtils", "OrderedCollections"] +git-tree-sha1 = "d1fff3a548102f48987a52a2e0d114fa97d730f0" +uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" +version = "0.18.13" + +[[deps.Dates]] +deps = ["Printf"] +uuid = "ade2ca70-3891-5945-98fb-dc099432e06a" + +[[deps.DiffResults]] +deps = ["StaticArraysCore"] +git-tree-sha1 = "782dd5f4561f5d267313f23853baaaa4c52ea621" +uuid = "163ba53b-c6d8-5494-b064-1a9d43ac40c5" +version = "1.1.0" + +[[deps.DiffRules]] +deps = ["IrrationalConstants", "LogExpFunctions", "NaNMath", "Random", "SpecialFunctions"] +git-tree-sha1 = "992a23afdb109d0d2f8802a30cf5ae4b1fe7ea68" +uuid = "b552c78f-8df3-52c6-915a-8e097449b14b" +version = "1.11.1" + +[[deps.Distances]] +deps = ["LinearAlgebra", "SparseArrays", "Statistics", "StatsAPI"] +git-tree-sha1 = "3258d0659f812acde79e8a74b11f17ac06d0ca04" +uuid = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7" +version = "0.10.7" + +[[deps.Distributed]] +deps = ["Random", "Serialization", "Sockets"] +uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b" + +[[deps.DocStringExtensions]] +deps = ["LibGit2"] +git-tree-sha1 = "b19534d1895d702889b219c382a6e18010797f0b" +uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" +version = "0.8.6" + +[[deps.Downloads]] +deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"] +uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6" +version = "1.6.0" + +[[deps.FFTW]] +deps = ["AbstractFFTs", "FFTW_jll", "LinearAlgebra", "MKL_jll", "Preferences", "Reexport"] +git-tree-sha1 = "90630efff0894f8142308e334473eba54c433549" +uuid = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341" +version = "1.5.0" + +[[deps.FFTW_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "c6033cc3892d0ef5bb9cd29b7f2f0331ea5184ea" +uuid = "f5851436-0d7a-5f13-b9de-f02708fd171a" +version = "3.3.10+0" + +[[deps.FastGaussQuadrature]] +deps = ["LinearAlgebra", "SpecialFunctions", "StaticArrays"] +git-tree-sha1 = "58d83dd5a78a36205bdfddb82b1bb67682e64487" +uuid = "442a2c76-b920-505d-bb47-c5924d526838" +version = "0.4.9" + +[[deps.FileIO]] +deps = ["Pkg", "Requires", "UUIDs"] +git-tree-sha1 = "94f5101b96d2d968ace56f7f2db19d0a5f592e28" +uuid = "5789e2e9-d7fb-5bc7-8068-2c6fae9b9549" +version = "1.15.0" + +[[deps.FileWatching]] +uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee" + +[[deps.FillArrays]] +deps = ["LinearAlgebra", "Random", "SparseArrays", "Statistics"] +git-tree-sha1 = "deed294cde3de20ae0b2e0355a6c4e1c6a5ceffc" +uuid = "1a297f60-69ca-5386-bcde-b61e274b549b" +version = "0.12.8" + +[[deps.FiniteDiff]] +deps = ["ArrayInterfaceCore", "LinearAlgebra", "Requires", "Setfield", "SparseArrays", "StaticArrays"] +git-tree-sha1 = "5a2cff9b6b77b33b89f3d97a4d367747adce647e" +uuid = "6a86dc24-6348-571c-b903-95158fe2bd41" +version = "2.15.0" + +[[deps.ForwardDiff]] +deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions", "StaticArrays"] +git-tree-sha1 = "187198a4ed8ccd7b5d99c41b69c679269ea2b2d4" +uuid = 
"f6369f11-7733-5829-9624-2563aa707210" +version = "0.10.32" + +[[deps.Future]] +deps = ["Random"] +uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820" + +[[deps.Gridap]] +deps = ["AbstractTrees", "BSON", "BlockArrays", "Combinatorics", "DocStringExtensions", "FastGaussQuadrature", "FileIO", "FillArrays", "ForwardDiff", "JLD2", "JSON", "LineSearches", "LinearAlgebra", "NLsolve", "NearestNeighbors", "PolynomialBases", "QuadGK", "Random", "SparseArrays", "SparseMatricesCSR", "StaticArrays", "Test", "WriteVTK"] +git-tree-sha1 = "e66749aba5b8d2e41155c2b12dea9bc7c2a71440" +uuid = "56d4f2e9-7ea1-5844-9cf6-b9c51ca7ce8e" +version = "0.17.14" + +[[deps.GridapDistributed]] +deps = ["FillArrays", "Gridap", "LinearAlgebra", "MPI", "PartitionedArrays", "SparseArrays", "SparseMatricesCSR", "WriteVTK"] +git-tree-sha1 = "0d9de731ebf89b0d2e15a9c53b1aad1ed6e0a017" +repo-rev = "p4est-migration" +repo-url = "https://github.com/gridap/GridapDistributed.jl.git" +uuid = "f9701e48-63b3-45aa-9a63-9bc6c271f355" +version = "0.2.6" + +[[deps.GridapP4est]] +deps = ["ArgParse", "FillArrays", "Gridap", "GridapDistributed", "IterativeSolvers", "Libdl", "LinearAlgebra", "MPI", "P4est_wrapper", "PartitionedArrays", "Printf", "Test"] +git-tree-sha1 = "7448e5e64569db6138ddee945e223746195b4504" +repo-rev = "generate_mesh_hierarchy" +repo-url = "https://github.com/gridap/GridapP4est.jl.git" +uuid = "c2c8e14b-f5fd-423d-9666-1dd9ad120af9" +version = "0.1.3" + +[[deps.IntelOpenMP_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "d979e54b71da82f3a65b62553da4fc3d18c9004c" +uuid = "1d5cc7b8-4909-519e-a0f8-d0f5ad9712d0" +version = "2018.0.3+2" + +[[deps.InteractiveUtils]] +deps = ["Markdown"] +uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240" + +[[deps.InverseFunctions]] +deps = ["Test"] +git-tree-sha1 = "49510dfcb407e572524ba94aeae2fced1f3feb0f" +uuid = "3587e190-3f89-42d0-90ee-14403ec27112" +version = "0.1.8" + +[[deps.IrrationalConstants]] +git-tree-sha1 = "7fd44fd4ff43fc60815f8e764c0f352b83c49151" +uuid = "92d709cd-6900-40b7-9082-c6be49f344b6" +version = "0.1.1" + +[[deps.IterativeSolvers]] +deps = ["LinearAlgebra", "Printf", "Random", "RecipesBase", "SparseArrays"] +git-tree-sha1 = "1169632f425f79429f245113b775a0e3d121457c" +uuid = "42fd0dbc-a981-5370-80f2-aaf504508153" +version = "0.9.2" + +[[deps.JLD2]] +deps = ["FileIO", "MacroTools", "Mmap", "OrderedCollections", "Pkg", "Printf", "Reexport", "TranscodingStreams", "UUIDs"] +git-tree-sha1 = "0d0ad913e827d13c5e88a73f9333d7e33c424576" +uuid = "033835bb-8acc-5ee8-8aae-3f567f8a3819" +version = "0.4.24" + +[[deps.JLLWrappers]] +deps = ["Preferences"] +git-tree-sha1 = "abc9885a7ca2052a736a600f7fa66209f96506e1" +uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210" +version = "1.4.1" + +[[deps.JSON]] +deps = ["Dates", "Mmap", "Parsers", "Unicode"] +git-tree-sha1 = "3c837543ddb02250ef42f4738347454f95079d4e" +uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" +version = "0.21.3" + +[[deps.LazyArtifacts]] +deps = ["Artifacts", "Pkg"] +uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3" + +[[deps.LibCURL]] +deps = ["LibCURL_jll", "MozillaCACerts_jll"] +uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21" +version = "0.6.3" + +[[deps.LibCURL_jll]] +deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"] +uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0" +version = "7.84.0+0" + +[[deps.LibGit2]] +deps = ["Base64", "NetworkOptions", "Printf", "SHA"] +uuid = "76f85450-5226-5b5a-8eaa-529ad045b433" + +[[deps.LibSSH2_jll]] +deps = ["Artifacts", "Libdl", "MbedTLS_jll"] +uuid = 
"29816b5a-b9ab-546f-933c-edad1886dfa8" +version = "1.10.2+0" + +[[deps.Libdl]] +uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb" + +[[deps.Libiconv_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "42b62845d70a619f063a7da093d995ec8e15e778" +uuid = "94ce4f54-9a6c-5748-9c1c-f9c7231a4531" +version = "1.16.1+1" + +[[deps.LightXML]] +deps = ["Libdl", "XML2_jll"] +git-tree-sha1 = "e129d9391168c677cd4800f5c0abb1ed8cb3794f" +uuid = "9c8b4983-aa76-5018-a973-4c85ecc9e179" +version = "0.9.0" + +[[deps.LineSearches]] +deps = ["LinearAlgebra", "NLSolversBase", "NaNMath", "Parameters", "Printf"] +git-tree-sha1 = "7bbea35cec17305fc70a0e5b4641477dc0789d9d" +uuid = "d3d80556-e9d4-5f37-9878-2ab0fcc64255" +version = "7.2.0" + +[[deps.LinearAlgebra]] +deps = ["Libdl", "libblastrampoline_jll"] +uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" + +[[deps.LogExpFunctions]] +deps = ["ChainRulesCore", "ChangesOfVariables", "DocStringExtensions", "InverseFunctions", "IrrationalConstants", "LinearAlgebra"] +git-tree-sha1 = "94d9c52ca447e23eac0c0f074effbcd38830deb5" +uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688" +version = "0.3.18" + +[[deps.Logging]] +uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" + +[[deps.MKL_jll]] +deps = ["Artifacts", "IntelOpenMP_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "Pkg"] +git-tree-sha1 = "41d162ae9c868218b1f3fe78cba878aa348c2d26" +uuid = "856f044c-d86e-5d09-b602-aeab76dc8ba7" +version = "2022.1.0+0" + +[[deps.MPI]] +deps = ["Distributed", "DocStringExtensions", "Libdl", "MPICH_jll", "MicrosoftMPI_jll", "OpenMPI_jll", "Pkg", "Random", "Requires", "Serialization", "Sockets"] +git-tree-sha1 = "d56a80d8cf8b9dc3050116346b3d83432b1912c0" +uuid = "da04e1cc-30fd-572f-bb4f-1f8673147195" +version = "0.19.2" + +[[deps.MPICH_jll]] +deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "Pkg", "TOML"] +git-tree-sha1 = "6d4fa43afab4611d090b11617ecea1a144b21d35" +uuid = "7cb0a576-ebde-5e09-9194-50597f1243b4" +version = "4.0.2+5" + +[[deps.MPIPreferences]] +deps = ["Libdl", "Preferences"] +git-tree-sha1 = "9959c42b41220206eeda9004f695d913e2245658" +uuid = "3da0fdf6-3ccc-4f1b-acd9-58baa6c99267" +version = "0.1.5" + +[[deps.MPItrampoline_jll]] +deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "Pkg", "TOML"] +git-tree-sha1 = "b3f9e42685b4ad614eca0b44bd863cd41b1c86ea" +uuid = "f1f71cc9-e9ae-5b93-9b94-4fe0e1ad3748" +version = "5.0.2+1" + +[[deps.MacroTools]] +deps = ["Markdown", "Random"] +git-tree-sha1 = "3d3e902b31198a27340d0bf00d6ac452866021cf" +uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" +version = "0.5.9" + +[[deps.Markdown]] +deps = ["Base64"] +uuid = "d6f4376e-aef5-505a-96c1-9c027394607a" + +[[deps.MbedTLS_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1" +version = "2.28.0+0" + +[[deps.MicrosoftMPI_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "a16aa086d335ed7e0170c5265247db29172af2f9" +uuid = "9237b28f-5490-5468-be7b-bb81f5f5e6cf" +version = "10.1.3+2" + +[[deps.Mmap]] +uuid = "a63ad114-7e13-5084-954f-fe012c677804" + +[[deps.MozillaCACerts_jll]] +uuid = "14a3606d-f60d-562e-9121-12d972cd8159" +version = "2022.2.1" + +[[deps.NLSolversBase]] +deps = ["DiffResults", "Distributed", "FiniteDiff", "ForwardDiff"] +git-tree-sha1 = "50310f934e55e5ca3912fb941dec199b49ca9b68" +uuid = "d41bc354-129a-5804-8e4c-c37616107c6c" +version = "7.8.2" + +[[deps.NLsolve]] +deps = ["Distances", "LineSearches", 
"LinearAlgebra", "NLSolversBase", "Printf", "Reexport"] +git-tree-sha1 = "019f12e9a1a7880459d0173c182e6a99365d7ac1" +uuid = "2774e3e8-f4cf-5e23-947b-6d7e65073b56" +version = "4.5.1" + +[[deps.NaNMath]] +deps = ["OpenLibm_jll"] +git-tree-sha1 = "a7c3d1da1189a1c2fe843a3bfa04d18d20eb3211" +uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3" +version = "1.0.1" + +[[deps.NearestNeighbors]] +deps = ["Distances", "StaticArrays"] +git-tree-sha1 = "440165bf08bc500b8fe4a7be2dc83271a00c0716" +uuid = "b8a86587-4115-5ab1-83bc-aa920d37bbce" +version = "0.4.12" + +[[deps.NetworkOptions]] +uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908" +version = "1.2.0" + +[[deps.OpenBLAS_jll]] +deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"] +uuid = "4536629a-c528-5b80-bd46-f80d51c5b363" +version = "0.3.20+0" + +[[deps.OpenLibm_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "05823500-19ac-5b8b-9628-191a04bc5112" +version = "0.8.1+0" + +[[deps.OpenMPI_jll]] +deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "Pkg", "TOML"] +git-tree-sha1 = "346d6b357a480300ed7854dbc70e746ac52e10fd" +uuid = "fe0851c0-eecd-5654-98d4-656369965a5c" +version = "4.1.3+3" + +[[deps.OpenSpecFun_jll]] +deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "13652491f6856acfd2db29360e1bbcd4565d04f1" +uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e" +version = "0.5.5+0" + +[[deps.OrderedCollections]] +git-tree-sha1 = "85f8e6578bf1f9ee0d11e7bb1b1456435479d47c" +uuid = "bac558e1-5e72-5ebc-8fee-abe8a469f55d" +version = "1.4.1" + +[[deps.P4est_jll]] +deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl", "MPICH_jll", "MPIPreferences", "MPItrampoline_jll", "MicrosoftMPI_jll", "OpenMPI_jll", "Pkg", "TOML", "Zlib_jll"] +git-tree-sha1 = "d4b48fd3ca75a398916c58c1e4628bf0ce11a7b6" +uuid = "6b5a15aa-cf52-5330-8376-5e5d90283449" +version = "2.8.1+0" + +[[deps.P4est_wrapper]] +deps = ["CEnum", "Libdl", "MPI", "P4est_jll"] +git-tree-sha1 = "e3231d995bdaa7b00e34934c036551b66f25b4c3" +uuid = "3743d7c0-8adf-11ea-380b-7d33b0ecc1da" +version = "0.1.2" + +[[deps.Parameters]] +deps = ["OrderedCollections", "UnPack"] +git-tree-sha1 = "34c0e9ad262e5f7fc75b10a9952ca7692cfc5fbe" +uuid = "d96e819e-fc66-5662-9728-84c9c7592b0a" +version = "0.12.3" + +[[deps.Parsers]] +deps = ["Dates"] +git-tree-sha1 = "3d5bf43e3e8b412656404ed9466f1dcbf7c50269" +uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" +version = "2.4.0" + +[[deps.PartitionedArrays]] +deps = ["Distances", "IterativeSolvers", "LinearAlgebra", "MPI", "Printf", "SparseArrays", "SparseMatricesCSR"] +git-tree-sha1 = "88ff2293fd57089a4036a3056ba058ae9806111b" +uuid = "5a9dfac6-5c52-46f7-8278-5e2210713be9" +version = "0.2.10" + +[[deps.Pkg]] +deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"] +uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" +version = "1.8.0" + +[[deps.PolynomialBases]] +deps = ["ArgCheck", "FFTW", "FastGaussQuadrature", "LinearAlgebra", "Requires", "SpecialFunctions", "UnPack"] +git-tree-sha1 = "cb0ab14725f574a45b873b03934c2e57b934f7c0" +uuid = "c74db56a-226d-5e98-8bb0-a6049094aeea" +version = "0.4.13" + +[[deps.Preferences]] +deps = ["TOML"] +git-tree-sha1 = "47e5f437cc0e7ef2ce8406ce1e7e24d44915f88d" +uuid = "21216c6a-2e73-6563-6e65-726566657250" +version = "1.3.0" + +[[deps.Printf]] +deps = ["Unicode"] +uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" + +[[deps.QuadGK]] +deps = 
["DataStructures", "LinearAlgebra"] +git-tree-sha1 = "3c009334f45dfd546a16a57960a821a1a023d241" +uuid = "1fd47b50-473d-5c70-9696-f719f8f3bcdc" +version = "2.5.0" + +[[deps.REPL]] +deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"] +uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb" + +[[deps.Random]] +deps = ["SHA", "Serialization"] +uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" + +[[deps.RecipesBase]] +deps = ["SnoopPrecompile"] +git-tree-sha1 = "612a4d76ad98e9722c8ba387614539155a59e30c" +uuid = "3cdcf5f2-1ef4-517c-9805-6587b60abb01" +version = "1.3.0" + +[[deps.Reexport]] +git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b" +uuid = "189a3867-3050-52da-a836-e630ba90ab69" +version = "1.2.2" + +[[deps.Requires]] +deps = ["UUIDs"] +git-tree-sha1 = "838a3a4188e2ded87a4f9f184b4b0d78a1e91cb7" +uuid = "ae029012-a4dd-5104-9daa-d747884805df" +version = "1.3.0" + +[[deps.SHA]] +uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce" +version = "0.7.0" + +[[deps.Serialization]] +uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b" + +[[deps.Setfield]] +deps = ["ConstructionBase", "Future", "MacroTools", "StaticArraysCore"] +git-tree-sha1 = "e2cc6d8c88613c05e1defb55170bf5ff211fbeac" +uuid = "efcf1570-3423-57d1-acb7-fd33fddbac46" +version = "1.1.1" + +[[deps.SnoopPrecompile]] +git-tree-sha1 = "f604441450a3c0569830946e5b33b78c928e1a85" +uuid = "66db9d55-30c0-4569-8b51-7e840670fc0c" +version = "1.0.1" + +[[deps.Sockets]] +uuid = "6462fe0b-24de-5631-8697-dd941f90decc" + +[[deps.SparseArrays]] +deps = ["LinearAlgebra", "Random"] +uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" + +[[deps.SparseMatricesCSR]] +deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"] +git-tree-sha1 = "4870b3e7db7063927b163fb981bd579410b68b2d" +uuid = "a0a7dd2c-ebf4-11e9-1f05-cf50bc540ca1" +version = "0.6.6" + +[[deps.SpecialFunctions]] +deps = ["ChainRulesCore", "IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"] +git-tree-sha1 = "d75bda01f8c31ebb72df80a46c88b25d1c79c56d" +uuid = "276daf66-3868-5448-9aa4-cd146d93841b" +version = "2.1.7" + +[[deps.StaticArrays]] +deps = ["LinearAlgebra", "Random", "StaticArraysCore", "Statistics"] +git-tree-sha1 = "f86b3a049e5d05227b10e15dbb315c5b90f14988" +uuid = "90137ffa-7385-5640-81b9-e52037218182" +version = "1.5.9" + +[[deps.StaticArraysCore]] +git-tree-sha1 = "6b7ba252635a5eff6a0b0664a41ee140a1c9e72a" +uuid = "1e83bf80-4336-4d27-bf5d-d5a4f845583c" +version = "1.4.0" + +[[deps.Statistics]] +deps = ["LinearAlgebra", "SparseArrays"] +uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" + +[[deps.StatsAPI]] +deps = ["LinearAlgebra"] +git-tree-sha1 = "f9af7f195fb13589dd2e2d57fdb401717d2eb1f6" +uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0" +version = "1.5.0" + +[[deps.SuiteSparse]] +deps = ["Libdl", "LinearAlgebra", "Serialization", "SparseArrays"] +uuid = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9" + +[[deps.TOML]] +deps = ["Dates"] +uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76" +version = "1.0.0" + +[[deps.Tar]] +deps = ["ArgTools", "SHA"] +uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e" +version = "1.10.0" + +[[deps.Test]] +deps = ["InteractiveUtils", "Logging", "Random", "Serialization"] +uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40" + +[[deps.TextWrap]] +git-tree-sha1 = "9250ef9b01b66667380cf3275b3f7488d0e25faf" +uuid = "b718987f-49a8-5099-9789-dcd902bef87d" +version = "1.0.1" + +[[deps.TranscodingStreams]] +deps = ["Random", "Test"] +git-tree-sha1 = "8a75929dcd3c38611db2f8d08546decb514fcadf" +uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa" +version = "0.9.9" + +[[deps.UUIDs]] +deps = 
["Random", "SHA"] +uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4" + +[[deps.UnPack]] +git-tree-sha1 = "387c1f73762231e86e0c9c5443ce3b4a0a9a0c2b" +uuid = "3a884ed6-31ef-47d7-9d2a-63182c4928ed" +version = "1.0.2" + +[[deps.Unicode]] +uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5" + +[[deps.WriteVTK]] +deps = ["Base64", "CodecZlib", "FillArrays", "LightXML", "TranscodingStreams"] +git-tree-sha1 = "f50c47d715199601a54afdd5267f24c8174842ae" +uuid = "64499a7a-5c06-52f2-abe2-ccb03c286192" +version = "1.16.0" + +[[deps.XML2_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Libiconv_jll", "Pkg", "Zlib_jll"] +git-tree-sha1 = "58443b63fb7e465a8a7210828c91c08b92132dff" +uuid = "02c8fc9c-b97f-50b9-bbe4-9be30ff0a78a" +version = "2.9.14+0" + +[[deps.Zlib_jll]] +deps = ["Libdl"] +uuid = "83775a58-1f1d-513f-b197-d71354ab007a" +version = "1.2.12+3" + +[[deps.libblastrampoline_jll]] +deps = ["Artifacts", "Libdl", "OpenBLAS_jll"] +uuid = "8e850b90-86db-534c-a0d3-1478176c7d93" +version = "5.1.1+0" + +[[deps.nghttp2_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d" +version = "1.48.0+0" + +[[deps.p7zip_jll]] +deps = ["Artifacts", "Libdl"] +uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0" +version = "17.4.0+0" diff --git a/Project.toml b/Project.toml index e852d64b..92829267 100644 --- a/Project.toml +++ b/Project.toml @@ -3,6 +3,13 @@ uuid = "6d3209ee-5e3c-4db7-a716-942eb12ed534" authors = ["Santiago Badia ", "Jordi Manyer ", "Alberto F. Martin ", "Javier Principe "] version = "0.1.0" +[deps] +FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b" +Gridap = "56d4f2e9-7ea1-5844-9cf6-b9c51ca7ce8e" +GridapDistributed = "f9701e48-63b3-45aa-9a63-9bc6c271f355" +GridapP4est = "c2c8e14b-f5fd-423d-9666-1dd9ad120af9" +PartitionedArrays = "5a9dfac6-5c52-46f7-8278-5e2210713be9" + [compat] julia = "1.7" From 7d11644fbae9fbed74407f267a65935c19d4cb6b Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 5 Oct 2022 17:10:57 +1100 Subject: [PATCH 02/95] First draft of ModelHierarchy --- .gitignore | 1 + src/GridapSolvers.jl | 4 +- src/ModelHierarchies.jl | 81 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 85 insertions(+), 1 deletion(-) create mode 100644 .gitignore create mode 100644 src/ModelHierarchies.jl diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..722d5e71 --- /dev/null +++ b/.gitignore @@ -0,0 +1 @@ +.vscode diff --git a/src/GridapSolvers.jl b/src/GridapSolvers.jl index 76aaa77c..c9c6a6c2 100644 --- a/src/GridapSolvers.jl +++ b/src/GridapSolvers.jl @@ -1,5 +1,7 @@ module GridapSolvers -# Write your package code here. + using Gridap + using PartitionedArrays + using GridapDistributed end diff --git a/src/ModelHierarchies.jl b/src/ModelHierarchies.jl new file mode 100644 index 00000000..c29fa486 --- /dev/null +++ b/src/ModelHierarchies.jl @@ -0,0 +1,81 @@ + + +""" + Single level for a ModelHierarchy. + + 1. `model_red` and `red_glue` might be of type `Nothing` + whenever there is no redistribution in a given level. + 2. `ref_glue` is of type `Nothing` for the coarsest model. +""" +struct ModelHierarchyLevel{A,B,C,D} + level :: Int + model :: A + ref_glue :: B + model_red :: C + red_glue :: D +end + +# COMMENT: We could have free_model! be a method that can be implemented by the different types +# AbstractDistributedDiscreteModel instances. 
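+# For instance (sketch, assuming the GridapP4est API), a p4est-backed model could provide:
+#   free_model!(model::OctreeDistributedDiscreteModel) = octree_distributed_discrete_model_free!(model)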
+function free_model!(model::GridapDistributed.AbstractDistributedDiscreteModel) + @abstract_method +end + +function model_hierarchy_level_free!(a::ModelHierarchyLevel{A,B,Nothing,Nothing}) where {A,B} + free_model!(a.model) +end + +function model_hierarchy_level_free!(a::ModelHierarchyLevel{A,B,C,D}) where {A,B,C,D} + free_model!(a.model) + free_model!(a.model_red) +end + + +""" +""" +struct ModelHierarchy + level_parts :: Vector{PartitionedArrays.MPIData} + levels :: Vector{ModelHierarchyLevel} +end + +function model_hierarchy_free!(a::ModelHierarchy) + for level in a.levels + model_hierarchy_level_free!(level) + end +end + +num_levels(a::ModelHierarchy)= length(a.levels) +get_level(a::ModelHierarchy,level::Integer) = a.levels[level] + +get_level_model(a::ModelHierarchy,level::Integer)=get_level_model(get_level(a,level)) +get_level_model(a::ModelHierarchyLevel{A,B,Nothing}) where {A,B}=a.model +get_level_model(a::ModelHierarchyLevel{A,B,C}) where {A,B,C}=a.model_red + +get_level_model_before_redist(a::ModelHierarchy,level::Integer)= + get_level_model_before_redist(get_level(a,level)) +get_level_model_before_redist(a::ModelHierarchyLevel) where {A,B}=a.model + +# TODO: Implement support for num_refs_x_level? (future work) +function ModelHierarchy(parts,model::AbstractDistributedDiscreteModel,num_procs_x_level; num_refs_coarse_model=0, num_refs_x_level=nothing) + num_levels = length(num_procs_x_level) + level_parts = generate_level_parts(parts,num_procs_x_level) + + meshes = Vector{ModelHierarchyLevel}(undef,num_levels) + meshes[num_levels] = ModelHierarchyLevel(num_levels,model,nothing,nothing,nothing) + + for i=num_levels-1:-1:1 + modelH = get_level_model(meshes[i+1]) + if (num_procs_x_level[i]!=num_procs_x_level[i+1]) + # meshes[i+1].model is distributed among P processors + # model_ref is distributed among Q processors, with P!=Q + model_ref,ref_glue = refine(modelH,level_parts[i]) + model_red,red_glue = redistribute(model_ref) + else + model_ref,ref_glue = refine(modelH) + model_red,red_glue = nothing,nothing + end + meshes[i] = ModelHierarchyLevel(i,model_ref,ref_glue,model_red,red_glue) + end + + return ModelHierarchy(level_parts,meshes) +end \ No newline at end of file From 6fa137eccda910cc7109882e34bab666517d9e9a Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 6 Oct 2022 09:02:42 +1100 Subject: [PATCH 03/95] Scope fixes --- src/GridapSolvers.jl | 5 +++++ src/ModelHierarchies.jl | 4 ++-- 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/src/GridapSolvers.jl b/src/GridapSolvers.jl index c9c6a6c2..9a3150eb 100644 --- a/src/GridapSolvers.jl +++ b/src/GridapSolvers.jl @@ -1,7 +1,12 @@ module GridapSolvers using Gridap + using Gridap.Helpers using PartitionedArrays using GridapDistributed + export ModelHierarchy + + include("ModelHierarchies.jl") + end diff --git a/src/ModelHierarchies.jl b/src/ModelHierarchies.jl index c29fa486..26cc290d 100644 --- a/src/ModelHierarchies.jl +++ b/src/ModelHierarchies.jl @@ -18,7 +18,7 @@ end # COMMENT: We could have free_model! be a method that can be implemented by the different types # AbstractDistributedDiscreteModel instances. 
function free_model!(model::GridapDistributed.AbstractDistributedDiscreteModel) - @abstract_method + @abstractmethod end function model_hierarchy_level_free!(a::ModelHierarchyLevel{A,B,Nothing,Nothing}) where {A,B} @@ -56,7 +56,7 @@ get_level_model_before_redist(a::ModelHierarchy,level::Integer)= get_level_model_before_redist(a::ModelHierarchyLevel) where {A,B}=a.model # TODO: Implement support for num_refs_x_level? (future work) -function ModelHierarchy(parts,model::AbstractDistributedDiscreteModel,num_procs_x_level; num_refs_coarse_model=0, num_refs_x_level=nothing) +function ModelHierarchy(parts,model::GridapDistributed.AbstractDistributedDiscreteModel,num_procs_x_level; num_refs_coarse_model=0, num_refs_x_level=nothing) num_levels = length(num_procs_x_level) level_parts = generate_level_parts(parts,num_procs_x_level) From 1ce2c0e3be4ddde96f0a4cf62a005bfddd933a54 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 6 Oct 2022 09:32:22 +1100 Subject: [PATCH 04/95] Added some extensions to PartitionedArrays --- Manifest.toml | 2 +- Project.toml | 1 + src/GridapSolvers.jl | 4 ++ src/ModelHierarchies.jl | 23 +++++-- src/PartitionedArraysExtensions.jl | 106 +++++++++++++++++++++++++++++ 5 files changed, 128 insertions(+), 8 deletions(-) create mode 100644 src/PartitionedArraysExtensions.jl diff --git a/Manifest.toml b/Manifest.toml index e706583f..a61cb0e7 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -2,7 +2,7 @@ julia_version = "1.8.1" manifest_format = "2.0" -project_hash = "4c2033753bdd94d2611b146aec7777ca1783803f" +project_hash = "5ac9e17a19d8ac73982456bbee25de06e349e104" [[deps.AbstractFFTs]] deps = ["ChainRulesCore", "LinearAlgebra"] diff --git a/Project.toml b/Project.toml index 92829267..16c6c2b0 100644 --- a/Project.toml +++ b/Project.toml @@ -8,6 +8,7 @@ FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b" Gridap = "56d4f2e9-7ea1-5844-9cf6-b9c51ca7ce8e" GridapDistributed = "f9701e48-63b3-45aa-9a63-9bc6c271f355" GridapP4est = "c2c8e14b-f5fd-423d-9666-1dd9ad120af9" +MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195" PartitionedArrays = "5a9dfac6-5c52-46f7-8278-5e2210713be9" [compat] diff --git a/src/GridapSolvers.jl b/src/GridapSolvers.jl index 9a3150eb..53dd355e 100644 --- a/src/GridapSolvers.jl +++ b/src/GridapSolvers.jl @@ -1,12 +1,16 @@ module GridapSolvers + using MPI using Gridap using Gridap.Helpers using PartitionedArrays using GridapDistributed + + export ModelHierarchy + include("PartitionedArraysExtensions.jl") include("ModelHierarchies.jl") end diff --git a/src/ModelHierarchies.jl b/src/ModelHierarchies.jl index 26cc290d..1aeb7a1b 100644 --- a/src/ModelHierarchies.jl +++ b/src/ModelHierarchies.jl @@ -47,16 +47,25 @@ end num_levels(a::ModelHierarchy)= length(a.levels) get_level(a::ModelHierarchy,level::Integer) = a.levels[level] -get_level_model(a::ModelHierarchy,level::Integer)=get_level_model(get_level(a,level)) -get_level_model(a::ModelHierarchyLevel{A,B,Nothing}) where {A,B}=a.model -get_level_model(a::ModelHierarchyLevel{A,B,C}) where {A,B,C}=a.model_red +get_level_model(a::ModelHierarchy,level::Integer) = get_level_model(get_level(a,level)) +get_level_model(a::ModelHierarchyLevel{A,B,Nothing}) where {A,B} = a.model +get_level_model(a::ModelHierarchyLevel{A,B,C}) where {A,B,C} = a.model_red -get_level_model_before_redist(a::ModelHierarchy,level::Integer)= +get_level_model_before_redist(a::ModelHierarchy,level::Integer) = get_level_model_before_redist(get_level(a,level)) -get_level_model_before_redist(a::ModelHierarchyLevel) where {A,B}=a.model 
+get_level_model_before_redist(a::ModelHierarchyLevel) = a.model -# TODO: Implement support for num_refs_x_level? (future work) -function ModelHierarchy(parts,model::GridapDistributed.AbstractDistributedDiscreteModel,num_procs_x_level; num_refs_coarse_model=0, num_refs_x_level=nothing) +""" + ModelHierarchy(parts,model,num_procs_x_level;num_refs_x_level) + + - `model`: Initial refinable distributed model. Will be set as coarsest level. + + - `num_procs_x_level`: Vector containing the number of processors we want to distribute + each level into. We need `num_procs_x_level[end]` to be equal to + the number of parts of `model`. +""" +function ModelHierarchy(parts,model::GridapDistributed.AbstractDistributedDiscreteModel,num_procs_x_level::Vector{Int}; num_refs_x_level=nothing) + # TODO: Implement support for num_refs_x_level? (future work) num_levels = length(num_procs_x_level) level_parts = generate_level_parts(parts,num_procs_x_level) diff --git a/src/PartitionedArraysExtensions.jl b/src/PartitionedArraysExtensions.jl new file mode 100644 index 00000000..5f8e32f7 --- /dev/null +++ b/src/PartitionedArraysExtensions.jl @@ -0,0 +1,106 @@ + +function PartitionedArrays.num_parts(parts::PartitionedArrays.MPIData) + num_parts(parts.comm) +end + +function PartitionedArrays.num_parts(comm::MPI.Comm) + if comm != MPI.COMM_NULL + nparts = MPI.Comm_size(comm) + else + nparts = -1 + end + nparts +end + +function PartitionedArrays.get_part_id(comm::MPI.Comm) + if comm != MPI.COMM_NULL + id = MPI.Comm_rank(comm)+1 + else + id = -1 + end + id +end + +function i_am_in(comm::MPI.Comm) + PartitionedArrays.get_part_id(comm) >=0 +end + +function i_am_in(parts) + i_am_in(parts.comm) +end + +function PartitionedArrays.get_part_ids(comm::MPI.Comm) + rank = PartitionedArrays.get_part_id(comm) + nparts = PartitionedArrays.num_parts(comm) + PartitionedArrays.MPIData(rank,comm,(nparts,)) +end + +function PartitionedArrays.get_part_ids(b::MPIBackend,nparts::Union{Int,NTuple{N,Int} where N}) + root_comm = MPI.Comm_dup(MPI.COMM_WORLD) + size = MPI.Comm_size(root_comm) + rank = MPI.Comm_rank(root_comm) + need = prod(nparts) + if size < need + throw("Not enough MPI ranks, please run mpiexec with -n $need (at least)") + elseif size > need + if rank < need + comm = MPI.Comm_split(root_comm, 0, 0) + MPIData(PartitionedArrays.get_part_id(comm),comm,Tuple(nparts)) + else + comm = MPI.Comm_split(root_comm, MPI.MPI_UNDEFINED, MPI.MPI_UNDEFINED) + MPIData(PartitionedArrays.get_part_id(comm),comm,(-1,)) + end + else + comm = root_comm + MPIData(PartitionedArrays.get_part_id(comm),comm,Tuple(nparts)) + end +end + +function PartitionedArrays.prun(driver::Function,b::MPIBackend,nparts::Union{Int,NTuple{N,Int} where N},args...;kwargs...) + if !MPI.Initialized() + MPI.Init() + end + if MPI.Comm_size(MPI.COMM_WORLD) == 1 + part = get_part_ids(b,nparts) + driver(part,args...;kwargs...) + else + try + part = get_part_ids(b,nparts) + if i_am_in(part) + driver(part,args...;kwargs...) 
+ end + catch e + @error "" exception=(e, catch_backtrace()) + if MPI.Initialized() && !MPI.Finalized() + MPI.Abort(MPI.COMM_WORLD,1) + end + end + end + # We are NOT invoking MPI.Finalize() here because we rely on + # MPI.jl, which registers MPI.Finalize() in atexit() +end + +function generate_level_parts(parts,num_procs_x_level) + root_comm = parts.comm + rank = MPI.Comm_rank(root_comm) + size = MPI.Comm_size(root_comm) + Gridap.Helpers.@check all(num_procs_x_level .<= size) + Gridap.Helpers.@check all(num_procs_x_level .>= 1) + + num_levels = length(num_procs_x_level) + level_parts = Vector{typeof(parts)}(undef,num_levels) + for l = 1:num_levels + lsize = num_procs_x_level[l] + if l>1 && lsize==num_procs_x_level[l-1] + level_parts[l] = level_parts[l-1] + else + if rank < lsize + comm = MPI.Comm_split(root_comm, 0, 0) + else + comm = MPI.Comm_split(root_comm, MPI.MPI_UNDEFINED, MPI.MPI_UNDEFINED) + end + level_parts[l] = get_part_ids(comm) + end + end + return level_parts +end \ No newline at end of file From a494a37752bd087f575295deaf7ebb69c24397b3 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 6 Oct 2022 09:41:58 +1100 Subject: [PATCH 05/95] Now tracking GridapP4est#p4est-migration --- Manifest.toml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Manifest.toml b/Manifest.toml index a61cb0e7..d719fc70 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -218,8 +218,8 @@ version = "0.2.6" [[deps.GridapP4est]] deps = ["ArgParse", "FillArrays", "Gridap", "GridapDistributed", "IterativeSolvers", "Libdl", "LinearAlgebra", "MPI", "P4est_wrapper", "PartitionedArrays", "Printf", "Test"] -git-tree-sha1 = "7448e5e64569db6138ddee945e223746195b4504" -repo-rev = "generate_mesh_hierarchy" +git-tree-sha1 = "29dd610eb64c974e29d1f80dcc797eb40daaa6fa" +repo-rev = "p4est-migration" repo-url = "https://github.com/gridap/GridapP4est.jl.git" uuid = "c2c8e14b-f5fd-423d-9666-1dd9ad120af9" version = "0.1.3" From f7c0d8bfada2e599600fa06cb9cbcbd2b646971e Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 6 Oct 2022 10:34:56 +1100 Subject: [PATCH 06/95] ModelHierarchy tests working --- Manifest.toml | 12 +++---- Project.toml | 1 + src/GridapSolvers.jl | 2 ++ src/ModelHierarchies.jl | 15 +++++---- test/mpi/ModelHierarchiesTests.jl | 28 ++++++++++++++++ test/runtests.jl | 55 +++++++++++++++++++++++++++++-- 6 files changed, 99 insertions(+), 14 deletions(-) create mode 100644 test/mpi/ModelHierarchiesTests.jl diff --git a/Manifest.toml b/Manifest.toml index d719fc70..43326e83 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -2,7 +2,7 @@ julia_version = "1.8.1" manifest_format = "2.0" -project_hash = "5ac9e17a19d8ac73982456bbee25de06e349e104" +project_hash = "b04342400b43974a6b57ec92c1d69804f8b5dc91" [[deps.AbstractFFTs]] deps = ["ChainRulesCore", "LinearAlgebra"] @@ -218,7 +218,7 @@ version = "0.2.6" [[deps.GridapP4est]] deps = ["ArgParse", "FillArrays", "Gridap", "GridapDistributed", "IterativeSolvers", "Libdl", "LinearAlgebra", "MPI", "P4est_wrapper", "PartitionedArrays", "Printf", "Test"] -git-tree-sha1 = "29dd610eb64c974e29d1f80dcc797eb40daaa6fa" +git-tree-sha1 = "107dc7701cde83db6ee33cad6426acaeee92eba5" repo-rev = "p4est-migration" repo-url = "https://github.com/gridap/GridapP4est.jl.git" uuid = "c2c8e14b-f5fd-423d-9666-1dd9ad120af9" @@ -358,9 +358,9 @@ version = "5.0.2+1" [[deps.MacroTools]] deps = ["Markdown", "Random"] -git-tree-sha1 = "3d3e902b31198a27340d0bf00d6ac452866021cf" +git-tree-sha1 = "42324d08725e200c23d4dfb549e0d5d89dede2d2" uuid = 
"1914dd2f-81c6-5fcd-8719-6d5c9610ff09" -version = "0.5.9" +version = "0.5.10" [[deps.Markdown]] deps = ["Base64"] @@ -465,9 +465,9 @@ version = "2.4.0" [[deps.PartitionedArrays]] deps = ["Distances", "IterativeSolvers", "LinearAlgebra", "MPI", "Printf", "SparseArrays", "SparseMatricesCSR"] -git-tree-sha1 = "88ff2293fd57089a4036a3056ba058ae9806111b" +git-tree-sha1 = "94291b7ddeac39816572660383055870b41bca64" uuid = "5a9dfac6-5c52-46f7-8278-5e2210713be9" -version = "0.2.10" +version = "0.2.11" [[deps.Pkg]] deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"] diff --git a/Project.toml b/Project.toml index 16c6c2b0..7b699574 100644 --- a/Project.toml +++ b/Project.toml @@ -4,6 +4,7 @@ authors = ["Santiago Badia ", "Jordi Manyer Date: Thu, 6 Oct 2022 13:24:41 +1100 Subject: [PATCH 07/95] Fixes to CI.yml --- .github/workflows/CI.yml | 53 ++++++++++++++++++++++++++++++++++------ 1 file changed, 46 insertions(+), 7 deletions(-) diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index c5f8f3f5..64c74e48 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -14,29 +14,68 @@ jobs: test: name: Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }} runs-on: ${{ matrix.os }} + env: + JULIA_MPI_BINARY: "system" + P4EST_ROOT_DIR: "/opt/p4est/2.2/" strategy: fail-fast: false matrix: version: - '1.7' - - 'nightly' os: - ubuntu-latest arch: - x64 steps: - uses: actions/checkout@v2 + - name: Cache p4est + id: cache-p4est + uses: actions/cache@v2 + with: + path: ${{env.P4EST_ROOT_DIR}} + key: ${{ runner.os }}-build-${{ env.P4EST_ROOT_DIR }}- + restore-keys: | + ${{ runner.os }}-build-${{ env.P4EST_ROOT_DIR }}- + ${{ runner.os }}-build- + ${{ runner.os }}- - uses: julia-actions/setup-julia@v1 with: version: ${{ matrix.version }} arch: ${{ matrix.arch }} - - uses: julia-actions/cache@v1 - - uses: julia-actions/julia-buildpkg@v1 - - uses: julia-actions/julia-runtest@v1 - - uses: julia-actions/julia-processcoverage@v1 - - uses: codecov/codecov-action@v2 + - name: Install p4est/petsc dependencies + run: | + sudo apt-get update + sudo apt-get install -y wget gfortran g++ openmpi-bin libopenmpi-dev + - name: Install p4est + if: steps.cache-p4est.outputs.cache-hit != 'true' + run: | + # Install p4est 2.2 from sources + CURR_DIR=$(pwd) + PACKAGE=p4est + VERSION=2.2 + INSTALL_ROOT=/opt + P4EST_INSTALL=$INSTALL_ROOT/$PACKAGE/$VERSION + TAR_FILE=$PACKAGE-$VERSION.tar.gz + URL="https://github.com/p4est/p4est.github.io/raw/master/release" + ROOT_DIR=/tmp + SOURCES_DIR=$ROOT_DIR/$PACKAGE-$VERSION + BUILD_DIR=$SOURCES_DIR/build + wget -q $URL/$TAR_FILE -O $ROOT_DIR/$TAR_FILE + mkdir -p $SOURCES_DIR + tar xzf $ROOT_DIR/$TAR_FILE -C $SOURCES_DIR --strip-components=1 + cd $SOURCES_DIR + ./configure --prefix=$P4EST_INSTALL --without-blas --without-lapack --enable-mpi -disable-dependency-tracking + make --quiet + make --quiet install + rm -rf $ROOT_DIR/$TAR_FILE $SOURCES_DIR + cd $CURR_DIR + - uses: julia-actions/julia-buildpkg@latest + - run: echo $PWD + - run: julia --project=. --color=yes -e 'using Pkg; Pkg.instantiate()' + - run: julia --project=. 
--color=yes --check-bounds=yes test/runtests.jl + - uses: codecov/codecov-action@v1 with: - files: lcov.info + file: lcov.info docs: name: Documentation runs-on: ubuntu-latest From 0e5f03dfb34558cf953a2b4a95c01f5fe99580d3 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Mon, 17 Oct 2022 16:35:41 +1100 Subject: [PATCH 08/95] Changed variable name --- src/ModelHierarchies.jl | 10 ++++------ 1 file changed, 4 insertions(+), 6 deletions(-) diff --git a/src/ModelHierarchies.jl b/src/ModelHierarchies.jl index 5265222a..923a4bc3 100644 --- a/src/ModelHierarchies.jl +++ b/src/ModelHierarchies.jl @@ -42,9 +42,7 @@ struct ModelHierarchy end function model_hierarchy_free!(a::ModelHierarchy) - for level in a.levels - model_hierarchy_level_free!(level) - end + map(model_hierarchy_level_free!,a.levels) end num_levels(a::ModelHierarchy)= length(a.levels) @@ -67,15 +65,15 @@ get_level_model_before_redist(a::ModelHierarchyLevel) = a.model each level into. We need `num_procs_x_level[end]` to be equal to the number of parts of `model`. """ -function ModelHierarchy(parts,model::GridapDistributed.AbstractDistributedDiscreteModel,num_procs_x_level::Vector{Int}; num_refs_x_level=nothing) +function ModelHierarchy(parts,coarsest_model::GridapDistributed.AbstractDistributedDiscreteModel,num_procs_x_level::Vector{Int}; num_refs_x_level=nothing) # TODO: Implement support for num_refs_x_level? (future work) num_levels = length(num_procs_x_level) level_parts = generate_level_parts(parts,num_procs_x_level) meshes = Vector{ModelHierarchyLevel}(undef,num_levels) - meshes[num_levels] = ModelHierarchyLevel(num_levels,model,nothing,nothing,nothing) + meshes[num_levels] = ModelHierarchyLevel(num_levels,coarsest_model,nothing,nothing,nothing) - for i=num_levels-1:-1:1 + for i = num_levels-1:-1:1 modelH = get_level_model(meshes[i+1]) if (num_procs_x_level[i]!=num_procs_x_level[i+1]) # meshes[i+1].model is distributed among P processors From 5b6cbe2bcc9d68c62fc5ecb39f66c4142a54c362 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Mon, 17 Oct 2022 16:38:02 +1100 Subject: [PATCH 09/95] MPIData changed by AbstractPData --- src/ModelHierarchies.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/ModelHierarchies.jl b/src/ModelHierarchies.jl index 923a4bc3..cd3da9ca 100644 --- a/src/ModelHierarchies.jl +++ b/src/ModelHierarchies.jl @@ -37,7 +37,7 @@ end """ """ struct ModelHierarchy - level_parts :: Vector{PartitionedArrays.MPIData} + level_parts :: Vector{PartitionedArrays.AbstractPData} levels :: Vector{ModelHierarchyLevel} end From 5bb325a42873e567c2d1243b2a0b6ee335a1b0bc Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Mon, 17 Oct 2022 16:58:10 +1100 Subject: [PATCH 10/95] Removed free_model() --- src/GridapSolvers.jl | 1 - src/ModelHierarchies.jl | 25 +------------------------ 2 files changed, 1 insertion(+), 25 deletions(-) diff --git a/src/GridapSolvers.jl b/src/GridapSolvers.jl index 4c1df6f4..1be60c04 100644 --- a/src/GridapSolvers.jl +++ b/src/GridapSolvers.jl @@ -10,7 +10,6 @@ module GridapSolvers export ModelHierarchy - export model_hierarchy_free! 
include("PartitionedArraysExtensions.jl") include("ModelHierarchies.jl") diff --git a/src/ModelHierarchies.jl b/src/ModelHierarchies.jl index cd3da9ca..c5dc4f56 100644 --- a/src/ModelHierarchies.jl +++ b/src/ModelHierarchies.jl @@ -15,25 +15,6 @@ struct ModelHierarchyLevel{A,B,C,D} red_glue :: D end -function model_hierarchy_level_free!(a::ModelHierarchyLevel{A,B,Nothing,Nothing}) where {A,B} - free_model!(a.model) -end - -function model_hierarchy_level_free!(a::ModelHierarchyLevel{A,B,C,D}) where {A,B,C,D} - free_model!(a.model) - free_model!(a.model_red) -end - -# COMMENT: We could have free_model! be a method that can be implemented by the different types -# AbstractDistributedDiscreteModel instances. -function free_model!(model::GridapDistributed.AbstractDistributedDiscreteModel) - @abstractmethod -end - -function free_model!(model::OctreeDistributedDiscreteModel) - octree_distributed_discrete_model_free!(model) -end - """ """ struct ModelHierarchy @@ -41,11 +22,7 @@ struct ModelHierarchy levels :: Vector{ModelHierarchyLevel} end -function model_hierarchy_free!(a::ModelHierarchy) - map(model_hierarchy_level_free!,a.levels) -end - -num_levels(a::ModelHierarchy)= length(a.levels) +num_levels(a::ModelHierarchy) = length(a.levels) get_level(a::ModelHierarchy,level::Integer) = a.levels[level] get_level_model(a::ModelHierarchy,level::Integer) = get_level_model(get_level(a,level)) From 695a38d8e6f297986544bc85ec0e20fa317f9d42 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 19 Oct 2022 17:27:09 +1100 Subject: [PATCH 11/95] Redistribution functions --- src/RedistributeTools.jl | 185 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 185 insertions(+) create mode 100644 src/RedistributeTools.jl diff --git a/src/RedistributeTools.jl b/src/RedistributeTools.jl new file mode 100644 index 00000000..a9f1b300 --- /dev/null +++ b/src/RedistributeTools.jl @@ -0,0 +1,185 @@ +function _allocate_cell_wise_dofs(cell_to_ldofs) + map_parts(cell_to_ldofs) do cell_to_ldofs + cache = array_cache(cell_to_ldofs) + ncells = length(cell_to_ldofs) + ptrs = Vector{Int32}(undef,ncells+1) + for cell in 1:ncells + ldofs = getindex!(cache,cell_to_ldofs,cell) + ptrs[cell+1] = length(ldofs) + end + PArrays.length_to_ptrs!(ptrs) + ndata = ptrs[end]-1 + data = Vector{Float64}(undef,ndata) + PArrays.Table(data,ptrs) + end +end + +function _update_cell_dof_values_with_local_info!(cell_dof_values_new, + cell_dof_values_old, + new2old) + map_parts(cell_dof_values_new, + cell_dof_values_old, + new2old) do cell_dof_values_new,cell_dof_values_old,new2old + ocache = array_cache(cell_dof_values_old) + for (ncell,ocell) in enumerate(new2old) + if ocell!=0 + # Copy ocell to ncell + oentry = getindex!(ocache,cell_dof_values_old,ocell) + range = cell_dof_values_new.ptrs[ncell]:cell_dof_values_new.ptrs[ncell+1]-1 + cell_dof_values_new.data[range] .= oentry + end + end + end +end + +function allocate_comm_data(num_dofs_x_cell,lids) + map_parts(num_dofs_x_cell,lids) do num_dofs_x_cell,lids + n = length(lids) + ptrs = Vector{Int32}(undef,n+1) + ptrs.= 0 + for i = 1:n + for j = lids.ptrs[i]:lids.ptrs[i+1]-1 + ptrs[i+1] = ptrs[i+1] + num_dofs_x_cell.data[j] + end + end + PArrays.length_to_ptrs!(ptrs) + ndata = ptrs[end]-1 + data = Vector{Float64}(undef,ndata) + PArrays.Table(data,ptrs) + end +end + +function pack_snd_data!(snd_data,cell_dof_values,snd_lids) + map_parts(snd_data,cell_dof_values,snd_lids) do snd_data,cell_dof_values,snd_lids + cache = array_cache(cell_dof_values) + s = 1 + for i = 1:length(snd_lids) + for j = 
snd_lids.ptrs[i]:snd_lids.ptrs[i+1]-1 + cell = snd_lids.data[j] + ldofs = getindex!(cache,cell_dof_values,cell) + + e = s+length(ldofs)-1 + range = s:e + snd_data.data[range] .= ldofs + s = e+1 + end + end + end +end + +function unpack_rcv_data!(cell_dof_values,rcv_data,rcv_lids) + map_parts(cell_dof_values,rcv_data,rcv_lids) do cell_dof_values,rcv_data,rcv_lids + s = 1 + for i = 1:length(rcv_lids.ptrs)-1 + for j = rcv_lids.ptrs[i]:rcv_lids.ptrs[i+1]-1 + cell = rcv_lids.data[j] + range_cell_dof_values = cell_dof_values.ptrs[cell]:cell_dof_values.ptrs[cell+1]-1 + + e = s+length(range_cell_dof_values)-1 + range_rcv_data = s:e + cell_dof_values.data[range_cell_dof_values] .= rcv_data.data[range_rcv_data] + s = e+1 + end + end + end +end + +function get_glue_components(glue::GridapDistributed.RedistributeGlue,reverse::Val{false}) + return glue.lids_rcv, glue.lids_snd, glue.parts_rcv, glue.parts_snd, glue.new2old +end + +function get_glue_components(glue::GridapDistributed.RedistributeGlue,reverse::Val{true}) + return glue.lids_snd, glue.lids_rcv, glue.parts_snd, glue.parts_rcv, glue.old2new +end + +function num_dofs_x_cell(cell_dofs_array,lids) + map_parts(cell_dofs_array,lids) do cell_dofs_array, lids + data = [length(cell_dofs_array[i]) for i = 1:length(cell_dofs_array) ] + PArrays.Table(data,lids.ptrs) + end +end + + +function redistribute_cell_dofs(cell_dof_values_old::GridapDistributed.DistributedCellDatum, + Uh_new::GridapDistributed.DistributedSingleFieldFESpace, + model_new, + glue::GridapDistributed.RedistributeGlue; + reverse=false) + + lids_rcv, lids_snd, parts_rcv, parts_snd, new2old = get_glue_components(glue,Val(reverse)) + cell_dof_ids_new = map_parts(get_cell_dof_ids, Uh_new.spaces) + + num_dofs_x_cell_snd = num_dofs_x_cell(cell_dof_values_old, lids_snd) + num_dofs_x_cell_rcv = num_dofs_x_cell(cell_dof_ids_new, lids_rcv) + snd_data = allocate_comm_data(num_dofs_x_cell_snd, lids_snd) + rcv_data = allocate_comm_data(num_dofs_x_cell_rcv, lids_rcv) + + pack_snd_data!(snd_data,cell_dof_values_old,lids_snd) + + tout = async_exchange!(rcv_data, + snd_data, + parts_rcv, + parts_snd, + PArrays._empty_tasks(parts_rcv)) + map_parts(schedule,tout) + + cell_dof_values_new = _allocate_cell_wise_dofs(cell_dof_ids_new) + + # We have to build the owned part of "cell_dof_values_new" out of + # 1. cell_dof_values_old (for those cells s.t. new2old[:]!=0) + # 2. cell_dof_values_new_rcv (for those cells s.t. new2old[:]=0) + _update_cell_dof_values_with_local_info!(cell_dof_values_new, + cell_dof_values_old, + new2old) + + map_parts(wait,tout) + unpack_rcv_data!(cell_dof_values_new,rcv_data,lids_rcv) + + # Why are we exchanging something that has already been exchanged? 
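+  # (Presumably yes: the point-to-point exchange above only fills the owned cells,
+  #  while this one updates the ghost layer of the new partition.)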
+ fgids = get_cell_gids(model_new) + exchange!(cell_dof_values_new,fgids.exchanger) + + return cell_dof_values_new +end + +function redistribute_free_values!(fv_new::PVector, + Uh_new::GridapDistributed.DistributedSingleFieldFESpace, + fv_old::PVector, + Uh_old::GridapDistributed.DistributedSingleFieldFESpace, + model_new, + glue::GridapDistributed.RedistributeGlue; + reverse=false) + + uh_old = FEFunction(Uh_old,fv_old) + cell_dof_values_old = map_parts(get_cell_dof_values,uh_old.fields) + cell_dof_values_new = redistribute_cell_dofs(cell_dof_values_old,Uh_new,model_new,glue;reverse=reverse) + + # Assemble the new FEFunction + Gridap.FESpaces.gather_free_values!(fv_new, Uh_new.spaces,cell_dof_values_new) + return fv_new +end + + +function redistribute_fe_function(uh_old::GridapDistributed.DistributedSingleFieldFEFunction, + Uh_new::GridapDistributed.DistributedSingleFieldFESpace, + model_new, + glue::GridapDistributed.RedistributeGlue; + reverse=false) + + cell_dof_values_old = map_parts(get_cell_dof_values,uh_old.fields) + cell_dof_values_new = redistribute_cell_dofs(cell_dof_values_old,Uh_new,model_new,glue;reverse=reverse) + + # Assemble the new FEFunction + free_values, dirichlet_values = Gridap.FESpaces.gather_free_and_dirichlet_values(Uh_new.spaces,cell_dof_values_new) + free_values = PVector(free_values,Uh_new.gids) + uh_new = FEFunction(Uh_new,free_values,dirichlet_values) + return uh_new +end + + +function Gridap.FESpaces.gather_free_and_dirichlet_values(f::Gridap.Distributed.AbstractDistributedFESpace,cv) + free_values, dirichlet_values = map_parts(local_views(f),cv) do f, cv + Gridap.FESpaces.gather_free_and_dirichlet_values(f,cv) + end + return free_values, dirichlet_values +end \ No newline at end of file From 15f3e4d89aec17b95a48f48a2ec74f1c1a8ba388 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 19 Oct 2022 17:28:56 +1100 Subject: [PATCH 12/95] Added LinearAlgebra to Project.toml --- Manifest.toml | 2 +- Project.toml | 1 + src/GridapSolvers.jl | 1 + 3 files changed, 3 insertions(+), 1 deletion(-) diff --git a/Manifest.toml b/Manifest.toml index 43326e83..93f9919b 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -2,7 +2,7 @@ julia_version = "1.8.1" manifest_format = "2.0" -project_hash = "b04342400b43974a6b57ec92c1d69804f8b5dc91" +project_hash = "ed5df3dccfd63f0f6cd0890dd8cf640652010af8" [[deps.AbstractFFTs]] deps = ["ChainRulesCore", "LinearAlgebra"] diff --git a/Project.toml b/Project.toml index 7b699574..f4b20679 100644 --- a/Project.toml +++ b/Project.toml @@ -9,6 +9,7 @@ FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b" Gridap = "56d4f2e9-7ea1-5844-9cf6-b9c51ca7ce8e" GridapDistributed = "f9701e48-63b3-45aa-9a63-9bc6c271f355" GridapP4est = "c2c8e14b-f5fd-423d-9666-1dd9ad120af9" +LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195" PartitionedArrays = "5a9dfac6-5c52-46f7-8278-5e2210713be9" diff --git a/src/GridapSolvers.jl b/src/GridapSolvers.jl index 1be60c04..9b964a80 100644 --- a/src/GridapSolvers.jl +++ b/src/GridapSolvers.jl @@ -1,6 +1,7 @@ module GridapSolvers using MPI + using LinearAlgebra using Gridap using Gridap.Helpers using PartitionedArrays From 5ee89d632a1c2ed1634db6776447007571456390 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 20 Oct 2022 10:43:51 +1100 Subject: [PATCH 13/95] Added InterGridTransferOperators --- src/FESpaceHierarchies.jl | 95 ++++++++++++++++++++++++++++ src/InterGridTransferOperators.jl | 100 ++++++++++++++++++++++++++++++ src/ModelHierarchies.jl | 30 ++++++--- 3 files 
changed, 215 insertions(+), 10 deletions(-) create mode 100644 src/FESpaceHierarchies.jl create mode 100644 src/InterGridTransferOperators.jl diff --git a/src/FESpaceHierarchies.jl b/src/FESpaceHierarchies.jl new file mode 100644 index 00000000..85db0a77 --- /dev/null +++ b/src/FESpaceHierarchies.jl @@ -0,0 +1,95 @@ +struct FESpaceHierarchyLevel{A,B} + level :: Int + fe_space :: A + fe_space_red :: B +end + +struct FESpaceHierarchy + mh :: ModelHierarchy + levels :: Vector{FESpaceHierarchyLevel} +end + + +function Base.getindex(fh::FESpaceHierarchy,level::Integer) + fh.levels[level] +end + +function Base.length(fh::FESpaceHierarchy) + length(fh.levels) +end + +function num_levels(fh::FESpaceHierarchy) + length(fh) +end + +function get_space(a::FESpaceHierarchyLevel{A,Nothing}) where {A} + a.fe_space +end + +function get_space(a::FESpaceHierarchyLevel{A,B}) where {A,B} + a.fe_space_red +end + +function get_space_before_redist(a::FESpaceHierarchyLevel) + a.fe_space +end + +function Gridap.FESpaces.TestFESpace( + mh::ModelHierarchyLevel{A,B,Nothing},args...;kwargs...) where {A,B} + Vh = TestFESpace(get_model(mh.model),args...;kwargs...) + FESpaceHierarchyLevel(mh.level,Vh,nothing) +end + +function Gridap.FESpaces.TestFESpace( + mh::ModelHierarchyLevel{A,B,C},args...;kwargs...) where {A,B,C} + Vh = TestFESpace(get_model(mh.model),args...;kwargs...) + Vh_red = TestFESpace(get_model(mh.model_red),args...;kwargs...) + FESpaceHierarchyLevel(mh.level,Vh,Vh_red) +end + +function Gridap.FESpaces.TrialFESpace(u,a::FESpaceHierarchyLevel{A,Nothing}) where {A} + Uh = TrialFESpace(u,a.fe_space) + FESpaceHierarchyLevel(a.level,Uh,nothing) +end + +function Gridap.FESpaces.TrialFESpace(u,a::FESpaceHierarchyLevel{A,B}) where {A,B} + Uh = TrialFESpace(u,a.fe_space) + Uh_red = TrialFESpace(u,a.fe_space_red) + FESpaceHierarchyLevel(a.level,Uh,Uh_red) +end + +function Gridap.FESpaces.TestFESpace(mh::ModelHierarchy,args...;kwargs...) where {A,B} + test_spaces = Vector{FESpaceHierarchyLevel}(undef,num_levels(mh)) + for i=1:num_levels(mh) + model = get_model(mh,i) + if (GridapP4est.i_am_in(model.parts)) + Vh = TestFESpace(get_level(mh,i),args...;kwargs...) 
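+      # get_level(mh,i) returns a ModelHierarchyLevel, so this call dispatches to the
+      # level-wise constructors above (which cover redistributed models).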
+ test_spaces[i] = Vh + end + end + FESpaceHierarchy(mh,test_spaces) +end + +function Gridap.FESpaces.TrialFESpace(u,a::FESpaceHierarchy) + trial_spaces = Vector{FESpaceHierarchyLevel}(undef,num_levels(a.mh)) + for i=1:num_levels(a.mh) + model = get_model(a.mh,i) + if (GridapP4est.i_am_in(model.parts)) + Uh = TrialFESpace(u,a[i]) + trial_spaces[i] = Uh + end + end + FESpaceHierarchy(a.mh,trial_spaces) +end + +function Gridap.FESpaces.TrialFESpace(a::FESpaceHierarchy) + trial_spaces = Vector{FESpaceHierarchyLevel}(undef,num_levels(a.mh)) + for i=1:num_levels(a.mh) + model = get_model(a.mh,i) + if (GridapP4est.i_am_in(model.parts)) + Uh = TrialFESpace(a[i]) + trial_spaces[i] = Uh + end + end + FESpaceHierarchy(a.mh,trial_spaces) +end diff --git a/src/InterGridTransferOperators.jl b/src/InterGridTransferOperators.jl new file mode 100644 index 00000000..b39f29b2 --- /dev/null +++ b/src/InterGridTransferOperators.jl @@ -0,0 +1,100 @@ + + +struct InterGridTransferOperator{T,R,A,B,C} + sh :: A + ref_op :: B + cache :: C + + function InterGridTransferOperator(op_type::Symbol,redist::Bool,sh::FESpaceHierarchy,ref_op,cache) + T = typeof(Val(op_type)) + R = typeof(Val(redist)) + A = typeof(sh) + B = typeof(ref_op) + C = typeof(cache) + new{T,R,A,B,C}(sh,cache) + end +end + +### Constructors + +RestrictionOperator(lev::Int,sh::FESpaceHierarchy,qdegree::Int) = InterGridTransferOperator(lev,sh,qdegree,:restriction) +ProlongationOperator(lev::Int,sh::FESpaceHierarchy,qdegree::Int) = InterGridTransferOperator(lev,sh,qdegree,:prolongation) + +function InterGridTransferOperator(lev::Int,sh::FESpaceHierarchy,qdegree::Int,op_type::Symbol) + mh = sh.mh + @check lev != num_levels(mh) + @check op_type ∈ [:restriction, :prolongation] + + UH = get_space(sh,lev+1) + Uh = get_space_before_redist(sh,lev) + Uh_red = get_space(sh,lev) + + # Refinement + from, to = (op_type == :restriction) ? 
(Uh, UH) : (UH, Uh)
+  ref_op = RefinementTransferOperator(from,to;qdegree=qdegree)
+
+  # Redistribution
+  redist = has_redistribution(mh,lev)
+  if redist
+    model_h = get_model_before_redist(mh,lev)
+    model_h_red = get_model(mh,lev)
+    fv_h = PVector(0.0,Uh.gids)
+    fv_h_red = PVector(0.0,Uh_red.gids)
+
+    cache = fv_h, Uh, fv_h_red, Uh_red, model_h, model_h_red, glue
+  else
+    cache = nothing
+  end
+
+  return InterGridTransferOperator(:op_type,redist,sh,ref_op,cache)
+end
+
+function setup_transfer_operators(sh::FESpaceHierarchy, qdegree::Int)
+  restrictions = Vector{InterGridTransferOperator}(undef,num_levels(sh)-1)
+  interpolations = Vector{InterGridTransferOperator}(undef,num_levels(sh)-1)
+  for lev in 1:num_levels(sh)-1
+    restrictions[lev] = RestrictionOperator(lev,sh,qdegree)
+    interpolations[lev] = InterpolationOperator(lev,sh,qdegree)
+  end
+  return restrictions, interpolations
+end
+
+### Applying the operators:
+
+## A) Without redistribution (same for interpolation/restriction)
+function LinearAlgebra.mul!(y::PVector,A::InterGridTransferOperator{T,Val{false}},x::PVector) where T
+
+  map_parts(y,A.ref_op,x) do y, ref_op, x
+    mul!(y,ref_op,x)
+  end
+
+  return y
+end
+
+## B) Prolongation (coarse to fine), with redistribution
+function LinearAlgebra.mul!(y::PVector,A::InterGridTransferOperator{Val{:prolongation},Val{true}},x::PVector)
+  fv_h, Uh, fv_h_red, Uh_red, model_h, model_h_red, glue = A.cache
+
+  # 1 - Solve c2f projection in coarse partition
+  mul!(fv_h,A.ref_op,x)
+
+  # 2 - Redistribute from coarse partition to fine partition
+  redistribute_free_values!(fv_h_red,Uh_red,fv_h,Uh,model_h_red,glue;reverse=false)
+  copy!(y,fv_h_red)
+
+  return y
+end
+
+## C) Restriction (fine to coarse), with redistribution
+function LinearAlgebra.mul!(y::PVector,A::InterGridTransferOperator{Val{:restriction},Val{true}},x::PVector)
+  fv_h, Uh, fv_h_red, Uh_red, model_h, model_h_red, glue = A.cache
+
+  # 1 - Redistribute from coarse partition to fine partition
+  copy!(fv_h_red,x)
+  redistribute_free_values!(fv_h,Uh,fv_h_red,Uh_red,model_h,glue;reverse=true)
+
+  # 2 - Solve f2c projection in fine partition
+  mul!(y,A.ref_op,fv_h)
+
+  return y
+end
diff --git a/src/ModelHierarchies.jl b/src/ModelHierarchies.jl
index c5dc4f56..c3f6e486 100644
--- a/src/ModelHierarchies.jl
+++ b/src/ModelHierarchies.jl
@@ -1,10 +1,15 @@
-
 """
   Single level for a ModelHierarchy.
+  In each level, `cmodel` is the coarse model which is
+  first redistributed to obtain `cmodel_red` and then
+  refined to obtain `fmodel_red`.
+  Two considerations:
+  1. `model_red` and `red_glue` might be of type `Nothing`
-     whenever there is no redistribution in a given level.
+     whenever there is no redistribution in a given level.
+  2. `ref_glue` is of type `Nothing` for the coarsest model.
""" struct ModelHierarchyLevel{A,B,C,D} @@ -25,19 +30,24 @@ end num_levels(a::ModelHierarchy) = length(a.levels) get_level(a::ModelHierarchy,level::Integer) = a.levels[level] -get_level_model(a::ModelHierarchy,level::Integer) = get_level_model(get_level(a,level)) -get_level_model(a::ModelHierarchyLevel{A,B,Nothing}) where {A,B} = a.model -get_level_model(a::ModelHierarchyLevel{A,B,C}) where {A,B,C} = a.model_red +get_model(a::ModelHierarchy,level::Integer) = get_model(get_level(a,level)) +get_model(a::ModelHierarchyLevel{A,B,Nothing}) where {A,B} = a.model +get_model(a::ModelHierarchyLevel{A,B,C}) where {A,B,C} = a.model_red + +get_model_before_redist(a::ModelHierarchy,level::Integer) = get_model_before_redist(get_level(a,level)) +get_model_before_redist(a::ModelHierarchyLevel) = a.model -get_level_model_before_redist(a::ModelHierarchy,level::Integer) = - get_level_model_before_redist(get_level(a,level)) -get_level_model_before_redist(a::ModelHierarchyLevel) = a.model +has_refinement(a::ModelHierarchy,level::Integer) = has_refinement(a.levels[level]) +has_refinement(a::ModelHierarchyLevel{A,B,C,D}) where {A,B,C,D} = true +has_refinement(a::ModelHierarchyLevel{A,Nothing,C,D}) where {A,C,D} = false + +has_redistribution(a::ModelHierarchy,level::Integer) = has_redistribution(a.levels[level]) +has_redistribution(a::ModelHierarchyLevel{A,B,C,D}) where {A,B,C,D} = true +has_redistribution(a::ModelHierarchyLevel{A,B,C,Nothing}) where {A,B,C} = false """ ModelHierarchy(parts,model,num_procs_x_level;num_refs_x_level) - - `model`: Initial refinable distributed model. Will be set as coarsest level. - - `num_procs_x_level`: Vector containing the number of processors we want to distribute each level into. We need `num_procs_x_level[end]` to be equal to the number of parts of `model`. 
From 154077743ac2e6418e58b75d5200725880f6f1ae Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 20 Oct 2022 10:52:21 +1100 Subject: [PATCH 14/95] Added exports --- src/GridapSolvers.jl | 15 ++++++++++++++- 1 file changed, 14 insertions(+), 1 deletion(-) diff --git a/src/GridapSolvers.jl b/src/GridapSolvers.jl index 9b964a80..c03fd8f5 100644 --- a/src/GridapSolvers.jl +++ b/src/GridapSolvers.jl @@ -4,15 +4,28 @@ module GridapSolvers using LinearAlgebra using Gridap using Gridap.Helpers + using Gridap.Geometry + using Gridap.FESpaces using PartitionedArrays using GridapDistributed using GridapP4est - export ModelHierarchy + export num_levels, get_level, get_model, get_model_before_redist, has_refinement, has_redistribution + + export FESpaceHierarchy + export get_space, get_space_before_redist + + export InterGridTransferOperator + export RestrictionOperator, ProlongationOperator + export setup_transfer_operators include("PartitionedArraysExtensions.jl") include("ModelHierarchies.jl") + include("FESpaceHierarchies.jl") + include("RedistributeTools.jl") + include("InterGridTransferOperators.jl") + end From 53680acafab32a1c4a60892b2feb9c716bf3e38d Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 20 Oct 2022 13:02:49 +1100 Subject: [PATCH 15/95] Tracking Gridap#refined-discrete-models --- Manifest.toml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/Manifest.toml b/Manifest.toml index 93f9919b..27ddac1b 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -203,8 +203,10 @@ deps = ["Random"] uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820" [[deps.Gridap]] -deps = ["AbstractTrees", "BSON", "BlockArrays", "Combinatorics", "DocStringExtensions", "FastGaussQuadrature", "FileIO", "FillArrays", "ForwardDiff", "JLD2", "JSON", "LineSearches", "LinearAlgebra", "NLsolve", "NearestNeighbors", "PolynomialBases", "QuadGK", "Random", "SparseArrays", "SparseMatricesCSR", "StaticArrays", "Test", "WriteVTK"] -git-tree-sha1 = "e66749aba5b8d2e41155c2b12dea9bc7c2a71440" +deps = ["AbstractTrees", "BSON", "BlockArrays", "Combinatorics", "DocStringExtensions", "FastGaussQuadrature", "FileIO", "FillArrays", "ForwardDiff", "IterativeSolvers", "JLD2", "JSON", "LineSearches", "LinearAlgebra", "NLsolve", "NearestNeighbors", "PolynomialBases", "QuadGK", "Random", "SparseArrays", "SparseMatricesCSR", "StaticArrays", "Test", "WriteVTK"] +git-tree-sha1 = "a3b73b5209abe4b5ad08351ccb37082263cf7064" +repo-rev = "refined-discrete-models" +repo-url = "https://github.com/gridap/Gridap.jl.git" uuid = "56d4f2e9-7ea1-5844-9cf6-b9c51ca7ce8e" version = "0.17.14" From 1788cc8fea572ef2fae42882c54452cedf6b3ec6 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 20 Oct 2022 14:22:47 +1100 Subject: [PATCH 16/95] Bugfix --- Manifest.toml | 44 ++++++++++++++++++++-------------------- src/RedistributeTools.jl | 2 +- 2 files changed, 23 insertions(+), 23 deletions(-) diff --git a/Manifest.toml b/Manifest.toml index 27ddac1b..72c7fae5 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -11,9 +11,9 @@ uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c" version = "1.2.1" [[deps.AbstractTrees]] -git-tree-sha1 = "5c0b629df8a5566a06f5fef5100b53ea56e465a0" +git-tree-sha1 = "52b3b436f8f73133d7bc3a6c71ee7ed6ab2ab754" uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" -version = "0.4.2" +version = "0.4.3" [[deps.ArgCheck]] git-tree-sha1 = "a3a402a35a2f7e0b87828ccabbd5ebfbebe356b4" @@ -32,9 +32,9 @@ version = "1.1.1" [[deps.ArrayInterfaceCore]] deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"] -git-tree-sha1 = 
"5bb0f8292405a516880a3809954cb832ae7a31c5" +git-tree-sha1 = "e9f7992287edfc27b3cbe0046c544bace004ca5b" uuid = "30b0a656-2188-435a-8636-2ec0e6a096e2" -version = "0.1.20" +version = "0.1.22" [[deps.ArrayLayouts]] deps = ["FillArrays", "LinearAlgebra", "SparseArrays"] @@ -95,9 +95,9 @@ version = "0.3.0" [[deps.Compat]] deps = ["Dates", "LinearAlgebra", "UUIDs"] -git-tree-sha1 = "5856d3031cdb1f3b2b6340dfdc66b6d9a149a374" +git-tree-sha1 = "3ca828fe1b75fa84b021a7860bd039eaea84d2f2" uuid = "34da2185-b29b-5c13-b0c7-acf172513d20" -version = "4.2.0" +version = "4.3.0" [[deps.CompilerSupportLibraries_jll]] deps = ["Artifacts", "Libdl"] @@ -128,9 +128,9 @@ version = "1.1.0" [[deps.DiffRules]] deps = ["IrrationalConstants", "LogExpFunctions", "NaNMath", "Random", "SpecialFunctions"] -git-tree-sha1 = "992a23afdb109d0d2f8802a30cf5ae4b1fe7ea68" +git-tree-sha1 = "8b7a4d23e22f5d44883671da70865ca98f2ebf9d" uuid = "b552c78f-8df3-52c6-915a-8e097449b14b" -version = "1.11.1" +version = "1.12.0" [[deps.Distances]] deps = ["LinearAlgebra", "SparseArrays", "Statistics", "StatsAPI"] @@ -173,9 +173,9 @@ version = "0.4.9" [[deps.FileIO]] deps = ["Pkg", "Requires", "UUIDs"] -git-tree-sha1 = "94f5101b96d2d968ace56f7f2db19d0a5f592e28" +git-tree-sha1 = "7be5f99f7d15578798f338f5433b6c432ea8037b" uuid = "5789e2e9-d7fb-5bc7-8068-2c6fae9b9549" -version = "1.15.0" +version = "1.16.0" [[deps.FileWatching]] uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee" @@ -212,7 +212,7 @@ version = "0.17.14" [[deps.GridapDistributed]] deps = ["FillArrays", "Gridap", "LinearAlgebra", "MPI", "PartitionedArrays", "SparseArrays", "SparseMatricesCSR", "WriteVTK"] -git-tree-sha1 = "0d9de731ebf89b0d2e15a9c53b1aad1ed6e0a017" +git-tree-sha1 = "e948c30fb1e3e0e50f6155b132a4a6effa435517" repo-rev = "p4est-migration" repo-url = "https://github.com/gridap/GridapDistributed.jl.git" uuid = "f9701e48-63b3-45aa-9a63-9bc6c271f355" @@ -220,7 +220,7 @@ version = "0.2.6" [[deps.GridapP4est]] deps = ["ArgParse", "FillArrays", "Gridap", "GridapDistributed", "IterativeSolvers", "Libdl", "LinearAlgebra", "MPI", "P4est_wrapper", "PartitionedArrays", "Printf", "Test"] -git-tree-sha1 = "107dc7701cde83db6ee33cad6426acaeee92eba5" +git-tree-sha1 = "ca8f9fecad3f74a6ac830463cef7a969d092db17" repo-rev = "p4est-migration" repo-url = "https://github.com/gridap/GridapP4est.jl.git" uuid = "c2c8e14b-f5fd-423d-9666-1dd9ad120af9" @@ -255,9 +255,9 @@ version = "0.9.2" [[deps.JLD2]] deps = ["FileIO", "MacroTools", "Mmap", "OrderedCollections", "Pkg", "Printf", "Reexport", "TranscodingStreams", "UUIDs"] -git-tree-sha1 = "0d0ad913e827d13c5e88a73f9333d7e33c424576" +git-tree-sha1 = "1c3ff7416cb727ebf4bab0491a56a296d7b8cf1d" uuid = "033835bb-8acc-5ee8-8aae-3f567f8a3819" -version = "0.4.24" +version = "0.4.25" [[deps.JLLWrappers]] deps = ["Preferences"] @@ -330,9 +330,9 @@ uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" [[deps.MKL_jll]] deps = ["Artifacts", "IntelOpenMP_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "Pkg"] -git-tree-sha1 = "41d162ae9c868218b1f3fe78cba878aa348c2d26" +git-tree-sha1 = "2ce8695e1e699b68702c03402672a69f54b8aca9" uuid = "856f044c-d86e-5d09-b602-aeab76dc8ba7" -version = "2022.1.0+0" +version = "2022.2.0+0" [[deps.MPI]] deps = ["Distributed", "DocStringExtensions", "Libdl", "MPICH_jll", "MicrosoftMPI_jll", "OpenMPI_jll", "Pkg", "Random", "Requires", "Serialization", "Sockets"] @@ -348,9 +348,9 @@ version = "4.0.2+5" [[deps.MPIPreferences]] deps = ["Libdl", "Preferences"] -git-tree-sha1 = "9959c42b41220206eeda9004f695d913e2245658" +git-tree-sha1 = 
"34892fb69751a76bcf8b7add84ec77015208a1ec" uuid = "3da0fdf6-3ccc-4f1b-acd9-58baa6c99267" -version = "0.1.5" +version = "0.1.6" [[deps.MPItrampoline_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "Pkg", "TOML"] @@ -461,9 +461,9 @@ version = "0.12.3" [[deps.Parsers]] deps = ["Dates"] -git-tree-sha1 = "3d5bf43e3e8b412656404ed9466f1dcbf7c50269" +git-tree-sha1 = "6c01a9b494f6d2a9fc180a08b182fcb06f0958a0" uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" -version = "2.4.0" +version = "2.4.2" [[deps.PartitionedArrays]] deps = ["Distances", "IterativeSolvers", "LinearAlgebra", "MPI", "Printf", "SparseArrays", "SparseMatricesCSR"] @@ -508,9 +508,9 @@ uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" [[deps.RecipesBase]] deps = ["SnoopPrecompile"] -git-tree-sha1 = "612a4d76ad98e9722c8ba387614539155a59e30c" +git-tree-sha1 = "d12e612bba40d189cead6ff857ddb67bd2e6a387" uuid = "3cdcf5f2-1ef4-517c-9805-6587b60abb01" -version = "1.3.0" +version = "1.3.1" [[deps.Reexport]] git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b" diff --git a/src/RedistributeTools.jl b/src/RedistributeTools.jl index a9f1b300..9382d0af 100644 --- a/src/RedistributeTools.jl +++ b/src/RedistributeTools.jl @@ -177,7 +177,7 @@ function redistribute_fe_function(uh_old::GridapDistributed.DistributedSingleFie end -function Gridap.FESpaces.gather_free_and_dirichlet_values(f::Gridap.Distributed.AbstractDistributedFESpace,cv) +function Gridap.FESpaces.gather_free_and_dirichlet_values(f::GridapDistributed.DistributedFESpace,cv) free_values, dirichlet_values = map_parts(local_views(f),cv) do f, cv Gridap.FESpaces.gather_free_and_dirichlet_values(f,cv) end From 3a77a545a0f828d1c6f5cb23adf06852ee5e9f46 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 20 Oct 2022 14:31:22 +1100 Subject: [PATCH 17/95] Bugfix --- src/ModelHierarchies.jl | 2 +- test/mpi/ModelHierarchiesTests.jl | 13 ++++++++++--- 2 files changed, 11 insertions(+), 4 deletions(-) diff --git a/src/ModelHierarchies.jl b/src/ModelHierarchies.jl index c3f6e486..359f68f3 100644 --- a/src/ModelHierarchies.jl +++ b/src/ModelHierarchies.jl @@ -61,7 +61,7 @@ function ModelHierarchy(parts,coarsest_model::GridapDistributed.AbstractDistribu meshes[num_levels] = ModelHierarchyLevel(num_levels,coarsest_model,nothing,nothing,nothing) for i = num_levels-1:-1:1 - modelH = get_level_model(meshes[i+1]) + modelH = get_model(meshes[i+1]) if (num_procs_x_level[i]!=num_procs_x_level[i+1]) # meshes[i+1].model is distributed among P processors # model_ref is distributed among Q processors, with P!=Q diff --git a/test/mpi/ModelHierarchiesTests.jl b/test/mpi/ModelHierarchiesTests.jl index 88f7f725..2c662cc8 100644 --- a/test/mpi/ModelHierarchiesTests.jl +++ b/test/mpi/ModelHierarchiesTests.jl @@ -7,6 +7,13 @@ using PartitionedArrays using GridapSolvers using GridapP4est +function model_hierarchy_free!(mh::ModelHierarchy) + for lev in 1:num_levels(mh) + model = get_model(mh,lev) + octree_distributed_discrete_model_free!(model) + end +end + function main(parts,num_parts_x_level,num_trees,num_refs_coarse) domain = (0,1,0,1) cmodel = CartesianDiscreteModel(domain,num_trees) @@ -17,9 +24,9 @@ function main(parts,num_parts_x_level,num_trees,num_refs_coarse) model_hierarchy_free!(mh) end -num_parts_x_level = [4,2,2,2,2,2] # Procs in each refinement level -num_trees = (1,1) # Number of initial P4est trees -num_refs_coarse = 2 # Number of initial refinements +num_parts_x_level = [4,4,2,2] # Procs in each refinement level +num_trees = (1,1) # 
Number of initial P4est trees +num_refs_coarse = 2 # Number of initial refinements ranks = num_parts_x_level[1] prun(main,mpi,ranks,num_parts_x_level,num_trees,num_refs_coarse) From fafd484020fbcf8531a8ca0213c757e442e40057 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 20 Oct 2022 16:00:58 +1100 Subject: [PATCH 18/95] Added tests for FESpaceHierarchies --- Manifest.toml | 4 ++-- src/FESpaceHierarchies.jl | 6 +++--- test/mpi/ModelHierarchiesTests.jl | 7 +++++++ 3 files changed, 12 insertions(+), 5 deletions(-) diff --git a/Manifest.toml b/Manifest.toml index 72c7fae5..6c8fbda8 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -212,7 +212,7 @@ version = "0.17.14" [[deps.GridapDistributed]] deps = ["FillArrays", "Gridap", "LinearAlgebra", "MPI", "PartitionedArrays", "SparseArrays", "SparseMatricesCSR", "WriteVTK"] -git-tree-sha1 = "e948c30fb1e3e0e50f6155b132a4a6effa435517" +git-tree-sha1 = "56869b57300d601878184c6af6eea9cfee851ce6" repo-rev = "p4est-migration" repo-url = "https://github.com/gridap/GridapDistributed.jl.git" uuid = "f9701e48-63b3-45aa-9a63-9bc6c271f355" @@ -220,7 +220,7 @@ version = "0.2.6" [[deps.GridapP4est]] deps = ["ArgParse", "FillArrays", "Gridap", "GridapDistributed", "IterativeSolvers", "Libdl", "LinearAlgebra", "MPI", "P4est_wrapper", "PartitionedArrays", "Printf", "Test"] -git-tree-sha1 = "ca8f9fecad3f74a6ac830463cef7a969d092db17" +git-tree-sha1 = "1b895da0bef1a00fa2b4167bea591df44555b5cc" repo-rev = "p4est-migration" repo-url = "https://github.com/gridap/GridapP4est.jl.git" uuid = "c2c8e14b-f5fd-423d-9666-1dd9ad120af9" diff --git a/src/FESpaceHierarchies.jl b/src/FESpaceHierarchies.jl index 85db0a77..71527674 100644 --- a/src/FESpaceHierarchies.jl +++ b/src/FESpaceHierarchies.jl @@ -36,14 +36,14 @@ end function Gridap.FESpaces.TestFESpace( mh::ModelHierarchyLevel{A,B,Nothing},args...;kwargs...) where {A,B} - Vh = TestFESpace(get_model(mh.model),args...;kwargs...) + Vh = TestFESpace(mh.model,args...;kwargs...) FESpaceHierarchyLevel(mh.level,Vh,nothing) end function Gridap.FESpaces.TestFESpace( mh::ModelHierarchyLevel{A,B,C},args...;kwargs...) where {A,B,C} - Vh = TestFESpace(get_model(mh.model),args...;kwargs...) - Vh_red = TestFESpace(get_model(mh.model_red),args...;kwargs...) + Vh = TestFESpace(mh.model,args...;kwargs...) + Vh_red = TestFESpace(mh.model_red,args...;kwargs...) 
FESpaceHierarchyLevel(mh.level,Vh,Vh_red) end diff --git a/test/mpi/ModelHierarchiesTests.jl b/test/mpi/ModelHierarchiesTests.jl index 2c662cc8..ead0219e 100644 --- a/test/mpi/ModelHierarchiesTests.jl +++ b/test/mpi/ModelHierarchiesTests.jl @@ -2,6 +2,7 @@ module ModelHierarchiesTests using MPI using Gridap +using Gridap.FESpaces using GridapDistributed using PartitionedArrays using GridapSolvers @@ -21,6 +22,12 @@ function main(parts,num_parts_x_level,num_trees,num_refs_coarse) num_levels = length(num_parts_x_level) coarse_model = OctreeDistributedDiscreteModel(parts,cmodel,num_refs_coarse) mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) + + sol(x) = x[1] + x[2] + reffe = ReferenceFE(lagrangian,Float64,1) + tests = TestFESpace(mh,reffe,conformity=:H1) + trials = TrialFESpace(sol,tests) + model_hierarchy_free!(mh) end From abe468b8acc38ac0ba523bfb53c437362279759f Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 20 Oct 2022 17:29:46 +1100 Subject: [PATCH 19/95] Added RedistributeToolsTests --- Manifest.toml | 4 +- src/GridapSolvers.jl | 2 + src/RedistributeTools.jl | 16 ++--- test/mpi/InterGridTransferOperatorsTests.jl | 0 test/mpi/RedistributeToolsTests.jl | 77 +++++++++++++++++++++ test/runtests.jl | 2 +- 6 files changed, 90 insertions(+), 11 deletions(-) create mode 100644 test/mpi/InterGridTransferOperatorsTests.jl create mode 100644 test/mpi/RedistributeToolsTests.jl diff --git a/Manifest.toml b/Manifest.toml index 6c8fbda8..50e10e3c 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -212,7 +212,7 @@ version = "0.17.14" [[deps.GridapDistributed]] deps = ["FillArrays", "Gridap", "LinearAlgebra", "MPI", "PartitionedArrays", "SparseArrays", "SparseMatricesCSR", "WriteVTK"] -git-tree-sha1 = "56869b57300d601878184c6af6eea9cfee851ce6" +git-tree-sha1 = "2950eceffd1b4a35946e92489e3827aa077561ac" repo-rev = "p4est-migration" repo-url = "https://github.com/gridap/GridapDistributed.jl.git" uuid = "f9701e48-63b3-45aa-9a63-9bc6c271f355" @@ -220,7 +220,7 @@ version = "0.2.6" [[deps.GridapP4est]] deps = ["ArgParse", "FillArrays", "Gridap", "GridapDistributed", "IterativeSolvers", "Libdl", "LinearAlgebra", "MPI", "P4est_wrapper", "PartitionedArrays", "Printf", "Test"] -git-tree-sha1 = "1b895da0bef1a00fa2b4167bea591df44555b5cc" +git-tree-sha1 = "4a267b7c10531b6482b1488ab43616847d49d49c" repo-rev = "p4est-migration" repo-url = "https://github.com/gridap/GridapP4est.jl.git" uuid = "c2c8e14b-f5fd-423d-9666-1dd9ad120af9" diff --git a/src/GridapSolvers.jl b/src/GridapSolvers.jl index c03fd8f5..ad3d9528 100644 --- a/src/GridapSolvers.jl +++ b/src/GridapSolvers.jl @@ -10,6 +10,8 @@ module GridapSolvers using GridapDistributed using GridapP4est + import GridapDistributed: local_views + export ModelHierarchy export num_levels, get_level, get_model, get_model_before_redist, has_refinement, has_redistribution diff --git a/src/RedistributeTools.jl b/src/RedistributeTools.jl index 9382d0af..bc14a226 100644 --- a/src/RedistributeTools.jl +++ b/src/RedistributeTools.jl @@ -7,10 +7,10 @@ function _allocate_cell_wise_dofs(cell_to_ldofs) ldofs = getindex!(cache,cell_to_ldofs,cell) ptrs[cell+1] = length(ldofs) end - PArrays.length_to_ptrs!(ptrs) + PartitionedArrays.length_to_ptrs!(ptrs) ndata = ptrs[end]-1 data = Vector{Float64}(undef,ndata) - PArrays.Table(data,ptrs) + PartitionedArrays.Table(data,ptrs) end end @@ -42,10 +42,10 @@ function allocate_comm_data(num_dofs_x_cell,lids) ptrs[i+1] = ptrs[i+1] + num_dofs_x_cell.data[j] end end - PArrays.length_to_ptrs!(ptrs) + 
PartitionedArrays.length_to_ptrs!(ptrs) ndata = ptrs[end]-1 data = Vector{Float64}(undef,ndata) - PArrays.Table(data,ptrs) + PartitionedArrays.Table(data,ptrs) end end @@ -95,12 +95,12 @@ end function num_dofs_x_cell(cell_dofs_array,lids) map_parts(cell_dofs_array,lids) do cell_dofs_array, lids data = [length(cell_dofs_array[i]) for i = 1:length(cell_dofs_array) ] - PArrays.Table(data,lids.ptrs) + PartitionedArrays.Table(data,lids.ptrs) end end -function redistribute_cell_dofs(cell_dof_values_old::GridapDistributed.DistributedCellDatum, +function redistribute_cell_dofs(cell_dof_values_old, Uh_new::GridapDistributed.DistributedSingleFieldFESpace, model_new, glue::GridapDistributed.RedistributeGlue; @@ -120,7 +120,7 @@ function redistribute_cell_dofs(cell_dof_values_old::GridapDistributed.Distribut snd_data, parts_rcv, parts_snd, - PArrays._empty_tasks(parts_rcv)) + PartitionedArrays._empty_tasks(parts_rcv)) map_parts(schedule,tout) cell_dof_values_new = _allocate_cell_wise_dofs(cell_dof_ids_new) @@ -170,7 +170,7 @@ function redistribute_fe_function(uh_old::GridapDistributed.DistributedSingleFie cell_dof_values_new = redistribute_cell_dofs(cell_dof_values_old,Uh_new,model_new,glue;reverse=reverse) # Assemble the new FEFunction - free_values, dirichlet_values = Gridap.FESpaces.gather_free_and_dirichlet_values(Uh_new.spaces,cell_dof_values_new) + free_values, dirichlet_values = Gridap.FESpaces.gather_free_and_dirichlet_values(Uh_new,cell_dof_values_new) free_values = PVector(free_values,Uh_new.gids) uh_new = FEFunction(Uh_new,free_values,dirichlet_values) return uh_new diff --git a/test/mpi/InterGridTransferOperatorsTests.jl b/test/mpi/InterGridTransferOperatorsTests.jl new file mode 100644 index 00000000..e69de29b diff --git a/test/mpi/RedistributeToolsTests.jl b/test/mpi/RedistributeToolsTests.jl new file mode 100644 index 00000000..fe97ff55 --- /dev/null +++ b/test/mpi/RedistributeToolsTests.jl @@ -0,0 +1,77 @@ +module RedistributeToolsTests + using MPI + using PartitionedArrays + using Gridap + using GridapDistributed + using GridapP4est + using GridapSolvers + using Test + + u(x) = x[1] + x[2] + + function model_hierarchy_free!(mh::ModelHierarchy) + for lev in 1:num_levels(mh) + model = get_model(mh,lev) + octree_distributed_discrete_model_free!(model) + end + end + + function run(parts,num_parts_x_level,num_trees,num_refs_coarse) + domain = (0,1,0,1) + cmodel = CartesianDiscreteModel(domain,num_trees) + coarse_model = OctreeDistributedDiscreteModel(parts,cmodel,num_refs_coarse) + mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) + + # FE Spaces + order = 1 + reffe = ReferenceFE(lagrangian,Float64,order) + glue = mh.levels[1].red_glue + + model_old = get_model_before_redist(mh.levels[1]) + VOLD = TestFESpace(model_old,reffe,dirichlet_tags="boundary") + UOLD = TrialFESpace(u,VOLD) + + model_new = get_model(mh.levels[1]) + VNEW = TestFESpace(model_new,reffe,dirichlet_tags="boundary") + UNEW = TrialFESpace(u,VNEW) + + # Triangulations + qdegree = 2*order+1 + Ω_old = Triangulation(model_old) + dΩ_old = Measure(Ω_old,qdegree) + Ω_new = Triangulation(model_new) + dΩ_new = Measure(Ω_new,qdegree) + + # Old -> New + uhold = interpolate(u,UOLD) + uhnew = GridapSolvers.redistribute_fe_function(uhold, + UNEW, + model_new, + glue) + o = sum(∫(uhold)*dΩ_old) + n = sum(∫(uhnew)*dΩ_new) + @test o ≈ n + + # New -> Old + uhnew = interpolate(u,UNEW) + uhold = GridapSolvers.redistribute_fe_function(uhnew, + UOLD, + model_old, + glue; + reverse=true) + o = sum(∫(uhnew)*dΩ_new) + n = 
sum(∫(uhold)*dΩ_old) + @test o ≈ n + + model_hierarchy_free!(mh) + end + + + num_parts_x_level = [4,2,2] # Procs in each refinement level + num_trees = (1,1) # Number of initial P4est trees + num_refs_coarse = 2 # Number of initial refinements + + ranks = num_parts_x_level[1] + prun(run,mpi,ranks,num_parts_x_level,num_trees,num_refs_coarse) + MPI.Finalize() +end diff --git a/test/runtests.jl b/test/runtests.jl index 287cc3e2..88e8f4c6 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -28,7 +28,7 @@ function run_tests(testdir) testfiles = sort(filter(istest, readdir(testdir))) @time @testset "$f" for f in testfiles MPI.mpiexec() do cmd - if f in [""] + if f in ["RedistributeToolsTests.jl"] np = 4 extra_args = "-s 2 2 -r 2" elseif f in ["ModelHierarchiesTests.jl"] From 23a43bcd7fb4bcbdd21b6916ae195f4b550cf48f Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 26 Oct 2022 14:13:43 +1100 Subject: [PATCH 20/95] Added RefinementTools, enhanced Transfer ops --- Manifest.toml | 14 +-- ...jl => DistributedGridTransferOperators.jl} | 24 ++--- src/FESpaceHierarchies.jl | 14 ++- src/GridapSolvers.jl | 7 +- src/ModelHierarchies.jl | 31 +++---- src/RefinementTools.jl | 93 +++++++++++++++++++ 6 files changed, 140 insertions(+), 43 deletions(-) rename src/{InterGridTransferOperators.jl => DistributedGridTransferOperators.jl} (67%) create mode 100644 src/RefinementTools.jl diff --git a/Manifest.toml b/Manifest.toml index 50e10e3c..4c096d10 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -32,9 +32,9 @@ version = "1.1.1" [[deps.ArrayInterfaceCore]] deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"] -git-tree-sha1 = "e9f7992287edfc27b3cbe0046c544bace004ca5b" +git-tree-sha1 = "e6cba4aadba7e8a7574ab2ba2fcfb307b4c4b02a" uuid = "30b0a656-2188-435a-8636-2ec0e6a096e2" -version = "0.1.22" +version = "0.1.23" [[deps.ArrayLayouts]] deps = ["FillArrays", "LinearAlgebra", "SparseArrays"] @@ -203,8 +203,8 @@ deps = ["Random"] uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820" [[deps.Gridap]] -deps = ["AbstractTrees", "BSON", "BlockArrays", "Combinatorics", "DocStringExtensions", "FastGaussQuadrature", "FileIO", "FillArrays", "ForwardDiff", "IterativeSolvers", "JLD2", "JSON", "LineSearches", "LinearAlgebra", "NLsolve", "NearestNeighbors", "PolynomialBases", "QuadGK", "Random", "SparseArrays", "SparseMatricesCSR", "StaticArrays", "Test", "WriteVTK"] -git-tree-sha1 = "a3b73b5209abe4b5ad08351ccb37082263cf7064" +deps = ["AbstractTrees", "BSON", "BlockArrays", "Combinatorics", "DocStringExtensions", "FastGaussQuadrature", "FileIO", "FillArrays", "ForwardDiff", "JLD2", "JSON", "LineSearches", "LinearAlgebra", "NLsolve", "NearestNeighbors", "PolynomialBases", "QuadGK", "Random", "SparseArrays", "SparseMatricesCSR", "StaticArrays", "Test", "WriteVTK"] +git-tree-sha1 = "c3a0779596eb97e7a3e0b6add0bb37dd1a1cc889" repo-rev = "refined-discrete-models" repo-url = "https://github.com/gridap/Gridap.jl.git" uuid = "56d4f2e9-7ea1-5844-9cf6-b9c51ca7ce8e" @@ -220,7 +220,7 @@ version = "0.2.6" [[deps.GridapP4est]] deps = ["ArgParse", "FillArrays", "Gridap", "GridapDistributed", "IterativeSolvers", "Libdl", "LinearAlgebra", "MPI", "P4est_wrapper", "PartitionedArrays", "Printf", "Test"] -git-tree-sha1 = "4a267b7c10531b6482b1488ab43616847d49d49c" +git-tree-sha1 = "212eedcd932c4906257a3f9ee7ca1544f6e6cd8d" repo-rev = "p4est-migration" repo-url = "https://github.com/gridap/GridapP4est.jl.git" uuid = "c2c8e14b-f5fd-423d-9666-1dd9ad120af9" @@ -494,9 +494,9 @@ uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" [[deps.QuadGK]] deps = 
["DataStructures", "LinearAlgebra"] -git-tree-sha1 = "3c009334f45dfd546a16a57960a821a1a023d241" +git-tree-sha1 = "97aa253e65b784fd13e83774cadc95b38011d734" uuid = "1fd47b50-473d-5c70-9696-f719f8f3bcdc" -version = "2.5.0" +version = "2.6.0" [[deps.REPL]] deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"] diff --git a/src/InterGridTransferOperators.jl b/src/DistributedGridTransferOperators.jl similarity index 67% rename from src/InterGridTransferOperators.jl rename to src/DistributedGridTransferOperators.jl index b39f29b2..fd1e632a 100644 --- a/src/InterGridTransferOperators.jl +++ b/src/DistributedGridTransferOperators.jl @@ -1,11 +1,11 @@ -struct InterGridTransferOperator{T,R,A,B,C} +struct DistributedGridTransferOperator{T,R,A,B,C} <: Gridap.Refinement.GridTransferOperator sh :: A ref_op :: B cache :: C - function InterGridTransferOperator(op_type::Symbol,redist::Bool,sh::FESpaceHierarchy,ref_op,cache) + function DistributedGridTransferOperator(op_type::Symbol,redist::Bool,sh::FESpaceHierarchy,ref_op,cache) T = typeof(Val(op_type)) R = typeof(Val(redist)) A = typeof(sh) @@ -17,10 +17,10 @@ end ### Constructors -RestrictionOperator(lev::Int,sh::FESpaceHierarchy,qdegree::Int) = InterGridTransferOperator(lev,sh,qdegree,:restriction) -ProlongationOperator(lev::Int,sh::FESpaceHierarchy,qdegree::Int) = InterGridTransferOperator(lev,sh,qdegree,:prolongation) +RestrictionOperator(lev::Int,sh::FESpaceHierarchy,qdegree::Int) = DistributedGridTransferOperator(lev,sh,qdegree,:restriction) +ProlongationOperator(lev::Int,sh::FESpaceHierarchy,qdegree::Int) = DistributedGridTransferOperator(lev,sh,qdegree,:prolongation) -function InterGridTransferOperator(lev::Int,sh::FESpaceHierarchy,qdegree::Int,op_type::Symbol) +function DistributedGridTransferOperator(lev::Int,sh::FESpaceHierarchy,qdegree::Int,op_type::Symbol) mh = sh.mh @check lev != num_levels(mh) @check op_type ∈ [:restriction, :prolongation] @@ -31,7 +31,7 @@ function InterGridTransferOperator(lev::Int,sh::FESpaceHierarchy,qdegree::Int,op # Refinement from, to = (op_type == :restriction) ? 
(Uh, UH) : (UH, Uh) - ref_op = RefinementTransferOperator(from,to;qdegree=qdegree) + ref_op = ProjectionTransferOperator(from,to;qdegree=qdegree) # Redistribution redist = has_redistribution(mh,lev) @@ -46,12 +46,12 @@ function InterGridTransferOperator(lev::Int,sh::FESpaceHierarchy,qdegree::Int,op cache = nothing end - return InterGridTransferOperator(:op_type,redist,sh,ref_op,cache) + return DistributedGridTransferOperator(:op_type,redist,sh,ref_op,cache) end function setup_transfer_operators(sh::FESpaceHierarchy, qdegree::Int) - restrictions = Vector{InterGridTransferOperator}(undef,num_levels(sh)-1) - interpolations = Vector{InterGridTransferOperator}(undef,num_levels(sh)-1) + restrictions = Vector{DistributedGridTransferOperator}(undef,num_levels(sh)-1) + interpolations = Vector{DistributedGridTransferOperator}(undef,num_levels(sh)-1) for lev in 1:num_levels(sh)-1 restrictions[lev] = RestrictionOperator(lev,sh,qdegree) interpolations[lev] = InterpolationOperator(lev,sh,qdegree) @@ -62,7 +62,7 @@ end ### Applying the operators: ## A) Without redistribution (same for interpolation/restriction) -function LinearAlgebra.mul!(y::PVector,A::InterGridTransferOperator{T,Val{false}},x::PVector) where T +function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{T,Val{false}},x::PVector) where T map_parts(y,A.ref_op,x) do y, ref_op, x mul!(y,ref_op,x) @@ -72,7 +72,7 @@ function LinearAlgebra.mul!(y::PVector,A::InterGridTransferOperator{T,Val{false} end ## B) Prolongation (coarse to fine), with redistribution -function LinearAlgebra.mul!(y::PVector,A::InterGridTransferOperator{Val{:prolongation},Val{true}},x::PVector) +function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:prolongation},Val{true}},x::PVector) fv_h, Uh, fv_h_red, Uh_red, model_h, model_h_red, glue = A.cache # 1 - Solve c2f projection in coarse partition @@ -86,7 +86,7 @@ function LinearAlgebra.mul!(y::PVector,A::InterGridTransferOperator{Val{:prolong end ## C) Restriction (fine to coarse), with redistribution -function LinearAlgebra.mul!(y::PVector,A::InterGridTransferOperator{Val{:restriction},Val{true}},x::PVector) +function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:restriction},Val{true}},x::PVector) fv_h, Uh, fv_h_red, Uh_red, model_h, model_h_red, glue = A.cache # 1 - Redistribute from coarse partition to fine partition diff --git a/src/FESpaceHierarchies.jl b/src/FESpaceHierarchies.jl index 71527674..5c3415c0 100644 --- a/src/FESpaceHierarchies.jl +++ b/src/FESpaceHierarchies.jl @@ -22,18 +22,26 @@ function num_levels(fh::FESpaceHierarchy) length(fh) end -function get_space(a::FESpaceHierarchyLevel{A,Nothing}) where {A} +function get_fe_space(a::FESpaceHierarchyLevel{A,Nothing}) where {A} a.fe_space end -function get_space(a::FESpaceHierarchyLevel{A,B}) where {A,B} +function get_fe_space(a::FESpaceHierarchyLevel{A,B}) where {A,B} a.fe_space_red end -function get_space_before_redist(a::FESpaceHierarchyLevel) +function get_fe_space(fh::FESpaceHierarchy,lev::Int) + get_space(fh[lev]) +end + +function get_fe_space_before_redist(a::FESpaceHierarchyLevel) a.fe_space end +function get_fe_space_before_redist(fh::FESpaceHierarchy,lev::Int) + get_space_before_redist(fh[lev]) +end + function Gridap.FESpaces.TestFESpace( mh::ModelHierarchyLevel{A,B,Nothing},args...;kwargs...) where {A,B} Vh = TestFESpace(mh.model,args...;kwargs...) 
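[Editorial usage sketch] With the rename to DistributedGridTransferOperator in place, the intended driving pattern for these operators is worth showing. The snippet continues the sketch given earlier in this series (the hierarchy `tests` is assumed from there); it builds one restriction/prolongation pair through the exported constructors and applies them with `mul!` on `PVector`s of free values. The level index, quadrature degree and vector seeds are illustrative, and the accessors are used as named here even though a later patch in this series still touches up their internals.

    using LinearAlgebra, PartitionedArrays

    lev, qdegree = 1, 3
    R = RestrictionOperator(lev,tests,qdegree)   # fine level (lev) -> coarse level (lev+1)
    P = ProlongationOperator(lev,tests,qdegree)  # coarse level (lev+1) -> fine level (lev)

    Uh = get_fe_space(tests,lev)
    UH = get_fe_space(tests,lev+1)
    xh = PVector(1.0,Uh.gids)  # free values on the fine level
    xH = PVector(0.0,UH.gids)  # free values on the coarse level

    mul!(xH,R,xh)  # restrict:   fine -> coarse
    mul!(xh,P,xH)  # prolongate: coarse -> fine
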
diff --git a/src/GridapSolvers.jl b/src/GridapSolvers.jl index ad3d9528..f90af47d 100644 --- a/src/GridapSolvers.jl +++ b/src/GridapSolvers.jl @@ -4,8 +4,10 @@ module GridapSolvers using LinearAlgebra using Gridap using Gridap.Helpers + using Gridap.Algebra using Gridap.Geometry using Gridap.FESpaces + using Gridap.Refinement using PartitionedArrays using GridapDistributed using GridapP4est @@ -19,7 +21,7 @@ module GridapSolvers export FESpaceHierarchy export get_space, get_space_before_redist - export InterGridTransferOperator + export DistributedGridTransferOperator export RestrictionOperator, ProlongationOperator export setup_transfer_operators @@ -27,7 +29,8 @@ module GridapSolvers include("ModelHierarchies.jl") include("FESpaceHierarchies.jl") include("RedistributeTools.jl") - include("InterGridTransferOperators.jl") + include("RefinementTools.jl") + include("DistributedGridTransferOperators.jl") end diff --git a/src/ModelHierarchies.jl b/src/ModelHierarchies.jl index 359f68f3..e9d4f38b 100644 --- a/src/ModelHierarchies.jl +++ b/src/ModelHierarchies.jl @@ -5,19 +5,16 @@ In each level, `cmodel` is the coarse model which is first redistributed to obtain `cmodel_red` and then refined to obtain `fmodel_red`. - Two considerations: - 1. `model_red` and `red_glue` might be of type `Nothing` + Note that `model_red` and `red_glue` might be of type `Nothing` whenever there is no redistribution in a given level. - 2. `ref_glue` is of type `Nothing` for the coarsest model. """ -struct ModelHierarchyLevel{A,B,C,D} +struct ModelHierarchyLevel{A,B,C} level :: Int model :: A - ref_glue :: B - model_red :: C - red_glue :: D + model_red :: B + red_glue :: C end """ @@ -31,19 +28,15 @@ num_levels(a::ModelHierarchy) = length(a.levels) get_level(a::ModelHierarchy,level::Integer) = a.levels[level] get_model(a::ModelHierarchy,level::Integer) = get_model(get_level(a,level)) -get_model(a::ModelHierarchyLevel{A,B,Nothing}) where {A,B} = a.model -get_model(a::ModelHierarchyLevel{A,B,C}) where {A,B,C} = a.model_red +get_model(a::ModelHierarchyLevel{A,Nothing}) where {A} = a.model +get_model(a::ModelHierarchyLevel{A,B}) where {A,B} = a.model_red get_model_before_redist(a::ModelHierarchy,level::Integer) = get_model_before_redist(get_level(a,level)) get_model_before_redist(a::ModelHierarchyLevel) = a.model -has_refinement(a::ModelHierarchy,level::Integer) = has_refinement(a.levels[level]) -has_refinement(a::ModelHierarchyLevel{A,B,C,D}) where {A,B,C,D} = true -has_refinement(a::ModelHierarchyLevel{A,Nothing,C,D}) where {A,C,D} = false - has_redistribution(a::ModelHierarchy,level::Integer) = has_redistribution(a.levels[level]) -has_redistribution(a::ModelHierarchyLevel{A,B,C,D}) where {A,B,C,D} = true -has_redistribution(a::ModelHierarchyLevel{A,B,C,Nothing}) where {A,B,C} = false +has_redistribution(a::ModelHierarchyLevel{A,B,C}) where {A,B,C} = true +has_redistribution(a::ModelHierarchyLevel{A,B,Nothing}) where {A,B} = false """ ModelHierarchy(parts,model,num_procs_x_level;num_refs_x_level) @@ -58,20 +51,20 @@ function ModelHierarchy(parts,coarsest_model::GridapDistributed.AbstractDistribu level_parts = generate_level_parts(parts,num_procs_x_level) meshes = Vector{ModelHierarchyLevel}(undef,num_levels) - meshes[num_levels] = ModelHierarchyLevel(num_levels,coarsest_model,nothing,nothing,nothing) + meshes[num_levels] = ModelHierarchyLevel(num_levels,coarsest_model,nothing,nothing) for i = num_levels-1:-1:1 modelH = get_model(meshes[i+1]) if (num_procs_x_level[i]!=num_procs_x_level[i+1]) # meshes[i+1].model is 
distributed among P processors # model_ref is distributed among Q processors, with P!=Q - model_ref,ref_glue = refine(modelH,level_parts[i]) + model_ref = Gridap.Refinement.refine(modelH,level_parts[i]) model_red,red_glue = redistribute(model_ref) else - model_ref,ref_glue = refine(modelH) + model_ref = Gridap.Refinement.refine(modelH) model_red,red_glue = nothing,nothing end - meshes[i] = ModelHierarchyLevel(i,model_ref,ref_glue,model_red,red_glue) + meshes[i] = ModelHierarchyLevel(i,model_ref,model_red,red_glue) end return ModelHierarchy(level_parts,meshes) diff --git a/src/RefinementTools.jl b/src/RefinementTools.jl new file mode 100644 index 00000000..7fbaf706 --- /dev/null +++ b/src/RefinementTools.jl @@ -0,0 +1,93 @@ + +const DistributedRefinedTriangulation{Dc,Dp} = GridapDistributed.DistributedTriangulation{Dc,Dp,AbstractPData{<:RefinedTriangulation{Dc,Dp}}} + +function get_test_space(U::GridapDistributed.DistributedSingleFieldFESpace) + spaces = map_parts(local_views(U)) do U + U.space + end + gids = U.gids + vector_type = U.vector_type + return GridapDistributed.DistributedSingleFieldFESpace(spaces,gids,vector_type) +end + +function Gridap.Refinement.ProjectionTransferOperator(from::GridapDistributed.DistributedFESpace, + to::GridapDistributed.DistributedFESpace; + solver::LinearSolver=BackslashSolver(), + Π=Gridap.Refinement.Π_l2, + qdegree=3) + Ω_from = get_triangulation(from) + Ω_to = get_triangulation(to) + @assert isa(Ω_from,DistributedRefinedTriangulation) || isa(Ω_to,DistributedRefinedTriangulation) + @assert is_change_possible(Ω_from,Ω_to) + + # Choose integration space + Ω = best_target(Ω_from,Ω_to) + dΩ = Measure(Ω,qdegree) + U = (Ω === Ω_from) ? from : to + V = get_test_space(U) + vh_to = get_fe_basis(to) + vh = change_domain(vh_to,Ω) + + # Prepare system + V_to = get_test_space(to) + lhs_mat, lhs_vec = assemble_lhs(Π,Ω_to,to,V_to,qdegree) + rhs_vec = similar(lhs_vec) + assem = SparseMatrixAssembler(to,V_to) + + # Prepare solver + ss = symbolic_setup(solver,lhs_mat) + ns = numerical_setup(ss,lhs_mat) + + caches = ns, lhs_vec, rhs_vec, Π, assem, Ω, dΩ, U, V, vh + return Gridap.Refinement.ProjectionTransferOperator(eltype(sysmat),from,to,caches) +end + +# Solves the problem Π(uh,vh)_to = Π(uh_from,vh)_Ω for all vh in Vh_to +function LinearAlgebra.mul!(y::PVector,A::Gridap.Refinement.ProjectionTransferOperator,x::PVector) + ns, lhs_vec, rhs_vec, Π, assem, Ω, dΩ, U, V , vh_Ω = A.caches + Ω_to = get_triangulation(A.to) + + # Bring uh to the integration domain + uh_from = FEFunction(A.from,x) + uh_Ω = change_domain(uh_from,Ω,ReferenceDomain()) + + # Assemble rhs vector + contr = Π(uh_Ω,vh_Ω,dΩ) + if Ω !== Ω_to + contr = merge_contr_cells(contr,Ω,Ω_to) + end + vecdata = collect_cell_vector(A.to.space,contr) + assemble_vector!(rhs_vec,assem,vecdata) + rhs_vec .-= lhs_vec + + # Solve projection + solve!(y,ns,sysvec) + return y +end + +function Gridap.Refinement.merge_contr_cells(a::GridapDistributed.DistributedDomainContribution, + rtrian::GridapDistributed.DistributedTriangulation, + ctrian::GridapDistributed.DistributedTriangulation) + b = map_parts(Gridap.Refinement.merge_contr_cells,local_views(a),local_views(rtrian),local_views(ctrian)) + return GridapDistributed.DistributedDomainContribution(b) +end + +function Gridap.Refinement.change_domain_c2f(c_cell_field, + ftrian::GridapDistributed.DistributedTriangulation{Dc,Dp}, + glue::MPIData{<:Union{Nothing,Gridap.Refinement.RefinementGlue}}) where {Dc,Dp} + + i_am_in_coarse = (c_cell_field != nothing) + + fields = 
map_parts(local_views(ftrian)) do Ω + if (i_am_in_coarse) + c_cell_field.fields.part + else + Gridap.Helpers.@check num_cells(Ω) == 0 + Gridap.CellData.GenericCellField(Fill(Gridap.Fields.ConstantField(0.0),num_cells(Ω)),Ω,ReferenceDomain()) + end + end + c_cell_field_fine = GridapDistributed.DistributedCellField(fields) + + dfield = map_parts(Gridap.Refinement.change_domain_c2f,local_views(c_cell_field_fine),local_views(ftrian),glue) + return GridapDistributed.DistributedCellField(dfield) +end \ No newline at end of file From 5f93db39e41384f40e5eb2ad4d5bc6819ed23bb8 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 2 Nov 2022 10:54:53 +1100 Subject: [PATCH 21/95] Added DistributedRefinedDiscreteModels --- Manifest.toml | 8 ++--- src/DistributedRefinedDiscreteModels.jl | 43 ++++++++++++++++++++++ src/FESpaceHierarchies.jl | 10 +++--- src/GridapSolvers.jl | 2 ++ src/ModelHierarchies.jl | 48 +++++++++++++++---------- test/mpi/ModelHierarchiesTests.jl | 1 + test/mpi/RedistributeToolsTests.jl | 1 + 7 files changed, 86 insertions(+), 27 deletions(-) create mode 100644 src/DistributedRefinedDiscreteModels.jl diff --git a/Manifest.toml b/Manifest.toml index 4c096d10..67f673f5 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -32,9 +32,9 @@ version = "1.1.1" [[deps.ArrayInterfaceCore]] deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"] -git-tree-sha1 = "e6cba4aadba7e8a7574ab2ba2fcfb307b4c4b02a" +git-tree-sha1 = "732cddf5c7a3d4e7d4829012042221a724a30674" uuid = "30b0a656-2188-435a-8636-2ec0e6a096e2" -version = "0.1.23" +version = "0.1.24" [[deps.ArrayLayouts]] deps = ["FillArrays", "LinearAlgebra", "SparseArrays"] @@ -204,7 +204,7 @@ uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820" [[deps.Gridap]] deps = ["AbstractTrees", "BSON", "BlockArrays", "Combinatorics", "DocStringExtensions", "FastGaussQuadrature", "FileIO", "FillArrays", "ForwardDiff", "JLD2", "JSON", "LineSearches", "LinearAlgebra", "NLsolve", "NearestNeighbors", "PolynomialBases", "QuadGK", "Random", "SparseArrays", "SparseMatricesCSR", "StaticArrays", "Test", "WriteVTK"] -git-tree-sha1 = "c3a0779596eb97e7a3e0b6add0bb37dd1a1cc889" +git-tree-sha1 = "8975563b8197ceb50351cfaf9d4588ffedb86470" repo-rev = "refined-discrete-models" repo-url = "https://github.com/gridap/Gridap.jl.git" uuid = "56d4f2e9-7ea1-5844-9cf6-b9c51ca7ce8e" @@ -220,7 +220,7 @@ version = "0.2.6" [[deps.GridapP4est]] deps = ["ArgParse", "FillArrays", "Gridap", "GridapDistributed", "IterativeSolvers", "Libdl", "LinearAlgebra", "MPI", "P4est_wrapper", "PartitionedArrays", "Printf", "Test"] -git-tree-sha1 = "212eedcd932c4906257a3f9ee7ca1544f6e6cd8d" +git-tree-sha1 = "c37fc6727b474ecd54ee23a55dc7e56d108077a5" repo-rev = "p4est-migration" repo-url = "https://github.com/gridap/GridapP4est.jl.git" uuid = "c2c8e14b-f5fd-423d-9666-1dd9ad120af9" diff --git a/src/DistributedRefinedDiscreteModels.jl b/src/DistributedRefinedDiscreteModels.jl new file mode 100644 index 00000000..5d2ab7e1 --- /dev/null +++ b/src/DistributedRefinedDiscreteModels.jl @@ -0,0 +1,43 @@ + + +struct DistributedRefinedDiscreteModel{Dc,Dp,A,B,C} <: GridapDistributed.AbstractDistributedDiscreteModel{Dc,Dp} + model :: A + parent :: B + glue :: C + + function DistributedRefinedDiscreteModel(model ::GridapDistributed.AbstractDistributedDiscreteModel{Dc,Dp}, + parent::GridapDistributed.AbstractDistributedDiscreteModel{Dc,Dp}, + glue) where {Dc,Dp} + A = typeof(model) + B = typeof(parent) + C = typeof(glue) + return new{Dc,Dp,A,B,C}(model,parent,glue) + end +end + + +function 
Base.getproperty(x::DistributedRefinedDiscreteModel, sym::Symbol) + if sym === :parts + return x.model.parts + else + getfield(x, sym) + end +end + +function Base.propertynames(x::DistributedRefinedDiscreteModel, private::Bool=false) + (fieldnames(typeof(x))...,:parts) +end + +Gridap.Geometry.num_cells(model::DistributedRefinedDiscreteModel) = Gridap.Geometry.num_cells(model.model) +Gridap.Geometry.num_facets(model::DistributedRefinedDiscreteModel) = Gridap.Geometry.num_facets(model.model) +Gridap.Geometry.num_edges(model::DistributedRefinedDiscreteModel) = Gridap.Geometry.num_edges(model.model) +Gridap.Geometry.num_vertices(model::DistributedRefinedDiscreteModel) = Gridap.Geometry.num_vertices(model.model) +Gridap.Geometry.num_faces(model::DistributedRefinedDiscreteModel) = Gridap.Geometry.num_faces(model.model) +Gridap.Geometry.get_grid(model::DistributedRefinedDiscreteModel) = Gridap.Geometry.get_grid(model.model) +Gridap.Geometry.get_grid_topology(model::DistributedRefinedDiscreteModel) = Gridap.Geometry.get_grid_topology(model.model) +Gridap.Geometry.get_face_labeling(model::DistributedRefinedDiscreteModel) = Gridap.Geometry.get_face_labeling(model.model) + +GridapDistributed.local_views(model::DistributedRefinedDiscreteModel) = GridapDistributed.local_views(model.model) +GridapDistributed.get_cell_gids(model::DistributedRefinedDiscreteModel) = GridapDistributed.get_cell_gids(model.model) +GridapDistributed.get_face_gids(model::DistributedRefinedDiscreteModel,dim::Integer) = GridapDistributed.get_face_gids(model.model,dim) +GridapDistributed.generate_gids(model::DistributedRefinedDiscreteModel,spaces) = GridapDistributed.generate_gids(model.model,spaces) diff --git a/src/FESpaceHierarchies.jl b/src/FESpaceHierarchies.jl index 5c3415c0..c686d156 100644 --- a/src/FESpaceHierarchies.jl +++ b/src/FESpaceHierarchies.jl @@ -43,15 +43,15 @@ function get_fe_space_before_redist(fh::FESpaceHierarchy,lev::Int) end function Gridap.FESpaces.TestFESpace( - mh::ModelHierarchyLevel{A,B,Nothing},args...;kwargs...) where {A,B} - Vh = TestFESpace(mh.model,args...;kwargs...) + mh::ModelHierarchyLevel{A,B,C,Nothing},args...;kwargs...) where {A,B,C} + Vh = TestFESpace(get_model(mh),args...;kwargs...) FESpaceHierarchyLevel(mh.level,Vh,nothing) end function Gridap.FESpaces.TestFESpace( - mh::ModelHierarchyLevel{A,B,C},args...;kwargs...) where {A,B,C} - Vh = TestFESpace(mh.model,args...;kwargs...) - Vh_red = TestFESpace(mh.model_red,args...;kwargs...) + mh::ModelHierarchyLevel{A,B,C,D},args...;kwargs...) where {A,B,C,D} + Vh = TestFESpace(get_model_before_redist(mh),args...;kwargs...) + Vh_red = TestFESpace(get_model(mh),args...;kwargs...) 
FESpaceHierarchyLevel(mh.level,Vh,Vh_red) end diff --git a/src/GridapSolvers.jl b/src/GridapSolvers.jl index f90af47d..68379a53 100644 --- a/src/GridapSolvers.jl +++ b/src/GridapSolvers.jl @@ -14,6 +14,7 @@ module GridapSolvers import GridapDistributed: local_views + export DistributedRefinedDiscreteModel export ModelHierarchy export num_levels, get_level, get_model, get_model_before_redist, has_refinement, has_redistribution @@ -26,6 +27,7 @@ module GridapSolvers export setup_transfer_operators include("PartitionedArraysExtensions.jl") + include("DistributedRefinedDiscreteModels.jl") include("ModelHierarchies.jl") include("FESpaceHierarchies.jl") include("RedistributeTools.jl") diff --git a/src/ModelHierarchies.jl b/src/ModelHierarchies.jl index e9d4f38b..d04600a9 100644 --- a/src/ModelHierarchies.jl +++ b/src/ModelHierarchies.jl @@ -2,19 +2,17 @@ """ Single level for a ModelHierarchy. - In each level, `cmodel` is the coarse model which is - first redistributed to obtain `cmodel_red` and then - refined to obtain `fmodel_red`. - Note that `model_red` and `red_glue` might be of type `Nothing` whenever there is no redistribution in a given level. - + + `ref_glue` is of type `Nothing` on the coarsest level. """ -struct ModelHierarchyLevel{A,B,C} +struct ModelHierarchyLevel{A,B,C,D} level :: Int model :: A - model_red :: B - red_glue :: C + ref_glue :: B + model_red :: C + red_glue :: D end """ @@ -28,15 +26,15 @@ num_levels(a::ModelHierarchy) = length(a.levels) get_level(a::ModelHierarchy,level::Integer) = a.levels[level] get_model(a::ModelHierarchy,level::Integer) = get_model(get_level(a,level)) -get_model(a::ModelHierarchyLevel{A,Nothing}) where {A} = a.model -get_model(a::ModelHierarchyLevel{A,B}) where {A,B} = a.model_red +get_model(a::ModelHierarchyLevel{A,B,Nothing}) where {A,B} = a.model +get_model(a::ModelHierarchyLevel{A,B,C}) where {A,B,C} = a.model_red get_model_before_redist(a::ModelHierarchy,level::Integer) = get_model_before_redist(get_level(a,level)) get_model_before_redist(a::ModelHierarchyLevel) = a.model has_redistribution(a::ModelHierarchy,level::Integer) = has_redistribution(a.levels[level]) -has_redistribution(a::ModelHierarchyLevel{A,B,C}) where {A,B,C} = true -has_redistribution(a::ModelHierarchyLevel{A,B,Nothing}) where {A,B} = false +has_redistribution(a::ModelHierarchyLevel{A,B,C,D}) where {A,B,C,D} = true +has_redistribution(a::ModelHierarchyLevel{A,B,C,Nothing}) where {A,B,C} = false """ ModelHierarchy(parts,model,num_procs_x_level;num_refs_x_level) @@ -51,21 +49,35 @@ function ModelHierarchy(parts,coarsest_model::GridapDistributed.AbstractDistribu level_parts = generate_level_parts(parts,num_procs_x_level) meshes = Vector{ModelHierarchyLevel}(undef,num_levels) - meshes[num_levels] = ModelHierarchyLevel(num_levels,coarsest_model,nothing,nothing) + meshes[num_levels] = ModelHierarchyLevel(num_levels,coarsest_model,nothing,nothing,nothing) for i = num_levels-1:-1:1 modelH = get_model(meshes[i+1]) if (num_procs_x_level[i]!=num_procs_x_level[i+1]) # meshes[i+1].model is distributed among P processors # model_ref is distributed among Q processors, with P!=Q - model_ref = Gridap.Refinement.refine(modelH,level_parts[i]) + model_ref,ref_glue = Gridap.Refinement.refine(modelH,level_parts[i]) model_red,red_glue = redistribute(model_ref) else - model_ref = Gridap.Refinement.refine(modelH) + model_ref,ref_glue = Gridap.Refinement.refine(modelH) model_red,red_glue = nothing,nothing end - meshes[i] = ModelHierarchyLevel(i,model_ref,model_red,red_glue) + meshes[i] = 
ModelHierarchyLevel(i,model_ref,ref_glue,model_red,red_glue) end - return ModelHierarchy(level_parts,meshes) -end \ No newline at end of file + mh = ModelHierarchy(level_parts,meshes) + return convert_to_refined_models(mh) +end + +function convert_to_refined_models(mh::ModelHierarchy) + nlevs = num_levels(mh) + levels = Vector{ModelHierarchyLevel}(undef,nlevs) + for lev in 1:nlevs-1 + ref_glue = mh.levels[lev].ref_glue + model_ref = DistributedRefinedDiscreteModel(get_model_before_redist(mh,lev),get_model(mh,lev+1),ref_glue) + levels[lev] = ModelHierarchyLevel(lev,model_ref,ref_glue,mh.levels[lev].model_red,mh.levels[lev].red_glue) + end + levels[nlevs] = mh.levels[nlevs] + + return ModelHierarchy(mh.level_parts,levels) +end diff --git a/test/mpi/ModelHierarchiesTests.jl b/test/mpi/ModelHierarchiesTests.jl index ead0219e..e9fd4275 100644 --- a/test/mpi/ModelHierarchiesTests.jl +++ b/test/mpi/ModelHierarchiesTests.jl @@ -11,6 +11,7 @@ using GridapP4est function model_hierarchy_free!(mh::ModelHierarchy) for lev in 1:num_levels(mh) model = get_model(mh,lev) + isa(model,DistributedRefinedDiscreteModel) && (model = model.model) octree_distributed_discrete_model_free!(model) end end diff --git a/test/mpi/RedistributeToolsTests.jl b/test/mpi/RedistributeToolsTests.jl index fe97ff55..8b3ef516 100644 --- a/test/mpi/RedistributeToolsTests.jl +++ b/test/mpi/RedistributeToolsTests.jl @@ -12,6 +12,7 @@ module RedistributeToolsTests function model_hierarchy_free!(mh::ModelHierarchy) for lev in 1:num_levels(mh) model = get_model(mh,lev) + isa(model,DistributedRefinedDiscreteModel) && (model = model.model) octree_distributed_discrete_model_free!(model) end end From 3ada68fcac46b2e9f7c9e1b3d378b963d3c019ae Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 3 Nov 2022 19:59:13 +1100 Subject: [PATCH 22/95] Debugging --- src/DistributedGridTransferOperators.jl | 28 ++++++--- src/DistributedRefinedDiscreteModels.jl | 43 ------------- src/FESpaceHierarchies.jl | 16 ++--- src/GridapSolvers.jl | 10 +-- src/ModelHierarchies.jl | 8 ++- src/RefinementTools.jl | 69 +++++++++++++++++---- test/mpi/InterGridTransferOperatorsTests.jl | 58 +++++++++++++++++ 7 files changed, 154 insertions(+), 78 deletions(-) delete mode 100644 src/DistributedRefinedDiscreteModels.jl diff --git a/src/DistributedGridTransferOperators.jl b/src/DistributedGridTransferOperators.jl index fd1e632a..8ac146f6 100644 --- a/src/DistributedGridTransferOperators.jl +++ b/src/DistributedGridTransferOperators.jl @@ -11,7 +11,7 @@ struct DistributedGridTransferOperator{T,R,A,B,C} <: Gridap.Refinement.GridTrans A = typeof(sh) B = typeof(ref_op) C = typeof(cache) - new{T,R,A,B,C}(sh,cache) + new{T,R,A,B,C}(sh,ref_op,cache) end end @@ -22,31 +22,41 @@ ProlongationOperator(lev::Int,sh::FESpaceHierarchy,qdegree::Int) = DistributedGr function DistributedGridTransferOperator(lev::Int,sh::FESpaceHierarchy,qdegree::Int,op_type::Symbol) mh = sh.mh - @check lev != num_levels(mh) + cparts = get_level_parts(mh,lev+1) + @check lev < num_levels(mh) @check op_type ∈ [:restriction, :prolongation] - - UH = get_space(sh,lev+1) - Uh = get_space_before_redist(sh,lev) - Uh_red = get_space(sh,lev) + Uh = get_fe_space_before_redist(sh,lev) + Ωh = get_triangulation(Uh,get_model_before_redist(mh,lev)) + # Refinement - from, to = (op_type == :restriction) ? 
(Uh, UH) : (UH, Uh) - ref_op = ProjectionTransferOperator(from,to;qdegree=qdegree) + if GridapP4est.i_am_in(cparts) + UH = get_fe_space(sh,lev+1) + ΩH = get_triangulation(UH,get_model(mh,lev+1)) + + from, to = (op_type == :restriction) ? (Uh, UH) : (UH, Uh) + Ω_from, Ω_to = (op_type == :restriction) ? (Ωh, ΩH) : (ΩH, Ωh) + ref_op = ProjectionTransferOperator(from,Ω_from,to,Ω_to;qdegree=qdegree) + else + ref_op = nothing + end # Redistribution redist = has_redistribution(mh,lev) if redist + Uh_red = get_fe_space(sh,lev) model_h = get_model_before_redist(mh,lev) model_h_red = get_model(mh,lev) fv_h = PVector(0.0,Uh.gids) fv_h_red = PVector(0.0,Uh_red.gids) + glue = mh.levels[lev].red_glue cache = fv_h, Uh, fv_h_red, Uh_red, model_h, model_h_red, glue else cache = nothing end - return DistributedGridTransferOperator(:op_type,redist,sh,ref_op,cache) + return DistributedGridTransferOperator(op_type,redist,sh,ref_op,cache) end function setup_transfer_operators(sh::FESpaceHierarchy, qdegree::Int) diff --git a/src/DistributedRefinedDiscreteModels.jl b/src/DistributedRefinedDiscreteModels.jl deleted file mode 100644 index 5d2ab7e1..00000000 --- a/src/DistributedRefinedDiscreteModels.jl +++ /dev/null @@ -1,43 +0,0 @@ - - -struct DistributedRefinedDiscreteModel{Dc,Dp,A,B,C} <: GridapDistributed.AbstractDistributedDiscreteModel{Dc,Dp} - model :: A - parent :: B - glue :: C - - function DistributedRefinedDiscreteModel(model ::GridapDistributed.AbstractDistributedDiscreteModel{Dc,Dp}, - parent::GridapDistributed.AbstractDistributedDiscreteModel{Dc,Dp}, - glue) where {Dc,Dp} - A = typeof(model) - B = typeof(parent) - C = typeof(glue) - return new{Dc,Dp,A,B,C}(model,parent,glue) - end -end - - -function Base.getproperty(x::DistributedRefinedDiscreteModel, sym::Symbol) - if sym === :parts - return x.model.parts - else - getfield(x, sym) - end -end - -function Base.propertynames(x::DistributedRefinedDiscreteModel, private::Bool=false) - (fieldnames(typeof(x))...,:parts) -end - -Gridap.Geometry.num_cells(model::DistributedRefinedDiscreteModel) = Gridap.Geometry.num_cells(model.model) -Gridap.Geometry.num_facets(model::DistributedRefinedDiscreteModel) = Gridap.Geometry.num_facets(model.model) -Gridap.Geometry.num_edges(model::DistributedRefinedDiscreteModel) = Gridap.Geometry.num_edges(model.model) -Gridap.Geometry.num_vertices(model::DistributedRefinedDiscreteModel) = Gridap.Geometry.num_vertices(model.model) -Gridap.Geometry.num_faces(model::DistributedRefinedDiscreteModel) = Gridap.Geometry.num_faces(model.model) -Gridap.Geometry.get_grid(model::DistributedRefinedDiscreteModel) = Gridap.Geometry.get_grid(model.model) -Gridap.Geometry.get_grid_topology(model::DistributedRefinedDiscreteModel) = Gridap.Geometry.get_grid_topology(model.model) -Gridap.Geometry.get_face_labeling(model::DistributedRefinedDiscreteModel) = Gridap.Geometry.get_face_labeling(model.model) - -GridapDistributed.local_views(model::DistributedRefinedDiscreteModel) = GridapDistributed.local_views(model.model) -GridapDistributed.get_cell_gids(model::DistributedRefinedDiscreteModel) = GridapDistributed.get_cell_gids(model.model) -GridapDistributed.get_face_gids(model::DistributedRefinedDiscreteModel,dim::Integer) = GridapDistributed.get_face_gids(model.model,dim) -GridapDistributed.generate_gids(model::DistributedRefinedDiscreteModel,spaces) = GridapDistributed.generate_gids(model.model,spaces) diff --git a/src/FESpaceHierarchies.jl b/src/FESpaceHierarchies.jl index c686d156..25fd14ee 100644 --- a/src/FESpaceHierarchies.jl +++ 
b/src/FESpaceHierarchies.jl @@ -31,7 +31,7 @@ function get_fe_space(a::FESpaceHierarchyLevel{A,B}) where {A,B} end function get_fe_space(fh::FESpaceHierarchy,lev::Int) - get_space(fh[lev]) + get_fe_space(fh[lev]) end function get_fe_space_before_redist(a::FESpaceHierarchyLevel) @@ -39,7 +39,7 @@ function get_fe_space_before_redist(a::FESpaceHierarchyLevel) end function get_fe_space_before_redist(fh::FESpaceHierarchy,lev::Int) - get_space_before_redist(fh[lev]) + get_fe_space_before_redist(fh[lev]) end function Gridap.FESpaces.TestFESpace( @@ -69,8 +69,8 @@ end function Gridap.FESpaces.TestFESpace(mh::ModelHierarchy,args...;kwargs...) where {A,B} test_spaces = Vector{FESpaceHierarchyLevel}(undef,num_levels(mh)) for i=1:num_levels(mh) - model = get_model(mh,i) - if (GridapP4est.i_am_in(model.parts)) + parts = get_level_parts(mh,i) + if (GridapP4est.i_am_in(parts)) Vh = TestFESpace(get_level(mh,i),args...;kwargs...) test_spaces[i] = Vh end @@ -81,8 +81,8 @@ end function Gridap.FESpaces.TrialFESpace(u,a::FESpaceHierarchy) trial_spaces = Vector{FESpaceHierarchyLevel}(undef,num_levels(a.mh)) for i=1:num_levels(a.mh) - model = get_model(a.mh,i) - if (GridapP4est.i_am_in(model.parts)) + parts = get_level_parts(a.mh,i) + if (GridapP4est.i_am_in(parts)) Uh = TrialFESpace(u,a[i]) trial_spaces[i] = Uh end @@ -93,8 +93,8 @@ end function Gridap.FESpaces.TrialFESpace(a::FESpaceHierarchy) trial_spaces = Vector{FESpaceHierarchyLevel}(undef,num_levels(a.mh)) for i=1:num_levels(a.mh) - model = get_model(a.mh,i) - if (GridapP4est.i_am_in(model.parts)) + parts = get_level_parts(a.mh,i) + if (GridapP4est.i_am_in(parts)) Uh = TrialFESpace(a[i]) trial_spaces[i] = Uh end diff --git a/src/GridapSolvers.jl b/src/GridapSolvers.jl index 68379a53..7eba4269 100644 --- a/src/GridapSolvers.jl +++ b/src/GridapSolvers.jl @@ -17,21 +17,21 @@ module GridapSolvers export DistributedRefinedDiscreteModel export ModelHierarchy - export num_levels, get_level, get_model, get_model_before_redist, has_refinement, has_redistribution + export num_levels, get_level, get_level_parts + export get_model, get_model_before_redist, has_refinement, has_redistribution export FESpaceHierarchy - export get_space, get_space_before_redist + export get_fe_space, get_fe_space_before_redist export DistributedGridTransferOperator export RestrictionOperator, ProlongationOperator export setup_transfer_operators include("PartitionedArraysExtensions.jl") - include("DistributedRefinedDiscreteModels.jl") + include("RefinementTools.jl") + include("RedistributeTools.jl") include("ModelHierarchies.jl") include("FESpaceHierarchies.jl") - include("RedistributeTools.jl") - include("RefinementTools.jl") include("DistributedGridTransferOperators.jl") diff --git a/src/ModelHierarchies.jl b/src/ModelHierarchies.jl index d04600a9..bffe54d2 100644 --- a/src/ModelHierarchies.jl +++ b/src/ModelHierarchies.jl @@ -25,6 +25,8 @@ end num_levels(a::ModelHierarchy) = length(a.levels) get_level(a::ModelHierarchy,level::Integer) = a.levels[level] +get_level_parts(a::ModelHierarchy,level::Integer) = a.level_parts[level] + get_model(a::ModelHierarchy,level::Integer) = get_model(get_level(a,level)) get_model(a::ModelHierarchyLevel{A,B,Nothing}) where {A,B} = a.model get_model(a::ModelHierarchyLevel{A,B,C}) where {A,B,C} = a.model_red @@ -53,7 +55,7 @@ function ModelHierarchy(parts,coarsest_model::GridapDistributed.AbstractDistribu for i = num_levels-1:-1:1 modelH = get_model(meshes[i+1]) - if (num_procs_x_level[i]!=num_procs_x_level[i+1]) + if (num_procs_x_level[i] != 
num_procs_x_level[i+1]) # meshes[i+1].model is distributed among P processors # model_ref is distributed among Q processors, with P!=Q model_ref,ref_glue = Gridap.Refinement.refine(modelH,level_parts[i]) @@ -73,8 +75,10 @@ function convert_to_refined_models(mh::ModelHierarchy) nlevs = num_levels(mh) levels = Vector{ModelHierarchyLevel}(undef,nlevs) for lev in 1:nlevs-1 + model = get_model_before_redist(mh,lev) + parent = get_model(mh,lev+1) ref_glue = mh.levels[lev].ref_glue - model_ref = DistributedRefinedDiscreteModel(get_model_before_redist(mh,lev),get_model(mh,lev+1),ref_glue) + model_ref = DistributedRefinedDiscreteModel(model,parent,ref_glue) levels[lev] = ModelHierarchyLevel(lev,model_ref,ref_glue,mh.levels[lev].model_red,mh.levels[lev].red_glue) end levels[nlevs] = mh.levels[nlevs] diff --git a/src/RefinementTools.jl b/src/RefinementTools.jl index 7fbaf706..a6757631 100644 --- a/src/RefinementTools.jl +++ b/src/RefinementTools.jl @@ -1,5 +1,42 @@ -const DistributedRefinedTriangulation{Dc,Dp} = GridapDistributed.DistributedTriangulation{Dc,Dp,AbstractPData{<:RefinedTriangulation{Dc,Dp}}} +# DistributedRefinedDiscreteModels + +const DistributedRefinedDiscreteModel{Dc,Dp} = GridapDistributed.DistributedDiscreteModel{Dc,Dp,<:AbstractPData{<:RefinedDiscreteModel{Dc,Dp}}} + +function DistributedRefinedDiscreteModel(model::GridapDistributed.AbstractDistributedDiscreteModel, + parent_models::AbstractPData{<:DiscreteModel}, + glue::AbstractPData{<:RefinementGlue}) + + models = map_parts(local_views(model),parent_models,glue) do model, parent, glue + RefinedDiscreteModel(model,parent,glue) + end + return GridapDistributed.DistributedDiscreteModel(models,get_cell_gids(model)) +end + +function DistributedRefinedDiscreteModel(model::GridapDistributed.AbstractDistributedDiscreteModel, + parent::GridapDistributed.AbstractDistributedDiscreteModel, + glue::AbstractPData{<:RefinementGlue}) + if !(model.parts === parent.parts) + parent_models = map_parts(local_views(model)) do m + parent_models = local_views(parent) + if i_am_in(model.parts) + parent_models.part + else + nothing + end + end + return DistributedRefinedDiscreteModel(model,parent_models,glue) + else + return DistributedRefinedDiscreteModel(model,local_views(parent),glue) + end +end + +# DistributedRefinedTriangulations + +const DistributedRefinedTriangulation{Dc,Dp} = GridapDistributed.DistributedTriangulation{Dc,Dp,<:AbstractPData{<:RefinedTriangulation{Dc,Dp}}} + + +# DistributedFESpaces function get_test_space(U::GridapDistributed.DistributedSingleFieldFESpace) spaces = map_parts(local_views(U)) do U @@ -10,15 +47,22 @@ function get_test_space(U::GridapDistributed.DistributedSingleFieldFESpace) return GridapDistributed.DistributedSingleFieldFESpace(spaces,gids,vector_type) end +function FESpaces.get_triangulation(f::GridapDistributed.DistributedSingleFieldFESpace,model::GridapDistributed.AbstractDistributedDiscreteModel) + trians = map_parts(get_triangulation,local_views(f)) + GridapDistributed.DistributedTriangulation(trians,model) +end + + +# Refinement Operators + function Gridap.Refinement.ProjectionTransferOperator(from::GridapDistributed.DistributedFESpace, - to::GridapDistributed.DistributedFESpace; + Ω_from::GridapDistributed.DistributedTriangulation, + to::GridapDistributed.DistributedFESpace, + Ω_to::GridapDistributed.DistributedTriangulation; solver::LinearSolver=BackslashSolver(), Π=Gridap.Refinement.Π_l2, qdegree=3) - Ω_from = get_triangulation(from) - Ω_to = get_triangulation(to) - @assert 
isa(Ω_from,DistributedRefinedTriangulation) || isa(Ω_to,DistributedRefinedTriangulation) - @assert is_change_possible(Ω_from,Ω_to) + #@assert is_change_possible(Ω_from,Ω_to) # Choose integration space Ω = best_target(Ω_from,Ω_to) @@ -26,7 +70,8 @@ function Gridap.Refinement.ProjectionTransferOperator(from::GridapDistributed.Di U = (Ω === Ω_from) ? from : to V = get_test_space(U) vh_to = get_fe_basis(to) - vh = change_domain(vh_to,Ω) + #vh = change_domain(vh_to,Ω) + vh = (Ω === Ω_from) ? change_domain_c2f(vh_to,Ω,Ω.model.glue) : vh_to # Prepare system V_to = get_test_space(to) @@ -38,14 +83,13 @@ function Gridap.Refinement.ProjectionTransferOperator(from::GridapDistributed.Di ss = symbolic_setup(solver,lhs_mat) ns = numerical_setup(ss,lhs_mat) - caches = ns, lhs_vec, rhs_vec, Π, assem, Ω, dΩ, U, V, vh + caches = ns, lhs_vec, rhs_vec, Π, assem, Ω, dΩ, U, V, vh, Ω_to return Gridap.Refinement.ProjectionTransferOperator(eltype(sysmat),from,to,caches) end # Solves the problem Π(uh,vh)_to = Π(uh_from,vh)_Ω for all vh in Vh_to function LinearAlgebra.mul!(y::PVector,A::Gridap.Refinement.ProjectionTransferOperator,x::PVector) - ns, lhs_vec, rhs_vec, Π, assem, Ω, dΩ, U, V , vh_Ω = A.caches - Ω_to = get_triangulation(A.to) + ns, lhs_vec, rhs_vec, Π, assem, Ω, dΩ, U, V, vh_Ω, Ω_to = A.caches # Bring uh to the integration domain uh_from = FEFunction(A.from,x) @@ -65,6 +109,9 @@ function LinearAlgebra.mul!(y::PVector,A::Gridap.Refinement.ProjectionTransferOp return y end + +# ChangeDomain + function Gridap.Refinement.merge_contr_cells(a::GridapDistributed.DistributedDomainContribution, rtrian::GridapDistributed.DistributedTriangulation, ctrian::GridapDistributed.DistributedTriangulation) @@ -90,4 +137,4 @@ function Gridap.Refinement.change_domain_c2f(c_cell_field, dfield = map_parts(Gridap.Refinement.change_domain_c2f,local_views(c_cell_field_fine),local_views(ftrian),glue) return GridapDistributed.DistributedCellField(dfield) -end \ No newline at end of file +end diff --git a/test/mpi/InterGridTransferOperatorsTests.jl b/test/mpi/InterGridTransferOperatorsTests.jl index e69de29b..b266599f 100644 --- a/test/mpi/InterGridTransferOperatorsTests.jl +++ b/test/mpi/InterGridTransferOperatorsTests.jl @@ -0,0 +1,58 @@ +module InterGridTransferOperatorsTests + using MPI + using PartitionedArrays + using Gridap + using GridapDistributed + using GridapP4est + using GridapSolvers + using Test + + u(x) = x[1] + x[2] + + function model_hierarchy_free!(mh::ModelHierarchy) + for lev in 1:num_levels(mh) + model = get_model(mh,lev) + isa(model,DistributedRefinedDiscreteModel) && (model = model.model) + octree_distributed_discrete_model_free!(model) + end + end + + function run(parts,num_parts_x_level,num_trees,num_refs_coarse) + num_levels = length(num_parts_x_level) + domain = (0,1,0,1) + cmodel = CartesianDiscreteModel(domain,num_trees) + coarse_model = OctreeDistributedDiscreteModel(parts,cmodel,num_refs_coarse) + mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) + + # FE Spaces + order = 1 + reffe = ReferenceFE(lagrangian,Float64,order) + tests = TestFESpace(mh,reffe,dirichlet_tags="boundary") + trials = TrialFESpace(u,tests) + + # Transfer ops + qdegree = 2 + R = RestrictionOperator(1,trials,qdegree) + P = ProlongationOperator(1,trials,qdegree) + @test isa(R,DistributedGridTransferOperator{Val{:restriction},Val{true}}) + @test isa(P,DistributedGridTransferOperator{Val{:prolongation},Val{true}}) + + R = RestrictionOperator(2,trials,qdegree) + P = ProlongationOperator(2,trials,qdegree) + @test 
isa(R,DistributedGridTransferOperator{Val{:restriction},Val{false}}) + @test isa(P,DistributedGridTransferOperator{Val{:prolongation},Val{false}}) + + #ops = setup_transfer_operators(trials,qdegree) + + model_hierarchy_free!(mh) + end + + + num_parts_x_level = [4,2,2] # Procs in each refinement level + num_trees = (1,1) # Number of initial P4est trees + num_refs_coarse = 2 # Number of initial refinements + + ranks = num_parts_x_level[1] + prun(run,mpi,ranks,num_parts_x_level,num_trees,num_refs_coarse) + MPI.Finalize() +end From 57231acdbb3a35fefa9169e25dcd07f3cde5f948 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Mon, 7 Nov 2022 19:18:13 +1100 Subject: [PATCH 23/95] more fixes --- Manifest.toml | 20 ++++++------- src/DistributedGridTransferOperators.jl | 10 +++++-- src/FESpaceHierarchies.jl | 6 ++-- src/ModelHierarchies.jl | 13 +++++---- src/RefinementTools.jl | 5 ++-- test/mpi/InterGridTransferOperatorsTests.jl | 31 ++++++++++++--------- test/mpi/ModelHierarchiesTests.jl | 5 ++-- 7 files changed, 51 insertions(+), 39 deletions(-) diff --git a/Manifest.toml b/Manifest.toml index 67f673f5..25beaa59 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -32,9 +32,9 @@ version = "1.1.1" [[deps.ArrayInterfaceCore]] deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"] -git-tree-sha1 = "732cddf5c7a3d4e7d4829012042221a724a30674" +git-tree-sha1 = "c46fb7dd1d8ca1d213ba25848a5ec4e47a1a1b08" uuid = "30b0a656-2188-435a-8636-2ec0e6a096e2" -version = "0.1.24" +version = "0.1.26" [[deps.ArrayLayouts]] deps = ["FillArrays", "LinearAlgebra", "SparseArrays"] @@ -188,9 +188,9 @@ version = "0.12.8" [[deps.FiniteDiff]] deps = ["ArrayInterfaceCore", "LinearAlgebra", "Requires", "Setfield", "SparseArrays", "StaticArrays"] -git-tree-sha1 = "5a2cff9b6b77b33b89f3d97a4d367747adce647e" +git-tree-sha1 = "bb61d9e5085784fe453f70c97b23964c5bf36942" uuid = "6a86dc24-6348-571c-b903-95158fe2bd41" -version = "2.15.0" +version = "2.16.0" [[deps.ForwardDiff]] deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions", "StaticArrays"] @@ -220,7 +220,7 @@ version = "0.2.6" [[deps.GridapP4est]] deps = ["ArgParse", "FillArrays", "Gridap", "GridapDistributed", "IterativeSolvers", "Libdl", "LinearAlgebra", "MPI", "P4est_wrapper", "PartitionedArrays", "Printf", "Test"] -git-tree-sha1 = "c37fc6727b474ecd54ee23a55dc7e56d108077a5" +git-tree-sha1 = "c06179cbe35bf1bf3e721dbae3c8bb700229f035" repo-rev = "p4est-migration" repo-url = "https://github.com/gridap/GridapP4est.jl.git" uuid = "c2c8e14b-f5fd-423d-9666-1dd9ad120af9" @@ -460,10 +460,10 @@ uuid = "d96e819e-fc66-5662-9728-84c9c7592b0a" version = "0.12.3" [[deps.Parsers]] -deps = ["Dates"] -git-tree-sha1 = "6c01a9b494f6d2a9fc180a08b182fcb06f0958a0" +deps = ["Dates", "SnoopPrecompile"] +git-tree-sha1 = "cceb0257b662528ecdf0b4b4302eb00e767b38e7" uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" -version = "2.4.2" +version = "2.5.0" [[deps.PartitionedArrays]] deps = ["Distances", "IterativeSolvers", "LinearAlgebra", "MPI", "Printf", "SparseArrays", "SparseMatricesCSR"] @@ -478,9 +478,9 @@ version = "1.8.0" [[deps.PolynomialBases]] deps = ["ArgCheck", "FFTW", "FastGaussQuadrature", "LinearAlgebra", "Requires", "SpecialFunctions", "UnPack"] -git-tree-sha1 = "cb0ab14725f574a45b873b03934c2e57b934f7c0" +git-tree-sha1 = "0990e89674ff6cd5cbc7bc40c959ed77168d4aa8" uuid = "c74db56a-226d-5e98-8bb0-a6049094aeea" -version = "0.4.13" +version = "0.4.14" [[deps.Preferences]] deps = ["TOML"] diff --git 
a/src/DistributedGridTransferOperators.jl b/src/DistributedGridTransferOperators.jl index 8ac146f6..e79831db 100644 --- a/src/DistributedGridTransferOperators.jl +++ b/src/DistributedGridTransferOperators.jl @@ -22,7 +22,6 @@ ProlongationOperator(lev::Int,sh::FESpaceHierarchy,qdegree::Int) = DistributedGr function DistributedGridTransferOperator(lev::Int,sh::FESpaceHierarchy,qdegree::Int,op_type::Symbol) mh = sh.mh - cparts = get_level_parts(mh,lev+1) @check lev < num_levels(mh) @check op_type ∈ [:restriction, :prolongation] @@ -30,6 +29,7 @@ function DistributedGridTransferOperator(lev::Int,sh::FESpaceHierarchy,qdegree:: Ωh = get_triangulation(Uh,get_model_before_redist(mh,lev)) # Refinement + cparts = get_level_parts(mh,lev+1) if GridapP4est.i_am_in(cparts) UH = get_fe_space(sh,lev+1) ΩH = get_triangulation(UH,get_model(mh,lev+1)) @@ -60,11 +60,15 @@ function DistributedGridTransferOperator(lev::Int,sh::FESpaceHierarchy,qdegree:: end function setup_transfer_operators(sh::FESpaceHierarchy, qdegree::Int) + mh = sh.mh restrictions = Vector{DistributedGridTransferOperator}(undef,num_levels(sh)-1) interpolations = Vector{DistributedGridTransferOperator}(undef,num_levels(sh)-1) for lev in 1:num_levels(sh)-1 - restrictions[lev] = RestrictionOperator(lev,sh,qdegree) - interpolations[lev] = InterpolationOperator(lev,sh,qdegree) + parts = get_level_parts(mh,lev) + if GridapP4est.i_am_in(parts) + restrictions[lev] = RestrictionOperator(lev,sh,qdegree) + interpolations[lev] = ProlongationOperator(lev,sh,qdegree) + end end return restrictions, interpolations end diff --git a/src/FESpaceHierarchies.jl b/src/FESpaceHierarchies.jl index 25fd14ee..a1705d49 100644 --- a/src/FESpaceHierarchies.jl +++ b/src/FESpaceHierarchies.jl @@ -72,7 +72,7 @@ function Gridap.FESpaces.TestFESpace(mh::ModelHierarchy,args...;kwargs...) where parts = get_level_parts(mh,i) if (GridapP4est.i_am_in(parts)) Vh = TestFESpace(get_level(mh,i),args...;kwargs...) 
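+ # NOTE: entries of test_spaces for levels this processor is not part of are left #undef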
- test_spaces[i] = Vh + test_spaces[i] = Vh end end FESpaceHierarchy(mh,test_spaces) @@ -84,7 +84,7 @@ function Gridap.FESpaces.TrialFESpace(u,a::FESpaceHierarchy) parts = get_level_parts(a.mh,i) if (GridapP4est.i_am_in(parts)) Uh = TrialFESpace(u,a[i]) - trial_spaces[i] = Uh + trial_spaces[i] = Uh end end FESpaceHierarchy(a.mh,trial_spaces) @@ -96,7 +96,7 @@ function Gridap.FESpaces.TrialFESpace(a::FESpaceHierarchy) parts = get_level_parts(a.mh,i) if (GridapP4est.i_am_in(parts)) Uh = TrialFESpace(a[i]) - trial_spaces[i] = Uh + trial_spaces[i] = Uh end end FESpaceHierarchy(a.mh,trial_spaces) diff --git a/src/ModelHierarchies.jl b/src/ModelHierarchies.jl index bffe54d2..240b4fb0 100644 --- a/src/ModelHierarchies.jl +++ b/src/ModelHierarchies.jl @@ -75,11 +75,14 @@ function convert_to_refined_models(mh::ModelHierarchy) nlevs = num_levels(mh) levels = Vector{ModelHierarchyLevel}(undef,nlevs) for lev in 1:nlevs-1 - model = get_model_before_redist(mh,lev) - parent = get_model(mh,lev+1) - ref_glue = mh.levels[lev].ref_glue - model_ref = DistributedRefinedDiscreteModel(model,parent,ref_glue) - levels[lev] = ModelHierarchyLevel(lev,model_ref,ref_glue,mh.levels[lev].model_red,mh.levels[lev].red_glue) + parts = get_level_parts(mh,lev) + if (GridapP4est.i_am_in(parts)) + model = get_model_before_redist(mh,lev) + parent = get_model(mh,lev+1) + ref_glue = mh.levels[lev].ref_glue + model_ref = DistributedRefinedDiscreteModel(model,parent,ref_glue) + levels[lev] = ModelHierarchyLevel(lev,model_ref,ref_glue,mh.levels[lev].model_red,mh.levels[lev].red_glue) + end end levels[nlevs] = mh.levels[nlevs] diff --git a/src/RefinementTools.jl b/src/RefinementTools.jl index a6757631..c4178b26 100644 --- a/src/RefinementTools.jl +++ b/src/RefinementTools.jl @@ -18,9 +18,8 @@ function DistributedRefinedDiscreteModel(model::GridapDistributed.AbstractDistri glue::AbstractPData{<:RefinementGlue}) if !(model.parts === parent.parts) parent_models = map_parts(local_views(model)) do m - parent_models = local_views(parent) - if i_am_in(model.parts) - parent_models.part + if i_am_in(parent.parts) + parent.dmodel.models.part else nothing end diff --git a/test/mpi/InterGridTransferOperatorsTests.jl b/test/mpi/InterGridTransferOperatorsTests.jl index b266599f..cbe2b1ee 100644 --- a/test/mpi/InterGridTransferOperatorsTests.jl +++ b/test/mpi/InterGridTransferOperatorsTests.jl @@ -21,33 +21,38 @@ module InterGridTransferOperatorsTests num_levels = length(num_parts_x_level) domain = (0,1,0,1) cmodel = CartesianDiscreteModel(domain,num_trees) - coarse_model = OctreeDistributedDiscreteModel(parts,cmodel,num_refs_coarse) + level_parts = GridapSolvers.generate_level_parts(parts,num_parts_x_level) + + coarse_model = OctreeDistributedDiscreteModel(level_parts[num_levels],cmodel,num_refs_coarse) mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) # FE Spaces + println(" > Testing FESpaces") order = 1 reffe = ReferenceFE(lagrangian,Float64,order) tests = TestFESpace(mh,reffe,dirichlet_tags="boundary") trials = TrialFESpace(u,tests) # Transfer ops + println(" > Testing operators") qdegree = 2 - R = RestrictionOperator(1,trials,qdegree) - P = ProlongationOperator(1,trials,qdegree) - @test isa(R,DistributedGridTransferOperator{Val{:restriction},Val{true}}) - @test isa(P,DistributedGridTransferOperator{Val{:prolongation},Val{true}}) - - R = RestrictionOperator(2,trials,qdegree) - P = ProlongationOperator(2,trials,qdegree) - @test isa(R,DistributedGridTransferOperator{Val{:restriction},Val{false}}) - @test 
isa(P,DistributedGridTransferOperator{Val{:prolongation},Val{false}}) + for lev in 1:num_levels-1 + println(" > Level num ", lev) + parts = get_level_parts(mh,lev) + if GridapP4est.i_am_in(parts) + R = RestrictionOperator(lev,trials,qdegree) + P = ProlongationOperator(lev,trials,qdegree) + @test isa(R,DistributedGridTransferOperator{Val{:restriction},Val{true}}) + @test isa(P,DistributedGridTransferOperator{Val{:prolongation},Val{true}}) + end + end - #ops = setup_transfer_operators(trials,qdegree) + println(" > Testing setup_transfer_operators") + ops = setup_transfer_operators(trials,qdegree) - model_hierarchy_free!(mh) + #model_hierarchy_free!(mh) end - num_parts_x_level = [4,2,2] # Procs in each refinement level num_trees = (1,1) # Number of initial P4est trees num_refs_coarse = 2 # Number of initial refinements diff --git a/test/mpi/ModelHierarchiesTests.jl b/test/mpi/ModelHierarchiesTests.jl index e9fd4275..ea97908e 100644 --- a/test/mpi/ModelHierarchiesTests.jl +++ b/test/mpi/ModelHierarchiesTests.jl @@ -20,8 +20,9 @@ function main(parts,num_parts_x_level,num_trees,num_refs_coarse) domain = (0,1,0,1) cmodel = CartesianDiscreteModel(domain,num_trees) - num_levels = length(num_parts_x_level) - coarse_model = OctreeDistributedDiscreteModel(parts,cmodel,num_refs_coarse) + num_levels = length(num_parts_x_level) + level_parts = GridapSolvers.generate_level_parts(parts,num_parts_x_level) + coarse_model = OctreeDistributedDiscreteModel(level_parts[num_levels],cmodel,num_refs_coarse) mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) sol(x) = x[1] + x[2] From c0cb84db228b5b9ad612453c49ce9350da5405b5 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 10 Nov 2022 17:35:16 +1100 Subject: [PATCH 24/95] Added void management --- Manifest.toml | 16 +-- src/GridapDistributedExtensions.jl | 87 +++++++++++++++ src/GridapSolvers.jl | 11 +- src/ModelHierarchies.jl | 18 ++-- src/RefinementTools.jl | 111 +++++--------------- test/mpi/InterGridTransferOperatorsTests.jl | 4 + test/mpi/ModelHierarchiesTests.jl | 4 +- test/mpi/RedistributeToolsTests.jl | 2 +- 8 files changed, 141 insertions(+), 112 deletions(-) create mode 100644 src/GridapDistributedExtensions.jl diff --git a/Manifest.toml b/Manifest.toml index 25beaa59..3c93b991 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -46,9 +46,9 @@ version = "0.7.10" uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" [[deps.BSON]] -git-tree-sha1 = "306bb5574b0c1c56d7e1207581516c557d105cad" +git-tree-sha1 = "86e9781ac28f4e80e9b98f7f96eae21891332ac2" uuid = "fbb218c0-5317-5bc6-957e-2ee96dd4b1f0" -version = "0.3.5" +version = "0.3.6" [[deps.Base64]] uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" @@ -128,9 +128,9 @@ version = "1.1.0" [[deps.DiffRules]] deps = ["IrrationalConstants", "LogExpFunctions", "NaNMath", "Random", "SpecialFunctions"] -git-tree-sha1 = "8b7a4d23e22f5d44883671da70865ca98f2ebf9d" +git-tree-sha1 = "9a95659c283c9018ea99e017aa9e13b7e89fadd2" uuid = "b552c78f-8df3-52c6-915a-8e097449b14b" -version = "1.12.0" +version = "1.12.1" [[deps.Distances]] deps = ["LinearAlgebra", "SparseArrays", "Statistics", "StatsAPI"] @@ -204,7 +204,7 @@ uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820" [[deps.Gridap]] deps = ["AbstractTrees", "BSON", "BlockArrays", "Combinatorics", "DocStringExtensions", "FastGaussQuadrature", "FileIO", "FillArrays", "ForwardDiff", "JLD2", "JSON", "LineSearches", "LinearAlgebra", "NLsolve", "NearestNeighbors", "PolynomialBases", "QuadGK", "Random", "SparseArrays", "SparseMatricesCSR", "StaticArrays", "Test", "WriteVTK"] -git-tree-sha1 = 
"8975563b8197ceb50351cfaf9d4588ffedb86470" +git-tree-sha1 = "0633606f20ab18df9832a7bfed901cb0550ad212" repo-rev = "refined-discrete-models" repo-url = "https://github.com/gridap/Gridap.jl.git" uuid = "56d4f2e9-7ea1-5844-9cf6-b9c51ca7ce8e" @@ -220,7 +220,7 @@ version = "0.2.6" [[deps.GridapP4est]] deps = ["ArgParse", "FillArrays", "Gridap", "GridapDistributed", "IterativeSolvers", "Libdl", "LinearAlgebra", "MPI", "P4est_wrapper", "PartitionedArrays", "Printf", "Test"] -git-tree-sha1 = "c06179cbe35bf1bf3e721dbae3c8bb700229f035" +git-tree-sha1 = "86270740a1a74d012c076d405c921258f221f290" repo-rev = "p4est-migration" repo-url = "https://github.com/gridap/GridapP4est.jl.git" uuid = "c2c8e14b-f5fd-423d-9666-1dd9ad120af9" @@ -255,9 +255,9 @@ version = "0.9.2" [[deps.JLD2]] deps = ["FileIO", "MacroTools", "Mmap", "OrderedCollections", "Pkg", "Printf", "Reexport", "TranscodingStreams", "UUIDs"] -git-tree-sha1 = "1c3ff7416cb727ebf4bab0491a56a296d7b8cf1d" +git-tree-sha1 = "acb4be8227e1f3ff890d564d55dcde9cf92e4337" uuid = "033835bb-8acc-5ee8-8aae-3f567f8a3819" -version = "0.4.25" +version = "0.4.26" [[deps.JLLWrappers]] deps = ["Preferences"] diff --git a/src/GridapDistributedExtensions.jl b/src/GridapDistributedExtensions.jl new file mode 100644 index 00000000..fb01801a --- /dev/null +++ b/src/GridapDistributedExtensions.jl @@ -0,0 +1,87 @@ + +function change_parts(x::Union{AbstractPData,Nothing}, new_parts; default=nothing) + x_new = map_parts(new_parts) do _p + if isa(x,AbstractPData) + x.part + else + default + end + end + return x_new +end + +# get_parts + +function get_parts(x::GridapDistributed.DistributedDiscreteModel) + return PartitionedArrays.get_part_ids(x.models) +end + +function get_parts(x::GridapDistributed.DistributedTriangulation) + return PartitionedArrays.get_part_ids(x.trians) +end + +function get_parts(x::GridapP4est.OctreeDistributedDiscreteModel) + return x.parts +end + + +# Void GridapDistributed structures + +struct VoidDistributedDiscreteModel{Dc,Dp,A} <: GridapDistributed.AbstractDistributedDiscreteModel{Dc,Dp} + parts::A + function VoidDistributedDiscreteModel(Dc::Int,Dp::Int,parts) + A = typeof(parts) + return new{Dc,Dp,A}(parts) + end +end + +function VoidDistributedDiscreteModel(model::GridapDistributed.AbstractDistributedDiscreteModel{Dc,Dp}) where {Dc,Dp} + return VoidDistributedDiscreteModel(Dc,Dp,get_parts(model)) +end + +function get_parts(x::VoidDistributedDiscreteModel) + return x.parts +end + +struct VoidDistributedTriangulation{Dc,Dp,A} <: GridapType + parts::A + function VoidDistributedTriangulation(Dc::Int,Dp::Int,parts) + A = typeof(parts) + return new{Dc,Dp,A}(parts) + end +end + +function get_parts(x::VoidDistributedTriangulation) + return x.parts +end + +function VoidDistributedTriangulation(trian::GridapDistributed.DistributedTriangulation{Dc,Dp}) where {Dc,Dp} + return VoidDistributedTriangulation(Dc,Dp,get_parts(trian)) +end + +function Triangulation(model::VoidDistributedDiscreteModel{Dc,Dp}) where {Dc,Dp} + return VoidDistributedTriangulation(Dc,Dp,get_parts(model)) +end + + +# Void Gridap structures + +function void(::Type{<:UnstructuredDiscreteModel{Dc,Dp}}) where {Dc,Dp} + # This should work but does not..... 
+ """ + node_coordinates = Vector{Point{Dp,Dp}}(undef,0) + cell_node_ids = Table(Vector{Int32}(undef,0),Vector{Int32}(undef,0)) + reffes = Vector{LagrangianRefFE{Dc}}(undef,0) + cell_types = Vector{Int8}(undef,0) + grid = UnstructuredGrid(node_coordinates,cell_node_ids,reffes,cell_types) + """ + grid = UnstructuredGrid(Gridap.ReferenceFEs.LagrangianRefFE(Float64,QUAD,1)) + return UnstructuredDiscreteModel(grid) +end + +function void(::Type{<:AdaptivityGlue}) + f2c_faces_map = [Int32[1]] + fcell_to_child_id = Int32[1] + f2c_reference_cell_map = Int32[1] + return AdaptivityGlue(f2c_faces_map,fcell_to_child_id,f2c_reference_cell_map) +end diff --git a/src/GridapSolvers.jl b/src/GridapSolvers.jl index 7eba4269..f6f45ecb 100644 --- a/src/GridapSolvers.jl +++ b/src/GridapSolvers.jl @@ -7,7 +7,7 @@ module GridapSolvers using Gridap.Algebra using Gridap.Geometry using Gridap.FESpaces - using Gridap.Refinement + using Gridap.Adaptivity using PartitionedArrays using GridapDistributed using GridapP4est @@ -23,16 +23,17 @@ module GridapSolvers export FESpaceHierarchy export get_fe_space, get_fe_space_before_redist - export DistributedGridTransferOperator - export RestrictionOperator, ProlongationOperator - export setup_transfer_operators + #export DistributedGridTransferOperator + #export RestrictionOperator, ProlongationOperator + #export setup_transfer_operators include("PartitionedArraysExtensions.jl") + include("GridapDistributedExtensions.jl") include("RefinementTools.jl") include("RedistributeTools.jl") include("ModelHierarchies.jl") include("FESpaceHierarchies.jl") - include("DistributedGridTransferOperators.jl") + #include("DistributedGridTransferOperators.jl") end diff --git a/src/ModelHierarchies.jl b/src/ModelHierarchies.jl index 240b4fb0..1a79b70f 100644 --- a/src/ModelHierarchies.jl +++ b/src/ModelHierarchies.jl @@ -58,10 +58,10 @@ function ModelHierarchy(parts,coarsest_model::GridapDistributed.AbstractDistribu if (num_procs_x_level[i] != num_procs_x_level[i+1]) # meshes[i+1].model is distributed among P processors # model_ref is distributed among Q processors, with P!=Q - model_ref,ref_glue = Gridap.Refinement.refine(modelH,level_parts[i]) + model_ref,ref_glue = Gridap.Adaptivity.refine(modelH,level_parts[i]) model_red,red_glue = redistribute(model_ref) else - model_ref,ref_glue = Gridap.Refinement.refine(modelH) + model_ref,ref_glue = Gridap.Adaptivity.refine(modelH) model_red,red_glue = nothing,nothing end meshes[i] = ModelHierarchyLevel(i,model_ref,ref_glue,model_red,red_glue) @@ -75,14 +75,12 @@ function convert_to_refined_models(mh::ModelHierarchy) nlevs = num_levels(mh) levels = Vector{ModelHierarchyLevel}(undef,nlevs) for lev in 1:nlevs-1 - parts = get_level_parts(mh,lev) - if (GridapP4est.i_am_in(parts)) - model = get_model_before_redist(mh,lev) - parent = get_model(mh,lev+1) - ref_glue = mh.levels[lev].ref_glue - model_ref = DistributedRefinedDiscreteModel(model,parent,ref_glue) - levels[lev] = ModelHierarchyLevel(lev,model_ref,ref_glue,mh.levels[lev].model_red,mh.levels[lev].red_glue) - end + model = get_model_before_redist(mh,lev) + parent = get_model(mh,lev+1) + ref_glue = change_parts(mh.levels[lev].ref_glue,get_parts(model);default=void(AdaptivityGlue)) + model_ref = DistributedAdaptedDiscreteModel(model,parent,ref_glue) + + levels[lev] = ModelHierarchyLevel(lev,model_ref,ref_glue,mh.levels[lev].model_red,mh.levels[lev].red_glue) end levels[nlevs] = mh.levels[nlevs] diff --git a/src/RefinementTools.jl b/src/RefinementTools.jl index c4178b26..a0d02d0c 100644 --- 
a/src/RefinementTools.jl +++ b/src/RefinementTools.jl @@ -1,38 +1,41 @@ # DistributedRefinedDiscreteModels -const DistributedRefinedDiscreteModel{Dc,Dp} = GridapDistributed.DistributedDiscreteModel{Dc,Dp,<:AbstractPData{<:RefinedDiscreteModel{Dc,Dp}}} +const DistributedAdaptedDiscreteModel{Dc,Dp} = GridapDistributed.DistributedDiscreteModel{Dc,Dp,<:AbstractPData{<:AdaptedDiscreteModel{Dc,Dp}}} -function DistributedRefinedDiscreteModel(model::GridapDistributed.AbstractDistributedDiscreteModel, +function DistributedAdaptedDiscreteModel(model::GridapDistributed.AbstractDistributedDiscreteModel, parent_models::AbstractPData{<:DiscreteModel}, - glue::AbstractPData{<:RefinementGlue}) - + glue::AbstractPData{<:AdaptivityGlue}) models = map_parts(local_views(model),parent_models,glue) do model, parent, glue - RefinedDiscreteModel(model,parent,glue) + AdaptedDiscreteModel(model,parent,glue) end return GridapDistributed.DistributedDiscreteModel(models,get_cell_gids(model)) end -function DistributedRefinedDiscreteModel(model::GridapDistributed.AbstractDistributedDiscreteModel, +function DistributedAdaptedDiscreteModel(model::GridapDistributed.AbstractDistributedDiscreteModel, parent::GridapDistributed.AbstractDistributedDiscreteModel, - glue::AbstractPData{<:RefinementGlue}) - if !(model.parts === parent.parts) - parent_models = map_parts(local_views(model)) do m - if i_am_in(parent.parts) - parent.dmodel.models.part - else - nothing - end + glue::AbstractPData{<:Union{AdaptivityGlue,Nothing}}) + mparts = get_parts(model) + pparts = get_parts(parent) + + !i_am_in(mparts) && (return VoidDistributedDiscreteModel(model)) + (mparts === pparts) && (return DistributedAdaptedDiscreteModel(model,local_views(parent),glue)) + + parent_models, glues = map_parts(local_views(model)) do m + if i_am_in(pparts) + parent_models = local_views(parent) + parent_models.part, glue.part + else + void(typeof(m)), void(AdaptivityGlue) end - return DistributedRefinedDiscreteModel(model,parent_models,glue) - else - return DistributedRefinedDiscreteModel(model,local_views(parent),glue) end + return DistributedAdaptedDiscreteModel(model,parent_models,glues) end + # DistributedRefinedTriangulations -const DistributedRefinedTriangulation{Dc,Dp} = GridapDistributed.DistributedTriangulation{Dc,Dp,<:AbstractPData{<:RefinedTriangulation{Dc,Dp}}} +const DistributedRefinedTriangulation{Dc,Dp} = GridapDistributed.DistributedTriangulation{Dc,Dp,<:AbstractPData{<:AdaptedTriangulation{Dc,Dp}}} # DistributedFESpaces @@ -52,75 +55,11 @@ function FESpaces.get_triangulation(f::GridapDistributed.DistributedSingleFieldF end -# Refinement Operators - -function Gridap.Refinement.ProjectionTransferOperator(from::GridapDistributed.DistributedFESpace, - Ω_from::GridapDistributed.DistributedTriangulation, - to::GridapDistributed.DistributedFESpace, - Ω_to::GridapDistributed.DistributedTriangulation; - solver::LinearSolver=BackslashSolver(), - Π=Gridap.Refinement.Π_l2, - qdegree=3) - #@assert is_change_possible(Ω_from,Ω_to) - - # Choose integration space - Ω = best_target(Ω_from,Ω_to) - dΩ = Measure(Ω,qdegree) - U = (Ω === Ω_from) ? from : to - V = get_test_space(U) - vh_to = get_fe_basis(to) - #vh = change_domain(vh_to,Ω) - vh = (Ω === Ω_from) ? 
change_domain_c2f(vh_to,Ω,Ω.model.glue) : vh_to - - # Prepare system - V_to = get_test_space(to) - lhs_mat, lhs_vec = assemble_lhs(Π,Ω_to,to,V_to,qdegree) - rhs_vec = similar(lhs_vec) - assem = SparseMatrixAssembler(to,V_to) - - # Prepare solver - ss = symbolic_setup(solver,lhs_mat) - ns = numerical_setup(ss,lhs_mat) - - caches = ns, lhs_vec, rhs_vec, Π, assem, Ω, dΩ, U, V, vh, Ω_to - return Gridap.Refinement.ProjectionTransferOperator(eltype(sysmat),from,to,caches) -end - -# Solves the problem Π(uh,vh)_to = Π(uh_from,vh)_Ω for all vh in Vh_to -function LinearAlgebra.mul!(y::PVector,A::Gridap.Refinement.ProjectionTransferOperator,x::PVector) - ns, lhs_vec, rhs_vec, Π, assem, Ω, dΩ, U, V, vh_Ω, Ω_to = A.caches - - # Bring uh to the integration domain - uh_from = FEFunction(A.from,x) - uh_Ω = change_domain(uh_from,Ω,ReferenceDomain()) - - # Assemble rhs vector - contr = Π(uh_Ω,vh_Ω,dΩ) - if Ω !== Ω_to - contr = merge_contr_cells(contr,Ω,Ω_to) - end - vecdata = collect_cell_vector(A.to.space,contr) - assemble_vector!(rhs_vec,assem,vecdata) - rhs_vec .-= lhs_vec - - # Solve projection - solve!(y,ns,sysvec) - return y -end - - # ChangeDomain -function Gridap.Refinement.merge_contr_cells(a::GridapDistributed.DistributedDomainContribution, - rtrian::GridapDistributed.DistributedTriangulation, - ctrian::GridapDistributed.DistributedTriangulation) - b = map_parts(Gridap.Refinement.merge_contr_cells,local_views(a),local_views(rtrian),local_views(ctrian)) - return GridapDistributed.DistributedDomainContribution(b) -end - -function Gridap.Refinement.change_domain_c2f(c_cell_field, - ftrian::GridapDistributed.DistributedTriangulation{Dc,Dp}, - glue::MPIData{<:Union{Nothing,Gridap.Refinement.RefinementGlue}}) where {Dc,Dp} +function Gridap.Adaptivity.change_domain_c2f(c_cell_field, + ftrian::GridapDistributed.DistributedTriangulation{Dc,Dp}, + glue::AbstractPData{Gridap.Adaptivity.AdaptivityGlue}) where {Dc,Dp} i_am_in_coarse = (c_cell_field != nothing) @@ -134,6 +73,6 @@ function Gridap.Refinement.change_domain_c2f(c_cell_field, end c_cell_field_fine = GridapDistributed.DistributedCellField(fields) - dfield = map_parts(Gridap.Refinement.change_domain_c2f,local_views(c_cell_field_fine),local_views(ftrian),glue) + dfield = map_parts(Gridap.Adaptivity.change_domain_c2f,local_views(c_cell_field_fine),local_views(ftrian),glue) return GridapDistributed.DistributedCellField(dfield) end diff --git a/test/mpi/InterGridTransferOperatorsTests.jl b/test/mpi/InterGridTransferOperatorsTests.jl index cbe2b1ee..ddce7e81 100644 --- a/test/mpi/InterGridTransferOperatorsTests.jl +++ b/test/mpi/InterGridTransferOperatorsTests.jl @@ -1,4 +1,5 @@ module InterGridTransferOperatorsTests + """ using MPI using PartitionedArrays using Gridap @@ -26,6 +27,8 @@ module InterGridTransferOperatorsTests coarse_model = OctreeDistributedDiscreteModel(level_parts[num_levels],cmodel,num_refs_coarse) mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) + println(typeof(level_parts[1])) + # FE Spaces println(" > Testing FESpaces") order = 1 @@ -60,4 +63,5 @@ module InterGridTransferOperatorsTests ranks = num_parts_x_level[1] prun(run,mpi,ranks,num_parts_x_level,num_trees,num_refs_coarse) MPI.Finalize() + """ end diff --git a/test/mpi/ModelHierarchiesTests.jl b/test/mpi/ModelHierarchiesTests.jl index ea97908e..fa957952 100644 --- a/test/mpi/ModelHierarchiesTests.jl +++ b/test/mpi/ModelHierarchiesTests.jl @@ -11,7 +11,7 @@ using GridapP4est function model_hierarchy_free!(mh::ModelHierarchy) for lev in 1:num_levels(mh) model = 
get_model(mh,lev) - isa(model,DistributedRefinedDiscreteModel) && (model = model.model) + isa(model,DistributedAdaptedDiscreteModel) && (model = model.model) octree_distributed_discrete_model_free!(model) end end @@ -30,7 +30,7 @@ function main(parts,num_parts_x_level,num_trees,num_refs_coarse) tests = TestFESpace(mh,reffe,conformity=:H1) trials = TrialFESpace(sol,tests) - model_hierarchy_free!(mh) + # model_hierarchy_free!(mh) end num_parts_x_level = [4,4,2,2] # Procs in each refinement level diff --git a/test/mpi/RedistributeToolsTests.jl b/test/mpi/RedistributeToolsTests.jl index 8b3ef516..0bf72530 100644 --- a/test/mpi/RedistributeToolsTests.jl +++ b/test/mpi/RedistributeToolsTests.jl @@ -64,7 +64,7 @@ module RedistributeToolsTests n = sum(∫(uhold)*dΩ_old) @test o ≈ n - model_hierarchy_free!(mh) + #model_hierarchy_free!(mh) end From 6f00b5659aaf8108ebf3d9be73a8071adda734d8 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 15 Nov 2022 20:24:25 +1100 Subject: [PATCH 25/95] Added Gridap fixes for degenerate cases --- Manifest.toml | 16 ++-- src/GridapFixes.jl | 219 +++++++++++++++++++++++++++++++++++++++++++ src/GridapSolvers.jl | 3 + 3 files changed, 230 insertions(+), 8 deletions(-) create mode 100644 src/GridapFixes.jl diff --git a/Manifest.toml b/Manifest.toml index 3c93b991..afec9d2a 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -128,9 +128,9 @@ version = "1.1.0" [[deps.DiffRules]] deps = ["IrrationalConstants", "LogExpFunctions", "NaNMath", "Random", "SpecialFunctions"] -git-tree-sha1 = "9a95659c283c9018ea99e017aa9e13b7e89fadd2" +git-tree-sha1 = "c5b6685d53f933c11404a3ae9822afe30d522494" uuid = "b552c78f-8df3-52c6-915a-8e097449b14b" -version = "1.12.1" +version = "1.12.2" [[deps.Distances]] deps = ["LinearAlgebra", "SparseArrays", "Statistics", "StatsAPI"] @@ -194,9 +194,9 @@ version = "2.16.0" [[deps.ForwardDiff]] deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions", "StaticArrays"] -git-tree-sha1 = "187198a4ed8ccd7b5d99c41b69c679269ea2b2d4" +git-tree-sha1 = "10fa12fe96e4d76acfa738f4df2126589a67374f" uuid = "f6369f11-7733-5829-9624-2563aa707210" -version = "0.10.32" +version = "0.10.33" [[deps.Future]] deps = ["Random"] @@ -204,7 +204,7 @@ uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820" [[deps.Gridap]] deps = ["AbstractTrees", "BSON", "BlockArrays", "Combinatorics", "DocStringExtensions", "FastGaussQuadrature", "FileIO", "FillArrays", "ForwardDiff", "JLD2", "JSON", "LineSearches", "LinearAlgebra", "NLsolve", "NearestNeighbors", "PolynomialBases", "QuadGK", "Random", "SparseArrays", "SparseMatricesCSR", "StaticArrays", "Test", "WriteVTK"] -git-tree-sha1 = "0633606f20ab18df9832a7bfed901cb0550ad212" +git-tree-sha1 = "597eb564aafeb479d0c197d206857da867b248a7" repo-rev = "refined-discrete-models" repo-url = "https://github.com/gridap/Gridap.jl.git" uuid = "56d4f2e9-7ea1-5844-9cf6-b9c51ca7ce8e" @@ -212,7 +212,7 @@ version = "0.17.14" [[deps.GridapDistributed]] deps = ["FillArrays", "Gridap", "LinearAlgebra", "MPI", "PartitionedArrays", "SparseArrays", "SparseMatricesCSR", "WriteVTK"] -git-tree-sha1 = "2950eceffd1b4a35946e92489e3827aa077561ac" +git-tree-sha1 = "c3ab1740aecfba01d6f68014486285b5f3155821" repo-rev = "p4est-migration" repo-url = "https://github.com/gridap/GridapDistributed.jl.git" uuid = "f9701e48-63b3-45aa-9a63-9bc6c271f355" @@ -255,9 +255,9 @@ version = "0.9.2" [[deps.JLD2]] deps = ["FileIO", "MacroTools", "Mmap", "OrderedCollections", "Pkg", "Printf", 
"Reexport", "TranscodingStreams", "UUIDs"] -git-tree-sha1 = "acb4be8227e1f3ff890d564d55dcde9cf92e4337" +git-tree-sha1 = "18dd357553912b6adc23b5f721e4be19930140c6" uuid = "033835bb-8acc-5ee8-8aae-3f567f8a3819" -version = "0.4.26" +version = "0.4.28" [[deps.JLLWrappers]] deps = ["Preferences"] diff --git a/src/GridapFixes.jl b/src/GridapFixes.jl new file mode 100644 index 00000000..0df6a3bb --- /dev/null +++ b/src/GridapFixes.jl @@ -0,0 +1,219 @@ +function Base.map(::typeof(Gridap.Arrays.testitem), + a::Tuple{<:AbstractVector{<:AbstractVector{<:VectorValue}},<:AbstractVector{<:Gridap.Fields.LinearCombinationFieldVector}}) + a2=Gridap.Arrays.testitem(a[2]) + a1=Vector{eltype(eltype(a[1]))}(undef,size(a2,1)) + a1.=zero(Gridap.Arrays.testitem(a1)) + (a1,a2) +end + +# Fixes Err3 (see below) +function Gridap.Geometry.is_change_possible( +strian::Gridap.Geometry.Triangulation, +ttrian::Gridap.Geometry.Triangulation) + if strian === ttrian || num_cells(strian)==num_cells(ttrian)==0 + return true + end + Gridap.Helpers.@check get_background_model(strian) === get_background_model(ttrian) "Triangulations do not point to the same background discrete model!" + D = num_cell_dims(strian) + sglue = get_glue(strian,Val(D)) + tglue = get_glue(ttrian,Val(D)) + Gridap.Geometry.is_change_possible(sglue,tglue) # Fails here +end + +# Fixes Err3 (see below) +function Gridap.CellData.change_domain(a::CellField, + ::ReferenceDomain, + ttrian::Gridap.Geometry.Triangulation, + ::ReferenceDomain) + msg = """\n + We cannot move the given CellField to the reference domain of the requested triangulation. + Make sure that the given triangulation is either the same as the triangulation on which the + CellField is defined, or that the latter triangulation is the background of the former. 
+ """ + strian = get_triangulation(a) + if strian === ttrian || num_cells(strian)==num_cells(ttrian)==0 + return a + end + @assert Gridap.Geometry.is_change_possible(strian,ttrian) msg + D = num_cell_dims(strian) + sglue = get_glue(strian,Val(D)) + tglue = get_glue(ttrian,Val(D)) + Gridap.CellData.change_domain_ref_ref(a,ttrian,sglue,tglue) +end + +function Gridap.FESpaces.get_cell_fe_data(fun,f,ttrian) + sface_to_data = fun(f) + strian = get_triangulation(f) + if strian === ttrian || num_cells(strian)==num_cells(ttrian)==0 + return sface_to_data + end + @assert Gridap.Geometry.is_change_possible(strian,ttrian) + D = num_cell_dims(strian) + sglue = get_glue(strian,Val(D)) + tglue = get_glue(ttrian,Val(D)) + Gridap.FESpaces.get_cell_fe_data(fun,sface_to_data,sglue,tglue) +end + +function Gridap.Geometry.best_target(trian1::Gridap.Geometry.Triangulation,trian2::Gridap.Geometry.Triangulation) + if (num_cells(trian1)==num_cells(trian2)==0) + return trian1 + end + Gridap.Helpers.@check Gridap.Geometry.is_change_possible(trian1,trian2) + Gridap.Helpers.@check Gridap.Geometry.is_change_possible(trian2,trian1) + D1 = num_cell_dims(trian1) + D2 = num_cell_dims(trian2) + glue1 = get_glue(trian1,Val(D2)) + glue2 = get_glue(trian2,Val(D1)) + Gridap.Geometry.best_target(trian1,trian2,glue1,glue2) +end + + +function Gridap.Geometry.is_change_possible(strian::Gridap.Adaptivity.AdaptedTriangulation,ttrian::Gridap.Adaptivity.AdaptedTriangulation) + (strian === ttrian) && (return true) + (num_cells(strian)==num_cells(ttrian)==0) && (return true) + if (get_background_model(strian) === get_background_model(ttrian)) + return Gridap.Geometry.is_change_possible(strian.trian,ttrian.trian) + end + if typeof(strian.trian) == typeof(ttrian.trian) + smodel = Gridap.Adaptivity.get_adapted_model(strian) + tmodel = Gridap.Adaptivity.get_adapted_model(ttrian) + a = Gridap.Adaptivity.get_parent(tmodel) === Gridap.Adaptivity.get_model(smodel) # tmodel = refine(smodel) + b = Gridap.Adaptivity.get_parent(smodel) === Gridap.Adaptivity.get_model(tmodel) # smodel = refine(tmodel) + return a || b + end + @notimplemented + return false +end + +function Gridap.Geometry.is_change_possible(strian::Gridap.Adaptivity.AdaptedTriangulation,ttrian::Gridap.Geometry.Triangulation) + (num_cells(strian)==num_cells(ttrian)==0) && (return true) + if (get_background_model(strian) === get_background_model(ttrian)) + return Gridap.Geometry.is_change_possible(strian.trian,ttrian) + end + if typeof(strian.trian) == typeof(ttrian) + smodel = Gridap.Adaptivity.get_adapted_model(strian) + tmodel = get_background_model(ttrian) + return get_parent(smodel) === tmodel # smodel = refine(tmodel) + end + @notimplemented + return false +end + +function Gridap.Geometry.is_change_possible(strian::Gridap.Geometry.Triangulation,ttrian::Gridap.Adaptivity.AdaptedTriangulation) + (num_cells(strian)==num_cells(ttrian)==0) && (return true) + if (get_background_model(strian) === get_background_model(ttrian)) + return Gridap.Geometry.is_change_possible(strian,ttrian.trian) + end + if typeof(strian) == typeof(ttrian.trian) + smodel = get_background_model(strian) + tmodel = Gridap.Adaptivity.get_adapted_model(ttrian) + return Gridap.Adaptivity.get_parent(tmodel) === smodel # tmodel = refine(smodel) + end + @notimplemented + return false +end + +function Gridap.Geometry.best_target(strian::Gridap.Adaptivity.AdaptedTriangulation,ttrian::Gridap.Adaptivity.AdaptedTriangulation) + @check Gridap.Geometry.is_change_possible(strian,ttrian) + 
(num_cells(strian)==num_cells(ttrian)==0) && (return strian) + + (strian === ttrian) && (return ttrian) + if (get_background_model(strian) === get_background_model(ttrian)) + return Gridap.Geometry.best_target(strian.trian,ttrian.trian) + end + if typeof(strian.trian) == typeof(ttrian.trian) + smodel = Gridap.Adaptivity.get_adapted_model(strian) + tmodel = Gridap.Adaptivity.get_adapted_model(ttrian) + a = Gridap.Adaptivity.get_parent(tmodel) === Gridap.Adaptivity.get_model(smodel) # tmodel = refine(smodel) + a ? (return ttrian) : (return strian) + end + @notimplemented + return nothing +end + +function Gridap.Geometry.best_target(strian::Gridap.Adaptivity.AdaptedTriangulation,ttrian::Gridap.Geometry.Triangulation) + @check Gridap.Geometry.is_change_possible(strian,ttrian) + return strian +end + +function Gridap.Geometry.best_target(strian::Gridap.Geometry.Triangulation,ttrian::Gridap.Adaptivity.AdaptedTriangulation) + @check Gridap.Geometry.is_change_possible(strian,ttrian) + return ttrian +end + +function Gridap.CellData.change_domain(a::CellField,ttrian::Gridap.Adaptivity.AdaptedTriangulation,::ReferenceDomain) + strian = get_triangulation(a) + if (strian === ttrian) || (num_cells(strian)==num_cells(ttrian)==0) + return a + end + @assert Gridap.Geometry.is_change_possible(strian,ttrian) + if (get_background_model(strian) === get_background_model(ttrian)) + return Gridap.CellData.change_domain(a,ttrian.trian,ReferenceDomain()) + end + return Gridap.Adaptivity.change_domain_c2f(a,ttrian) +end + +function Gridap.CellData.change_domain(a::Gridap.CellData.OperationCellField,ttrian::Gridap.Adaptivity.AdaptedTriangulation,::ReferenceDomain) + strian = get_triangulation(a) + if (strian === ttrian) || (num_cells(strian)==num_cells(ttrian)==0) + return a + end + @assert Gridap.Geometry.is_change_possible(strian,ttrian) + if (get_background_model(strian) === get_background_model(ttrian)) + return Gridap.CellData.change_domain(a,ttrian.trian,ReferenceDomain()) + end + return Gridap.Adaptivity.change_domain_c2f(a,ttrian) +end + +function Gridap.CellData.change_domain(a::CellField,ttrian::Gridap.Adaptivity.AdaptedTriangulation,::PhysicalDomain) + strian = get_triangulation(a) + if (strian === ttrian) || (num_cells(strian)==num_cells(ttrian)==0) + return a + end + @assert Gridap.Geometry.is_change_possible(strian,ttrian) + if (get_background_model(strian) === get_background_model(ttrian)) + return Gridap.CellData.change_domain(a,ttrian.trian,PhysicalDomain()) + end + @notimplemented +end + +function Gridap.Geometry.move_contributions(scell_to_val::AbstractArray, strian::Gridap.Adaptivity.AdaptedTriangulation, ttrian::Gridap.Geometry.Triangulation) + (num_cells(strian)==num_cells(ttrian)==0) && (return scell_to_val) + + smodel = Gridap.Adaptivity.get_adapted_model(strian) + @check Gridap.Adaptivity.get_parent(smodel) === get_background_model(ttrian) + tcell_to_val = Gridap.Geometry.move_contributions(scell_to_val,get_adaptivity_glue(smodel)) + return tcell_to_val +end + + + +# This fix is required to be able to integrate in the overlapped mesh underlying patch smoothers +function Gridap.Geometry.get_glue(trian::BodyFittedTriangulation{Dt},::Val{Dt}) where Dt + tface_to_mface = trian.tface_to_mface + tface_to_mface_map = FillArrays.Fill(Gridap.Fields.GenericField(identity),num_cells(trian)) + if isa(tface_to_mface,Gridap.Arrays.IdentityVector) && num_faces(trian.model,Dt) == num_cells(trian) + mface_to_tface = tface_to_mface + else + #nmfaces = num_faces(trian.model,Dt) + # Crashes here!!!
It does not support overlapping!!! + mface_to_tface = nothing #PosNegPartition(tface_to_mface,Int32(nmfaces)) + end + FaceToFaceGlue(tface_to_mface,tface_to_mface_map,mface_to_tface) +end + + +function Gridap.Arrays.evaluate!(cache,s::Gridap.CellData.CellDof,f::CellField) + trian_f = get_triangulation(f) + trian_s = get_triangulation(s) + + (num_cells(trian_s)==num_cells(trian_f)==0) && (return Gridap.CellData.get_data(f)) + + if trian_f !== trian_s + @unreachable """\n + A CellDof can only be evaluated on a CellField defined on the same Triangulation. + """ + end + b = change_domain(f,s.domain_style) + lazy_map(evaluate,Gridap.CellData.get_data(s),Gridap.CellData.get_data(b)) +end \ No newline at end of file diff --git a/src/GridapSolvers.jl b/src/GridapSolvers.jl index f6f45ecb..7ad33f4a 100644 --- a/src/GridapSolvers.jl +++ b/src/GridapSolvers.jl @@ -14,6 +14,8 @@ module GridapSolvers import GridapDistributed: local_views + export change_parts, void + export DistributedRefinedDiscreteModel export ModelHierarchy @@ -29,6 +31,7 @@ module GridapSolvers include("PartitionedArraysExtensions.jl") include("GridapDistributedExtensions.jl") + include("GridapFixes.jl") include("RefinementTools.jl") include("RedistributeTools.jl") include("ModelHierarchies.jl") From bf0dbd91384f4e3de32b62f0d92c702f3f50842e Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 15 Nov 2022 20:25:50 +1100 Subject: [PATCH 26/95] Started adding support for empty parts --- src/FESpaceHierarchies.jl | 6 +- src/GridapDistributedExtensions.jl | 95 ++++++++++++++++++++++++++---- src/ModelHierarchies.jl | 9 ++- src/PartitionedArraysExtensions.jl | 4 +- test/mpi/ModelHierarchiesTests.jl | 6 +- test/mpi/RedistributeToolsTests.jl | 14 +++-- test/mpi/RefinementToolsTests.jl | 78 ++++++++++++++++++++++++ 7 files changed, 180 insertions(+), 32 deletions(-) create mode 100644 test/mpi/RefinementToolsTests.jl diff --git a/src/FESpaceHierarchies.jl b/src/FESpaceHierarchies.jl index a1705d49..0b96a6f6 100644 --- a/src/FESpaceHierarchies.jl +++ b/src/FESpaceHierarchies.jl @@ -68,7 +68,7 @@ end function Gridap.FESpaces.TestFESpace(mh::ModelHierarchy,args...;kwargs...) where {A,B} test_spaces = Vector{FESpaceHierarchyLevel}(undef,num_levels(mh)) - for i=1:num_levels(mh) + for i = 1:num_levels(mh) parts = get_level_parts(mh,i) if (GridapP4est.i_am_in(parts)) Vh = TestFESpace(get_level(mh,i),args...;kwargs...) @@ -80,7 +80,7 @@ end function Gridap.FESpaces.TrialFESpace(u,a::FESpaceHierarchy) trial_spaces = Vector{FESpaceHierarchyLevel}(undef,num_levels(a.mh)) - for i=1:num_levels(a.mh) + for i = 1:num_levels(a.mh) parts = get_level_parts(a.mh,i) if (GridapP4est.i_am_in(parts)) Uh = TrialFESpace(u,a[i]) @@ -92,7 +92,7 @@ end function Gridap.FESpaces.TrialFESpace(a::FESpaceHierarchy) trial_spaces = Vector{FESpaceHierarchyLevel}(undef,num_levels(a.mh)) - for i=1:num_levels(a.mh) + for i = 1:num_levels(a.mh) parts = get_level_parts(a.mh,i) if (GridapP4est.i_am_in(parts)) Uh = TrialFESpace(a[i]) diff --git a/src/GridapDistributedExtensions.jl b/src/GridapDistributedExtensions.jl index fb01801a..5701eee0 100644 --- a/src/GridapDistributedExtensions.jl +++ b/src/GridapDistributedExtensions.jl @@ -1,4 +1,19 @@ +# DistributedCompositeMeasure + +function Gridap.CellData.Measure(tt::GridapDistributed.DistributedTriangulation{Dc,Dp}, + it::GridapDistributed.DistributedTriangulation{Dc,Dp}, + args...) 
where {Dc,Dp} + itrians = change_parts(local_views(it),get_parts(tt);default=void(BodyFittedTriangulation{Dc,Dp})) + + measures = map_parts(local_views(tt),itrians) do ttrian, itrian + Measure(ttrian,itrian,args...) + end + return GridapDistributed.DistributedMeasure(measures) +end + +# change_parts + function change_parts(x::Union{AbstractPData,Nothing}, new_parts; default=nothing) x_new = map_parts(new_parts) do _p if isa(x,AbstractPData) @@ -10,6 +25,51 @@ function change_parts(x::Union{AbstractPData,Nothing}, new_parts; default=nothin return x_new end +function change_parts(::Type{<:GridapDistributed.DistributedCellField},x,new_parts) + if isa(x,GridapDistributed.DistributedCellField) + fields = change_parts(local_views(x),new_parts) + else + fields = change_parts(nothing,new_parts;default=void(CellField)) + end + return GridapDistributed.DistributedCellField(fields) +end + +""" +function change_parts(::Type{<:GridapDistributed.DistributedSingleFieldFEFunction},x,new_parts) + if isa(x,GridapDistributed.DistributedSingleFieldFEFunction) + fields = change_parts(local_views(x),new_parts) + metadata = GridapDistributed.DistributedFEFunctionData(change_parts(x.metadata.free_values,new_parts)) + else + fields = change_parts(nothing,new_parts;default=void(CellField)) + metadata = GridapDistributed.DistributedFEFunctionData(change_parts(nothing,new_parts;default=Float64[])) + end + return GridapDistributed.DistributedCellField(fields,metadata) +end +""" + +""" +function change_parts(::Type{<:PRange},x::Union{PRange,Nothing}, new_parts) + if isa(x,PRange) + ngids = x.ngids + partition = change_parts(x.partition,new_parts;default=void(IndexSet)) + exchanger = x.exchanger + gid_to_part = x.gid_to_part + ghost = x.ghost + else + ngids = 0 + partition = change_parts(nothing,new_parts;default=void(IndexSet)) + exchanger = empty_exchanger(new_parts) + gid_to_part = nothing + ghost = false + end + return PRange(ngids,partition,exchanger,gid_to_part,ghost) +end + +function void(::Type{IndexSet}) + return IndexSet(0,Int[],Int32[],Int32[],Int32[],Int32[],Dict{Int,Int32}()) +end +""" + # get_parts function get_parts(x::GridapDistributed.DistributedDiscreteModel) @@ -66,22 +126,31 @@ end # Void Gridap structures +function void(::Type{<:CartesianDiscreteModel{Dc,Dp}}) where {Dc,Dp} + #domain = Tuple(fill(0.0,2*Dc)) + domain = Tuple(repeat([0,1],Dc)) + partition = Tuple(fill(0,Dc)) + return CartesianDiscreteModel(domain,partition) +end + function void(::Type{<:UnstructuredDiscreteModel{Dc,Dp}}) where {Dc,Dp} - # This should work but does not..... 
- """ - node_coordinates = Vector{Point{Dp,Dp}}(undef,0) - cell_node_ids = Table(Vector{Int32}(undef,0),Vector{Int32}(undef,0)) - reffes = Vector{LagrangianRefFE{Dc}}(undef,0) - cell_types = Vector{Int8}(undef,0) - grid = UnstructuredGrid(node_coordinates,cell_node_ids,reffes,cell_types) - """ - grid = UnstructuredGrid(Gridap.ReferenceFEs.LagrangianRefFE(Float64,QUAD,1)) - return UnstructuredDiscreteModel(grid) + cmodel = void(CartesianDiscreteModel{Dc,Dp}) + return UnstructuredDiscreteModel(cmodel) end function void(::Type{<:AdaptivityGlue}) - f2c_faces_map = [Int32[1]] - fcell_to_child_id = Int32[1] - f2c_reference_cell_map = Int32[1] + f2c_faces_map = [Int32[],Int32[],Int32[]] + fcell_to_child_id = Int32[] + f2c_reference_cell_map = Int32[] return AdaptivityGlue(f2c_faces_map,fcell_to_child_id,f2c_reference_cell_map) end + +function void(::Type{<:BodyFittedTriangulation{Dc,Dp}}) where {Dc,Dp} + model = void(UnstructuredDiscreteModel{Dc,Dp}) + return Gridap.Geometry.Triangulation(model) +end + +function void(::Type{<:CellField}) + trian = void(BodyFittedTriangulation{2,2}) + return Gridap.CellData.CellField(0.0,trian,ReferenceDomain()) +end diff --git a/src/ModelHierarchies.jl b/src/ModelHierarchies.jl index 1a79b70f..f0189777 100644 --- a/src/ModelHierarchies.jl +++ b/src/ModelHierarchies.jl @@ -45,12 +45,11 @@ has_redistribution(a::ModelHierarchyLevel{A,B,C,Nothing}) where {A,B,C} = false each level into. We need `num_procs_x_level[end]` to be equal to the number of parts of `model`. """ -function ModelHierarchy(parts,coarsest_model::GridapDistributed.AbstractDistributedDiscreteModel,num_procs_x_level::Vector{Int}; num_refs_x_level=nothing) +function ModelHierarchy(coarsest_model::GridapDistributed.AbstractDistributedDiscreteModel,level_parts; num_refs_x_level=nothing) # TODO: Implement support for num_refs_x_level? 
(future work) - num_levels = length(num_procs_x_level) - level_parts = generate_level_parts(parts,num_procs_x_level) - - meshes = Vector{ModelHierarchyLevel}(undef,num_levels) + num_levels = length(level_parts) + num_procs_x_level = map(num_parts,level_parts) + meshes = Vector{ModelHierarchyLevel}(undef,num_levels) meshes[num_levels] = ModelHierarchyLevel(num_levels,coarsest_model,nothing,nothing,nothing) for i = num_levels-1:-1:1 diff --git a/src/PartitionedArraysExtensions.jl b/src/PartitionedArraysExtensions.jl index 5f8e32f7..51c80e41 100644 --- a/src/PartitionedArraysExtensions.jl +++ b/src/PartitionedArraysExtensions.jl @@ -25,7 +25,7 @@ function i_am_in(comm::MPI.Comm) PartitionedArrays.get_part_id(comm) >=0 end -function i_am_in(parts) +function i_am_in(parts::MPIData) i_am_in(parts.comm) end @@ -103,4 +103,4 @@ function generate_level_parts(parts,num_procs_x_level) end end return level_parts -end \ No newline at end of file +end diff --git a/test/mpi/ModelHierarchiesTests.jl b/test/mpi/ModelHierarchiesTests.jl index fa957952..19c489c4 100644 --- a/test/mpi/ModelHierarchiesTests.jl +++ b/test/mpi/ModelHierarchiesTests.jl @@ -23,7 +23,7 @@ function main(parts,num_parts_x_level,num_trees,num_refs_coarse) num_levels = length(num_parts_x_level) level_parts = GridapSolvers.generate_level_parts(parts,num_parts_x_level) coarse_model = OctreeDistributedDiscreteModel(level_parts[num_levels],cmodel,num_refs_coarse) - mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) + mh = ModelHierarchy(coarse_model,level_parts) sol(x) = x[1] + x[2] reffe = ReferenceFE(lagrangian,Float64,1) @@ -38,7 +38,7 @@ num_trees = (1,1) # Number of initial P4est trees num_refs_coarse = 2 # Number of initial refinements ranks = num_parts_x_level[1] -prun(main,mpi,ranks,num_parts_x_level,num_trees,num_refs_coarse) -MPI.Finalize() +#prun(main,mpi,ranks,num_parts_x_level,num_trees,num_refs_coarse) +#MPI.Finalize() end \ No newline at end of file diff --git a/test/mpi/RedistributeToolsTests.jl b/test/mpi/RedistributeToolsTests.jl index 0bf72530..c4e983f7 100644 --- a/test/mpi/RedistributeToolsTests.jl +++ b/test/mpi/RedistributeToolsTests.jl @@ -7,8 +7,6 @@ module RedistributeToolsTests using GridapSolvers using Test - u(x) = x[1] + x[2] - function model_hierarchy_free!(mh::ModelHierarchy) for lev in 1:num_levels(mh) model = get_model(mh,lev) @@ -20,11 +18,15 @@ module RedistributeToolsTests function run(parts,num_parts_x_level,num_trees,num_refs_coarse) domain = (0,1,0,1) cmodel = CartesianDiscreteModel(domain,num_trees) - coarse_model = OctreeDistributedDiscreteModel(parts,cmodel,num_refs_coarse) - mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) + + num_levels = length(num_parts_x_level) + level_parts = GridapSolvers.generate_level_parts(parts,num_parts_x_level) + coarse_model = OctreeDistributedDiscreteModel(level_parts[num_levels],cmodel,num_refs_coarse) + mh = ModelHierarchy(coarse_model,level_parts) # FE Spaces order = 1 + u(x) = x[1] + x[2] reffe = ReferenceFE(lagrangian,Float64,order) glue = mh.levels[1].red_glue @@ -73,6 +75,6 @@ module RedistributeToolsTests num_refs_coarse = 2 # Number of initial refinements ranks = num_parts_x_level[1] - prun(run,mpi,ranks,num_parts_x_level,num_trees,num_refs_coarse) - MPI.Finalize() + #prun(run,mpi,ranks,num_parts_x_level,num_trees,num_refs_coarse) + #MPI.Finalize() end diff --git a/test/mpi/RefinementToolsTests.jl b/test/mpi/RefinementToolsTests.jl new file mode 100644 index 00000000..3393e67a --- /dev/null +++ b/test/mpi/RefinementToolsTests.jl @@ -0,0 
+1,78 @@ +module RefinementToolsTests + using MPI + using PartitionedArrays + using Gridap + using GridapDistributed + using GridapP4est + using GridapSolvers + using Test + + function run(parts,num_parts_x_level,num_trees,num_refs_coarse) + domain = (0,1,0,1) + cmodel = CartesianDiscreteModel(domain,num_trees) + + nlevs = length(num_parts_x_level) + level_parts = GridapSolvers.generate_level_parts(parts,num_parts_x_level) + coarse_model = OctreeDistributedDiscreteModel(level_parts[nlevs],cmodel,num_refs_coarse) + mh = ModelHierarchy(coarse_model,level_parts) + + # FE Spaces + order = 1 + sol(x) = x[1] + x[2] + reffe = ReferenceFE(lagrangian,Float64,order) + tests = TestFESpace(mh,reffe,conformity=:H1)#,dirichlet_tags="boundary") + trials = TrialFESpace(sol,tests) + + quad_order = 2*order+1 + for lev in 1:nlevs-1 + fparts = get_level_parts(mh,lev) + cparts = get_level_parts(mh,lev+1) + + if GridapP4est.i_am_in(fparts) + Uh = get_fe_space_before_redist(trials,lev) + Ωh = get_triangulation(Uh,get_model_before_redist(mh,lev)) + dΩh = Measure(Ωh,quad_order) + uh = interpolate(sol,Uh) + vh = get_fe_basis(Uh) + + if GridapP4est.i_am_in(cparts) + UH = get_fe_space(trials,lev+1) + ΩH = get_triangulation(UH,get_model(mh,lev+1)) + dΩH = Measure(ΩH,quad_order) + uH = interpolate(sol,UH) + vH = get_fe_basis(UH) + dΩhH = Measure(ΩH,Ωh,quad_order) + else + uH = nothing + vH = nothing + end + + uHh = change_parts(GridapDistributed.DistributedCellField,uH,fparts) + vHh = change_parts(GridapDistributed.DistributedCellField,vH,fparts) + + # Coarse FEFunction -> Fine FEFunction, by interpolation + uh_f_inter = interpolate(uHh,Uh) + + # Coarse FEFunction -> Fine FEFunction, by projection + #af(u,v) = ∫(v⋅u)*dΩ_f + #lf(v) = ∫(v⋅uh_c)*dΩ_f + #opf = AffineFEOperator(af,lf,U_f,V_f) + + + GridapP4est.i_am_main(parts) && println("FFFFF") + + end + end + + #model_hierarchy_free!(mh) + end + + + num_parts_x_level = [4,2,2] # Procs in each refinement level + num_trees = (1,1) # Number of initial P4est trees + num_refs_coarse = 2 # Number of initial refinements + + ranks = num_parts_x_level[1] + prun(run,mpi,ranks,num_parts_x_level,num_trees,num_refs_coarse) + MPI.Finalize() +end From efa31eeeccf9359cde141903a8a047f38e2e7e6b Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 16 Nov 2022 17:57:41 +1100 Subject: [PATCH 27/95] Continued dealing with void --- Manifest.toml | 2 +- Project.toml | 1 + src/GridapFixes.jl | 45 +++++++++++++- src/GridapSolvers.jl | 1 + .../OctreeDistributedDiscreteModelsTests.jl | 48 +++++++++++++++ test/mpi/RefinementToolsTests.jl | 46 ++++++++++---- test/runtests.jl | 2 +- test/seq/DegenerateTests.jl | 61 +++++++++++++++++++ 8 files changed, 192 insertions(+), 14 deletions(-) create mode 100644 test/mpi/OctreeDistributedDiscreteModelsTests.jl create mode 100644 test/seq/DegenerateTests.jl diff --git a/Manifest.toml b/Manifest.toml index afec9d2a..70db4543 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -2,7 +2,7 @@ julia_version = "1.8.1" manifest_format = "2.0" -project_hash = "ed5df3dccfd63f0f6cd0890dd8cf640652010af8" +project_hash = "dacc65432743d6fd7291a02b1cfef7c22067c5b6" [[deps.AbstractFFTs]] deps = ["ChainRulesCore", "LinearAlgebra"] diff --git a/Project.toml b/Project.toml index f4b20679..5d6295cd 100644 --- a/Project.toml +++ b/Project.toml @@ -9,6 +9,7 @@ FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b" Gridap = "56d4f2e9-7ea1-5844-9cf6-b9c51ca7ce8e" GridapDistributed = "f9701e48-63b3-45aa-9a63-9bc6c271f355" GridapP4est = "c2c8e14b-f5fd-423d-9666-1dd9ad120af9" 
+IterativeSolvers = "42fd0dbc-a981-5370-80f2-aaf504508153" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195" PartitionedArrays = "5a9dfac6-5c52-46f7-8278-5e2210713be9" diff --git a/src/GridapFixes.jl b/src/GridapFixes.jl index 0df6a3bb..f7191e7e 100644 --- a/src/GridapFixes.jl +++ b/src/GridapFixes.jl @@ -207,7 +207,11 @@ function Gridap.Arrays.evaluate!(cache,s::Gridap.CellData.CellDof,f::CellField) trian_f = get_triangulation(f) trian_s = get_triangulation(s) - (num_cells(trian_s)==num_cells(trian_f)==0) && (return Gridap.CellData.get_data(f)) + """if num_cells(trian_s)==num_cells(trian_f)==0 + dof_data = Gridap.CellData.get_data(s) + item = Gridap.Arrays.testitem(dof_data) + return Fill(item,0) + end""" if trian_f !== trian_s @unreachable """\n @@ -216,4 +220,43 @@ function Gridap.Arrays.evaluate!(cache,s::Gridap.CellData.CellDof,f::CellField) end b = change_domain(f,s.domain_style) lazy_map(evaluate,Gridap.CellData.get_data(s),Gridap.CellData.get_data(b)) +end + + +function Gridap.CellData.integrate(f::Gridap.CellData.CellField,quad::Gridap.CellData.CellQuadrature) where DDS + trian_f = get_triangulation(f) + trian_x = get_triangulation(quad) + + #if num_cells(trian_f)==num_cells(trian_x)==0 + # return [Float64[]] + #end + + msg = """\n + Your are trying to integrate a CellField using a CellQuadrature defined on incompatible + triangulations. Verify that either the two objects are defined in the same triangulation + or that the triangulaiton of the CellField is the background triangulation of the CellQuadrature. + """ + @check is_change_possible(trian_f,trian_x) msg + + b = change_domain(f,quad.trian,quad.data_domain_style) + x = get_cell_points(quad) + bx = b(x) + if quad.data_domain_style == PhysicalDomain() && + quad.integration_domain_style == PhysicalDomain() + lazy_map(Gridap.Fields.IntegrationMap(),bx,quad.cell_weight) + elseif quad.data_domain_style == ReferenceDomain() && + quad.integration_domain_style == PhysicalDomain() + cell_map = get_cell_map(quad.trian) + cell_Jt = lazy_map(∇,cell_map) + cell_Jtx = lazy_map(evaluate,cell_Jt,quad.cell_point) + lazy_map(Gridap.Fields.IntegrationMap(),bx,quad.cell_weight,cell_Jtx) + elseif quad.data_domain_style == ReferenceDomain() && + quad.integration_domain_style == ReferenceDomain() + cell_map = Fill(Gridap.Fields.GenericField(identity),length(bx)) + cell_Jt = lazy_map(∇,cell_map) + cell_Jtx = lazy_map(evaluate,cell_Jt,quad.cell_point) + lazy_map(Gridap.Fields.IntegrationMap(),bx,quad.cell_weight,cell_Jtx) + else + @notimplemented + end end \ No newline at end of file diff --git a/src/GridapSolvers.jl b/src/GridapSolvers.jl index 7ad33f4a..ce2f5bd5 100644 --- a/src/GridapSolvers.jl +++ b/src/GridapSolvers.jl @@ -2,6 +2,7 @@ module GridapSolvers using MPI using LinearAlgebra + using FillArrays using Gridap using Gridap.Helpers using Gridap.Algebra diff --git a/test/mpi/OctreeDistributedDiscreteModelsTests.jl b/test/mpi/OctreeDistributedDiscreteModelsTests.jl new file mode 100644 index 00000000..61067958 --- /dev/null +++ b/test/mpi/OctreeDistributedDiscreteModelsTests.jl @@ -0,0 +1,48 @@ +module OctreeDistributedDiscreteModelsTests + using MPI + using Test + using Gridap + using Gridap.ReferenceFEs + using Gridap.FESpaces + using PartitionedArrays + using GridapDistributed + using GridapP4est + using GridapSolvers + + function run(parts,subdomains,num_parts_x_level) + if length(subdomains) == 2 + domain=(0,1,0,1) + else + @assert length(subdomains) == 3 + domain=(0,1,0,1,0,1) + end + + 
# Generate model + level_parts = GridapP4est.generate_level_parts(parts,num_parts_x_level) + coarse_model = CartesianDiscreteModel(domain,subdomains) + model = GridapP4est.OctreeDistributedDiscreteModel(level_parts[2],coarse_model,1) + + # Refining and distributing + fmodel , rglue = refine(model,level_parts[1]) + dfmodel, dglue = redistribute(fmodel) + + map_parts(GridapDistributed.local_views(fmodel)) do model + println(num_cells(model)) + println(typeof(model)) + end + + # FESpaces tests + sol(x) = x[1] + x[2] + reffe = ReferenceFE(lagrangian,Float64,1) + Vh = TestFESpace(fmodel, reffe; conformity=:H1) + Uh = TrialFESpace(sol,Vh) + Ω = Triangulation(fmodel) + dΩ = Measure(Ω,3) + + a(u,v) = ∫(v⋅u)*dΩ + assemble_matrix(a,Uh,Vh) + end + + #prun(run,mpi,4,(2,2),[4,2]) + #MPI.Finalize() +end # module diff --git a/test/mpi/RefinementToolsTests.jl b/test/mpi/RefinementToolsTests.jl index 3393e67a..5d4883c0 100644 --- a/test/mpi/RefinementToolsTests.jl +++ b/test/mpi/RefinementToolsTests.jl @@ -6,6 +6,7 @@ module RefinementToolsTests using GridapP4est using GridapSolvers using Test + using IterativeSolvers function run(parts,num_parts_x_level,num_trees,num_refs_coarse) domain = (0,1,0,1) @@ -29,6 +30,7 @@ module RefinementToolsTests cparts = get_level_parts(mh,lev+1) if GridapP4est.i_am_in(fparts) + Vh = get_fe_space_before_redist(tests,lev) Uh = get_fe_space_before_redist(trials,lev) Ωh = get_triangulation(Uh,get_model_before_redist(mh,lev)) dΩh = Measure(Ωh,quad_order) @@ -36,6 +38,7 @@ module RefinementToolsTests vh = get_fe_basis(Uh) if GridapP4est.i_am_in(cparts) + VH = get_fe_space(tests,lev+1) UH = get_fe_space(trials,lev+1) ΩH = get_triangulation(UH,get_model(mh,lev+1)) dΩH = Measure(ΩH,quad_order) @@ -47,24 +50,45 @@ module RefinementToolsTests vH = nothing end - uHh = change_parts(GridapDistributed.DistributedCellField,uH,fparts) - vHh = change_parts(GridapDistributed.DistributedCellField,vH,fparts) - - # Coarse FEFunction -> Fine FEFunction, by interpolation - uh_f_inter = interpolate(uHh,Uh) + uH_Ph = change_parts(GridapDistributed.DistributedCellField,uH,fparts) + vH_Ph = change_parts(GridapDistributed.DistributedCellField,vH,fparts) # Coarse FEFunction -> Fine FEFunction, by projection - #af(u,v) = ∫(v⋅u)*dΩ_f - #lf(v) = ∫(v⋅uh_c)*dΩ_f - #opf = AffineFEOperator(af,lf,U_f,V_f) + ah(u,v) = ∫(v⋅u)*dΩh + lh(v) = ∫(v⋅uH_Ph)*dΩh + Ah = assemble_matrix(ah,Uh,Vh) + bh = assemble_vector(lh,Vh) + + xh = PVector(0.0,Ah.cols) + IterativeSolvers.cg!(xh,Ah,bh;verbose=i_am_main(parts),reltol=1.0e-06) + uhH = FEFunction(Uh,xh) + + eh = sum(∫(uh-uhH)*dΩh) + i_am_main(parts) && println("Error H2h: ", eh) + + # Fine FEFunction -> Coarse FEFunction, by projection + if GridapP4est.i_am_in(cparts) + uh_PH = change_parts(GridapDistributed.DistributedCellField,uh,cparts) + aH(u,v) = ∫(v⋅u)*dΩH + lH(v) = ∫(v⋅uh_PH)*dΩhH + AH = assemble_matrix(aH,UH,VH) + bH = assemble_vector(lH,VH) + + xH = PVector(0.0,AH.cols) + IterativeSolvers.cg!(xH,AH,bH;verbose=i_am_main(parts),reltol=1.0e-06) + uHh = FEFunction(UH,xH) + + eh = sum(∫(uH-uHh)*dΩH) + i_am_main(parts) && println("Error h2H: ", eh) + else + uHh = nothing + end + uHh_Ph = change_parts(GridapDistributed.DistributedCellField,uHh,fparts) - GridapP4est.i_am_main(parts) && println("FFFFF") - end end - #model_hierarchy_free!(mh) end diff --git a/test/runtests.jl b/test/runtests.jl index 88e8f4c6..1a3e29f6 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -28,7 +28,7 @@ function run_tests(testdir) testfiles = sort(filter(istest, readdir(testdir))) @time 
@testset "$f" for f in testfiles MPI.mpiexec() do cmd - if f in ["RedistributeToolsTests.jl"] + if f in ["RedistributeToolsTests.jl","RefinementToolsTests","OctreeDistributedDiscreteModelsTests"] np = 4 extra_args = "-s 2 2 -r 2" elseif f in ["ModelHierarchiesTests.jl"] diff --git a/test/seq/DegenerateTests.jl b/test/seq/DegenerateTests.jl new file mode 100644 index 00000000..b64dcf2b --- /dev/null +++ b/test/seq/DegenerateTests.jl @@ -0,0 +1,61 @@ +#module DegenerateTests + +using LinearAlgebra +using FillArrays +using Gridap +using Gridap.Helpers +using Gridap.Algebra +using Gridap.Geometry +using Gridap.FESpaces +using Gridap.Adaptivity +using Test +using IterativeSolvers + +include("../../src/GridapFixes.jl") + +function cg_solve(op) + A = get_matrix(op) + b = get_vector(op) + x = PVector(0.0,A.cols) + IterativeSolvers.cg!(x,A,b;verbose=true,reltol=1.0e-06) + return x +end + +#function main() + domain = (0,1,0,1) + partition = (0,0) + cmodel = CartesianDiscreteModel(domain,partition) + model = UnstructuredDiscreteModel(cmodel) + + order = 1 + sol(x) = x[1] + x[2] + reffe = ReferenceFE(lagrangian,Float64,order) + Vh = TestFESpace(model,reffe,conformity=:H1,dirichlet_tags="boundary") + Uh = TrialFESpace(sol,Vh) + Ω = Triangulation(model) + dΩ = Measure(Ω,2*order+1) + + u_sol = interpolate(sol,Vh) + a(u,v) = ∫(v⋅u)*dΩ + l(v) = ∫(v⋅u_sol)*dΩ + op = AffineFEOperator(a,l,Uh,Vh) + + assemble_matrix(a,Uh,Vh) + + c = a(get_trial_fe_basis(Uh),get_fe_basis(Vh)) + first(c.dict) + + dofs = get_fe_dof_basis(Uh) + dofs(u_sol) + + x = cg_solve(op) + uh = FEFunction(x,Uh) + + eh = ∫(uh-u_sol)*dΩ + e = sum(eh) + println(e) +#end + +main() + +#end \ No newline at end of file From 518a24de535985304a1bc6f93a1e93628b89c922 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 24 Nov 2022 14:19:42 +1100 Subject: [PATCH 28/95] Propagated changes in Gridap --- Manifest.toml | 30 +++++++------- src/GridapDistributedExtensions.jl | 13 ++++-- src/GridapFixes.jl | 63 +----------------------------- src/RefinementTools.jl | 4 +- 4 files changed, 28 insertions(+), 82 deletions(-) diff --git a/Manifest.toml b/Manifest.toml index 70db4543..c308c317 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -95,9 +95,9 @@ version = "0.3.0" [[deps.Compat]] deps = ["Dates", "LinearAlgebra", "UUIDs"] -git-tree-sha1 = "3ca828fe1b75fa84b021a7860bd039eaea84d2f2" +git-tree-sha1 = "aaabba4ce1b7f8a9b34c015053d3b1edf60fa49c" uuid = "34da2185-b29b-5c13-b0c7-acf172513d20" -version = "4.3.0" +version = "4.4.0" [[deps.CompilerSupportLibraries_jll]] deps = ["Artifacts", "Libdl"] @@ -188,9 +188,9 @@ version = "0.12.8" [[deps.FiniteDiff]] deps = ["ArrayInterfaceCore", "LinearAlgebra", "Requires", "Setfield", "SparseArrays", "StaticArrays"] -git-tree-sha1 = "bb61d9e5085784fe453f70c97b23964c5bf36942" +git-tree-sha1 = "04ed1f0029b6b3af88343e439b995141cb0d0b8d" uuid = "6a86dc24-6348-571c-b903-95158fe2bd41" -version = "2.16.0" +version = "2.17.0" [[deps.ForwardDiff]] deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions", "StaticArrays"] @@ -204,7 +204,7 @@ uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820" [[deps.Gridap]] deps = ["AbstractTrees", "BSON", "BlockArrays", "Combinatorics", "DocStringExtensions", "FastGaussQuadrature", "FileIO", "FillArrays", "ForwardDiff", "JLD2", "JSON", "LineSearches", "LinearAlgebra", "NLsolve", "NearestNeighbors", "PolynomialBases", "QuadGK", "Random", "SparseArrays", "SparseMatricesCSR", "StaticArrays", "Test", 
"WriteVTK"] -git-tree-sha1 = "597eb564aafeb479d0c197d206857da867b248a7" +git-tree-sha1 = "3646853c9d7b329bfead910ed66164d85d6f8182" repo-rev = "refined-discrete-models" repo-url = "https://github.com/gridap/Gridap.jl.git" uuid = "56d4f2e9-7ea1-5844-9cf6-b9c51ca7ce8e" @@ -212,7 +212,7 @@ version = "0.17.14" [[deps.GridapDistributed]] deps = ["FillArrays", "Gridap", "LinearAlgebra", "MPI", "PartitionedArrays", "SparseArrays", "SparseMatricesCSR", "WriteVTK"] -git-tree-sha1 = "c3ab1740aecfba01d6f68014486285b5f3155821" +git-tree-sha1 = "b247a9beb4d11d5931c878ffdfdab477bc0f2607" repo-rev = "p4est-migration" repo-url = "https://github.com/gridap/GridapDistributed.jl.git" uuid = "f9701e48-63b3-45aa-9a63-9bc6c271f355" @@ -220,7 +220,7 @@ version = "0.2.6" [[deps.GridapP4est]] deps = ["ArgParse", "FillArrays", "Gridap", "GridapDistributed", "IterativeSolvers", "Libdl", "LinearAlgebra", "MPI", "P4est_wrapper", "PartitionedArrays", "Printf", "Test"] -git-tree-sha1 = "86270740a1a74d012c076d405c921258f221f290" +git-tree-sha1 = "53595da44640d7e741369cde1ba605798d665a22" repo-rev = "p4est-migration" repo-url = "https://github.com/gridap/GridapP4est.jl.git" uuid = "c2c8e14b-f5fd-423d-9666-1dd9ad120af9" @@ -321,9 +321,9 @@ uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" [[deps.LogExpFunctions]] deps = ["ChainRulesCore", "ChangesOfVariables", "DocStringExtensions", "InverseFunctions", "IrrationalConstants", "LinearAlgebra"] -git-tree-sha1 = "94d9c52ca447e23eac0c0f074effbcd38830deb5" +git-tree-sha1 = "946607f84feb96220f480e0422d3484c49c00239" uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688" -version = "0.3.18" +version = "0.3.19" [[deps.Logging]] uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" @@ -388,9 +388,9 @@ version = "2022.2.1" [[deps.NLSolversBase]] deps = ["DiffResults", "Distributed", "FiniteDiff", "ForwardDiff"] -git-tree-sha1 = "50310f934e55e5ca3912fb941dec199b49ca9b68" +git-tree-sha1 = "a0b464d183da839699f4c79e7606d9d186ec172c" uuid = "d41bc354-129a-5804-8e4c-c37616107c6c" -version = "7.8.2" +version = "7.8.3" [[deps.NLsolve]] deps = ["Distances", "LineSearches", "LinearAlgebra", "NLSolversBase", "Printf", "Reexport"] @@ -461,9 +461,9 @@ version = "0.12.3" [[deps.Parsers]] deps = ["Dates", "SnoopPrecompile"] -git-tree-sha1 = "cceb0257b662528ecdf0b4b4302eb00e767b38e7" +git-tree-sha1 = "b64719e8b4504983c7fca6cc9db3ebc8acc2a4d6" uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" -version = "2.5.0" +version = "2.5.1" [[deps.PartitionedArrays]] deps = ["Distances", "IterativeSolvers", "LinearAlgebra", "MPI", "Printf", "SparseArrays", "SparseMatricesCSR"] @@ -562,9 +562,9 @@ version = "2.1.7" [[deps.StaticArrays]] deps = ["LinearAlgebra", "Random", "StaticArraysCore", "Statistics"] -git-tree-sha1 = "f86b3a049e5d05227b10e15dbb315c5b90f14988" +git-tree-sha1 = "4e051b85454b4e4f66e6a6b7bdc452ad9da3dcf6" uuid = "90137ffa-7385-5640-81b9-e52037218182" -version = "1.5.9" +version = "1.5.10" [[deps.StaticArraysCore]] git-tree-sha1 = "6b7ba252635a5eff6a0b0664a41ee140a1c9e72a" diff --git a/src/GridapDistributedExtensions.jl b/src/GridapDistributedExtensions.jl index 5701eee0..05fd9e83 100644 --- a/src/GridapDistributedExtensions.jl +++ b/src/GridapDistributedExtensions.jl @@ -139,10 +139,15 @@ function void(::Type{<:UnstructuredDiscreteModel{Dc,Dp}}) where {Dc,Dp} end function void(::Type{<:AdaptivityGlue}) - f2c_faces_map = [Int32[],Int32[],Int32[]] - fcell_to_child_id = Int32[] - f2c_reference_cell_map = Int32[] - return AdaptivityGlue(f2c_faces_map,fcell_to_child_id,f2c_reference_cell_map) + f2c_faces_map = 
[Int32[],Int32[],Int32[]] + fcell_to_child_id = Int32[] + rrules = Fill(void(RefinementRule),0) + return AdaptivityGlue(f2c_faces_map,fcell_to_child_id,rrules) +end + +function void(::Type{<:RefinementRule}) + reffe = Gridap.ReferenceFEs.LagrangianRefFE(Float64,QUAD,1) + return RefinementRule(reffe,1) end function void(::Type{<:BodyFittedTriangulation{Dc,Dp}}) where {Dc,Dp} diff --git a/src/GridapFixes.jl b/src/GridapFixes.jl index f7191e7e..2d7cec18 100644 --- a/src/GridapFixes.jl +++ b/src/GridapFixes.jl @@ -150,7 +150,7 @@ function Gridap.CellData.change_domain(a::CellField,ttrian::Gridap.Adaptivity.Ad if (get_background_model(strian) === get_background_model(ttrian)) return Gridap.CellData.change_domain(a,ttrian.trian,ReferenceDomain()) end - return Gridap.Adaptivity.change_domain_c2f(a,ttrian) + return Gridap.Adaptivity.change_domain_o2n(a,ttrian) end function Gridap.CellData.change_domain(a::Gridap.CellData.OperationCellField,ttrian::Gridap.Adaptivity.AdaptedTriangulation,::ReferenceDomain) @@ -162,7 +162,7 @@ function Gridap.CellData.change_domain(a::Gridap.CellData.OperationCellField,ttr if (get_background_model(strian) === get_background_model(ttrian)) return Gridap.CellDatachange_domain(a,ttrian.trian,ReferenceDomain()) end - return Gridap.Adaptivity.change_domain_c2f(a,ttrian) + return Gridap.Adaptivity.change_domain_o2n(a,ttrian) end function Gridap.CellData.change_domain(a::CellField,ttrian::Gridap.Adaptivity.AdaptedTriangulation,::PhysicalDomain) @@ -201,62 +201,3 @@ function Gridap.Geometry.get_glue(trian::BodyFittedTriangulation{Dt},::Val{Dt}) end FaceToFaceGlue(tface_to_mface,tface_to_mface_map,mface_to_tface) end - - -function Gridap.Arrays.evaluate!(cache,s::Gridap.CellData.CellDof,f::CellField) - trian_f = get_triangulation(f) - trian_s = get_triangulation(s) - - """if num_cells(trian_s)==num_cells(trian_f)==0 - dof_data = Gridap.CellData.get_data(s) - item = Gridap.Arrays.testitem(dof_data) - return Fill(item,0) - end""" - - if trian_f !== trian_s - @unreachable """\n - A CellDof can only be evaluated on a CellField defined on the same Triangulation. - """ - end - b = change_domain(f,s.domain_style) - lazy_map(evaluate,Gridap.CellData.get_data(s),Gridap.CellData.get_data(b)) -end - - -function Gridap.CellData.integrate(f::Gridap.CellData.CellField,quad::Gridap.CellData.CellQuadrature) where DDS - trian_f = get_triangulation(f) - trian_x = get_triangulation(quad) - - #if num_cells(trian_f)==num_cells(trian_x)==0 - # return [Float64[]] - #end - - msg = """\n - Your are trying to integrate a CellField using a CellQuadrature defined on incompatible - triangulations. Verify that either the two objects are defined in the same triangulation - or that the triangulaiton of the CellField is the background triangulation of the CellQuadrature. 
- """ - @check is_change_possible(trian_f,trian_x) msg - - b = change_domain(f,quad.trian,quad.data_domain_style) - x = get_cell_points(quad) - bx = b(x) - if quad.data_domain_style == PhysicalDomain() && - quad.integration_domain_style == PhysicalDomain() - lazy_map(Gridap.Fields.IntegrationMap(),bx,quad.cell_weight) - elseif quad.data_domain_style == ReferenceDomain() && - quad.integration_domain_style == PhysicalDomain() - cell_map = get_cell_map(quad.trian) - cell_Jt = lazy_map(∇,cell_map) - cell_Jtx = lazy_map(evaluate,cell_Jt,quad.cell_point) - lazy_map(Gridap.Fields.IntegrationMap(),bx,quad.cell_weight,cell_Jtx) - elseif quad.data_domain_style == ReferenceDomain() && - quad.integration_domain_style == ReferenceDomain() - cell_map = Fill(Gridap.Fields.GenericField(identity),length(bx)) - cell_Jt = lazy_map(∇,cell_map) - cell_Jtx = lazy_map(evaluate,cell_Jt,quad.cell_point) - lazy_map(Gridap.Fields.IntegrationMap(),bx,quad.cell_weight,cell_Jtx) - else - @notimplemented - end -end \ No newline at end of file diff --git a/src/RefinementTools.jl b/src/RefinementTools.jl index a0d02d0c..54b22169 100644 --- a/src/RefinementTools.jl +++ b/src/RefinementTools.jl @@ -57,7 +57,7 @@ end # ChangeDomain -function Gridap.Adaptivity.change_domain_c2f(c_cell_field, +function Gridap.Adaptivity.change_domain_o2n(c_cell_field, ftrian::GridapDistributed.DistributedTriangulation{Dc,Dp}, glue::AbstractPData{Gridap.Adaptivity.AdaptivityGlue}) where {Dc,Dp} @@ -73,6 +73,6 @@ function Gridap.Adaptivity.change_domain_c2f(c_cell_field, end c_cell_field_fine = GridapDistributed.DistributedCellField(fields) - dfield = map_parts(Gridap.Adaptivity.change_domain_c2f,local_views(c_cell_field_fine),local_views(ftrian),glue) + dfield = map_parts(Gridap.Adaptivity.change_domain_o2n,local_views(c_cell_field_fine),local_views(ftrian),glue) return GridapDistributed.DistributedCellField(dfield) end From 671fec48f2255ed226854090addf0cf87ba20619 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Mon, 28 Nov 2022 18:36:52 +1100 Subject: [PATCH 29/95] Fixed tests --- test/mpi/RefinementToolsTests.jl | 32 +++++++++++++++++--------------- 1 file changed, 17 insertions(+), 15 deletions(-) diff --git a/test/mpi/RefinementToolsTests.jl b/test/mpi/RefinementToolsTests.jl index 5d4883c0..adb974b6 100644 --- a/test/mpi/RefinementToolsTests.jl +++ b/test/mpi/RefinementToolsTests.jl @@ -18,10 +18,10 @@ module RefinementToolsTests mh = ModelHierarchy(coarse_model,level_parts) # FE Spaces - order = 1 - sol(x) = x[1] + x[2] - reffe = ReferenceFE(lagrangian,Float64,order) - tests = TestFESpace(mh,reffe,conformity=:H1)#,dirichlet_tags="boundary") + order = 4 + sol(x) = x[1]*(x[1]-1.0)*x[2]*(x[2]-1.0) # This high-order function is needed to have sol(x) = 0 for x ∈ Γ + reffe = ReferenceFE(lagrangian,Float64,order) # which is needed since we cannot impose Dirichlet BCs in the empty + tests = TestFESpace(mh,reffe,conformity=:H1) # discrete models. Hopefuly fixed soon... 
trials = TrialFESpace(sol,tests) quad_order = 2*order+1 @@ -49,11 +49,10 @@ module RefinementToolsTests uH = nothing vH = nothing end - + + # Coarse FEFunction -> Fine FEFunction, by projection uH_Ph = change_parts(GridapDistributed.DistributedCellField,uH,fparts) - vH_Ph = change_parts(GridapDistributed.DistributedCellField,vH,fparts) - # Coarse FEFunction -> Fine FEFunction, by projection ah(u,v) = ∫(v⋅u)*dΩh lh(v) = ∫(v⋅uH_Ph)*dΩh Ah = assemble_matrix(ah,Uh,Vh) @@ -61,30 +60,33 @@ module RefinementToolsTests xh = PVector(0.0,Ah.cols) IterativeSolvers.cg!(xh,Ah,bh;verbose=i_am_main(parts),reltol=1.0e-06) - uhH = FEFunction(Uh,xh) + uH_projected = FEFunction(Uh,xh) - eh = sum(∫(uh-uhH)*dΩh) + _eh = uh-uH_projected + eh = sum(∫(_eh⋅_eh)*dΩh) i_am_main(parts) && println("Error H2h: ", eh) # Fine FEFunction -> Coarse FEFunction, by projection if GridapP4est.i_am_in(cparts) uh_PH = change_parts(GridapDistributed.DistributedCellField,uh,cparts) + uH_projected_PH = change_parts(GridapDistributed.DistributedCellField,uH_projected,cparts) aH(u,v) = ∫(v⋅u)*dΩH - lH(v) = ∫(v⋅uh_PH)*dΩhH + lH(v) = ∫(v⋅uH_projected_PH)*dΩhH AH = assemble_matrix(aH,UH,VH) bH = assemble_vector(lH,VH) xH = PVector(0.0,AH.cols) IterativeSolvers.cg!(xH,AH,bH;verbose=i_am_main(parts),reltol=1.0e-06) - uHh = FEFunction(UH,xH) + uh_projected = FEFunction(UH,xH) - eh = sum(∫(uH-uHh)*dΩH) - i_am_main(parts) && println("Error h2H: ", eh) + _eH = uH-uh_projected + eH = sum(∫(_eH⋅_eH)*dΩH) + i_am_main(parts) && println("Error h2H: ", eH) else - uHh = nothing + uh_projected = nothing end - uHh_Ph = change_parts(GridapDistributed.DistributedCellField,uHh,fparts) + uh_projected_Ph = change_parts(GridapDistributed.DistributedCellField,uh_projected,fparts) end end From d959a6f0b730ff87f3bbadf1be11f89083406b46 Mon Sep 17 00:00:00 2001 From: "Alberto F. 
Martin" Date: Mon, 12 Dec 2022 10:43:58 +1100 Subject: [PATCH 30/95] Updating Manifest.toml package versions --- Manifest.toml | 53 ++++++++++++++++++++++++++++----------------------- 1 file changed, 29 insertions(+), 24 deletions(-) diff --git a/Manifest.toml b/Manifest.toml index c308c317..0535c54d 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -1,6 +1,6 @@ # This file is machine-generated - editing it directly is not advised -julia_version = "1.8.1" +julia_version = "1.8.3" manifest_format = "2.0" project_hash = "dacc65432743d6fd7291a02b1cfef7c22067c5b6" @@ -45,6 +45,11 @@ version = "0.7.10" [[deps.Artifacts]] uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" +[[deps.AutoHashEquals]] +git-tree-sha1 = "45bb6705d93be619b81451bb2006b7ee5d4e4453" +uuid = "15f4f7f2-30c1-5605-9d31-71845cf9641f" +version = "0.2.0" + [[deps.BSON]] git-tree-sha1 = "86e9781ac28f4e80e9b98f7f96eae21891332ac2" uuid = "fbb218c0-5317-5bc6-957e-2ee96dd4b1f0" @@ -95,9 +100,9 @@ version = "0.3.0" [[deps.Compat]] deps = ["Dates", "LinearAlgebra", "UUIDs"] -git-tree-sha1 = "aaabba4ce1b7f8a9b34c015053d3b1edf60fa49c" +git-tree-sha1 = "00a2cccc7f098ff3b66806862d275ca3db9e6e5a" uuid = "34da2185-b29b-5c13-b0c7-acf172513d20" -version = "4.4.0" +version = "4.5.0" [[deps.CompilerSupportLibraries_jll]] deps = ["Artifacts", "Libdl"] @@ -194,21 +199,21 @@ version = "2.17.0" [[deps.ForwardDiff]] deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions", "StaticArrays"] -git-tree-sha1 = "10fa12fe96e4d76acfa738f4df2126589a67374f" +git-tree-sha1 = "a69dd6db8a809f78846ff259298678f0d6212180" uuid = "f6369f11-7733-5829-9624-2563aa707210" -version = "0.10.33" +version = "0.10.34" [[deps.Future]] deps = ["Random"] uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820" [[deps.Gridap]] -deps = ["AbstractTrees", "BSON", "BlockArrays", "Combinatorics", "DocStringExtensions", "FastGaussQuadrature", "FileIO", "FillArrays", "ForwardDiff", "JLD2", "JSON", "LineSearches", "LinearAlgebra", "NLsolve", "NearestNeighbors", "PolynomialBases", "QuadGK", "Random", "SparseArrays", "SparseMatricesCSR", "StaticArrays", "Test", "WriteVTK"] -git-tree-sha1 = "3646853c9d7b329bfead910ed66164d85d6f8182" +deps = ["AbstractTrees", "BSON", "BlockArrays", "Combinatorics", "DataStructures", "DocStringExtensions", "FastGaussQuadrature", "FileIO", "FillArrays", "ForwardDiff", "JLD2", "JSON", "LineSearches", "LinearAlgebra", "NLsolve", "NearestNeighbors", "PolynomialBases", "QuadGK", "Random", "SparseArrays", "SparseMatricesCSR", "StaticArrays", "Test", "WriteVTK"] +git-tree-sha1 = "e29f6aa54e749cfe3f5cf903d7b1c4543c177ade" repo-rev = "refined-discrete-models" repo-url = "https://github.com/gridap/Gridap.jl.git" uuid = "56d4f2e9-7ea1-5844-9cf6-b9c51ca7ce8e" -version = "0.17.14" +version = "0.17.15" [[deps.GridapDistributed]] deps = ["FillArrays", "Gridap", "LinearAlgebra", "MPI", "PartitionedArrays", "SparseArrays", "SparseMatricesCSR", "WriteVTK"] @@ -255,9 +260,9 @@ version = "0.9.2" [[deps.JLD2]] deps = ["FileIO", "MacroTools", "Mmap", "OrderedCollections", "Pkg", "Printf", "Reexport", "TranscodingStreams", "UUIDs"] -git-tree-sha1 = "18dd357553912b6adc23b5f721e4be19930140c6" +git-tree-sha1 = "ec8a9c9f0ecb1c687e34c1fda2699de4d054672a" uuid = "033835bb-8acc-5ee8-8aae-3f567f8a3819" -version = "0.4.28" +version = "0.4.29" [[deps.JLLWrappers]] deps = ["Preferences"] @@ -299,9 +304,9 @@ uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb" [[deps.Libiconv_jll]] deps = ["Artifacts", 
"JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "42b62845d70a619f063a7da093d995ec8e15e778" +git-tree-sha1 = "c7cb1f5d892775ba13767a87c7ada0b980ea0a71" uuid = "94ce4f54-9a6c-5748-9c1c-f9c7231a4531" -version = "1.16.1+1" +version = "1.16.1+2" [[deps.LightXML]] deps = ["Libdl", "XML2_jll"] @@ -348,9 +353,9 @@ version = "4.0.2+5" [[deps.MPIPreferences]] deps = ["Libdl", "Preferences"] -git-tree-sha1 = "34892fb69751a76bcf8b7add84ec77015208a1ec" +git-tree-sha1 = "71f937129731a29eabe6969db2c90368a4408933" uuid = "3da0fdf6-3ccc-4f1b-acd9-58baa6c99267" -version = "0.1.6" +version = "0.1.7" [[deps.MPItrampoline_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "Pkg", "TOML"] @@ -477,10 +482,10 @@ uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" version = "1.8.0" [[deps.PolynomialBases]] -deps = ["ArgCheck", "FFTW", "FastGaussQuadrature", "LinearAlgebra", "Requires", "SpecialFunctions", "UnPack"] -git-tree-sha1 = "0990e89674ff6cd5cbc7bc40c959ed77168d4aa8" +deps = ["ArgCheck", "AutoHashEquals", "FFTW", "FastGaussQuadrature", "LinearAlgebra", "Requires", "SpecialFunctions", "UnPack"] +git-tree-sha1 = "38629c0a9cace7c6f51c103084589ff8a7a1c02f" uuid = "c74db56a-226d-5e98-8bb0-a6049094aeea" -version = "0.4.14" +version = "0.4.15" [[deps.Preferences]] deps = ["TOML"] @@ -508,9 +513,9 @@ uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" [[deps.RecipesBase]] deps = ["SnoopPrecompile"] -git-tree-sha1 = "d12e612bba40d189cead6ff857ddb67bd2e6a387" +git-tree-sha1 = "18c35ed630d7229c5584b945641a73ca83fb5213" uuid = "3cdcf5f2-1ef4-517c-9805-6587b60abb01" -version = "1.3.1" +version = "1.3.2" [[deps.Reexport]] git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b" @@ -562,9 +567,9 @@ version = "2.1.7" [[deps.StaticArrays]] deps = ["LinearAlgebra", "Random", "StaticArraysCore", "Statistics"] -git-tree-sha1 = "4e051b85454b4e4f66e6a6b7bdc452ad9da3dcf6" +git-tree-sha1 = "ffc098086f35909741f71ce21d03dadf0d2bfa76" uuid = "90137ffa-7385-5640-81b9-e52037218182" -version = "1.5.10" +version = "1.5.11" [[deps.StaticArraysCore]] git-tree-sha1 = "6b7ba252635a5eff6a0b0664a41ee140a1c9e72a" @@ -593,7 +598,7 @@ version = "1.0.0" [[deps.Tar]] deps = ["ArgTools", "SHA"] uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e" -version = "1.10.0" +version = "1.10.1" [[deps.Test]] deps = ["InteractiveUtils", "Logging", "Random", "Serialization"] @@ -606,9 +611,9 @@ version = "1.0.1" [[deps.TranscodingStreams]] deps = ["Random", "Test"] -git-tree-sha1 = "8a75929dcd3c38611db2f8d08546decb514fcadf" +git-tree-sha1 = "e4bdc63f5c6d62e80eb1c0043fcc0360d5950ff7" uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa" -version = "0.9.9" +version = "0.9.10" [[deps.UUIDs]] deps = ["Random", "SHA"] From 65db04235f07ec7a06b35d7d0ab71509f0ec8027 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 13 Dec 2022 01:50:29 +0100 Subject: [PATCH 31/95] Updated dependencies --- Manifest.toml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/Manifest.toml b/Manifest.toml index 0535c54d..61e61922 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -1,6 +1,6 @@ # This file is machine-generated - editing it directly is not advised -julia_version = "1.8.3" +julia_version = "1.8.1" manifest_format = "2.0" project_hash = "dacc65432743d6fd7291a02b1cfef7c22067c5b6" @@ -225,7 +225,7 @@ version = "0.2.6" [[deps.GridapP4est]] deps = ["ArgParse", "FillArrays", "Gridap", "GridapDistributed", "IterativeSolvers", "Libdl", "LinearAlgebra", "MPI", "P4est_wrapper", "PartitionedArrays", "Printf", "Test"] 
-git-tree-sha1 = "53595da44640d7e741369cde1ba605798d665a22" +git-tree-sha1 = "d5205a26d9fb40dcad83e98efb0d3c7f993de8cd" repo-rev = "p4est-migration" repo-url = "https://github.com/gridap/GridapP4est.jl.git" uuid = "c2c8e14b-f5fd-423d-9666-1dd9ad120af9" @@ -598,7 +598,7 @@ version = "1.0.0" [[deps.Tar]] deps = ["ArgTools", "SHA"] uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e" -version = "1.10.1" +version = "1.10.0" [[deps.Test]] deps = ["InteractiveUtils", "Logging", "Random", "Serialization"] From b0ddd21d1a258fe50ef4d63efea39c98bee4fa8f Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 13 Dec 2022 01:53:19 +0100 Subject: [PATCH 32/95] Changed how parts are handled in hierarchy --- src/FESpaceHierarchies.jl | 6 +- src/GridapDistributedExtensions.jl | 45 +++++++++++- src/ModelHierarchies.jl | 5 +- src/RedistributeTools.jl | 44 +++++++----- src/RefinementTools.jl | 18 ----- .../OctreeDistributedDiscreteModelsTests.jl | 14 ++-- test/mpi/RedistributeToolsTests.jl | 52 +++++++++----- test/mpi/RefinementToolsTests.jl | 72 +++++++------------ 8 files changed, 144 insertions(+), 112 deletions(-) diff --git a/src/FESpaceHierarchies.jl b/src/FESpaceHierarchies.jl index 0b96a6f6..de2b624e 100644 --- a/src/FESpaceHierarchies.jl +++ b/src/FESpaceHierarchies.jl @@ -56,13 +56,13 @@ function Gridap.FESpaces.TestFESpace( end function Gridap.FESpaces.TrialFESpace(u,a::FESpaceHierarchyLevel{A,Nothing}) where {A} - Uh = TrialFESpace(u,a.fe_space) + Uh = TrialFESpace(a.fe_space,u) FESpaceHierarchyLevel(a.level,Uh,nothing) end function Gridap.FESpaces.TrialFESpace(u,a::FESpaceHierarchyLevel{A,B}) where {A,B} - Uh = TrialFESpace(u,a.fe_space) - Uh_red = TrialFESpace(u,a.fe_space_red) + Uh = TrialFESpace(a.fe_space,u) + Uh_red = TrialFESpace(a.fe_space_red,u) FESpaceHierarchyLevel(a.level,Uh,Uh_red) end diff --git a/src/GridapDistributedExtensions.jl b/src/GridapDistributedExtensions.jl index 05fd9e83..3b485714 100644 --- a/src/GridapDistributedExtensions.jl +++ b/src/GridapDistributedExtensions.jl @@ -84,6 +84,29 @@ function get_parts(x::GridapP4est.OctreeDistributedDiscreteModel) return x.parts end +function get_parts(x::GridapDistributed.RedistributeGlue) + return PartitionedArrays.get_part_ids(x.old2new) +end + +function get_parts(x::GridapDistributed.DistributedSingleFieldFESpace) + return PartitionedArrays.get_part_ids(x.spaces) +end + +# DistributedFESpaces + +function get_test_space(U::GridapDistributed.DistributedSingleFieldFESpace) + spaces = map_parts(local_views(U)) do U + U.space + end + gids = U.gids + vector_type = U.vector_type + return GridapDistributed.DistributedSingleFieldFESpace(spaces,gids,vector_type) +end + +function FESpaces.get_triangulation(f::GridapDistributed.DistributedSingleFieldFESpace,model::GridapDistributed.AbstractDistributedDiscreteModel) + trians = map_parts(get_triangulation,local_views(f)) + GridapDistributed.DistributedTriangulation(trians,model) +end # Void GridapDistributed structures @@ -119,10 +142,30 @@ function VoidDistributedTriangulation(trian::GridapDistributed.DistributedTriang return VoidDistributedTriangulation(Dc,Dp,get_parts(trian)) end -function Triangulation(model::VoidDistributedDiscreteModel{Dc,Dp}) where {Dc,Dp} +function Gridap.Geometry.Triangulation(model::VoidDistributedDiscreteModel{Dc,Dp}) where {Dc,Dp} return VoidDistributedTriangulation(Dc,Dp,get_parts(model)) end +struct VoidDistributedFESpace{A} <: GridapType + parts::A +end + +function get_parts(x::VoidDistributedFESpace) + return x.parts +end + +function 
Gridap.FESpaces.TestFESpace(model::VoidDistributedDiscreteModel,args...;kwargs...) + return VoidDistributedFESpace(get_parts(model)) +end + +function Gridap.FESpaces.TrialFESpace(space::VoidDistributedFESpace,args...;kwargs...) + return VoidDistributedFESpace(get_parts(space)) +end + +function FESpaces.get_triangulation(f::VoidDistributedFESpace,model::VoidDistributedDiscreteModel) + return VoidDistributedTriangulation(model) +end + # Void Gridap structures diff --git a/src/ModelHierarchies.jl b/src/ModelHierarchies.jl index f0189777..98b6f2a2 100644 --- a/src/ModelHierarchies.jl +++ b/src/ModelHierarchies.jl @@ -57,8 +57,8 @@ function ModelHierarchy(coarsest_model::GridapDistributed.AbstractDistributedDis if (num_procs_x_level[i] != num_procs_x_level[i+1]) # meshes[i+1].model is distributed among P processors # model_ref is distributed among Q processors, with P!=Q - model_ref,ref_glue = Gridap.Adaptivity.refine(modelH,level_parts[i]) - model_red,red_glue = redistribute(model_ref) + model_ref,ref_glue = Gridap.Adaptivity.refine(modelH) + model_red,red_glue = redistribute(model_ref,level_parts[i]) else model_ref,ref_glue = Gridap.Adaptivity.refine(modelH) model_red,red_glue = nothing,nothing @@ -77,6 +77,7 @@ function convert_to_refined_models(mh::ModelHierarchy) model = get_model_before_redist(mh,lev) parent = get_model(mh,lev+1) ref_glue = change_parts(mh.levels[lev].ref_glue,get_parts(model);default=void(AdaptivityGlue)) + #ref_glue = mh.levels[lev].ref_glue model_ref = DistributedAdaptedDiscreteModel(model,parent,ref_glue) levels[lev] = ModelHierarchyLevel(lev,model_ref,ref_glue,mh.levels[lev].model_red,mh.levels[lev].red_glue) diff --git a/src/RedistributeTools.jl b/src/RedistributeTools.jl index bc14a226..e8b61697 100644 --- a/src/RedistributeTools.jl +++ b/src/RedistributeTools.jl @@ -101,13 +101,15 @@ end function redistribute_cell_dofs(cell_dof_values_old, - Uh_new::GridapDistributed.DistributedSingleFieldFESpace, + cell_dof_ids_new, model_new, glue::GridapDistributed.RedistributeGlue; reverse=false) lids_rcv, lids_snd, parts_rcv, parts_snd, new2old = get_glue_components(glue,Val(reverse)) - cell_dof_ids_new = map_parts(get_cell_dof_ids, Uh_new.spaces) + + cell_dof_values_old = change_parts(cell_dof_values_old,get_parts(glue);default=[]) + cell_dof_ids_new = change_parts(cell_dof_ids_new,get_parts(glue);default=[[]]) num_dofs_x_cell_snd = num_dofs_x_cell(cell_dof_values_old, lids_snd) num_dofs_x_cell_rcv = num_dofs_x_cell(cell_dof_ids_new, lids_rcv) @@ -135,9 +137,13 @@ function redistribute_cell_dofs(cell_dof_values_old, map_parts(wait,tout) unpack_rcv_data!(cell_dof_values_new,rcv_data,lids_rcv) - # Why are we exchanging something that has already been exchanged? 
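# A sketch of the reasoning behind the change just below (hedged, reconstructed from
# the surrounding code): packing/unpacking only fills the owned cells of the new
# partition, so ghost cells still hold stale values. The final exchange over the new
# cell gids is what updates the ghosts, and it has to be guarded because ranks
# outside the new communicator hold no gids at all. Schematically:
#
#   fgids = get_cell_gids(model_new)                 # PRange over the new cell layout
#   exchange!(cell_dof_values_new,fgids.exchanger)   # owner -> ghost communication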
-    fgids = get_cell_gids(model_new)
-    exchange!(cell_dof_values_new,fgids.exchanger)
+    # Now that every part knows its new owned dofs, exchange ghosts
+    new_parts = get_parts(model_new)
+    cell_dof_values_new = change_parts(cell_dof_values_new,new_parts)
+    if GridapP4est.i_am_in(new_parts)
+      fgids = get_cell_gids(model_new)
+      exchange!(cell_dof_values_new,fgids.exchanger)
+    end
     return cell_dof_values_new
 end
@@ -151,29 +157,35 @@ function redistribute_free_values!(fv_new::PVector,
                                    reverse=false)
   uh_old = FEFunction(Uh_old,fv_old)
-  cell_dof_values_old = map_parts(get_cell_dof_values,uh_old.fields)
-  cell_dof_values_new = redistribute_cell_dofs(cell_dof_values_old,Uh_new,model_new,glue;reverse=reverse)
+  cell_dof_values_old = map_parts(get_cell_dof_values,local_views(uh_old))
+  cell_dof_ids_new    = map_parts(get_cell_dof_ids, local_views(Uh_new))
+  cell_dof_values_new = redistribute_cell_dofs(cell_dof_values_old,cell_dof_ids_new,model_new,glue;reverse=reverse)
 
   # Assemble the new FEFunction
-  Gridap.FESpaces.gather_free_values!(fv_new, Uh_new.spaces,cell_dof_values_new)
+  Gridap.FESpaces.gather_free_values!(fv_new,local_views(Uh_new),cell_dof_values_new)
   return fv_new
 end
 
-function redistribute_fe_function(uh_old::GridapDistributed.DistributedSingleFieldFEFunction,
-                                  Uh_new::GridapDistributed.DistributedSingleFieldFESpace,
+function redistribute_fe_function(uh_old::Union{GridapDistributed.DistributedSingleFieldFEFunction,Nothing},
+                                  Uh_new::Union{GridapDistributed.DistributedSingleFieldFESpace,VoidDistributedFESpace},
                                   model_new,
                                   glue::GridapDistributed.RedistributeGlue;
                                   reverse=false)
-  cell_dof_values_old = map_parts(get_cell_dof_values,uh_old.fields)
-  cell_dof_values_new = redistribute_cell_dofs(cell_dof_values_old,Uh_new,model_new,glue;reverse=reverse)
+  cell_dof_values_old = !isa(uh_old,Nothing) ? map_parts(get_cell_dof_values,local_views(uh_old)) : nothing
+  cell_dof_ids_new    = !isa(Uh_new,VoidDistributedFESpace) ?
map_parts(get_cell_dof_ids,local_views(Uh_new)) : nothing + cell_dof_values_new = redistribute_cell_dofs(cell_dof_values_old,cell_dof_ids_new,model_new,glue;reverse=reverse) # Assemble the new FEFunction - free_values, dirichlet_values = Gridap.FESpaces.gather_free_and_dirichlet_values(Uh_new,cell_dof_values_new) - free_values = PVector(free_values,Uh_new.gids) - uh_new = FEFunction(Uh_new,free_values,dirichlet_values) - return uh_new + if GridapP4est.i_am_in(get_parts(Uh_new)) + free_values, dirichlet_values = Gridap.FESpaces.gather_free_and_dirichlet_values(Uh_new,cell_dof_values_new) + free_values = PVector(free_values,Uh_new.gids) + uh_new = FEFunction(Uh_new,free_values,dirichlet_values) + return uh_new + else + return nothing + end end diff --git a/src/RefinementTools.jl b/src/RefinementTools.jl index 54b22169..9964eb5e 100644 --- a/src/RefinementTools.jl +++ b/src/RefinementTools.jl @@ -37,24 +37,6 @@ end const DistributedRefinedTriangulation{Dc,Dp} = GridapDistributed.DistributedTriangulation{Dc,Dp,<:AbstractPData{<:AdaptedTriangulation{Dc,Dp}}} - -# DistributedFESpaces - -function get_test_space(U::GridapDistributed.DistributedSingleFieldFESpace) - spaces = map_parts(local_views(U)) do U - U.space - end - gids = U.gids - vector_type = U.vector_type - return GridapDistributed.DistributedSingleFieldFESpace(spaces,gids,vector_type) -end - -function FESpaces.get_triangulation(f::GridapDistributed.DistributedSingleFieldFESpace,model::GridapDistributed.AbstractDistributedDiscreteModel) - trians = map_parts(get_triangulation,local_views(f)) - GridapDistributed.DistributedTriangulation(trians,model) -end - - # ChangeDomain function Gridap.Adaptivity.change_domain_o2n(c_cell_field, diff --git a/test/mpi/OctreeDistributedDiscreteModelsTests.jl b/test/mpi/OctreeDistributedDiscreteModelsTests.jl index 61067958..b9050ed8 100644 --- a/test/mpi/OctreeDistributedDiscreteModelsTests.jl +++ b/test/mpi/OctreeDistributedDiscreteModelsTests.jl @@ -23,20 +23,18 @@ module OctreeDistributedDiscreteModelsTests model = GridapP4est.OctreeDistributedDiscreteModel(level_parts[2],coarse_model,1) # Refining and distributing - fmodel , rglue = refine(model,level_parts[1]) - dfmodel, dglue = redistribute(fmodel) + fmodel1 , rglue1 = refine(model,level_parts[1]) + dfmodel1, dglue1 = redistribute(fmodel1) - map_parts(GridapDistributed.local_views(fmodel)) do model - println(num_cells(model)) - println(typeof(model)) - end + fmodel2 , rglue2 = refine(model) + dfmodel2, dglue2 = redistribute(fmodel2,level_parts[1]) # FESpaces tests sol(x) = x[1] + x[2] reffe = ReferenceFE(lagrangian,Float64,1) - Vh = TestFESpace(fmodel, reffe; conformity=:H1) + Vh = TestFESpace(dfmodel2, reffe; conformity=:H1) Uh = TrialFESpace(sol,Vh) - Ω = Triangulation(fmodel) + Ω = Triangulation(dfmodel2) dΩ = Measure(Ω,3) a(u,v) = ∫(v⋅u)*dΩ diff --git a/test/mpi/RedistributeToolsTests.jl b/test/mpi/RedistributeToolsTests.jl index c4e983f7..8607d4cd 100644 --- a/test/mpi/RedistributeToolsTests.jl +++ b/test/mpi/RedistributeToolsTests.jl @@ -24,6 +24,9 @@ module RedistributeToolsTests coarse_model = OctreeDistributedDiscreteModel(level_parts[num_levels],cmodel,num_refs_coarse) mh = ModelHierarchy(coarse_model,level_parts) + old_parts = level_parts[2] + new_parts = level_parts[1] + # FE Spaces order = 1 u(x) = x[1] + x[2] @@ -32,39 +35,50 @@ module RedistributeToolsTests model_old = get_model_before_redist(mh.levels[1]) VOLD = TestFESpace(model_old,reffe,dirichlet_tags="boundary") - UOLD = TrialFESpace(u,VOLD) + UOLD = TrialFESpace(VOLD,u) 
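# What the remainder of this test verifies, schematically (names as in the code
# below; a sketch rather than extra test code): redistribution moves dof values
# between communicators without changing the represented FE function, in both
# directions of the glue:
#
#   uh_old_red = redistribute_fe_function(uh_old,UNEW,model_new,glue)               # old -> new
#   uh_new_red = redistribute_fe_function(uh_new,UOLD,model_old,glue;reverse=true)  # new -> old
#
# so integrals of the redistributed function match those of the original one on the
# ranks that own the source model.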
model_new = get_model(mh.levels[1]) VNEW = TestFESpace(model_new,reffe,dirichlet_tags="boundary") - UNEW = TrialFESpace(u,VNEW) + UNEW = TrialFESpace(VNEW,u) # Triangulations qdegree = 2*order+1 - Ω_old = Triangulation(model_old) - dΩ_old = Measure(Ω_old,qdegree) Ω_new = Triangulation(model_new) dΩ_new = Measure(Ω_new,qdegree) + uh_new = interpolate(u,UNEW) + + if GridapP4est.i_am_in(old_parts) + Ω_old = Triangulation(model_old) + dΩ_old = Measure(Ω_old,qdegree) + uh_old = interpolate(u,UOLD) + else + Ω_old = nothing + dΩ_old = nothing + uh_old = nothing + end # Old -> New - uhold = interpolate(u,UOLD) - uhnew = GridapSolvers.redistribute_fe_function(uhold, - UNEW, - model_new, - glue) - o = sum(∫(uhold)*dΩ_old) - n = sum(∫(uhnew)*dΩ_new) - @test o ≈ n + uh_old_red = GridapSolvers.redistribute_fe_function(uh_old, + UNEW, + model_new, + glue) + n = sum(∫(uh_old_red)*dΩ_new) + if GridapP4est.i_am_in(old_parts) + o = sum(∫(uh_old)*dΩ_old) + @test o ≈ n + end # New -> Old - uhnew = interpolate(u,UNEW) - uhold = GridapSolvers.redistribute_fe_function(uhnew, + uh_new_red = GridapSolvers.redistribute_fe_function(uh_new, UOLD, model_old, glue; reverse=true) - o = sum(∫(uhnew)*dΩ_new) - n = sum(∫(uhold)*dΩ_old) - @test o ≈ n + n = sum(∫(uh_new)*dΩ_new) + if GridapP4est.i_am_in(old_parts) + o = sum(∫(uh_new_red)*dΩ_old) + @test o ≈ n + end #model_hierarchy_free!(mh) end @@ -75,6 +89,6 @@ module RedistributeToolsTests num_refs_coarse = 2 # Number of initial refinements ranks = num_parts_x_level[1] - #prun(run,mpi,ranks,num_parts_x_level,num_trees,num_refs_coarse) - #MPI.Finalize() + prun(run,mpi,ranks,num_parts_x_level,num_trees,num_refs_coarse) + MPI.Finalize() end diff --git a/test/mpi/RefinementToolsTests.jl b/test/mpi/RefinementToolsTests.jl index adb974b6..e40cff59 100644 --- a/test/mpi/RefinementToolsTests.jl +++ b/test/mpi/RefinementToolsTests.jl @@ -18,10 +18,10 @@ module RefinementToolsTests mh = ModelHierarchy(coarse_model,level_parts) # FE Spaces - order = 4 - sol(x) = x[1]*(x[1]-1.0)*x[2]*(x[2]-1.0) # This high-order function is needed to have sol(x) = 0 for x ∈ Γ - reffe = ReferenceFE(lagrangian,Float64,order) # which is needed since we cannot impose Dirichlet BCs in the empty - tests = TestFESpace(mh,reffe,conformity=:H1) # discrete models. Hopefuly fixed soon... 
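# Both transfer directions exercised below are L2 projections onto the target space:
# find u such that ∫(v⋅u)dΩ_target = ∫(v⋅u_src)dΩ_target for every test function v,
# which yields the mass-matrix systems assembled and solved with CG further down.
# Schematically, for the coarse-to-fine direction (names as in the updated hunk):
#
#   ah(u,v) = ∫(v⋅u)*dΩh    # fine-level mass matrix
#   lh(v)   = ∫(v⋅uH)*dΩh   # coarse function tested against fine basis functions
#   Ah = assemble_matrix(ah,Uh,Vh); bh = assemble_vector(lh,Vh)
#   IterativeSolvers.cg!(xh,Ah,bh)   # then uH_projected = FEFunction(Uh,xh)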
+ order = 1 + sol(x) = x[1] + x[2] + reffe = ReferenceFE(lagrangian,Float64,order) + tests = TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") trials = TrialFESpace(sol,tests) quad_order = 2*order+1 @@ -29,32 +29,23 @@ module RefinementToolsTests fparts = get_level_parts(mh,lev) cparts = get_level_parts(mh,lev+1) - if GridapP4est.i_am_in(fparts) + if GridapP4est.i_am_in(cparts) Vh = get_fe_space_before_redist(tests,lev) Uh = get_fe_space_before_redist(trials,lev) Ωh = get_triangulation(Uh,get_model_before_redist(mh,lev)) dΩh = Measure(Ωh,quad_order) uh = interpolate(sol,Uh) - vh = get_fe_basis(Uh) - if GridapP4est.i_am_in(cparts) - VH = get_fe_space(tests,lev+1) - UH = get_fe_space(trials,lev+1) - ΩH = get_triangulation(UH,get_model(mh,lev+1)) - dΩH = Measure(ΩH,quad_order) - uH = interpolate(sol,UH) - vH = get_fe_basis(UH) - dΩhH = Measure(ΩH,Ωh,quad_order) - else - uH = nothing - vH = nothing - end - - # Coarse FEFunction -> Fine FEFunction, by projection - uH_Ph = change_parts(GridapDistributed.DistributedCellField,uH,fparts) + VH = get_fe_space(tests,lev+1) + UH = get_fe_space(trials,lev+1) + ΩH = get_triangulation(UH,get_model(mh,lev+1)) + dΩH = Measure(ΩH,quad_order) + uH = interpolate(sol,UH) + dΩhH = Measure(ΩH,Ωh,quad_order) + # Coarse FEFunction -> Fine FEFunction, by projection ah(u,v) = ∫(v⋅u)*dΩh - lh(v) = ∫(v⋅uH_Ph)*dΩh + lh(v) = ∫(v⋅uH)*dΩh Ah = assemble_matrix(ah,Uh,Vh) bh = assemble_vector(lh,Vh) @@ -67,27 +58,18 @@ module RefinementToolsTests i_am_main(parts) && println("Error H2h: ", eh) # Fine FEFunction -> Coarse FEFunction, by projection - if GridapP4est.i_am_in(cparts) - uh_PH = change_parts(GridapDistributed.DistributedCellField,uh,cparts) - uH_projected_PH = change_parts(GridapDistributed.DistributedCellField,uH_projected,cparts) - - aH(u,v) = ∫(v⋅u)*dΩH - lH(v) = ∫(v⋅uH_projected_PH)*dΩhH - AH = assemble_matrix(aH,UH,VH) - bH = assemble_vector(lH,VH) - - xH = PVector(0.0,AH.cols) - IterativeSolvers.cg!(xH,AH,bH;verbose=i_am_main(parts),reltol=1.0e-06) - uh_projected = FEFunction(UH,xH) - - _eH = uH-uh_projected - eH = sum(∫(_eH⋅_eH)*dΩH) - i_am_main(parts) && println("Error h2H: ", eH) - else - uh_projected = nothing - end - uh_projected_Ph = change_parts(GridapDistributed.DistributedCellField,uh_projected,fparts) - + aH(u,v) = ∫(v⋅u)*dΩH + lH(v) = ∫(v⋅uH_projected)*dΩhH + AH = assemble_matrix(aH,UH,VH) + bH = assemble_vector(lH,VH) + + xH = PVector(0.0,AH.cols) + IterativeSolvers.cg!(xH,AH,bH;verbose=i_am_main(parts),reltol=1.0e-06) + uh_projected = FEFunction(UH,xH) + + _eH = uH-uh_projected + eH = sum(∫(_eH⋅_eH)*dΩH) + i_am_main(parts) && println("Error h2H: ", eH) end end @@ -99,6 +81,6 @@ module RefinementToolsTests num_refs_coarse = 2 # Number of initial refinements ranks = num_parts_x_level[1] - prun(run,mpi,ranks,num_parts_x_level,num_trees,num_refs_coarse) - MPI.Finalize() + #prun(run,mpi,ranks,num_parts_x_level,num_trees,num_refs_coarse) + #MPI.Finalize() end From 01199099653e48d2a18cfd786aaacdf0296b78a9 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 13 Dec 2022 01:53:51 +0100 Subject: [PATCH 33/95] Continued implementing TransferOperators --- src/DistributedGridTransferOperators.jl | 97 +++++++++++++++++-------- 1 file changed, 67 insertions(+), 30 deletions(-) diff --git a/src/DistributedGridTransferOperators.jl b/src/DistributedGridTransferOperators.jl index e79831db..7b4081d7 100644 --- a/src/DistributedGridTransferOperators.jl +++ b/src/DistributedGridTransferOperators.jl @@ -1,17 +1,15 @@ -struct 
DistributedGridTransferOperator{T,R,A,B,C} <: Gridap.Refinement.GridTransferOperator +struct DistributedGridTransferOperator{T,R,A,B} sh :: A - ref_op :: B cache :: C - function DistributedGridTransferOperator(op_type::Symbol,redist::Bool,sh::FESpaceHierarchy,ref_op,cache) + function DistributedGridTransferOperator(op_type::Symbol,redist::Bool,sh::FESpaceHierarchy,cache) T = typeof(Val(op_type)) R = typeof(Val(redist)) A = typeof(sh) - B = typeof(ref_op) - C = typeof(cache) - new{T,R,A,B,C}(sh,ref_op,cache) + B = typeof(cache) + new{T,R,A,B}(sh,cache) end end @@ -25,21 +23,38 @@ function DistributedGridTransferOperator(lev::Int,sh::FESpaceHierarchy,qdegree:: @check lev < num_levels(mh) @check op_type ∈ [:restriction, :prolongation] + cparts = get_level_parts(mh,lev+1) + fparts = get_level_parts(mh,lev) + Uh = get_fe_space_before_redist(sh,lev) Ωh = get_triangulation(Uh,get_model_before_redist(mh,lev)) + fv_h = PVector(0.0,Uh.gids) + dv_h = get_dirichlet_dof_values(Uh) # Refinement - cparts = get_level_parts(mh,lev+1) if GridapP4est.i_am_in(cparts) UH = get_fe_space(sh,lev+1) - ΩH = get_triangulation(UH,get_model(mh,lev+1)) - from, to = (op_type == :restriction) ? (Uh, UH) : (UH, Uh) - Ω_from, Ω_to = (op_type == :restriction) ? (Ωh, ΩH) : (ΩH, Ωh) - ref_op = ProjectionTransferOperator(from,Ω_from,to,Ω_to;qdegree=qdegree) - else - ref_op = nothing + if (op_type == :restriction) + ΩH = get_triangulation(UH,get_model(mh,lev+1)) + dΩH = Measure(ΩH,qdegree) + dΩhH = Measure(ΩH,Ωh,qdegree) + + aH(u,v) = ∫(v⋅u)*dΩH + lH(v,uh) = ∫(v⋅uh)*dΩhH + AH = assemble_matrix(aH,UH,VH) + + fv_H, dv_H = nothing, nothing + else + fv_H = PVector(0.0,UH.gids) + dv_H = get_dirichlet_dof_values(UH) + + VH, AH, lH = nothing, nothing, nothing, nothing + end + else + VH, AH, lH, fv_H, dv_H = nothing, nothing, nothing, nothing, nothing, nothing end + cache_refine = Uh, fv_h, dv_h, VH, AH, lH, fv_H, dv_H # Redistribution redist = has_redistribution(mh,lev) @@ -47,16 +62,16 @@ function DistributedGridTransferOperator(lev::Int,sh::FESpaceHierarchy,qdegree:: Uh_red = get_fe_space(sh,lev) model_h = get_model_before_redist(mh,lev) model_h_red = get_model(mh,lev) - fv_h = PVector(0.0,Uh.gids) fv_h_red = PVector(0.0,Uh_red.gids) glue = mh.levels[lev].red_glue - cache = fv_h, Uh, fv_h_red, Uh_red, model_h, model_h_red, glue + cache_redist = fv_h_red, Uh_red, model_h, model_h_red, glue else - cache = nothing + cache_redist = nothing end - return DistributedGridTransferOperator(op_type,redist,sh,ref_op,cache) + cache = cache_refine, cache_redist + return DistributedGridTransferOperator(op_type,redist,sh,cache) end function setup_transfer_operators(sh::FESpaceHierarchy, qdegree::Int) @@ -75,40 +90,62 @@ end ### Applying the operators: -## A) Without redistribution (same for interpolation/restriction) -function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{T,Val{false}},x::PVector) where T +function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:prolongation},Val{false}},x::PVector) + cache_refine, cache_redist = A.cache + Uh, fv_h, dv_h, VH, AH, lH, fv_H, dv_H = cache_refine - map_parts(y,A.ref_op,x) do y, ref_op, x - mul!(y,ref_op,x) - end + copy!(fv_H,x) # Matrix layout -> FE layout + uH = FEFunction(UH,fv_H,dv_H) + uh = interpolate!(uH,fv_h,Uh) + copy!(y,fv_h) # FE layout -> Matrix layout return y end -## B) Prolongation (coarse to fine), with redistribution +function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:restriction},Val{false}},x::PVector) + 
cache_refine, cache_redist = A.cache + Uh, fv_h, dv_h, VH, AH, lH, fv_H, dv_H = cache_refine + + copy!(fv_h,x) # Matrix layout -> FE layout + uh = FEFunction(Uh,fv_h,dv_h) + rhs(v) = lH(v,uh) + bH = assemble_vector(rhs,VH) + IterativeSolvers.cg!(y,AH,bH;reltol=1.0e-06) + + return y +end + function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:prolongation},Val{true}},x::PVector) - fv_h, Uh, fv_h_red, Uh_red, model_h, model_h_red, glue = A.cache + cache_refine, cache_redist = A.cache + Uh, fv_h, dv_h, VH, AH, lH, fv_H, dv_H = cache_refine + fv_h_red, Uh_red, model_h, model_h_red, glue = cache_redist # 1 - Solve c2f projection in coarse partition - mul!(fv_h,ref_op,x) + copy!(fv_H,x) # Matrix layout -> FE layout + uH = FEFunction(UH,fv_H,dv_H) + uh = interpolate!(uH,fv_h,Uh) # 2 - Redistribute from coarse partition to fine partition redistribute_free_values!(fv_h_red,Uh_red,fv_h,Uh,model_h_red,glue;reverse=false) - copy!(y,fv_h_red) + copy!(y,fv_h_red) # FE layout -> Matrix layout return y end -## C) Restriction (fine to coarse), with redistribution function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:restriction},Val{true}},x::PVector) - fv_h, Uh, fv_h_red, Uh_red, model_h, model_h_red, glue = A.cache + cache_refine, cache_redist = A.cache + Uh, fv_h, dv_h, VH, AH, lH, fv_H, dv_H = cache_refine + fv_h_red, Uh_red, model_h, model_h_red, glue = cache_redist - # 1 - Redistribute from coarse partition to fine partition + # 1 - Redistribute from fine partition to coarse partition copy!(fv_h_red,x) redistribute_free_values!(fv_h,Uh,fv_h_red,Uh_red,model_h,glue;reverse=true) # 2 - Solve f2c projection in fine partition - mul!(y,ref_op,fv_h) + uh = FEFunction(Uh,fv_h,dv_h) + rhs(v) = lH(v,uh) + bH = assemble_vector(rhs,VH) + IterativeSolvers.cg!(y,AH,bH;reltol=1.0e-06) return y end From fdb1275482cc6ebb1bdbab438739c27edc7a573b Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 13 Dec 2022 18:32:01 +0100 Subject: [PATCH 34/95] TransferOperators are now working --- src/DistributedGridTransferOperators.jl | 155 ++++++++++++------ src/FESpaceHierarchies.jl | 29 +++- src/GridapSolvers.jl | 14 +- src/RedistributeTools.jl | 21 +-- test/mpi/InterGridTransferOperatorsTests.jl | 135 ++++++++------- test/mpi/ModelHierarchiesTests.jl | 6 +- .../OctreeDistributedDiscreteModelsTests.jl | 4 +- test/mpi/RefinementToolsTests.jl | 6 +- 8 files changed, 231 insertions(+), 139 deletions(-) diff --git a/src/DistributedGridTransferOperators.jl b/src/DistributedGridTransferOperators.jl index 7b4081d7..b5a2b5fa 100644 --- a/src/DistributedGridTransferOperators.jl +++ b/src/DistributedGridTransferOperators.jl @@ -2,7 +2,7 @@ struct DistributedGridTransferOperator{T,R,A,B} sh :: A - cache :: C + cache :: B function DistributedGridTransferOperator(op_type::Symbol,redist::Bool,sh::FESpaceHierarchy,cache) T = typeof(Val(op_type)) @@ -23,76 +23,114 @@ function DistributedGridTransferOperator(lev::Int,sh::FESpaceHierarchy,qdegree:: @check lev < num_levels(mh) @check op_type ∈ [:restriction, :prolongation] + # Refinement + if (op_type == :restriction) + cache_refine = _get_restriction_cache(lev,sh,qdegree) + else + cache_refine = _get_prolongation_cache(lev,sh,qdegree) + end + + # Redistribution + redist = has_redistribution(mh,lev) + cache_redist = _get_redistribution_cache(lev,sh) + + cache = cache_refine, cache_redist + return DistributedGridTransferOperator(op_type,redist,sh,cache) +end + +function _get_prolongation_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int) + mh = 
sh.mh cparts = get_level_parts(mh,lev+1) - fparts = get_level_parts(mh,lev) - Uh = get_fe_space_before_redist(sh,lev) - Ωh = get_triangulation(Uh,get_model_before_redist(mh,lev)) - fv_h = PVector(0.0,Uh.gids) - dv_h = get_dirichlet_dof_values(Uh) - - # Refinement if GridapP4est.i_am_in(cparts) + model_h = get_model_before_redist(mh,lev) + Uh = get_fe_space_before_redist(sh,lev) + fv_h = PVector(0.0,Uh.gids) + dv_h = get_dirichlet_dof_values(Uh) # Should this be zeros? + UH = get_fe_space(sh,lev+1) + fv_H = PVector(0.0,UH.gids) + dv_H = get_dirichlet_dof_values(UH) - if (op_type == :restriction) - ΩH = get_triangulation(UH,get_model(mh,lev+1)) - dΩH = Measure(ΩH,qdegree) - dΩhH = Measure(ΩH,Ωh,qdegree) + cache_refine = model_h, Uh, fv_h, dv_h, UH, fv_H, dv_H + else + model_h = get_model_before_redist(mh,lev) + Uh = get_fe_space_before_redist(sh,lev) + cache_refine = model_h, Uh, nothing, nothing, nothing, nothing, nothing + end - aH(u,v) = ∫(v⋅u)*dΩH - lH(v,uh) = ∫(v⋅uh)*dΩhH - AH = assemble_matrix(aH,UH,VH) + return cache_refine +end - fv_H, dv_H = nothing, nothing - else - fv_H = PVector(0.0,UH.gids) - dv_H = get_dirichlet_dof_values(UH) +function _get_restriction_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int) + mh = sh.mh + cparts = get_level_parts(mh,lev+1) - VH, AH, lH = nothing, nothing, nothing, nothing - end + if GridapP4est.i_am_in(cparts) + model_h = get_model_before_redist(mh,lev) + Uh = get_fe_space_before_redist(sh,lev) + Ωh = get_triangulation(Uh,get_model_before_redist(mh,lev)) + fv_h = PVector(0.0,Uh.gids) + dv_h = get_dirichlet_dof_values(Uh) # Should this be zeros? + + UH = get_fe_space(sh,lev+1) + VH = get_test_space(UH) + ΩH = get_triangulation(UH,get_model(mh,lev+1)) + dΩH = Measure(ΩH,qdegree) + dΩhH = Measure(ΩH,Ωh,qdegree) + + aH(u,v) = ∫(v⋅u)*dΩH + lH(v,uh) = ∫(v⋅uh)*dΩhH + AH = assemble_matrix(aH,UH,VH) + xH = PVector(0.0,AH.rows) + + cache_refine = model_h, Uh, fv_h, dv_h, VH, AH, lH, xH else - VH, AH, lH, fv_H, dv_H = nothing, nothing, nothing, nothing, nothing, nothing + model_h = get_model_before_redist(mh,lev) + Uh = get_fe_space_before_redist(sh,lev) + cache_refine = model_h, Uh, nothing, nothing, nothing, nothing, nothing, nothing end - cache_refine = Uh, fv_h, dv_h, VH, AH, lH, fv_H, dv_H - # Redistribution + return cache_refine +end + +function _get_redistribution_cache(lev::Int,sh::FESpaceHierarchy) + mh = sh.mh redist = has_redistribution(mh,lev) if redist Uh_red = get_fe_space(sh,lev) - model_h = get_model_before_redist(mh,lev) model_h_red = get_model(mh,lev) fv_h_red = PVector(0.0,Uh_red.gids) - glue = mh.levels[lev].red_glue + dv_h_red = get_dirichlet_dof_values(Uh_red) + glue = mh.levels[lev].red_glue - cache_redist = fv_h_red, Uh_red, model_h, model_h_red, glue + cache_redist = fv_h_red, dv_h_red, Uh_red, model_h_red, glue else cache_redist = nothing end - - cache = cache_refine, cache_redist - return DistributedGridTransferOperator(op_type,redist,sh,cache) + return cache_redist end function setup_transfer_operators(sh::FESpaceHierarchy, qdegree::Int) mh = sh.mh - restrictions = Vector{DistributedGridTransferOperator}(undef,num_levels(sh)-1) - interpolations = Vector{DistributedGridTransferOperator}(undef,num_levels(sh)-1) + restrictions = Vector{DistributedGridTransferOperator}(undef,num_levels(sh)-1) + prolongations = Vector{DistributedGridTransferOperator}(undef,num_levels(sh)-1) for lev in 1:num_levels(sh)-1 parts = get_level_parts(mh,lev) if GridapP4est.i_am_in(parts) - restrictions[lev] = RestrictionOperator(lev,sh,qdegree) - 
interpolations[lev] = ProlongationOperator(lev,sh,qdegree) + restrictions[lev] = RestrictionOperator(lev,sh,qdegree) + prolongations[lev] = ProlongationOperator(lev,sh,qdegree) end end - return restrictions, interpolations + return restrictions, prolongations end ### Applying the operators: +# A) Prolongation, without redistribution function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:prolongation},Val{false}},x::PVector) cache_refine, cache_redist = A.cache - Uh, fv_h, dv_h, VH, AH, lH, fv_H, dv_H = cache_refine + model_h, Uh, fv_h, dv_h, UH, fv_H, dv_H = cache_refine copy!(fv_H,x) # Matrix layout -> FE layout uH = FEFunction(UH,fv_H,dv_H) @@ -102,50 +140,59 @@ function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:p return y end +# B) Restriction, without redistribution function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:restriction},Val{false}},x::PVector) cache_refine, cache_redist = A.cache - Uh, fv_h, dv_h, VH, AH, lH, fv_H, dv_H = cache_refine + model_h, Uh, fv_h, dv_h, VH, AH, lH, xH = cache_refine copy!(fv_h,x) # Matrix layout -> FE layout uh = FEFunction(Uh,fv_h,dv_h) rhs(v) = lH(v,uh) - bH = assemble_vector(rhs,VH) - IterativeSolvers.cg!(y,AH,bH;reltol=1.0e-06) + bH = assemble_vector(rhs,VH) # Matrix layout + IterativeSolvers.cg!(xH,AH,bH;reltol=1.0e-06) + copy!(y,xH) # TO UNDERSTAND: Why can't we use directly y instead of xH? return y end -function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:prolongation},Val{true}},x::PVector) +# C) Prolongation, with redistribution +function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:prolongation},Val{true}},x::Union{PVector,Nothing}) cache_refine, cache_redist = A.cache - Uh, fv_h, dv_h, VH, AH, lH, fv_H, dv_H = cache_refine - fv_h_red, Uh_red, model_h, model_h_red, glue = cache_redist + model_h, Uh, fv_h, dv_h, UH, fv_H, dv_H = cache_refine + fv_h_red, dv_h_red, Uh_red, model_h_red, glue = cache_redist # 1 - Solve c2f projection in coarse partition - copy!(fv_H,x) # Matrix layout -> FE layout - uH = FEFunction(UH,fv_H,dv_H) - uh = interpolate!(uH,fv_h,Uh) + if !isa(x,Nothing) + copy!(fv_H,x) # Matrix layout -> FE layout + uH = FEFunction(UH,fv_H,dv_H) + uh = interpolate!(uH,fv_h,Uh) + end # 2 - Redistribute from coarse partition to fine partition - redistribute_free_values!(fv_h_red,Uh_red,fv_h,Uh,model_h_red,glue;reverse=false) + redistribute_free_values!(fv_h_red,Uh_red,fv_h,dv_h,Uh,model_h_red,glue;reverse=false) copy!(y,fv_h_red) # FE layout -> Matrix layout return y end -function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:restriction},Val{true}},x::PVector) +# D) Restriction, with redistribution +function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransferOperator{Val{:restriction},Val{true}},x::PVector) cache_refine, cache_redist = A.cache - Uh, fv_h, dv_h, VH, AH, lH, fv_H, dv_H = cache_refine - fv_h_red, Uh_red, model_h, model_h_red, glue = cache_redist + model_h, Uh, fv_h, dv_h, VH, AH, lH, xH = cache_refine + fv_h_red, dv_h_red, Uh_red, model_h_red, glue = cache_redist # 1 - Redistribute from fine partition to coarse partition copy!(fv_h_red,x) - redistribute_free_values!(fv_h,Uh,fv_h_red,Uh_red,model_h,glue;reverse=true) + redistribute_free_values!(fv_h,Uh,fv_h_red,dv_h_red,Uh_red,model_h,glue;reverse=true) # 2 - Solve f2c projection in fine partition - uh = FEFunction(Uh,fv_h,dv_h) - rhs(v) = lH(v,uh) - bH = assemble_vector(rhs,VH) - 
IterativeSolvers.cg!(y,AH,bH;reltol=1.0e-06) + if !isa(y,Nothing) + uh = FEFunction(Uh,fv_h,dv_h) + rhs(v) = lH(v,uh) + bH = assemble_vector(rhs,VH) # Matrix layout + IterativeSolvers.cg!(xH,AH,bH;reltol=1.0e-06) + copy!(y,xH) + end return y end diff --git a/src/FESpaceHierarchies.jl b/src/FESpaceHierarchies.jl index de2b624e..d81b403d 100644 --- a/src/FESpaceHierarchies.jl +++ b/src/FESpaceHierarchies.jl @@ -55,12 +55,12 @@ function Gridap.FESpaces.TestFESpace( FESpaceHierarchyLevel(mh.level,Vh,Vh_red) end -function Gridap.FESpaces.TrialFESpace(u,a::FESpaceHierarchyLevel{A,Nothing}) where {A} +function Gridap.FESpaces.TrialFESpace(a::FESpaceHierarchyLevel{A,Nothing},u) where {A} Uh = TrialFESpace(a.fe_space,u) FESpaceHierarchyLevel(a.level,Uh,nothing) end -function Gridap.FESpaces.TrialFESpace(u,a::FESpaceHierarchyLevel{A,B}) where {A,B} +function Gridap.FESpaces.TrialFESpace(a::FESpaceHierarchyLevel{A,B},u) where {A,B} Uh = TrialFESpace(a.fe_space,u) Uh_red = TrialFESpace(a.fe_space_red,u) FESpaceHierarchyLevel(a.level,Uh,Uh_red) @@ -78,12 +78,12 @@ function Gridap.FESpaces.TestFESpace(mh::ModelHierarchy,args...;kwargs...) where FESpaceHierarchy(mh,test_spaces) end -function Gridap.FESpaces.TrialFESpace(u,a::FESpaceHierarchy) +function Gridap.FESpaces.TrialFESpace(a::FESpaceHierarchy,u) trial_spaces = Vector{FESpaceHierarchyLevel}(undef,num_levels(a.mh)) for i = 1:num_levels(a.mh) parts = get_level_parts(a.mh,i) if (GridapP4est.i_am_in(parts)) - Uh = TrialFESpace(u,a[i]) + Uh = TrialFESpace(a[i],u) trial_spaces[i] = Uh end end @@ -101,3 +101,24 @@ function Gridap.FESpaces.TrialFESpace(a::FESpaceHierarchy) end FESpaceHierarchy(a.mh,trial_spaces) end + +function compute_hierarchy_matrices(trials::FESpaceHierarchy,a::Function,qdegree::Int) + nlevs = num_levels(trials) + mh = trials.mh + + mats = Vector{PSparseMatrix}(undef,nlevs) + for lev in 1:nlevs + parts = get_level_parts(mh,lev) + if GridapP4est.i_am_in(parts) + model = get_model(mh,lev) + U = get_fe_space(trials,lev) + V = get_test_space(U) + Ω = Triangulation(model) + dΩ = Measure(Ω,qdegree) + ai(u,v) = a(u,v,dΩ) + A = assemble_matrix(ai,U,V) + mats[lev] = A + end + end + return mats +end diff --git a/src/GridapSolvers.jl b/src/GridapSolvers.jl index ce2f5bd5..0a6f0eac 100644 --- a/src/GridapSolvers.jl +++ b/src/GridapSolvers.jl @@ -3,6 +3,7 @@ module GridapSolvers using MPI using LinearAlgebra using FillArrays + using IterativeSolvers using Gridap using Gridap.Helpers using Gridap.Algebra @@ -13,6 +14,7 @@ module GridapSolvers using GridapDistributed using GridapP4est + import LinearAlgebra: mul! import GridapDistributed: local_views export change_parts, void @@ -25,19 +27,21 @@ module GridapSolvers export FESpaceHierarchy export get_fe_space, get_fe_space_before_redist + export compute_hierarchy_matrices - #export DistributedGridTransferOperator - #export RestrictionOperator, ProlongationOperator - #export setup_transfer_operators + export DistributedGridTransferOperator + export RestrictionOperator, ProlongationOperator + export setup_transfer_operators + export mul! 
include("PartitionedArraysExtensions.jl") include("GridapDistributedExtensions.jl") - include("GridapFixes.jl") + #include("GridapFixes.jl") include("RefinementTools.jl") include("RedistributeTools.jl") include("ModelHierarchies.jl") include("FESpaceHierarchies.jl") - #include("DistributedGridTransferOperators.jl") + include("DistributedGridTransferOperators.jl") end diff --git a/src/RedistributeTools.jl b/src/RedistributeTools.jl index e8b61697..93c0adb7 100644 --- a/src/RedistributeTools.jl +++ b/src/RedistributeTools.jl @@ -148,25 +148,26 @@ function redistribute_cell_dofs(cell_dof_values_old, return cell_dof_values_new end -function redistribute_free_values!(fv_new::PVector, - Uh_new::GridapDistributed.DistributedSingleFieldFESpace, - fv_old::PVector, - Uh_old::GridapDistributed.DistributedSingleFieldFESpace, +function redistribute_free_values!(fv_new::Union{PVector,Nothing}, + Uh_new::Union{GridapDistributed.DistributedSingleFieldFESpace,VoidDistributedFESpace}, + fv_old::Union{PVector,Nothing}, + dv_old::Union{AbstractPData,Nothing}, + Uh_old::Union{GridapDistributed.DistributedSingleFieldFESpace,VoidDistributedFESpace}, model_new, glue::GridapDistributed.RedistributeGlue; reverse=false) - uh_old = FEFunction(Uh_old,fv_old) - cell_dof_values_old = map_parts(get_cell_dof_values,local_views(uh_old)) - cell_dof_ids_new = map_parts(get_cell_dof_ids, local_views(Uh_new)) + cell_dof_values_old = !isa(fv_old,Nothing) ? map_parts(scatter_free_and_dirichlet_values,local_views(Uh_old),local_views(fv_old),dv_old) : nothing + cell_dof_ids_new = !isa(fv_new,Nothing) ? map_parts(get_cell_dof_ids, local_views(Uh_new)) : nothing cell_dof_values_new = redistribute_cell_dofs(cell_dof_values_old,cell_dof_ids_new,model_new,glue;reverse=reverse) - # Assemble the new FEFunction - Gridap.FESpaces.gather_free_values!(fv_new,local_views(Uh_new),cell_dof_values_new) + # Gather the new free dofs + if !isa(fv_new,Nothing) + Gridap.FESpaces.gather_free_values!(fv_new,Uh_new,cell_dof_values_new) + end return fv_new end - function redistribute_fe_function(uh_old::Union{GridapDistributed.DistributedSingleFieldFEFunction,Nothing}, Uh_new::Union{GridapDistributed.DistributedSingleFieldFESpace,VoidDistributedFESpace}, model_new, diff --git a/test/mpi/InterGridTransferOperatorsTests.jl b/test/mpi/InterGridTransferOperatorsTests.jl index ddce7e81..7642876c 100644 --- a/test/mpi/InterGridTransferOperatorsTests.jl +++ b/test/mpi/InterGridTransferOperatorsTests.jl @@ -1,67 +1,86 @@ module InterGridTransferOperatorsTests - """ - using MPI - using PartitionedArrays - using Gridap - using GridapDistributed - using GridapP4est - using GridapSolvers - using Test - - u(x) = x[1] + x[2] - - function model_hierarchy_free!(mh::ModelHierarchy) - for lev in 1:num_levels(mh) - model = get_model(mh,lev) - isa(model,DistributedRefinedDiscreteModel) && (model = model.model) - octree_distributed_discrete_model_free!(model) - end +using MPI +using PartitionedArrays +using Gridap +using GridapDistributed +using GridapP4est +using GridapSolvers +using Test + +function model_hierarchy_free!(mh::ModelHierarchy) + for lev in 1:num_levels(mh) + model = get_model(mh,lev) + isa(model,DistributedRefinedDiscreteModel) && (model = model.model) + octree_distributed_discrete_model_free!(model) end +end + +function run(parts,num_parts_x_level,num_trees,num_refs_coarse) + domain = (0,1,0,1) + cmodel = CartesianDiscreteModel(domain,num_trees) + + num_levels = length(num_parts_x_level) + level_parts = 
GridapSolvers.generate_level_parts(parts,num_parts_x_level) + coarse_model = OctreeDistributedDiscreteModel(level_parts[num_levels],cmodel,num_refs_coarse) + mh = ModelHierarchy(coarse_model,level_parts) + + + old_parts = level_parts[2] + new_parts = level_parts[1] + + # Create Operators: + order = 1 + u(x) = x[1] + x[2] + reffe = ReferenceFE(lagrangian,Float64,order) + + tests = TestFESpace(mh,reffe,dirichlet_tags="boundary") + trials = TrialFESpace(tests,u) + + qdegree = order*2+1 + ops = setup_transfer_operators(trials, qdegree) + restrictions, prolongations = ops - function run(parts,num_parts_x_level,num_trees,num_refs_coarse) - num_levels = length(num_parts_x_level) - domain = (0,1,0,1) - cmodel = CartesianDiscreteModel(domain,num_trees) - level_parts = GridapSolvers.generate_level_parts(parts,num_parts_x_level) - - coarse_model = OctreeDistributedDiscreteModel(level_parts[num_levels],cmodel,num_refs_coarse) - mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) - - println(typeof(level_parts[1])) - - # FE Spaces - println(" > Testing FESpaces") - order = 1 - reffe = ReferenceFE(lagrangian,Float64,order) - tests = TestFESpace(mh,reffe,dirichlet_tags="boundary") - trials = TrialFESpace(u,tests) - - # Transfer ops - println(" > Testing operators") - qdegree = 2 - for lev in 1:num_levels-1 - println(" > Level num ", lev) - parts = get_level_parts(mh,lev) - if GridapP4est.i_am_in(parts) - R = RestrictionOperator(lev,trials,qdegree) - P = ProlongationOperator(lev,trials,qdegree) - @test isa(R,DistributedGridTransferOperator{Val{:restriction},Val{true}}) - @test isa(P,DistributedGridTransferOperator{Val{:prolongation},Val{true}}) + a(u,v,dΩ) = ∫(∇(v)⋅∇(u))*dΩ + mats = compute_hierarchy_matrices(trials,a,qdegree) + + for lev in 1:num_levels-1 + parts_h = get_level_parts(mh,lev) + parts_H = get_level_parts(mh,lev+1) + + if GridapP4est.i_am_in(parts_h) + GridapP4est.i_am_main(parts_h) && println("Lev : ", lev) + Ah = mats[lev] + xh = PVector(1.0,Ah.cols) + yh = PVector(0.0,Ah.rows) + + if GridapP4est.i_am_in(parts_H) + AH = mats[lev+1] + xH = PVector(1.0,AH.cols) + yH = PVector(0.0,AH.rows) + else + xH = nothing + yH = nothing end - end - println(" > Testing setup_transfer_operators") - ops = setup_transfer_operators(trials,qdegree) + GridapP4est.i_am_main(parts_h) && println(" > Restriction") + R = restrictions[lev] + mul!(yH,R,xh) - #model_hierarchy_free!(mh) + GridapP4est.i_am_main(parts_h) && println(" > Prolongation") + P = prolongations[lev] + mul!(yh,P,xH) + end end - num_parts_x_level = [4,2,2] # Procs in each refinement level - num_trees = (1,1) # Number of initial P4est trees - num_refs_coarse = 2 # Number of initial refinements - - ranks = num_parts_x_level[1] - prun(run,mpi,ranks,num_parts_x_level,num_trees,num_refs_coarse) - MPI.Finalize() - """ + #model_hierarchy_free!(mh) +end + + +num_parts_x_level = [4,2,2] # Procs in each refinement level +num_trees = (1,1) # Number of initial P4est trees +num_refs_coarse = 2 # Number of initial refinements + +ranks = num_parts_x_level[1] +prun(run,mpi,ranks,num_parts_x_level,num_trees,num_refs_coarse) +MPI.Finalize() end diff --git a/test/mpi/ModelHierarchiesTests.jl b/test/mpi/ModelHierarchiesTests.jl index 19c489c4..e1cebc50 100644 --- a/test/mpi/ModelHierarchiesTests.jl +++ b/test/mpi/ModelHierarchiesTests.jl @@ -28,7 +28,7 @@ function main(parts,num_parts_x_level,num_trees,num_refs_coarse) sol(x) = x[1] + x[2] reffe = ReferenceFE(lagrangian,Float64,1) tests = TestFESpace(mh,reffe,conformity=:H1) - trials = TrialFESpace(sol,tests) + 
trials = TrialFESpace(tests,sol) # model_hierarchy_free!(mh) end @@ -38,7 +38,7 @@ num_trees = (1,1) # Number of initial P4est trees num_refs_coarse = 2 # Number of initial refinements ranks = num_parts_x_level[1] -#prun(main,mpi,ranks,num_parts_x_level,num_trees,num_refs_coarse) -#MPI.Finalize() +prun(main,mpi,ranks,num_parts_x_level,num_trees,num_refs_coarse) +MPI.Finalize() end \ No newline at end of file diff --git a/test/mpi/OctreeDistributedDiscreteModelsTests.jl b/test/mpi/OctreeDistributedDiscreteModelsTests.jl index b9050ed8..b09fd6df 100644 --- a/test/mpi/OctreeDistributedDiscreteModelsTests.jl +++ b/test/mpi/OctreeDistributedDiscreteModelsTests.jl @@ -41,6 +41,6 @@ module OctreeDistributedDiscreteModelsTests assemble_matrix(a,Uh,Vh) end - #prun(run,mpi,4,(2,2),[4,2]) - #MPI.Finalize() + prun(run,mpi,4,(2,2),[4,2]) + MPI.Finalize() end # module diff --git a/test/mpi/RefinementToolsTests.jl b/test/mpi/RefinementToolsTests.jl index e40cff59..60bef53e 100644 --- a/test/mpi/RefinementToolsTests.jl +++ b/test/mpi/RefinementToolsTests.jl @@ -22,7 +22,7 @@ module RefinementToolsTests sol(x) = x[1] + x[2] reffe = ReferenceFE(lagrangian,Float64,order) tests = TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") - trials = TrialFESpace(sol,tests) + trials = TrialFESpace(tests,sol) quad_order = 2*order+1 for lev in 1:nlevs-1 @@ -81,6 +81,6 @@ module RefinementToolsTests num_refs_coarse = 2 # Number of initial refinements ranks = num_parts_x_level[1] - #prun(run,mpi,ranks,num_parts_x_level,num_trees,num_refs_coarse) - #MPI.Finalize() + prun(run,mpi,ranks,num_parts_x_level,num_trees,num_refs_coarse) + MPI.Finalize() end From 77c618300085ff92e08b4b5d1ab5ca90b83a63b8 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 13 Dec 2022 22:20:18 +0100 Subject: [PATCH 35/95] Moved code around, created submodules --- src/GridapSolvers.jl | 41 ++--- src/LinearSolvers/LinearSolvers.jl | 5 + .../DistributedGridTransferOperators.jl | 0 .../FESpaceHierarchies.jl | 0 .../GridapDistributedExtensions.jl | 0 src/{ => MultilevelTools}/GridapFixes.jl | 0 src/{ => MultilevelTools}/ModelHierarchies.jl | 0 src/MultilevelTools/MultilevelTools.jl | 53 ++++++ .../PartitionedArraysExtensions.jl | 0 .../RedistributeTools.jl | 0 src/{ => MultilevelTools}/RefinementTools.jl | 0 ... 
DistributedGridTransferOperatorsTests.jl} | 8 +- test/mpi/ModelHierarchiesTests.jl | 6 +- .../OctreeDistributedDiscreteModelsTests.jl | 78 +++++---- test/mpi/RedistributeToolsTests.jl | 158 ++++++++--------- test/mpi/RefinementToolsTests.jl | 162 +++++++++--------- test/runtests.jl | 2 +- 17 files changed, 280 insertions(+), 233 deletions(-) create mode 100644 src/LinearSolvers/LinearSolvers.jl rename src/{ => MultilevelTools}/DistributedGridTransferOperators.jl (100%) rename src/{ => MultilevelTools}/FESpaceHierarchies.jl (100%) rename src/{ => MultilevelTools}/GridapDistributedExtensions.jl (100%) rename src/{ => MultilevelTools}/GridapFixes.jl (100%) rename src/{ => MultilevelTools}/ModelHierarchies.jl (100%) create mode 100644 src/MultilevelTools/MultilevelTools.jl rename src/{ => MultilevelTools}/PartitionedArraysExtensions.jl (100%) rename src/{ => MultilevelTools}/RedistributeTools.jl (100%) rename src/{ => MultilevelTools}/RefinementTools.jl (100%) rename test/mpi/{InterGridTransferOperatorsTests.jl => DistributedGridTransferOperatorsTests.jl} (93%) diff --git a/src/GridapSolvers.jl b/src/GridapSolvers.jl index 0a6f0eac..1f882936 100644 --- a/src/GridapSolvers.jl +++ b/src/GridapSolvers.jl @@ -1,29 +1,17 @@ module GridapSolvers - using MPI - using LinearAlgebra - using FillArrays - using IterativeSolvers - using Gridap - using Gridap.Helpers - using Gridap.Algebra - using Gridap.Geometry - using Gridap.FESpaces - using Gridap.Adaptivity - using PartitionedArrays - using GridapDistributed - using GridapP4est - - import LinearAlgebra: mul! - import GridapDistributed: local_views - - export change_parts, void - - export DistributedRefinedDiscreteModel + include("MultilevelTools/MultilevelTools.jl") + include("LinearSolvers/LinearSolvers.jl") + + using GridapSolvers.MultilevelTools + using GridapSolvers.LinearSolvers + + # MultilevelTools + export get_parts, generate_level_parts export ModelHierarchy export num_levels, get_level, get_level_parts - export get_model, get_model_before_redist, has_refinement, has_redistribution + export get_model, get_model_before_redist export FESpaceHierarchy export get_fe_space, get_fe_space_before_redist @@ -32,16 +20,7 @@ module GridapSolvers export DistributedGridTransferOperator export RestrictionOperator, ProlongationOperator export setup_transfer_operators - export mul! 
- - include("PartitionedArraysExtensions.jl") - include("GridapDistributedExtensions.jl") - #include("GridapFixes.jl") - include("RefinementTools.jl") - include("RedistributeTools.jl") - include("ModelHierarchies.jl") - include("FESpaceHierarchies.jl") - include("DistributedGridTransferOperators.jl") + # LinearSolvers end diff --git a/src/LinearSolvers/LinearSolvers.jl b/src/LinearSolvers/LinearSolvers.jl new file mode 100644 index 00000000..0197aed8 --- /dev/null +++ b/src/LinearSolvers/LinearSolvers.jl @@ -0,0 +1,5 @@ +module LinearSolvers + +using GridapSolvers.MultilevelTools + +end \ No newline at end of file diff --git a/src/DistributedGridTransferOperators.jl b/src/MultilevelTools/DistributedGridTransferOperators.jl similarity index 100% rename from src/DistributedGridTransferOperators.jl rename to src/MultilevelTools/DistributedGridTransferOperators.jl diff --git a/src/FESpaceHierarchies.jl b/src/MultilevelTools/FESpaceHierarchies.jl similarity index 100% rename from src/FESpaceHierarchies.jl rename to src/MultilevelTools/FESpaceHierarchies.jl diff --git a/src/GridapDistributedExtensions.jl b/src/MultilevelTools/GridapDistributedExtensions.jl similarity index 100% rename from src/GridapDistributedExtensions.jl rename to src/MultilevelTools/GridapDistributedExtensions.jl diff --git a/src/GridapFixes.jl b/src/MultilevelTools/GridapFixes.jl similarity index 100% rename from src/GridapFixes.jl rename to src/MultilevelTools/GridapFixes.jl diff --git a/src/ModelHierarchies.jl b/src/MultilevelTools/ModelHierarchies.jl similarity index 100% rename from src/ModelHierarchies.jl rename to src/MultilevelTools/ModelHierarchies.jl diff --git a/src/MultilevelTools/MultilevelTools.jl b/src/MultilevelTools/MultilevelTools.jl new file mode 100644 index 00000000..9d4f2f32 --- /dev/null +++ b/src/MultilevelTools/MultilevelTools.jl @@ -0,0 +1,53 @@ +module MultilevelTools + +using MPI +using LinearAlgebra +using FillArrays +using IterativeSolvers +using Gridap +using Gridap.Helpers +using Gridap.Algebra +using Gridap.Geometry +using Gridap.FESpaces +using Gridap.Adaptivity +using PartitionedArrays +using GridapDistributed +using GridapP4est + +import LinearAlgebra: mul! +import GridapDistributed: local_views +import GridapP4est: i_am_in, i_am_main + + +export change_parts +export generate_level_parts + +export redistribute_fe_function +export redistribute_free_values! + +export ModelHierarchy +export num_levels, get_level, get_level_parts +export get_model, get_model_before_redist, has_refinement, has_redistribution + +export FESpaceHierarchy +export get_fe_space, get_fe_space_before_redist +export compute_hierarchy_matrices + +export DistributedGridTransferOperator +export RestrictionOperator, ProlongationOperator +export setup_transfer_operators +export mul! 
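# Editor's note (illustrative sketch, not part of this patch): after this
# reorganization, downstream code reaches these names through the submodule,
# as the tests below do. `parts` and `coarse_model` are assumed to exist:
#
#   using GridapSolvers
#   using GridapSolvers.MultilevelTools
#   level_parts = generate_level_parts(parts,[4,2,2])
#   mh = ModelHierarchy(coarse_model,level_parts)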
+ +include("PartitionedArraysExtensions.jl") +include("GridapDistributedExtensions.jl") +#include("GridapFixes.jl") +include("RefinementTools.jl") +include("RedistributeTools.jl") +include("ModelHierarchies.jl") +include("FESpaceHierarchies.jl") +include("DistributedGridTransferOperators.jl") + + + +end + diff --git a/src/PartitionedArraysExtensions.jl b/src/MultilevelTools/PartitionedArraysExtensions.jl similarity index 100% rename from src/PartitionedArraysExtensions.jl rename to src/MultilevelTools/PartitionedArraysExtensions.jl diff --git a/src/RedistributeTools.jl b/src/MultilevelTools/RedistributeTools.jl similarity index 100% rename from src/RedistributeTools.jl rename to src/MultilevelTools/RedistributeTools.jl diff --git a/src/RefinementTools.jl b/src/MultilevelTools/RefinementTools.jl similarity index 100% rename from src/RefinementTools.jl rename to src/MultilevelTools/RefinementTools.jl diff --git a/test/mpi/InterGridTransferOperatorsTests.jl b/test/mpi/DistributedGridTransferOperatorsTests.jl similarity index 93% rename from test/mpi/InterGridTransferOperatorsTests.jl rename to test/mpi/DistributedGridTransferOperatorsTests.jl index 7642876c..7bb12de5 100644 --- a/test/mpi/InterGridTransferOperatorsTests.jl +++ b/test/mpi/DistributedGridTransferOperatorsTests.jl @@ -1,12 +1,14 @@ -module InterGridTransferOperatorsTests +module DistributedGridTransferOperatorsTests using MPI using PartitionedArrays using Gridap using GridapDistributed using GridapP4est -using GridapSolvers using Test +using GridapSolvers +using GridapSolvers.MultilevelTools + function model_hierarchy_free!(mh::ModelHierarchy) for lev in 1:num_levels(mh) model = get_model(mh,lev) @@ -20,7 +22,7 @@ function run(parts,num_parts_x_level,num_trees,num_refs_coarse) cmodel = CartesianDiscreteModel(domain,num_trees) num_levels = length(num_parts_x_level) - level_parts = GridapSolvers.generate_level_parts(parts,num_parts_x_level) + level_parts = generate_level_parts(parts,num_parts_x_level) coarse_model = OctreeDistributedDiscreteModel(level_parts[num_levels],cmodel,num_refs_coarse) mh = ModelHierarchy(coarse_model,level_parts) diff --git a/test/mpi/ModelHierarchiesTests.jl b/test/mpi/ModelHierarchiesTests.jl index e1cebc50..d9e70677 100644 --- a/test/mpi/ModelHierarchiesTests.jl +++ b/test/mpi/ModelHierarchiesTests.jl @@ -5,9 +5,11 @@ using Gridap using Gridap.FESpaces using GridapDistributed using PartitionedArrays -using GridapSolvers using GridapP4est +using GridapSolvers +using GridapSolvers.MultilevelTools + function model_hierarchy_free!(mh::ModelHierarchy) for lev in 1:num_levels(mh) model = get_model(mh,lev) @@ -21,7 +23,7 @@ function main(parts,num_parts_x_level,num_trees,num_refs_coarse) cmodel = CartesianDiscreteModel(domain,num_trees) num_levels = length(num_parts_x_level) - level_parts = GridapSolvers.generate_level_parts(parts,num_parts_x_level) + level_parts = generate_level_parts(parts,num_parts_x_level) coarse_model = OctreeDistributedDiscreteModel(level_parts[num_levels],cmodel,num_refs_coarse) mh = ModelHierarchy(coarse_model,level_parts) diff --git a/test/mpi/OctreeDistributedDiscreteModelsTests.jl b/test/mpi/OctreeDistributedDiscreteModelsTests.jl index b09fd6df..af089cb7 100644 --- a/test/mpi/OctreeDistributedDiscreteModelsTests.jl +++ b/test/mpi/OctreeDistributedDiscreteModelsTests.jl @@ -1,46 +1,48 @@ module OctreeDistributedDiscreteModelsTests - using MPI - using Test - using Gridap - using Gridap.ReferenceFEs - using Gridap.FESpaces - using PartitionedArrays - using GridapDistributed - using 
GridapP4est - using GridapSolvers +using MPI +using Test +using Gridap +using Gridap.ReferenceFEs +using Gridap.FESpaces +using PartitionedArrays +using GridapDistributed +using GridapP4est - function run(parts,subdomains,num_parts_x_level) - if length(subdomains) == 2 - domain=(0,1,0,1) - else - @assert length(subdomains) == 3 - domain=(0,1,0,1,0,1) - end +using GridapSolvers +using GridapSolvers.MultilevelTools - # Generate model - level_parts = GridapP4est.generate_level_parts(parts,num_parts_x_level) - coarse_model = CartesianDiscreteModel(domain,subdomains) - model = GridapP4est.OctreeDistributedDiscreteModel(level_parts[2],coarse_model,1) +function run(parts,subdomains,num_parts_x_level) + if length(subdomains) == 2 + domain=(0,1,0,1) + else + @assert length(subdomains) == 3 + domain=(0,1,0,1,0,1) + end - # Refining and distributing - fmodel1 , rglue1 = refine(model,level_parts[1]) - dfmodel1, dglue1 = redistribute(fmodel1) + # Generate model + level_parts = generate_level_parts(parts,num_parts_x_level) + coarse_model = CartesianDiscreteModel(domain,subdomains) + model = GridapP4est.OctreeDistributedDiscreteModel(level_parts[2],coarse_model,1) - fmodel2 , rglue2 = refine(model) - dfmodel2, dglue2 = redistribute(fmodel2,level_parts[1]) + # Refining and distributing + fmodel1 , rglue1 = refine(model,level_parts[1]) + dfmodel1, dglue1 = redistribute(fmodel1) - # FESpaces tests - sol(x) = x[1] + x[2] - reffe = ReferenceFE(lagrangian,Float64,1) - Vh = TestFESpace(dfmodel2, reffe; conformity=:H1) - Uh = TrialFESpace(sol,Vh) - Ω = Triangulation(dfmodel2) - dΩ = Measure(Ω,3) - - a(u,v) = ∫(v⋅u)*dΩ - assemble_matrix(a,Uh,Vh) - end + fmodel2 , rglue2 = refine(model) + dfmodel2, dglue2 = redistribute(fmodel2,level_parts[1]) + + # FESpaces tests + sol(x) = x[1] + x[2] + reffe = ReferenceFE(lagrangian,Float64,1) + Vh = TestFESpace(dfmodel2, reffe; conformity=:H1) + Uh = TrialFESpace(sol,Vh) + Ω = Triangulation(dfmodel2) + dΩ = Measure(Ω,3) + + a(u,v) = ∫(v⋅u)*dΩ + assemble_matrix(a,Uh,Vh) +end - prun(run,mpi,4,(2,2),[4,2]) - MPI.Finalize() +prun(run,mpi,4,(2,2),[4,2]) +MPI.Finalize() end # module diff --git a/test/mpi/RedistributeToolsTests.jl b/test/mpi/RedistributeToolsTests.jl index 8607d4cd..971e4993 100644 --- a/test/mpi/RedistributeToolsTests.jl +++ b/test/mpi/RedistributeToolsTests.jl @@ -1,94 +1,96 @@ module RedistributeToolsTests - using MPI - using PartitionedArrays - using Gridap - using GridapDistributed - using GridapP4est - using GridapSolvers - using Test +using MPI +using PartitionedArrays +using Gridap +using GridapDistributed +using GridapP4est +using Test - function model_hierarchy_free!(mh::ModelHierarchy) - for lev in 1:num_levels(mh) - model = get_model(mh,lev) - isa(model,DistributedRefinedDiscreteModel) && (model = model.model) - octree_distributed_discrete_model_free!(model) - end - end +using GridapSolvers +using GridapSolvers.MultilevelTools - function run(parts,num_parts_x_level,num_trees,num_refs_coarse) - domain = (0,1,0,1) - cmodel = CartesianDiscreteModel(domain,num_trees) - - num_levels = length(num_parts_x_level) - level_parts = GridapSolvers.generate_level_parts(parts,num_parts_x_level) - coarse_model = OctreeDistributedDiscreteModel(level_parts[num_levels],cmodel,num_refs_coarse) - mh = ModelHierarchy(coarse_model,level_parts) +function model_hierarchy_free!(mh::ModelHierarchy) + for lev in 1:num_levels(mh) + model = get_model(mh,lev) + isa(model,DistributedRefinedDiscreteModel) && (model = model.model) + octree_distributed_discrete_model_free!(model) + end 
+end - old_parts = level_parts[2] - new_parts = level_parts[1] +function run(parts,num_parts_x_level,num_trees,num_refs_coarse) + domain = (0,1,0,1) + cmodel = CartesianDiscreteModel(domain,num_trees) + + num_levels = length(num_parts_x_level) + level_parts = generate_level_parts(parts,num_parts_x_level) + coarse_model = OctreeDistributedDiscreteModel(level_parts[num_levels],cmodel,num_refs_coarse) + mh = ModelHierarchy(coarse_model,level_parts) - # FE Spaces - order = 1 - u(x) = x[1] + x[2] - reffe = ReferenceFE(lagrangian,Float64,order) - glue = mh.levels[1].red_glue + old_parts = level_parts[2] + new_parts = level_parts[1] - model_old = get_model_before_redist(mh.levels[1]) - VOLD = TestFESpace(model_old,reffe,dirichlet_tags="boundary") - UOLD = TrialFESpace(VOLD,u) + # FE Spaces + order = 1 + u(x) = x[1] + x[2] + reffe = ReferenceFE(lagrangian,Float64,order) + glue = mh.levels[1].red_glue - model_new = get_model(mh.levels[1]) - VNEW = TestFESpace(model_new,reffe,dirichlet_tags="boundary") - UNEW = TrialFESpace(VNEW,u) + model_old = get_model_before_redist(mh.levels[1]) + VOLD = TestFESpace(model_old,reffe,dirichlet_tags="boundary") + UOLD = TrialFESpace(VOLD,u) - # Triangulations - qdegree = 2*order+1 - Ω_new = Triangulation(model_new) - dΩ_new = Measure(Ω_new,qdegree) - uh_new = interpolate(u,UNEW) + model_new = get_model(mh.levels[1]) + VNEW = TestFESpace(model_new,reffe,dirichlet_tags="boundary") + UNEW = TrialFESpace(VNEW,u) - if GridapP4est.i_am_in(old_parts) - Ω_old = Triangulation(model_old) - dΩ_old = Measure(Ω_old,qdegree) - uh_old = interpolate(u,UOLD) - else - Ω_old = nothing - dΩ_old = nothing - uh_old = nothing - end + # Triangulations + qdegree = 2*order+1 + Ω_new = Triangulation(model_new) + dΩ_new = Measure(Ω_new,qdegree) + uh_new = interpolate(u,UNEW) - # Old -> New - uh_old_red = GridapSolvers.redistribute_fe_function(uh_old, - UNEW, - model_new, - glue) - n = sum(∫(uh_old_red)*dΩ_new) - if GridapP4est.i_am_in(old_parts) - o = sum(∫(uh_old)*dΩ_old) - @test o ≈ n - end + if GridapP4est.i_am_in(old_parts) + Ω_old = Triangulation(model_old) + dΩ_old = Measure(Ω_old,qdegree) + uh_old = interpolate(u,UOLD) + else + Ω_old = nothing + dΩ_old = nothing + uh_old = nothing + end - # New -> Old - uh_new_red = GridapSolvers.redistribute_fe_function(uh_new, - UOLD, - model_old, - glue; - reverse=true) - n = sum(∫(uh_new)*dΩ_new) - if GridapP4est.i_am_in(old_parts) - o = sum(∫(uh_new_red)*dΩ_old) - @test o ≈ n - end + # Old -> New + uh_old_red = redistribute_fe_function(uh_old, + UNEW, + model_new, + glue) + n = sum(∫(uh_old_red)*dΩ_new) + if GridapP4est.i_am_in(old_parts) + o = sum(∫(uh_old)*dΩ_old) + @test o ≈ n + end - #model_hierarchy_free!(mh) + # New -> Old + uh_new_red = redistribute_fe_function(uh_new, + UOLD, + model_old, + glue; + reverse=true) + n = sum(∫(uh_new)*dΩ_new) + if GridapP4est.i_am_in(old_parts) + o = sum(∫(uh_new_red)*dΩ_old) + @test o ≈ n end + #model_hierarchy_free!(mh) +end - num_parts_x_level = [4,2,2] # Procs in each refinement level - num_trees = (1,1) # Number of initial P4est trees - num_refs_coarse = 2 # Number of initial refinements - - ranks = num_parts_x_level[1] - prun(run,mpi,ranks,num_parts_x_level,num_trees,num_refs_coarse) - MPI.Finalize() + +num_parts_x_level = [4,2,2] # Procs in each refinement level +num_trees = (1,1) # Number of initial P4est trees +num_refs_coarse = 2 # Number of initial refinements + +ranks = num_parts_x_level[1] +prun(run,mpi,ranks,num_parts_x_level,num_trees,num_refs_coarse) +MPI.Finalize() end diff --git 
a/test/mpi/RefinementToolsTests.jl b/test/mpi/RefinementToolsTests.jl index 60bef53e..4e5d3b0b 100644 --- a/test/mpi/RefinementToolsTests.jl +++ b/test/mpi/RefinementToolsTests.jl @@ -1,86 +1,88 @@ module RefinementToolsTests - using MPI - using PartitionedArrays - using Gridap - using GridapDistributed - using GridapP4est - using GridapSolvers - using Test - using IterativeSolvers - - function run(parts,num_parts_x_level,num_trees,num_refs_coarse) - domain = (0,1,0,1) - cmodel = CartesianDiscreteModel(domain,num_trees) - - nlevs = length(num_parts_x_level) - level_parts = GridapSolvers.generate_level_parts(parts,num_parts_x_level) - coarse_model = OctreeDistributedDiscreteModel(level_parts[nlevs],cmodel,num_refs_coarse) - mh = ModelHierarchy(coarse_model,level_parts) - - # FE Spaces - order = 1 - sol(x) = x[1] + x[2] - reffe = ReferenceFE(lagrangian,Float64,order) - tests = TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") - trials = TrialFESpace(tests,sol) - - quad_order = 2*order+1 - for lev in 1:nlevs-1 - fparts = get_level_parts(mh,lev) - cparts = get_level_parts(mh,lev+1) - - if GridapP4est.i_am_in(cparts) - Vh = get_fe_space_before_redist(tests,lev) - Uh = get_fe_space_before_redist(trials,lev) - Ωh = get_triangulation(Uh,get_model_before_redist(mh,lev)) - dΩh = Measure(Ωh,quad_order) - uh = interpolate(sol,Uh) - - VH = get_fe_space(tests,lev+1) - UH = get_fe_space(trials,lev+1) - ΩH = get_triangulation(UH,get_model(mh,lev+1)) - dΩH = Measure(ΩH,quad_order) - uH = interpolate(sol,UH) - dΩhH = Measure(ΩH,Ωh,quad_order) - - # Coarse FEFunction -> Fine FEFunction, by projection - ah(u,v) = ∫(v⋅u)*dΩh - lh(v) = ∫(v⋅uH)*dΩh - Ah = assemble_matrix(ah,Uh,Vh) - bh = assemble_vector(lh,Vh) - - xh = PVector(0.0,Ah.cols) - IterativeSolvers.cg!(xh,Ah,bh;verbose=i_am_main(parts),reltol=1.0e-06) - uH_projected = FEFunction(Uh,xh) - - _eh = uh-uH_projected - eh = sum(∫(_eh⋅_eh)*dΩh) - i_am_main(parts) && println("Error H2h: ", eh) - - # Fine FEFunction -> Coarse FEFunction, by projection - aH(u,v) = ∫(v⋅u)*dΩH - lH(v) = ∫(v⋅uH_projected)*dΩhH - AH = assemble_matrix(aH,UH,VH) - bH = assemble_vector(lH,VH) - - xH = PVector(0.0,AH.cols) - IterativeSolvers.cg!(xH,AH,bH;verbose=i_am_main(parts),reltol=1.0e-06) - uh_projected = FEFunction(UH,xH) - - _eH = uH-uh_projected - eH = sum(∫(_eH⋅_eH)*dΩH) - i_am_main(parts) && println("Error h2H: ", eH) - end - end +using MPI +using PartitionedArrays +using Gridap +using GridapDistributed +using GridapP4est +using Test +using IterativeSolvers + +using GridapSolvers +using GridapSolvers.MultilevelTools + +function run(parts,num_parts_x_level,num_trees,num_refs_coarse) + domain = (0,1,0,1) + cmodel = CartesianDiscreteModel(domain,num_trees) + + nlevs = length(num_parts_x_level) + level_parts = generate_level_parts(parts,num_parts_x_level) + coarse_model = OctreeDistributedDiscreteModel(level_parts[nlevs],cmodel,num_refs_coarse) + mh = ModelHierarchy(coarse_model,level_parts) + + # FE Spaces + order = 1 + sol(x) = x[1] + x[2] + reffe = ReferenceFE(lagrangian,Float64,order) + tests = TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") + trials = TrialFESpace(tests,sol) + + quad_order = 3*order+1 + for lev in 1:nlevs-1 + fparts = get_level_parts(mh,lev) + cparts = get_level_parts(mh,lev+1) + + if GridapP4est.i_am_in(cparts) + Vh = get_fe_space_before_redist(tests,lev) + Uh = get_fe_space_before_redist(trials,lev) + Ωh = get_triangulation(Uh,get_model_before_redist(mh,lev)) + dΩh = Measure(Ωh,quad_order) + uh = interpolate(sol,Uh) + + VH = 
get_fe_space(tests,lev+1) + UH = get_fe_space(trials,lev+1) + ΩH = get_triangulation(UH,get_model(mh,lev+1)) + dΩH = Measure(ΩH,quad_order) + uH = interpolate(sol,UH) + dΩhH = Measure(ΩH,Ωh,quad_order) + + # Coarse FEFunction -> Fine FEFunction, by projection + ah(u,v) = ∫(v⋅u)*dΩh + lh(v) = ∫(v⋅uH)*dΩh + Ah = assemble_matrix(ah,Uh,Vh) + bh = assemble_vector(lh,Vh) + xh = PVector(0.0,Ah.cols) + IterativeSolvers.cg!(xh,Ah,bh;verbose=i_am_main(parts),reltol=1.0e-08) + uH_projected = FEFunction(Uh,xh) + + _eh = uh-uH_projected + eh = sum(∫(_eh⋅_eh)*dΩh) + i_am_main(parts) && println("Error H2h: ", eh) + + # Fine FEFunction -> Coarse FEFunction, by projection + aH(u,v) = ∫(v⋅u)*dΩH + lH(v) = ∫(v⋅uH_projected)*dΩhH + AH = assemble_matrix(aH,UH,VH) + bH = assemble_vector(lH,VH) + + xH = PVector(0.0,AH.cols) + IterativeSolvers.cg!(xH,AH,bH;verbose=i_am_main(parts),reltol=1.0e-08) + uh_projected = FEFunction(UH,xH) + + _eH = uH-uh_projected + eH = sum(∫(_eH⋅_eH)*dΩH) + i_am_main(parts) && println("Error h2H: ", eH) + end end +end + + +num_parts_x_level = [4,2,2] # Procs in each refinement level +num_trees = (1,1) # Number of initial P4est trees +num_refs_coarse = 2 # Number of initial refinements - num_parts_x_level = [4,2,2] # Procs in each refinement level - num_trees = (1,1) # Number of initial P4est trees - num_refs_coarse = 2 # Number of initial refinements - - ranks = num_parts_x_level[1] - prun(run,mpi,ranks,num_parts_x_level,num_trees,num_refs_coarse) - MPI.Finalize() +ranks = num_parts_x_level[1] +prun(run,mpi,ranks,num_parts_x_level,num_trees,num_refs_coarse) +MPI.Finalize() end diff --git a/test/runtests.jl b/test/runtests.jl index 1a3e29f6..21aeb338 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -28,7 +28,7 @@ function run_tests(testdir) testfiles = sort(filter(istest, readdir(testdir))) @time @testset "$f" for f in testfiles MPI.mpiexec() do cmd - if f in ["RedistributeToolsTests.jl","RefinementToolsTests","OctreeDistributedDiscreteModelsTests"] + if f in ["DistributedGridTransferOperatorTests.jl","RedistributeToolsTests.jl","RefinementToolsTests","OctreeDistributedDiscreteModelsTests"] np = 4 extra_args = "-s 2 2 -r 2" elseif f in ["ModelHierarchiesTests.jl"] From 2216aba440a7ff828f4d4c3b0b812ee8bc36c872 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 13 Dec 2022 23:40:36 +0100 Subject: [PATCH 36/95] bugfix in runtests.jl --- test/runtests.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/runtests.jl b/test/runtests.jl index 21aeb338..0308cedf 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -28,7 +28,7 @@ function run_tests(testdir) testfiles = sort(filter(istest, readdir(testdir))) @time @testset "$f" for f in testfiles MPI.mpiexec() do cmd - if f in ["DistributedGridTransferOperatorTests.jl","RedistributeToolsTests.jl","RefinementToolsTests","OctreeDistributedDiscreteModelsTests"] + if f in ["DistributedGridTransferOperatorsTests.jl","RedistributeToolsTests.jl","RefinementToolsTests","OctreeDistributedDiscreteModelsTests"] np = 4 extra_args = "-s 2 2 -r 2" elseif f in ["ModelHierarchiesTests.jl"] From 6e8aaa364776978ef314377a867ab66dea853d7b Mon Sep 17 00:00:00 2001 From: "Alberto F. Martin" Date: Wed, 14 Dec 2022 10:01:25 +1100 Subject: [PATCH 37/95] Removed OctreeDistributedDiscreteModelsTests.jl. It does not belong here. 
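[Editor's note, not part of this commit: the fine-to-coarse transfer exercised
in RefinementToolsTests.jl above is an L2 projection, i.e. find uH such that
∫(vH⋅uH)dΩH = ∫(vH⋅uh)dΩhH for all vH. A minimal sketch of that single step,
reusing the names built in the test (UH, VH, uh, dΩH and dΩhH are assumed to
be in scope):

    using Gridap, PartitionedArrays, IterativeSolvers

    aH(u,v) = ∫(v⋅u)*dΩH            # coarse-scale mass matrix
    lH(v)   = ∫(v⋅uh)*dΩhH          # fine function integrated on coarse cells
    AH = assemble_matrix(aH,UH,VH)
    bH = assemble_vector(lH,VH)
    xH = PVector(0.0,AH.cols)       # zero initial guess, matrix layout
    IterativeSolvers.cg!(xH,AH,bH;reltol=1.0e-08)
    uh_projected = FEFunction(UH,xH)
]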
--- .../OctreeDistributedDiscreteModelsTests.jl | 48 ------------------- test/runtests.jl | 2 +- 2 files changed, 1 insertion(+), 49 deletions(-) delete mode 100644 test/mpi/OctreeDistributedDiscreteModelsTests.jl diff --git a/test/mpi/OctreeDistributedDiscreteModelsTests.jl b/test/mpi/OctreeDistributedDiscreteModelsTests.jl deleted file mode 100644 index af089cb7..00000000 --- a/test/mpi/OctreeDistributedDiscreteModelsTests.jl +++ /dev/null @@ -1,48 +0,0 @@ -module OctreeDistributedDiscreteModelsTests -using MPI -using Test -using Gridap -using Gridap.ReferenceFEs -using Gridap.FESpaces -using PartitionedArrays -using GridapDistributed -using GridapP4est - -using GridapSolvers -using GridapSolvers.MultilevelTools - -function run(parts,subdomains,num_parts_x_level) - if length(subdomains) == 2 - domain=(0,1,0,1) - else - @assert length(subdomains) == 3 - domain=(0,1,0,1,0,1) - end - - # Generate model - level_parts = generate_level_parts(parts,num_parts_x_level) - coarse_model = CartesianDiscreteModel(domain,subdomains) - model = GridapP4est.OctreeDistributedDiscreteModel(level_parts[2],coarse_model,1) - - # Refining and distributing - fmodel1 , rglue1 = refine(model,level_parts[1]) - dfmodel1, dglue1 = redistribute(fmodel1) - - fmodel2 , rglue2 = refine(model) - dfmodel2, dglue2 = redistribute(fmodel2,level_parts[1]) - - # FESpaces tests - sol(x) = x[1] + x[2] - reffe = ReferenceFE(lagrangian,Float64,1) - Vh = TestFESpace(dfmodel2, reffe; conformity=:H1) - Uh = TrialFESpace(sol,Vh) - Ω = Triangulation(dfmodel2) - dΩ = Measure(Ω,3) - - a(u,v) = ∫(v⋅u)*dΩ - assemble_matrix(a,Uh,Vh) -end - -prun(run,mpi,4,(2,2),[4,2]) -MPI.Finalize() -end # module diff --git a/test/runtests.jl b/test/runtests.jl index 0308cedf..82a02839 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -28,7 +28,7 @@ function run_tests(testdir) testfiles = sort(filter(istest, readdir(testdir))) @time @testset "$f" for f in testfiles MPI.mpiexec() do cmd - if f in ["DistributedGridTransferOperatorsTests.jl","RedistributeToolsTests.jl","RefinementToolsTests","OctreeDistributedDiscreteModelsTests"] + if f in ["DistributedGridTransferOperatorsTests.jl","RedistributeToolsTests.jl","RefinementToolsTests"] np = 4 extra_args = "-s 2 2 -r 2" elseif f in ["ModelHierarchiesTests.jl"] From bf7d84282974459c49787aa1d451e8ff866b5231 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 14 Dec 2022 12:22:41 +0100 Subject: [PATCH 38/95] Added RichardsonSmoothers --- src/LinearSolvers/JacobiLinearSolvers.jl | 50 ++++++++++++++++ src/LinearSolvers/LinearSolvers.jl | 14 +++++ src/LinearSolvers/RichardsonSmoothers.jl | 73 ++++++++++++++++++++++++ test/mpi/RichardsonSmoothersTests.jl | 63 ++++++++++++++++++++ test/runtests.jl | 11 +++- 5 files changed, 208 insertions(+), 3 deletions(-) create mode 100644 src/LinearSolvers/JacobiLinearSolvers.jl create mode 100644 src/LinearSolvers/RichardsonSmoothers.jl create mode 100644 test/mpi/RichardsonSmoothersTests.jl diff --git a/src/LinearSolvers/JacobiLinearSolvers.jl b/src/LinearSolvers/JacobiLinearSolvers.jl new file mode 100644 index 00000000..efe6ca63 --- /dev/null +++ b/src/LinearSolvers/JacobiLinearSolvers.jl @@ -0,0 +1,50 @@ +struct JacobiLinearSolver <: Gridap.Algebra.LinearSolver +end + +struct JacobiSymbolicSetup <: Gridap.Algebra.SymbolicSetup +end + +function Gridap.Algebra.symbolic_setup(s::JacobiLinearSolver,A::AbstractMatrix) + JacobiSymbolicSetup() +end + +mutable struct JacobiNumericalSetup{A} <: Gridap.Algebra.NumericalSetup + inv_diag :: A +end + +function 
Gridap.Algebra.numerical_setup(ss::JacobiSymbolicSetup,A::AbstractMatrix)
+  inv_diag = 1.0./diag(A)
+  return JacobiNumericalSetup(inv_diag)
+end
+
+function Gridap.Algebra.numerical_setup!(ns::JacobiNumericalSetup, A::AbstractMatrix)
+  ns.inv_diag .= 1.0 ./ diag(A)
+end
+
+function Gridap.Algebra.numerical_setup(ss::JacobiSymbolicSetup,A::PSparseMatrix)
+  inv_diag=map_parts(A.owned_owned_values) do a
+    1.0 ./ diag(a)
+  end
+  return JacobiNumericalSetup(inv_diag)
+end
+
+function Gridap.Algebra.numerical_setup!(ns::JacobiNumericalSetup, A::PSparseMatrix)
+  map_parts(ns.inv_diag,A.owned_owned_values) do inv_diag, a
+    inv_diag .= 1.0 ./ diag(a)
+  end
+  return ns
+end
+
+function Gridap.Algebra.solve!(x::AbstractVector, ns::JacobiNumericalSetup, b::AbstractVector)
+  inv_diag = ns.inv_diag
+  x .= inv_diag .* b
+  return x
+end
+
+function Gridap.Algebra.solve!(x::PVector, ns::JacobiNumericalSetup, b::PVector)
+  inv_diag = ns.inv_diag
+  map_parts(inv_diag,x.owned_values,b.owned_values) do inv_diag, x, b
+    x .= inv_diag .* b
+  end
+  return x
+end
diff --git a/src/LinearSolvers/LinearSolvers.jl b/src/LinearSolvers/LinearSolvers.jl
index 0197aed8..e5590b2d 100644
--- a/src/LinearSolvers/LinearSolvers.jl
+++ b/src/LinearSolvers/LinearSolvers.jl
@@ -1,5 +1,19 @@
 module LinearSolvers
 
+using LinearAlgebra
+using Gridap
+using Gridap.Algebra
+using PartitionedArrays
+
 using GridapSolvers.MultilevelTools
 
+import LinearAlgebra: mul!, ldiv!
+
+export JacobiLinearSolver
+export RichardsonSmoother
+
+include("JacobiLinearSolvers.jl")
+include("RichardsonSmoothers.jl")
+#include("GMGLinearSolvers.jl")
+
 end
\ No newline at end of file
diff --git a/src/LinearSolvers/RichardsonSmoothers.jl b/src/LinearSolvers/RichardsonSmoothers.jl
new file mode 100644
index 00000000..c36d086c
--- /dev/null
+++ b/src/LinearSolvers/RichardsonSmoothers.jl
@@ -0,0 +1,73 @@
+
+
+struct RichardsonSmoother{A,B} <: Gridap.Algebra.LinearSolver
+  M :: Gridap.Algebra.LinearSolver
+  num_smooth_steps :: A
+  damping_factor :: B
+end
+
+function RichardsonSmoother(M::Gridap.Algebra.LinearSolver,
+                            num_smooth_steps::Integer=1,
+                            damping_factor::Real=1.0)
+  A = typeof(num_smooth_steps)
+  B = typeof(damping_factor)
+  return RichardsonSmoother{A,B}(M,num_smooth_steps,damping_factor)
+end
+
+struct RichardsonSmootherSymbolicSetup{A} <: Gridap.Algebra.SymbolicSetup
+  smoother :: RichardsonSmoother
+  Mss :: A
+end
+
+function Gridap.Algebra.symbolic_setup(smoother::RichardsonSmoother,mat::AbstractMatrix)
+  Mss=symbolic_setup(smoother.M,mat)
+  return RichardsonSmootherSymbolicSetup(smoother,Mss)
+end
+
+mutable struct RichardsonSmootherNumericalSetup{A,B,C,D} <: Gridap.Algebra.NumericalSetup
+  smoother :: RichardsonSmoother
+  A :: A
+  Adx :: B
+  dx :: C
+  Mns :: D
+end
+
+function Gridap.Algebra.numerical_setup(ss::RichardsonSmootherSymbolicSetup, A::AbstractMatrix{T}) where T
+  Adx = zeros(size(A,1))
+  dx = zeros(size(A,2))
+  Mns = numerical_setup(ss.Mss,A)
+  return RichardsonSmootherNumericalSetup(ss.smoother,A,Adx,dx,Mns)
+end
+
+function Gridap.Algebra.numerical_setup(ss::RichardsonSmootherSymbolicSetup, A::PSparseMatrix)
+  Adx = PVector(0.0,A.rows)
+  dx = PVector(0.0,A.cols)
+  Mns = numerical_setup(ss.Mss,A)
+  return RichardsonSmootherNumericalSetup(ss.smoother,A,Adx,dx,Mns)
+end
+
+function Gridap.Algebra.numerical_setup!(ns::RichardsonSmootherNumericalSetup, A::AbstractMatrix)
+  numerical_setup!(ns.Mns,A)
+end
+
+function Gridap.Algebra.solve!(x::AbstractVector,ns::RichardsonSmootherNumericalSetup,r::AbstractVector)
+  Adx,dx,Mns = 
ns.Adx,ns.dx,ns.Mns + + iter = 1 + while iter <= ns.smoother.num_smooth_steps + solve!(dx,Mns,r) + dx .= ns.smoother.damping_factor .* dx + x .= x .+ dx + mul!(Adx, ns.A, dx) + r .= r .- Adx + iter += 1 + end +end + +function LinearAlgebra.ldiv!(x::AbstractVector,ns::RichardsonSmootherNumericalSetup,b::AbstractVector) + fill!(x,0.0) + aux = copy(b) + solve!(x,ns,aux) + return x +end + diff --git a/test/mpi/RichardsonSmoothersTests.jl b/test/mpi/RichardsonSmoothersTests.jl new file mode 100644 index 00000000..9bc2b810 --- /dev/null +++ b/test/mpi/RichardsonSmoothersTests.jl @@ -0,0 +1,63 @@ +module RichardsonSmoothersTests + +using Test +using MPI +using Gridap +using GridapDistributed +using PartitionedArrays +using GridapP4est +using IterativeSolvers + +using GridapSolvers +using GridapSolvers.LinearSolvers + +function main(parts,partition) + domain = (0,1,0,1) + model = CartesianDiscreteModel(parts,domain,partition) + + sol(x) = x[1] + x[2] + f(x) = -Δ(sol)(x) + + order = 1 + qorder = order*2 + 1 + reffe = ReferenceFE(lagrangian,Float64,order) + Vh = TestFESpace(model,reffe,conformity=:H1,dirichlet_tags="boundary") + Uh = TrialFESpace(Vh,sol) + u = interpolate(sol,Uh) + + Ω = Triangulation(model) + dΩ = Measure(Ω,qorder) + a(u,v) = ∫(∇(v)⋅∇(u))*dΩ + l(v) = ∫(v⋅f)*dΩ + + op = AffineFEOperator(a,l,Uh,Vh) + A, b = get_matrix(op), get_vector(op) + + P = RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0) + ss = symbolic_setup(P,A) + ns = numerical_setup(ss,A) + + x = PVector(1.0,A.cols) + x, history = IterativeSolvers.cg!(x,A,b; + verbose=GridapP4est.i_am_main(parts), + reltol=1.0e-8, + Pl=ns, + log=true) + + u = interpolate(sol,Uh) + uh = FEFunction(Uh,x) + eh = uh - u + E = sum(∫(eh*eh)*dΩ) + if GridapP4est.i_am_main(parts) + println("L2 Error: ", E) + end + + @test E < 1.e-8 +end + +partition = (32,32) +ranks = (2,2) +prun(main,mpi,ranks,partition) +MPI.Finalize() + +end \ No newline at end of file diff --git a/test/runtests.jl b/test/runtests.jl index 82a02839..83a8ff99 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -28,7 +28,10 @@ function run_tests(testdir) testfiles = sort(filter(istest, readdir(testdir))) @time @testset "$f" for f in testfiles MPI.mpiexec() do cmd - if f in ["DistributedGridTransferOperatorsTests.jl","RedistributeToolsTests.jl","RefinementToolsTests"] + if f in ["DistributedGridTransferOperatorsTests.jl", + "RedistributeToolsTests.jl", + "RefinementToolsTests", + "RichardsonSmoothersTests"] np = 4 extra_args = "-s 2 2 -r 2" elseif f in ["ModelHierarchiesTests.jl"] @@ -53,5 +56,7 @@ function run_tests(testdir) end end -run_tests(@__DIR__) -run_tests(joinpath(@__DIR__, "mpi")) +run(`mpiexec -n 4 julia --project=. 
mpi/RichardsonSmoothersTests.jl`) + +#run_tests(@__DIR__) +#run_tests(joinpath(@__DIR__, "mpi")) From 781962fa69ed4586385ab209523d807f65a3258d Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 14 Dec 2022 15:41:45 +0100 Subject: [PATCH 39/95] Added GMGLinearSolver --- Manifest.toml | 2 +- Project.toml | 1 + src/LinearSolvers/GMGLinearSolvers.jl | 291 ++++++++++++++++++ src/LinearSolvers/LinearSolvers.jl | 5 +- .../DistributedGridTransferOperators.jl | 8 +- .../DistributedGridTransferOperatorsTests.jl | 1 - test/mpi/GMGLinearSolversLaplacianTests.jl | 142 +++++++++ test/runtests.jl | 5 +- 8 files changed, 446 insertions(+), 9 deletions(-) create mode 100644 src/LinearSolvers/GMGLinearSolvers.jl create mode 100644 test/mpi/GMGLinearSolversLaplacianTests.jl diff --git a/Manifest.toml b/Manifest.toml index 61e61922..05dcd5f2 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -2,7 +2,7 @@ julia_version = "1.8.1" manifest_format = "2.0" -project_hash = "dacc65432743d6fd7291a02b1cfef7c22067c5b6" +project_hash = "f0e80b077764dd7f022a6156b1fee02c46c8766c" [[deps.AbstractFFTs]] deps = ["ChainRulesCore", "LinearAlgebra"] diff --git a/Project.toml b/Project.toml index 5d6295cd..e2b344e6 100644 --- a/Project.toml +++ b/Project.toml @@ -13,6 +13,7 @@ IterativeSolvers = "42fd0dbc-a981-5370-80f2-aaf504508153" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195" PartitionedArrays = "5a9dfac6-5c52-46f7-8278-5e2210713be9" +Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" [compat] julia = "1.7" diff --git a/src/LinearSolvers/GMGLinearSolvers.jl b/src/LinearSolvers/GMGLinearSolvers.jl new file mode 100644 index 00000000..af0d0b1a --- /dev/null +++ b/src/LinearSolvers/GMGLinearSolvers.jl @@ -0,0 +1,291 @@ +struct GMGLinearSolver{A,B,C,D,E,F,G,H} <: Gridap.Algebra.LinearSolver + mh :: ModelHierarchy + smatrices :: A + interp :: B + restrict :: C + pre_smoothers :: D + post_smoothers :: E + coarsest_solver :: F + maxiter :: G + rtol :: H + verbose :: Bool + mode :: Symbol +end + +function GMGLinearSolver(mh, + smatrices, + interp, + restrict; + pre_smoothers=Fill(RichardsonSmoother(JacobiLinearSolver(),10),num_levels(mh)-1), + post_smoothers=pre_smoothers, + coarsest_solver=Gridap.Algebra.BackslashSolver(), + maxiter=100, + rtol=1.0e-06, + verbose::Bool=false, + mode=:preconditioner) + + Gridap.Helpers.@check mode ∈ [:preconditioner, :solver] + Gridap.Helpers.@check isa(maxiter,Integer) + Gridap.Helpers.@check isa(rtol,Real) + + A=typeof(smatrices) + B=typeof(interp) + C=typeof(restrict) + D=typeof(pre_smoothers) + E=typeof(post_smoothers) + F=typeof(coarsest_solver) + G=typeof(maxiter) + H=typeof(rtol) + GMGLinearSolver{A,B,C,D,E,F,G,H}(mh, + smatrices, + interp, + restrict, + pre_smoothers, + post_smoothers, + coarsest_solver, + maxiter, + rtol, + verbose, + mode) +end + +struct GMGSymbolicSetup <: Gridap.Algebra.SymbolicSetup + solver :: GMGLinearSolver +end + +function Gridap.Algebra.symbolic_setup(solver::GMGLinearSolver,mat::AbstractMatrix) + return GMGSymbolicSetup(solver) +end + +struct GMGNumericalSetup{A,B,C,D} <: Gridap.Algebra.NumericalSetup + solver :: GMGLinearSolver + pre_smoothers_caches :: A + post_smoothers_caches :: B + coarsest_solver_cache :: C + work_vectors :: D + + function GMGNumericalSetup(ss::GMGSymbolicSetup) + mh = ss.solver.mh + pre_smoothers = ss.solver.pre_smoothers + post_smoothers = ss.solver.post_smoothers + smatrices = ss.solver.smatrices + coarsest_solver = ss.solver.coarsest_solver + + work_vectors = 
allocate_work_vectors(mh,smatrices) + pre_smoothers_caches = setup_smoothers_caches(mh,pre_smoothers,smatrices) + if (!(pre_smoothers === post_smoothers)) + post_smoothers_caches = setup_smoothers_caches(mh,post_smoothers,smatrices) + else + post_smoothers_caches = pre_smoothers_caches + end + coarsest_solver_cache = setup_coarsest_solver_cache(mh,coarsest_solver,smatrices) + A = typeof(pre_smoothers_caches) + B = typeof(post_smoothers_caches) + C = typeof(coarsest_solver_cache) + D = typeof(work_vectors) + new{A,B,C,D}(ss.solver, + pre_smoothers_caches, + post_smoothers_caches, + coarsest_solver_cache, + work_vectors) + end +end + +function Gridap.Algebra.numerical_setup(ss::GMGSymbolicSetup,mat::AbstractMatrix) + return GMGNumericalSetup(ss) +end + +function setup_smoothers_caches(mh,smoothers,smatrices) + Gridap.Helpers.@check length(smoothers) == num_levels(mh)-1 + nlevs = num_levels(mh) + # Last (i.e., coarsest) level does not need pre-/post-smoothing + caches = Vector{Any}(undef,nlevs-1) + for i = 1:nlevs-1 + model = get_model(mh,i) + parts = get_level_parts(mh,i) + if (GridapP4est.i_am_in(parts)) + ss = symbolic_setup(smoothers[i], smatrices[i]) + caches[i] = numerical_setup(ss, smatrices[i]) + end + end + return caches +end + +function setup_coarsest_solver_cache(mh,coarsest_solver,smatrices) + cache = nothing + nlevs = num_levels(mh) + parts = get_level_parts(mh,nlevs) + model = get_model(mh,nlevs) + if (GridapP4est.i_am_in(parts)) + if (num_parts(parts) == 1) + cache = map_parts(smatrices[nlevs].owned_owned_values) do Ah + ss = symbolic_setup(coarsest_solver, Ah) + numerical_setup(ss, Ah) + end + cache = cache.part + else + ss = symbolic_setup(coarsest_solver, smatrices[nlevs]) + cache = numerical_setup(ss, smatrices[nlevs]) + end + end + return cache +end + +function allocate_level_work_vectors(mh,smatrices,lev) + parts = get_level_parts(mh,lev+1) + dxh = PVector(0.0, smatrices[lev].cols) + Adxh = PVector(0.0, smatrices[lev].rows) + rh = PVector(0.0, smatrices[lev].rows) + if (GridapP4est.i_am_in(parts)) + AH = smatrices[lev+1] + rH = PVector(0.0,AH.cols) + dxH = PVector(0.0,AH.cols) + else + rH = nothing + dxH = nothing + end + return dxh, Adxh, dxH, rH +end + +function allocate_work_vectors(mh,smatrices) + nlevs = num_levels(mh) + work_vectors = Vector{Any}(undef,nlevs-1) + for i = 1:nlevs-1 + parts = get_level_parts(mh,i) + if GridapP4est.i_am_in(parts) + work_vectors[i] = allocate_level_work_vectors(mh,smatrices,i) + end + end + return work_vectors +end + +function apply_GMG_level!(xh, + rh, + lev, + mh, + smatrices, + restrictions, + interpolations, + pre_smoothers_caches, + post_smoothers_caches, + coarsest_solver_cache, + work_vectors; + verbose=false) + + parts = get_level_parts(mh,lev) + if GridapP4est.i_am_in(parts) + if (lev == num_levels(mh)) + if (GridapP4est.num_parts(parts) == 1) + map_parts(xh.owned_values,rh.owned_values) do xh, rh + solve!(xh,coarsest_solver_cache,rh) + end + else + solve!(xh,coarsest_solver_cache,rh) + end + else + Ah = smatrices[lev] + dxh, Adxh, dxH, rH = work_vectors[lev] + + # Pre-smooth current solution + solve!(xh, pre_smoothers_caches[lev], rh) + + # Restrict the residual + mul!(rH,restrictions[lev],rh) + + if !isa(dxH,Nothing) + fill!(dxH,0.0) + end + + # Apply next_level + apply_GMG_level!(dxH, + rH, + lev+1, + mh, + smatrices, + restrictions, + interpolations, + pre_smoothers_caches, + post_smoothers_caches, + coarsest_solver_cache, + work_vectors; + verbose=verbose) + + # Interpolate dxH in finer space + 
mul!(dxh,interpolations[lev],dxH) + + # Update solution + xh .= xh .+ dxh + # Update residual + mul!(Adxh, Ah, dxh) + rh .= rh .- Adxh + + # Post-smooth current solution + solve!(xh, post_smoothers_caches[lev], rh) + end + end +end + +function Gridap.Algebra.solve!( + x::AbstractVector,ns::GMGNumericalSetup,b::AbstractVector) + + smatrices = ns.solver.smatrices + mh = ns.solver.mh + maxiter = ns.solver.maxiter + rtol = ns.solver.rtol + restrictions = ns.solver.restrict + interpolations = ns.solver.interp + verbose = ns.solver.verbose + mode = ns.solver.mode + + pre_smoothers_caches = ns.pre_smoothers_caches + post_smoothers_caches = ns.post_smoothers_caches + coarsest_solver_cache = ns.coarsest_solver_cache + work_vectors = ns.work_vectors + + if (mode==:preconditioner) + fill!(x,0.0) + rh = copy(b) + else + Ah = smatrices[1] + rh = PVector(0.0,Ah.rows) + rh .= b .- Ah*x + end + + nrm_r0 = norm(rh) + nrm_r = nrm_r0 + current_iter = 0 + rel_res = nrm_r / nrm_r0 + parts = get_level_parts(mh,1) + + if GridapP4est.i_am_main(parts) + @printf "%6s %12s" "Iter" "Rel res\n" + @printf "%6i %12.4e\n" current_iter rel_res + end + + while (current_iter < maxiter) && (rel_res > rtol) + apply_GMG_level!(x, + rh, + 1, + mh, + smatrices, + restrictions, + interpolations, + pre_smoothers_caches, + post_smoothers_caches, + coarsest_solver_cache, + work_vectors; + verbose=verbose) + nrm_r = norm(rh) + rel_res = nrm_r / nrm_r0 + current_iter += 1 + if GridapP4est.i_am_main(parts) + @printf "%6i %12.4e\n" current_iter rel_res + end + end + converged = (rel_res < rtol) + return current_iter, converged +end + +function LinearAlgebra.ldiv!(x::AbstractVector,ns::GMGNumericalSetup,b::AbstractVector) + solve!(x,ns,b) +end diff --git a/src/LinearSolvers/LinearSolvers.jl b/src/LinearSolvers/LinearSolvers.jl index e5590b2d..33342a21 100644 --- a/src/LinearSolvers/LinearSolvers.jl +++ b/src/LinearSolvers/LinearSolvers.jl @@ -1,9 +1,11 @@ module LinearSolvers +using Printf using LinearAlgebra using Gridap using Gridap.Algebra using PartitionedArrays +using GridapP4est using GridapSolvers.MultilevelTools @@ -11,9 +13,10 @@ import LinearAlgebra: mul!, ldiv! export JacobiLinearSolver export RichardsonSmoother +export GMGLinearSolver include("JacobiLinearSolvers.jl") include("RichardsonSmoothers.jl") -#include("GMGLinearSolvers.jl") +include("GMGLinearSolvers.jl") end \ No newline at end of file diff --git a/src/MultilevelTools/DistributedGridTransferOperators.jl b/src/MultilevelTools/DistributedGridTransferOperators.jl index b5a2b5fa..2e76151e 100644 --- a/src/MultilevelTools/DistributedGridTransferOperators.jl +++ b/src/MultilevelTools/DistributedGridTransferOperators.jl @@ -46,11 +46,11 @@ function _get_prolongation_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int) model_h = get_model_before_redist(mh,lev) Uh = get_fe_space_before_redist(sh,lev) fv_h = PVector(0.0,Uh.gids) - dv_h = get_dirichlet_dof_values(Uh) # Should this be zeros? + dv_h = zero_dirichlet_values(Uh)#get_dirichlet_dof_values(Uh) UH = get_fe_space(sh,lev+1) fv_H = PVector(0.0,UH.gids) - dv_H = get_dirichlet_dof_values(UH) + dv_H = zero_dirichlet_values(UH)#get_dirichlet_dof_values(UH) cache_refine = model_h, Uh, fv_h, dv_h, UH, fv_H, dv_H else @@ -71,7 +71,7 @@ function _get_restriction_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int) Uh = get_fe_space_before_redist(sh,lev) Ωh = get_triangulation(Uh,get_model_before_redist(mh,lev)) fv_h = PVector(0.0,Uh.gids) - dv_h = get_dirichlet_dof_values(Uh) # Should this be zeros? 
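+      # Residual-type transfers need homogeneous (zero) Dirichlet values, hence
+      # the zeroing below; PATCH 40 further down makes this choice configurable
+      # through a `mode` kwarg.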
+ dv_h = zero_dirichlet_values(Uh)#get_dirichlet_dof_values(Uh) UH = get_fe_space(sh,lev+1) VH = get_test_space(UH) @@ -101,7 +101,7 @@ function _get_redistribution_cache(lev::Int,sh::FESpaceHierarchy) Uh_red = get_fe_space(sh,lev) model_h_red = get_model(mh,lev) fv_h_red = PVector(0.0,Uh_red.gids) - dv_h_red = get_dirichlet_dof_values(Uh_red) + dv_h_red = zero_dirichlet_values(Uh_red)#get_dirichlet_dof_values(Uh_red) glue = mh.levels[lev].red_glue cache_redist = fv_h_red, dv_h_red, Uh_red, model_h_red, glue diff --git a/test/mpi/DistributedGridTransferOperatorsTests.jl b/test/mpi/DistributedGridTransferOperatorsTests.jl index 7bb12de5..c06a82bf 100644 --- a/test/mpi/DistributedGridTransferOperatorsTests.jl +++ b/test/mpi/DistributedGridTransferOperatorsTests.jl @@ -26,7 +26,6 @@ function run(parts,num_parts_x_level,num_trees,num_refs_coarse) coarse_model = OctreeDistributedDiscreteModel(level_parts[num_levels],cmodel,num_refs_coarse) mh = ModelHierarchy(coarse_model,level_parts) - old_parts = level_parts[2] new_parts = level_parts[1] diff --git a/test/mpi/GMGLinearSolversLaplacianTests.jl b/test/mpi/GMGLinearSolversLaplacianTests.jl new file mode 100644 index 00000000..01113676 --- /dev/null +++ b/test/mpi/GMGLinearSolversLaplacianTests.jl @@ -0,0 +1,142 @@ +module GMGLinearSolverLaplacianTests +using MPI +using Test +using LinearAlgebra +using IterativeSolvers +using FillArrays + +using Gridap +using Gridap.ReferenceFEs +using PartitionedArrays +using GridapDistributed +using GridapP4est + +using GridapSolvers +using GridapSolvers.LinearSolvers + + +u(x) = x[1] + x[2] +f(x) = -Δ(u)(x) + +function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, order, α) + domain = (0,1,0,1) + num_levels = length(num_parts_x_level) + level_parts = generate_level_parts(parts,num_parts_x_level) + cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) + coarse_model = OctreeDistributedDiscreteModel(level_parts[num_levels],cmodel,num_refs_coarse) + mh = ModelHierarchy(coarse_model,level_parts) + + qdegree = 2*(order+1) + reffe = ReferenceFE(lagrangian,Float64,order) + tests = TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") + trials = TrialFESpace(tests,u) + + biform(u,v,dΩ) = ∫(v*u)dΩ + ∫(α*∇(v)⋅∇(u))dΩ + liform(v,dΩ) = ∫(v*f)dΩ + + # Finest level problem + model = get_model(mh,1) + Ω = Triangulation(model) + dΩ = Measure(Ω,qdegree) + Vh = get_fe_space(tests,1) + Uh = get_fe_space(trials,1) + + a(u,v) = biform(u,v,dΩ) + l(v) = liform(v,dΩ) + op = AffineFEOperator(a,l,Uh,Vh) + A, b = get_matrix(op), get_vector(op) + + # Preconditioner + smatrices = compute_hierarchy_matrices(trials,biform,qdegree) + smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0),num_levels-1) + restrictions, prolongations = setup_transfer_operators(trials, qdegree) + + gmg = GMGLinearSolver(mh, + smatrices, + prolongations, + restrictions, + pre_smoothers=smoothers, + post_smoothers=smoothers, + maxiter=1, + rtol=1.0e-06, + verbose=false, + mode=:preconditioner) + ss = symbolic_setup(gmg,A) + ns = numerical_setup(ss,A) + + # Solve + x = PVector(0.0,A.cols) + x, history = IterativeSolvers.cg!(x,A,b; + verbose=GridapP4est.i_am_main(parts), + reltol=1.0e-06, + Pl=ns, + log=true) + + # Error norms and print solution + uh = FEFunction(Uh,x) + e = u-uh + e_l2 = sum(∫(e*e)dΩ) + tol = 1.0e-9 + #@test e_l2 < tol + if GridapP4est.i_am_main(parts) + println("L2 error = ", e_l2) + end + + history.iters,num_free_dofs(Vh) +end + +############################################## + +if 
!MPI.Initialized() + MPI.Init() +end + +# Parameters +order = 1 +coarse_grid_partition = (2,2) +num_refs_coarse = 2 + +α = 1.0 +num_parts_x_level = [4,2,1] +ranks = num_parts_x_level[1] +num_iters, num_free_dofs2 = prun(main,mpi,ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order,α) + +""" + +num_refinements = [1,2,3,4] +alpha_exps = [0,1,2,3] +nr = length(num_refinements) +na = length(alpha_exps) + +# Do experiments +iter_matrix = zeros(Int,nr,na) +free_dofs = Vector{Int64}(undef,nr) +for ref = 1:nr + num_parts_x_level = [1 for i=1:num_refinements[ref]+1] + for alpha_exp = 1:na + α = 10.0^alpha_exps[alpha_exp] + + num_iters, num_free_dofs2 = prun(main,mpi,ranks,coarse_grid_partition,num_parts_x_level,order,α) + free_dofs[ref] = num_free_dofs2 + iter_matrix[ref,alpha_exp] = num_iters + end +end + +# Display results +if GridapP4est.i_am_main(parts) + println("> α = ", map(exp->10.0^exp,alpha_exp)) +end + +for ref = 1:nr + if GridapP4est.i_am_main(parts) + println("> Num Refinements: ", num_refinements[ref]) + println(" > Num free dofs : ", free_dofs[ref]) + println(" > Num Refinements : ", num_refinements[ref]) + println(" > Num Iters (per alpha) : ", iter_matrix[ref,:]) + end +end +""" + + +MPI.Finalize() +end diff --git a/test/runtests.jl b/test/runtests.jl index 83a8ff99..70f4998c 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -31,7 +31,8 @@ function run_tests(testdir) if f in ["DistributedGridTransferOperatorsTests.jl", "RedistributeToolsTests.jl", "RefinementToolsTests", - "RichardsonSmoothersTests"] + "RichardsonSmoothersTests", + "GMGLinearSolversLaplacianTests.jl"] np = 4 extra_args = "-s 2 2 -r 2" elseif f in ["ModelHierarchiesTests.jl"] @@ -56,7 +57,7 @@ function run_tests(testdir) end end -run(`mpiexec -n 4 julia --project=. mpi/RichardsonSmoothersTests.jl`) +run(`mpiexec -n 4 julia --project=. mpi/GMGLinearSolversLaplacianTests.jl`) #run_tests(@__DIR__) #run_tests(joinpath(@__DIR__, "mpi")) From fc55b1b13d28fa81476be0aaf07efdb0f013224f Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 14 Dec 2022 16:46:45 +0100 Subject: [PATCH 40/95] Added solution/residual mode for TransferOperators --- .../DistributedGridTransferOperators.jl | 38 +++++++++++-------- test/mpi/GMGLinearSolversLaplacianTests.jl | 6 +-- 2 files changed, 25 insertions(+), 19 deletions(-) diff --git a/src/MultilevelTools/DistributedGridTransferOperators.jl b/src/MultilevelTools/DistributedGridTransferOperators.jl index 2e76151e..1eb11911 100644 --- a/src/MultilevelTools/DistributedGridTransferOperators.jl +++ b/src/MultilevelTools/DistributedGridTransferOperators.jl @@ -15,30 +15,36 @@ end ### Constructors -RestrictionOperator(lev::Int,sh::FESpaceHierarchy,qdegree::Int) = DistributedGridTransferOperator(lev,sh,qdegree,:restriction) -ProlongationOperator(lev::Int,sh::FESpaceHierarchy,qdegree::Int) = DistributedGridTransferOperator(lev,sh,qdegree,:prolongation) +function RestrictionOperator(lev::Int,sh::FESpaceHierarchy,qdegree::Int;kwargs...) + return DistributedGridTransferOperator(lev,sh,qdegree,:restriction;kwargs...) +end + +function ProlongationOperator(lev::Int,sh::FESpaceHierarchy,qdegree::Int;kwargs...) + return DistributedGridTransferOperator(lev,sh,qdegree,:prolongation;kwargs...) 
+end -function DistributedGridTransferOperator(lev::Int,sh::FESpaceHierarchy,qdegree::Int,op_type::Symbol) +function DistributedGridTransferOperator(lev::Int,sh::FESpaceHierarchy,qdegree::Int,op_type::Symbol;mode=:solution) mh = sh.mh @check lev < num_levels(mh) @check op_type ∈ [:restriction, :prolongation] + @check mode ∈ [:solution, :residual] # Refinement if (op_type == :restriction) - cache_refine = _get_restriction_cache(lev,sh,qdegree) + cache_refine = _get_restriction_cache(lev,sh,qdegree,mode) else - cache_refine = _get_prolongation_cache(lev,sh,qdegree) + cache_refine = _get_prolongation_cache(lev,sh,qdegree,mode) end # Redistribution redist = has_redistribution(mh,lev) - cache_redist = _get_redistribution_cache(lev,sh) + cache_redist = _get_redistribution_cache(lev,sh,mode) cache = cache_refine, cache_redist return DistributedGridTransferOperator(op_type,redist,sh,cache) end -function _get_prolongation_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int) +function _get_prolongation_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mode) mh = sh.mh cparts = get_level_parts(mh,lev+1) @@ -46,11 +52,11 @@ function _get_prolongation_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int) model_h = get_model_before_redist(mh,lev) Uh = get_fe_space_before_redist(sh,lev) fv_h = PVector(0.0,Uh.gids) - dv_h = zero_dirichlet_values(Uh)#get_dirichlet_dof_values(Uh) + dv_h = (mode == :solution) ? get_dirichlet_dof_values(Uh) : zero_dirichlet_values(Uh) UH = get_fe_space(sh,lev+1) fv_H = PVector(0.0,UH.gids) - dv_H = zero_dirichlet_values(UH)#get_dirichlet_dof_values(UH) + dv_H = (mode == :solution) ? get_dirichlet_dof_values(UH) : zero_dirichlet_values(UH) cache_refine = model_h, Uh, fv_h, dv_h, UH, fv_H, dv_H else @@ -62,7 +68,7 @@ function _get_prolongation_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int) return cache_refine end -function _get_restriction_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int) +function _get_restriction_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mode) mh = sh.mh cparts = get_level_parts(mh,lev+1) @@ -71,7 +77,7 @@ function _get_restriction_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int) Uh = get_fe_space_before_redist(sh,lev) Ωh = get_triangulation(Uh,get_model_before_redist(mh,lev)) fv_h = PVector(0.0,Uh.gids) - dv_h = zero_dirichlet_values(Uh)#get_dirichlet_dof_values(Uh) + dv_h = (mode == :solution) ? get_dirichlet_dof_values(Uh) : zero_dirichlet_values(Uh) UH = get_fe_space(sh,lev+1) VH = get_test_space(UH) @@ -94,14 +100,14 @@ function _get_restriction_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int) return cache_refine end -function _get_redistribution_cache(lev::Int,sh::FESpaceHierarchy) +function _get_redistribution_cache(lev::Int,sh::FESpaceHierarchy,mode) mh = sh.mh redist = has_redistribution(mh,lev) if redist Uh_red = get_fe_space(sh,lev) model_h_red = get_model(mh,lev) fv_h_red = PVector(0.0,Uh_red.gids) - dv_h_red = zero_dirichlet_values(Uh_red)#get_dirichlet_dof_values(Uh_red) + dv_h_red = (mode == :solution) ? get_dirichlet_dof_values(Uh_red) : zero_dirichlet_values(Uh_red) glue = mh.levels[lev].red_glue cache_redist = fv_h_red, dv_h_red, Uh_red, model_h_red, glue @@ -111,15 +117,15 @@ function _get_redistribution_cache(lev::Int,sh::FESpaceHierarchy) return cache_redist end -function setup_transfer_operators(sh::FESpaceHierarchy, qdegree::Int) +function setup_transfer_operators(sh::FESpaceHierarchy,qdegree::Int;kwargs...) 
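+  # Builds one restriction and one prolongation operator per coarsening step.
+  # Keyword arguments (e.g. `mode=:solution` or `mode=:residual`) are forwarded
+  # to every operator.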
mh = sh.mh restrictions = Vector{DistributedGridTransferOperator}(undef,num_levels(sh)-1) prolongations = Vector{DistributedGridTransferOperator}(undef,num_levels(sh)-1) for lev in 1:num_levels(sh)-1 parts = get_level_parts(mh,lev) if GridapP4est.i_am_in(parts) - restrictions[lev] = RestrictionOperator(lev,sh,qdegree) - prolongations[lev] = ProlongationOperator(lev,sh,qdegree) + restrictions[lev] = RestrictionOperator(lev,sh,qdegree;kwargs...) + prolongations[lev] = ProlongationOperator(lev,sh,qdegree;kwargs...) end end return restrictions, prolongations diff --git a/test/mpi/GMGLinearSolversLaplacianTests.jl b/test/mpi/GMGLinearSolversLaplacianTests.jl index 01113676..07914691 100644 --- a/test/mpi/GMGLinearSolversLaplacianTests.jl +++ b/test/mpi/GMGLinearSolversLaplacianTests.jl @@ -49,7 +49,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, # Preconditioner smatrices = compute_hierarchy_matrices(trials,biform,qdegree) smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0),num_levels-1) - restrictions, prolongations = setup_transfer_operators(trials, qdegree) + restrictions, prolongations = setup_transfer_operators(trials,qdegree;mode=:residual) gmg = GMGLinearSolver(mh, smatrices, @@ -58,7 +58,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, pre_smoothers=smoothers, post_smoothers=smoothers, maxiter=1, - rtol=1.0e-06, + rtol=1.0e-10, verbose=false, mode=:preconditioner) ss = symbolic_setup(gmg,A) @@ -68,7 +68,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, x = PVector(0.0,A.cols) x, history = IterativeSolvers.cg!(x,A,b; verbose=GridapP4est.i_am_main(parts), - reltol=1.0e-06, + reltol=1.0e-10, Pl=ns, log=true) From 0bdc89d9d6662985405458b5a0de0cca857c94df Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 14 Dec 2022 17:33:28 +0100 Subject: [PATCH 41/95] Added Poisson test --- test/mpi/GMGLinearSolversLaplacianTests.jl | 6 +- test/mpi/GMGLinearSolversPoissonTests.jl | 103 +++++++++++++++++++++ 2 files changed, 106 insertions(+), 3 deletions(-) create mode 100644 test/mpi/GMGLinearSolversPoissonTests.jl diff --git a/test/mpi/GMGLinearSolversLaplacianTests.jl b/test/mpi/GMGLinearSolversLaplacianTests.jl index 07914691..c86205f4 100644 --- a/test/mpi/GMGLinearSolversLaplacianTests.jl +++ b/test/mpi/GMGLinearSolversLaplacianTests.jl @@ -68,7 +68,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, x = PVector(0.0,A.cols) x, history = IterativeSolvers.cg!(x,A,b; verbose=GridapP4est.i_am_main(parts), - reltol=1.0e-10, + reltol=1.0e-12, Pl=ns, log=true) @@ -82,7 +82,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, println("L2 error = ", e_l2) end - history.iters,num_free_dofs(Vh) + return history.iters, num_free_dofs(Vh) end ############################################## @@ -92,7 +92,7 @@ if !MPI.Initialized() end # Parameters -order = 1 +order = 2 coarse_grid_partition = (2,2) num_refs_coarse = 2 diff --git a/test/mpi/GMGLinearSolversPoissonTests.jl b/test/mpi/GMGLinearSolversPoissonTests.jl new file mode 100644 index 00000000..18fad85d --- /dev/null +++ b/test/mpi/GMGLinearSolversPoissonTests.jl @@ -0,0 +1,103 @@ +module GMGLinearSolverPoissonTests +using MPI +using Test +using LinearAlgebra +using IterativeSolvers +using FillArrays + +using Gridap +using Gridap.ReferenceFEs +using PartitionedArrays +using GridapDistributed +using GridapP4est + +using GridapSolvers +using GridapSolvers.LinearSolvers + + 
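+# Manufactured solution: u is linear, so f = -Δ(u) ≡ 0 and the exact solution
+# enters the discrete problem only through the Dirichlet boundary data.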
+u(x) = x[1] + x[2] +f(x) = -Δ(u)(x) + +function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, order) + domain = (0,1,0,1) + num_levels = length(num_parts_x_level) + level_parts = generate_level_parts(parts,num_parts_x_level) + cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) + coarse_model = OctreeDistributedDiscreteModel(level_parts[num_levels],cmodel,num_refs_coarse) + mh = ModelHierarchy(coarse_model,level_parts) + + qdegree = 2*(order+1) + reffe = ReferenceFE(lagrangian,Float64,order) + tests = TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") + trials = TrialFESpace(tests,u) + + biform(u,v,dΩ) = ∫(∇(v)⋅∇(u))dΩ + liform(v,dΩ) = ∫(v*f)dΩ + + # Finest level problem + model = get_model(mh,1) + Ω = Triangulation(model) + dΩ = Measure(Ω,qdegree) + Vh = get_fe_space(tests,1) + Uh = get_fe_space(trials,1) + + a(u,v) = biform(u,v,dΩ) + l(v) = liform(v,dΩ) + op = AffineFEOperator(a,l,Uh,Vh) + A, b = get_matrix(op), get_vector(op) + + # Preconditioner + smatrices = compute_hierarchy_matrices(trials,biform,qdegree) + smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0),num_levels-1) + restrictions, prolongations = setup_transfer_operators(trials,qdegree;mode=:residual) + + gmg = GMGLinearSolver(mh, + smatrices, + prolongations, + restrictions, + pre_smoothers=smoothers, + post_smoothers=smoothers, + maxiter=1, + rtol=1.0e-10, + verbose=false, + mode=:preconditioner) + ss = symbolic_setup(gmg,A) + ns = numerical_setup(ss,A) + + # Solve + x = PVector(0.0,A.cols) + x, history = IterativeSolvers.cg!(x,A,b; + verbose=GridapP4est.i_am_main(parts), + reltol=1.0e-12, + Pl=ns, + log=true) + + # Error norms and print solution + uh = FEFunction(Uh,x) + e = u-uh + e_l2 = sum(∫(e*e)dΩ) + tol = 1.0e-9 + @test e_l2 < tol + if GridapP4est.i_am_main(parts) + println("L2 error = ", e_l2) + end +end + +############################################## + +if !MPI.Initialized() + MPI.Init() +end + +# Parameters +order = 2 +coarse_grid_partition = (2,2) +num_refs_coarse = 2 + +num_parts_x_level = [4,2,1] +ranks = num_parts_x_level[1] +prun(main,mpi,ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order) + + +MPI.Finalize() +end From 61698603a73c165b5111e21e282031d3e312ebeb Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 14 Dec 2022 17:43:03 +0100 Subject: [PATCH 42/95] Added VectorLaplacian Tests --- .../GMGLinearSolversVectorLaplacianTests.jl | 142 ++++++++++++++++++ test/runtests.jl | 6 +- 2 files changed, 146 insertions(+), 2 deletions(-) create mode 100644 test/mpi/GMGLinearSolversVectorLaplacianTests.jl diff --git a/test/mpi/GMGLinearSolversVectorLaplacianTests.jl b/test/mpi/GMGLinearSolversVectorLaplacianTests.jl new file mode 100644 index 00000000..8f31deff --- /dev/null +++ b/test/mpi/GMGLinearSolversVectorLaplacianTests.jl @@ -0,0 +1,142 @@ +module GMGLinearSolverVectorLaplacianTests +using MPI +using Test +using LinearAlgebra +using IterativeSolvers +using FillArrays + +using Gridap +using Gridap.ReferenceFEs +using PartitionedArrays +using GridapDistributed +using GridapP4est + +using GridapSolvers +using GridapSolvers.LinearSolvers + + +u(x) = VectorValue(x[1],x[2]) +f(x) = VectorValue(2.0*x[2]*(1.0-x[1]*x[1]),2.0*x[1]*(1-x[2]*x[2])) + +function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, order, α) + domain = (0,1,0,1) + num_levels = length(num_parts_x_level) + level_parts = generate_level_parts(parts,num_parts_x_level) + cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) + coarse_model 
= OctreeDistributedDiscreteModel(level_parts[num_levels],cmodel,num_refs_coarse) + mh = ModelHierarchy(coarse_model,level_parts) + + qdegree = 2*(order+1) + reffe = ReferenceFE(lagrangian,VectorValue{2,Float64},order) + tests = TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") + trials = TrialFESpace(tests,u) + + biform(u,v,dΩ) = ∫(v⋅u)dΩ + ∫(α*∇(v)⊙∇(u))dΩ + liform(v,dΩ) = ∫(v⋅f)dΩ + + # Finest level problem + model = get_model(mh,1) + Ω = Triangulation(model) + dΩ = Measure(Ω,qdegree) + Vh = get_fe_space(tests,1) + Uh = get_fe_space(trials,1) + + a(u,v) = biform(u,v,dΩ) + l(v) = liform(v,dΩ) + op = AffineFEOperator(a,l,Uh,Vh) + A, b = get_matrix(op), get_vector(op) + + # Preconditioner + smatrices = compute_hierarchy_matrices(trials,biform,qdegree) + smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0),num_levels-1) + restrictions, prolongations = setup_transfer_operators(trials,qdegree;mode=:residual) + + gmg = GMGLinearSolver(mh, + smatrices, + prolongations, + restrictions, + pre_smoothers=smoothers, + post_smoothers=smoothers, + maxiter=1, + rtol=1.0e-10, + verbose=false, + mode=:preconditioner) + ss = symbolic_setup(gmg,A) + ns = numerical_setup(ss,A) + + # Solve + x = PVector(0.0,A.cols) + x, history = IterativeSolvers.cg!(x,A,b; + verbose=GridapP4est.i_am_main(parts), + reltol=1.0e-12, + Pl=ns, + log=true) + + # Error norms and print solution + uh = FEFunction(Uh,x) + e = u-uh + e_l2 = sum(∫(e⋅e)dΩ) + tol = 1.0e-9 + #@test e_l2 < tol + if GridapP4est.i_am_main(parts) + println("L2 error = ", e_l2) + end + + return history.iters, num_free_dofs(Vh) +end + +############################################## + +if !MPI.Initialized() + MPI.Init() +end + +# Parameters +order = 2 +coarse_grid_partition = (2,2) +num_refs_coarse = 2 + +α = 1.0 +num_parts_x_level = [4,2,1] +ranks = num_parts_x_level[1] +num_iters, num_free_dofs2 = prun(main,mpi,ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order,α) + +""" + +num_refinements = [1,2,3,4] +alpha_exps = [0,1,2,3] +nr = length(num_refinements) +na = length(alpha_exps) + +# Do experiments +iter_matrix = zeros(Int,nr,na) +free_dofs = Vector{Int64}(undef,nr) +for ref = 1:nr + num_parts_x_level = [1 for i=1:num_refinements[ref]+1] + for alpha_exp = 1:na + α = 10.0^alpha_exps[alpha_exp] + + num_iters, num_free_dofs2 = prun(main,mpi,ranks,coarse_grid_partition,num_parts_x_level,order,α) + free_dofs[ref] = num_free_dofs2 + iter_matrix[ref,alpha_exp] = num_iters + end +end + +# Display results +if GridapP4est.i_am_main(parts) + println("> α = ", map(exp->10.0^exp,alpha_exp)) +end + +for ref = 1:nr + if GridapP4est.i_am_main(parts) + println("> Num Refinements: ", num_refinements[ref]) + println(" > Num free dofs : ", free_dofs[ref]) + println(" > Num Refinements : ", num_refinements[ref]) + println(" > Num Iters (per alpha) : ", iter_matrix[ref,:]) + end +end +""" + + +MPI.Finalize() +end diff --git a/test/runtests.jl b/test/runtests.jl index 70f4998c..dd0121e6 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -32,7 +32,9 @@ function run_tests(testdir) "RedistributeToolsTests.jl", "RefinementToolsTests", "RichardsonSmoothersTests", - "GMGLinearSolversLaplacianTests.jl"] + "GMGLinearSolversPoissonTests.jl", + "GMGLinearSolversLaplacianTests.jl", + "GMGLinearSolversVectorLaplacianTests.jl"] np = 4 extra_args = "-s 2 2 -r 2" elseif f in ["ModelHierarchiesTests.jl"] @@ -57,7 +59,7 @@ function run_tests(testdir) end end -run(`mpiexec -n 4 julia --project=. 
mpi/GMGLinearSolversLaplacianTests.jl`) +run(`mpiexec -n 4 julia --project=. mpi/GMGLinearSolversVectorLaplacianTests.jl`) #run_tests(@__DIR__) #run_tests(joinpath(@__DIR__, "mpi")) From 48e77f0b490e681ca3b7fdcc7aec3042db3e31d8 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 14 Dec 2022 17:56:54 +0100 Subject: [PATCH 43/95] Added HDiv-RT tests, do not work (missing Patch-based smoothers) --- test/mpi/GMGLinearSolversHDivRTTests.jl | 142 ++++++++++++++++++++++++ 1 file changed, 142 insertions(+) create mode 100644 test/mpi/GMGLinearSolversHDivRTTests.jl diff --git a/test/mpi/GMGLinearSolversHDivRTTests.jl b/test/mpi/GMGLinearSolversHDivRTTests.jl new file mode 100644 index 00000000..49c22c9e --- /dev/null +++ b/test/mpi/GMGLinearSolversHDivRTTests.jl @@ -0,0 +1,142 @@ +module GMGLinearSolverLaplacianTests +using MPI +using Test +using LinearAlgebra +using IterativeSolvers +using FillArrays + +using Gridap +using Gridap.ReferenceFEs +using PartitionedArrays +using GridapDistributed +using GridapP4est + +using GridapSolvers +using GridapSolvers.LinearSolvers + + +u(x) = VectorValue(x[1],x[2]) +f(x) = VectorValue(2.0*x[2]*(1.0-x[1]*x[1]),2.0*x[1]*(1-x[2]*x[2])) + +function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, order, α) + domain = (0,1,0,1) + num_levels = length(num_parts_x_level) + level_parts = generate_level_parts(parts,num_parts_x_level) + cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) + coarse_model = OctreeDistributedDiscreteModel(level_parts[num_levels],cmodel,num_refs_coarse) + mh = ModelHierarchy(coarse_model,level_parts) + + qdegree = 2*(order+1) + reffe = ReferenceFE(raviart_thomas,Float64,order) + tests = TestFESpace(mh,reffe,dirichlet_tags="boundary") + trials = TrialFESpace(tests,u) + + biform(u,v,dΩ) = ∫(v⋅u)dΩ + ∫(α*divergence(v)⋅divergence(u))dΩ + liform(v,dΩ) = ∫(v⋅f)dΩ + + # Finest level problem + model = get_model(mh,1) + Ω = Triangulation(model) + dΩ = Measure(Ω,qdegree) + Vh = get_fe_space(tests,1) + Uh = get_fe_space(trials,1) + + a(u,v) = biform(u,v,dΩ) + l(v) = liform(v,dΩ) + op = AffineFEOperator(a,l,Uh,Vh) + A, b = get_matrix(op), get_vector(op) + + # Preconditioner + smatrices = compute_hierarchy_matrices(trials,biform,qdegree) + smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0),num_levels-1) + restrictions, prolongations = setup_transfer_operators(trials,qdegree;mode=:residual) + + gmg = GMGLinearSolver(mh, + smatrices, + prolongations, + restrictions, + pre_smoothers=smoothers, + post_smoothers=smoothers, + maxiter=1, + rtol=1.0e-10, + verbose=false, + mode=:preconditioner) + ss = symbolic_setup(gmg,A) + ns = numerical_setup(ss,A) + + # Solve + x = PVector(0.0,A.cols) + x, history = IterativeSolvers.cg!(x,A,b; + verbose=GridapP4est.i_am_main(parts), + reltol=1.0e-12, + Pl=ns, + log=true) + + # Error norms and print solution + uh = FEFunction(Uh,x) + e = u-uh + e_l2 = sum(∫(e⋅e)dΩ) + tol = 1.0e-9 + #@test e_l2 < tol + if GridapP4est.i_am_main(parts) + println("L2 error = ", e_l2) + end + + return history.iters, num_free_dofs(Vh) +end + +############################################## + +if !MPI.Initialized() + MPI.Init() +end + +# Parameters +order = 2 +coarse_grid_partition = (2,2) +num_refs_coarse = 2 + +α = 1.0 +num_parts_x_level = [4,2,1] +ranks = num_parts_x_level[1] +num_iters, num_free_dofs2 = prun(main,mpi,ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order,α) + +""" + +num_refinements = [1,2,3,4] +alpha_exps = [0,1,2,3] +nr = length(num_refinements) +na = 
length(alpha_exps) + +# Do experiments +iter_matrix = zeros(Int,nr,na) +free_dofs = Vector{Int64}(undef,nr) +for ref = 1:nr + num_parts_x_level = [1 for i=1:num_refinements[ref]+1] + for alpha_exp = 1:na + α = 10.0^alpha_exps[alpha_exp] + + num_iters, num_free_dofs2 = prun(main,mpi,ranks,coarse_grid_partition,num_parts_x_level,order,α) + free_dofs[ref] = num_free_dofs2 + iter_matrix[ref,alpha_exp] = num_iters + end +end + +# Display results +if GridapP4est.i_am_main(parts) + println("> α = ", map(exp->10.0^exp,alpha_exp)) +end + +for ref = 1:nr + if GridapP4est.i_am_main(parts) + println("> Num Refinements: ", num_refinements[ref]) + println(" > Num free dofs : ", free_dofs[ref]) + println(" > Num Refinements : ", num_refinements[ref]) + println(" > Num Iters (per alpha) : ", iter_matrix[ref,:]) + end +end +""" + + +MPI.Finalize() +end From 3cc0e9b5cdd9fbb751ed10b076a17e66bbdd1e3f Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 15 Dec 2022 10:50:59 +0100 Subject: [PATCH 44/95] Minor convenience change --- src/MultilevelTools/FESpaceHierarchies.jl | 16 +++++++++++---- .../DistributedGridTransferOperatorsTests.jl | 3 ++- test/mpi/GMGLinearSolversHDivRTTests.jl | 20 ++++--------------- test/mpi/GMGLinearSolversLaplacianTests.jl | 18 +++-------------- test/mpi/GMGLinearSolversPoissonTests.jl | 18 +++-------------- .../GMGLinearSolversVectorLaplacianTests.jl | 18 +++-------------- test/runtests.jl | 9 ++++----- 7 files changed, 31 insertions(+), 71 deletions(-) diff --git a/src/MultilevelTools/FESpaceHierarchies.jl b/src/MultilevelTools/FESpaceHierarchies.jl index d81b403d..15395b01 100644 --- a/src/MultilevelTools/FESpaceHierarchies.jl +++ b/src/MultilevelTools/FESpaceHierarchies.jl @@ -102,10 +102,12 @@ function Gridap.FESpaces.TrialFESpace(a::FESpaceHierarchy) FESpaceHierarchy(a.mh,trial_spaces) end -function compute_hierarchy_matrices(trials::FESpaceHierarchy,a::Function,qdegree::Int) +function compute_hierarchy_matrices(trials::FESpaceHierarchy,a::Function,l::Function,qdegree::Int) nlevs = num_levels(trials) mh = trials.mh + A = nothing + b = nothing mats = Vector{PSparseMatrix}(undef,nlevs) for lev in 1:nlevs parts = get_level_parts(mh,lev) @@ -116,9 +118,15 @@ function compute_hierarchy_matrices(trials::FESpaceHierarchy,a::Function,qdegree Ω = Triangulation(model) dΩ = Measure(Ω,qdegree) ai(u,v) = a(u,v,dΩ) - A = assemble_matrix(ai,U,V) - mats[lev] = A + if lev == 1 + li(v) = l(v,dΩ) + op = AffineFEOperator(ai,li,U,V) + A, b = get_matrix(op), get_vector(op) + mats[lev] = A + else + mats[lev] = assemble_matrix(ai,U,V) + end end end - return mats + return mats, A, b end diff --git a/test/mpi/DistributedGridTransferOperatorsTests.jl b/test/mpi/DistributedGridTransferOperatorsTests.jl index c06a82bf..95199672 100644 --- a/test/mpi/DistributedGridTransferOperatorsTests.jl +++ b/test/mpi/DistributedGridTransferOperatorsTests.jl @@ -42,7 +42,8 @@ function run(parts,num_parts_x_level,num_trees,num_refs_coarse) restrictions, prolongations = ops a(u,v,dΩ) = ∫(∇(v)⋅∇(u))*dΩ - mats = compute_hierarchy_matrices(trials,a,qdegree) + l(v,dΩ) = ∫(v⋅u)*dΩ + mats, A, b = compute_hierarchy_matrices(trials,a,l,qdegree) for lev in 1:num_levels-1 parts_h = get_level_parts(mh,lev) diff --git a/test/mpi/GMGLinearSolversHDivRTTests.jl b/test/mpi/GMGLinearSolversHDivRTTests.jl index 49c22c9e..b26c76f4 100644 --- a/test/mpi/GMGLinearSolversHDivRTTests.jl +++ b/test/mpi/GMGLinearSolversHDivRTTests.jl @@ -31,23 +31,11 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, tests = 
TestFESpace(mh,reffe,dirichlet_tags="boundary") trials = TrialFESpace(tests,u) - biform(u,v,dΩ) = ∫(v⋅u)dΩ + ∫(α*divergence(v)⋅divergence(u))dΩ - liform(v,dΩ) = ∫(v⋅f)dΩ - - # Finest level problem - model = get_model(mh,1) - Ω = Triangulation(model) - dΩ = Measure(Ω,qdegree) - Vh = get_fe_space(tests,1) - Uh = get_fe_space(trials,1) - - a(u,v) = biform(u,v,dΩ) - l(v) = liform(v,dΩ) - op = AffineFEOperator(a,l,Uh,Vh) - A, b = get_matrix(op), get_vector(op) - + biform(u,v,dΩ) = ∫(v⋅u)dΩ + ∫(α*divergence(v)⋅divergence(u))dΩ + liform(v,dΩ) = ∫(v⋅f)dΩ + smatrices, A, b = compute_hierarchy_matrices(trials,biform,liform,qdegree) + # Preconditioner - smatrices = compute_hierarchy_matrices(trials,biform,qdegree) smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0),num_levels-1) restrictions, prolongations = setup_transfer_operators(trials,qdegree;mode=:residual) diff --git a/test/mpi/GMGLinearSolversLaplacianTests.jl b/test/mpi/GMGLinearSolversLaplacianTests.jl index c86205f4..1312225a 100644 --- a/test/mpi/GMGLinearSolversLaplacianTests.jl +++ b/test/mpi/GMGLinearSolversLaplacianTests.jl @@ -31,23 +31,11 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, tests = TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") trials = TrialFESpace(tests,u) - biform(u,v,dΩ) = ∫(v*u)dΩ + ∫(α*∇(v)⋅∇(u))dΩ - liform(v,dΩ) = ∫(v*f)dΩ - - # Finest level problem - model = get_model(mh,1) - Ω = Triangulation(model) - dΩ = Measure(Ω,qdegree) - Vh = get_fe_space(tests,1) - Uh = get_fe_space(trials,1) - - a(u,v) = biform(u,v,dΩ) - l(v) = liform(v,dΩ) - op = AffineFEOperator(a,l,Uh,Vh) - A, b = get_matrix(op), get_vector(op) + biform(u,v,dΩ) = ∫(v*u)dΩ + ∫(α*∇(v)⋅∇(u))dΩ + liform(v,dΩ) = ∫(v*f)dΩ + smatrices, A, b = compute_hierarchy_matrices(trials,biform,liform,qdegree) # Preconditioner - smatrices = compute_hierarchy_matrices(trials,biform,qdegree) smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0),num_levels-1) restrictions, prolongations = setup_transfer_operators(trials,qdegree;mode=:residual) diff --git a/test/mpi/GMGLinearSolversPoissonTests.jl b/test/mpi/GMGLinearSolversPoissonTests.jl index 18fad85d..4a9f8780 100644 --- a/test/mpi/GMGLinearSolversPoissonTests.jl +++ b/test/mpi/GMGLinearSolversPoissonTests.jl @@ -31,23 +31,11 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, tests = TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") trials = TrialFESpace(tests,u) - biform(u,v,dΩ) = ∫(∇(v)⋅∇(u))dΩ - liform(v,dΩ) = ∫(v*f)dΩ - - # Finest level problem - model = get_model(mh,1) - Ω = Triangulation(model) - dΩ = Measure(Ω,qdegree) - Vh = get_fe_space(tests,1) - Uh = get_fe_space(trials,1) - - a(u,v) = biform(u,v,dΩ) - l(v) = liform(v,dΩ) - op = AffineFEOperator(a,l,Uh,Vh) - A, b = get_matrix(op), get_vector(op) + biform(u,v,dΩ) = ∫(∇(v)⋅∇(u))dΩ + liform(v,dΩ) = ∫(v*f)dΩ + smatrices, A, b = compute_hierarchy_matrices(trials,biform,liform,qdegree) # Preconditioner - smatrices = compute_hierarchy_matrices(trials,biform,qdegree) smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0),num_levels-1) restrictions, prolongations = setup_transfer_operators(trials,qdegree;mode=:residual) diff --git a/test/mpi/GMGLinearSolversVectorLaplacianTests.jl b/test/mpi/GMGLinearSolversVectorLaplacianTests.jl index 8f31deff..9f42a1fc 100644 --- a/test/mpi/GMGLinearSolversVectorLaplacianTests.jl +++ b/test/mpi/GMGLinearSolversVectorLaplacianTests.jl @@ -31,23 +31,11 @@ function main(parts, 
coarse_grid_partition, num_parts_x_level, num_refs_coarse, tests = TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") trials = TrialFESpace(tests,u) - biform(u,v,dΩ) = ∫(v⋅u)dΩ + ∫(α*∇(v)⊙∇(u))dΩ - liform(v,dΩ) = ∫(v⋅f)dΩ - - # Finest level problem - model = get_model(mh,1) - Ω = Triangulation(model) - dΩ = Measure(Ω,qdegree) - Vh = get_fe_space(tests,1) - Uh = get_fe_space(trials,1) - - a(u,v) = biform(u,v,dΩ) - l(v) = liform(v,dΩ) - op = AffineFEOperator(a,l,Uh,Vh) - A, b = get_matrix(op), get_vector(op) + biform(u,v,dΩ) = ∫(v⋅u)dΩ + ∫(α*∇(v)⊙∇(u))dΩ + liform(v,dΩ) = ∫(v⋅f)dΩ + smatrices, A, b = compute_hierarchy_matrices(trials,biform,liform,qdegree) # Preconditioner - smatrices = compute_hierarchy_matrices(trials,biform,qdegree) smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0),num_levels-1) restrictions, prolongations = setup_transfer_operators(trials,qdegree;mode=:residual) diff --git a/test/runtests.jl b/test/runtests.jl index dd0121e6..a28f3c63 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -34,7 +34,8 @@ function run_tests(testdir) "RichardsonSmoothersTests", "GMGLinearSolversPoissonTests.jl", "GMGLinearSolversLaplacianTests.jl", - "GMGLinearSolversVectorLaplacianTests.jl"] + "GMGLinearSolversVectorLaplacianTests.jl", + "GMGLinearSolversHDivRTTests.jl"] np = 4 extra_args = "-s 2 2 -r 2" elseif f in ["ModelHierarchiesTests.jl"] @@ -59,7 +60,5 @@ function run_tests(testdir) end end -run(`mpiexec -n 4 julia --project=. mpi/GMGLinearSolversVectorLaplacianTests.jl`) - -#run_tests(@__DIR__) -#run_tests(joinpath(@__DIR__, "mpi")) +run_tests(@__DIR__) +run_tests(joinpath(@__DIR__, "mpi")) From 719cd2fee8392234c62ce4fe0e0e578ff5e02831 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 15 Dec 2022 11:00:05 +0100 Subject: [PATCH 45/95] Activated all tests --- test/mpi/GMGLinearSolversHDivRTTests.jl | 12 ++++++++---- test/mpi/GMGLinearSolversLaplacianTests.jl | 12 ++++++++---- test/mpi/GMGLinearSolversPoissonTests.jl | 12 ++++++++---- test/mpi/GMGLinearSolversVectorLaplacianTests.jl | 12 ++++++++---- 4 files changed, 32 insertions(+), 16 deletions(-) diff --git a/test/mpi/GMGLinearSolversHDivRTTests.jl b/test/mpi/GMGLinearSolversHDivRTTests.jl index b26c76f4..6b58cb58 100644 --- a/test/mpi/GMGLinearSolversHDivRTTests.jl +++ b/test/mpi/GMGLinearSolversHDivRTTests.jl @@ -61,10 +61,14 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, log=true) # Error norms and print solution - uh = FEFunction(Uh,x) - e = u-uh - e_l2 = sum(∫(e⋅e)dΩ) - tol = 1.0e-9 + model = get_model(mh,1) + Uh = get_fe_space(trials,1) + Ω = Triangulation(model) + dΩ = Measure(Ω,qdegree) + uh = FEFunction(Uh,x) + e = u-uh + e_l2 = sum(∫(e⋅e)dΩ) + tol = 1.0e-9 #@test e_l2 < tol if GridapP4est.i_am_main(parts) println("L2 error = ", e_l2) diff --git a/test/mpi/GMGLinearSolversLaplacianTests.jl b/test/mpi/GMGLinearSolversLaplacianTests.jl index 1312225a..7bc73ebe 100644 --- a/test/mpi/GMGLinearSolversLaplacianTests.jl +++ b/test/mpi/GMGLinearSolversLaplacianTests.jl @@ -61,10 +61,14 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, log=true) # Error norms and print solution - uh = FEFunction(Uh,x) - e = u-uh - e_l2 = sum(∫(e*e)dΩ) - tol = 1.0e-9 + model = get_model(mh,1) + Uh = get_fe_space(trials,1) + Ω = Triangulation(model) + dΩ = Measure(Ω,qdegree) + uh = FEFunction(Uh,x) + e = u-uh + e_l2 = sum(∫(e⋅e)dΩ) + tol = 1.0e-9 #@test e_l2 < tol if GridapP4est.i_am_main(parts) println("L2 error = ", e_l2) diff --git 
a/test/mpi/GMGLinearSolversPoissonTests.jl b/test/mpi/GMGLinearSolversPoissonTests.jl index 4a9f8780..89347ad4 100644 --- a/test/mpi/GMGLinearSolversPoissonTests.jl +++ b/test/mpi/GMGLinearSolversPoissonTests.jl @@ -61,10 +61,14 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, log=true) # Error norms and print solution - uh = FEFunction(Uh,x) - e = u-uh - e_l2 = sum(∫(e*e)dΩ) - tol = 1.0e-9 + model = get_model(mh,1) + Uh = get_fe_space(trials,1) + Ω = Triangulation(model) + dΩ = Measure(Ω,qdegree) + uh = FEFunction(Uh,x) + e = u-uh + e_l2 = sum(∫(e⋅e)dΩ) + tol = 1.0e-9 @test e_l2 < tol if GridapP4est.i_am_main(parts) println("L2 error = ", e_l2) diff --git a/test/mpi/GMGLinearSolversVectorLaplacianTests.jl b/test/mpi/GMGLinearSolversVectorLaplacianTests.jl index 9f42a1fc..cbddfd80 100644 --- a/test/mpi/GMGLinearSolversVectorLaplacianTests.jl +++ b/test/mpi/GMGLinearSolversVectorLaplacianTests.jl @@ -61,10 +61,14 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, log=true) # Error norms and print solution - uh = FEFunction(Uh,x) - e = u-uh - e_l2 = sum(∫(e⋅e)dΩ) - tol = 1.0e-9 + model = get_model(mh,1) + Uh = get_fe_space(trials,1) + Ω = Triangulation(model) + dΩ = Measure(Ω,qdegree) + uh = FEFunction(Uh,x) + e = u-uh + e_l2 = sum(∫(e⋅e)dΩ) + tol = 1.0e-9 #@test e_l2 < tol if GridapP4est.i_am_main(parts) println("L2 error = ", e_l2) From 4d74d0c307dc4864bdf94bdd90f3c76a9168d3e0 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 16 Dec 2022 11:26:33 +0100 Subject: [PATCH 46/95] We now export GMG solver --- src/GridapSolvers.jl | 3 +++ 1 file changed, 3 insertions(+) diff --git a/src/GridapSolvers.jl b/src/GridapSolvers.jl index 1f882936..4cd7d383 100644 --- a/src/GridapSolvers.jl +++ b/src/GridapSolvers.jl @@ -22,5 +22,8 @@ module GridapSolvers export setup_transfer_operators # LinearSolvers + export JacobiLinearSolver + export RichardsonSmoother + export GMGLinearSolver end From 53862c548964acc28378c5b0a9c0e614f5b1573c Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 16 Dec 2022 12:14:25 +0100 Subject: [PATCH 47/95] bugfix in tests --- test/mpi/GMGLinearSolversHDivRTTests.jl | 2 +- test/mpi/GMGLinearSolversLaplacianTests.jl | 2 +- test/mpi/GMGLinearSolversVectorLaplacianTests.jl | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/test/mpi/GMGLinearSolversHDivRTTests.jl b/test/mpi/GMGLinearSolversHDivRTTests.jl index 6b58cb58..08eba8ea 100644 --- a/test/mpi/GMGLinearSolversHDivRTTests.jl +++ b/test/mpi/GMGLinearSolversHDivRTTests.jl @@ -91,7 +91,7 @@ num_refs_coarse = 2 α = 1.0 num_parts_x_level = [4,2,1] ranks = num_parts_x_level[1] -num_iters, num_free_dofs2 = prun(main,mpi,ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order,α) +#num_iters, num_free_dofs2 = prun(main,mpi,ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order,α) """ diff --git a/test/mpi/GMGLinearSolversLaplacianTests.jl b/test/mpi/GMGLinearSolversLaplacianTests.jl index 7bc73ebe..b6ae704c 100644 --- a/test/mpi/GMGLinearSolversLaplacianTests.jl +++ b/test/mpi/GMGLinearSolversLaplacianTests.jl @@ -74,7 +74,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, println("L2 error = ", e_l2) end - return history.iters, num_free_dofs(Vh) + return history.iters, num_free_dofs(Uh) end ############################################## diff --git a/test/mpi/GMGLinearSolversVectorLaplacianTests.jl b/test/mpi/GMGLinearSolversVectorLaplacianTests.jl index cbddfd80..184fcfd9 100644 --- 
a/test/mpi/GMGLinearSolversVectorLaplacianTests.jl +++ b/test/mpi/GMGLinearSolversVectorLaplacianTests.jl @@ -74,7 +74,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, println("L2 error = ", e_l2) end - return history.iters, num_free_dofs(Vh) + return history.iters, num_free_dofs(Uh) end ############################################## From 9361f9dd2ef3c92ba2a3bdda98bf6225dd4d013c Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Mon, 19 Dec 2022 12:46:35 +0100 Subject: [PATCH 48/95] Added MUMPS as coarsest solver test --- Manifest.toml | 81 +++++++++++++++-- Project.toml | 1 + test/mpi/GMGLinearSolversMUMPSTests.jl | 117 +++++++++++++++++++++++++ 3 files changed, 191 insertions(+), 8 deletions(-) create mode 100644 test/mpi/GMGLinearSolversMUMPSTests.jl diff --git a/Manifest.toml b/Manifest.toml index 05dcd5f2..8ce969f4 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -2,7 +2,7 @@ julia_version = "1.8.1" manifest_format = "2.0" -project_hash = "f0e80b077764dd7f022a6156b1fee02c46c8766c" +project_hash = "dbb1333b0ace488af82ab2035259c592777d3449" [[deps.AbstractFFTs]] deps = ["ChainRulesCore", "LinearAlgebra"] @@ -32,9 +32,9 @@ version = "1.1.1" [[deps.ArrayInterfaceCore]] deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"] -git-tree-sha1 = "c46fb7dd1d8ca1d213ba25848a5ec4e47a1a1b08" +git-tree-sha1 = "badccc4459ffffb6bce5628461119b7057dec32c" uuid = "30b0a656-2188-435a-8636-2ec0e6a096e2" -version = "0.1.26" +version = "0.1.27" [[deps.ArrayLayouts]] deps = ["FillArrays", "LinearAlgebra", "SparseArrays"] @@ -225,12 +225,18 @@ version = "0.2.6" [[deps.GridapP4est]] deps = ["ArgParse", "FillArrays", "Gridap", "GridapDistributed", "IterativeSolvers", "Libdl", "LinearAlgebra", "MPI", "P4est_wrapper", "PartitionedArrays", "Printf", "Test"] -git-tree-sha1 = "d5205a26d9fb40dcad83e98efb0d3c7f993de8cd" +git-tree-sha1 = "f8843e24925bd7ef3cd0f923316b2d806c65b258" repo-rev = "p4est-migration" repo-url = "https://github.com/gridap/GridapP4est.jl.git" uuid = "c2c8e14b-f5fd-423d-9666-1dd9ad120af9" version = "0.1.3" +[[deps.GridapPETSc]] +deps = ["Gridap", "GridapDistributed", "Libdl", "LinearAlgebra", "MPI", "PETSc_jll", "PartitionedArrays", "Random", "SparseArrays", "SparseMatricesCSR"] +git-tree-sha1 = "e49b0ed48134534e8faf1ebfc30a62852cbaa00e" +uuid = "bcdc36c2-0c3e-11ea-095a-c9dadae499f1" +version = "0.4.4" + [[deps.IntelOpenMP_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] git-tree-sha1 = "d979e54b71da82f3a65b62553da4fc3d18c9004c" @@ -276,6 +282,12 @@ git-tree-sha1 = "3c837543ddb02250ef42f4738347454f95079d4e" uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" version = "0.21.3" +[[deps.LLVMOpenMP_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "f689897ccbe049adb19a065c495e75f372ecd42b" +uuid = "1d63c593-3942-5779-bab2-d838dc0a180e" +version = "15.0.4+0" + [[deps.LazyArtifacts]] deps = ["Artifacts", "Pkg"] uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3" @@ -333,6 +345,12 @@ version = "0.3.19" [[deps.Logging]] uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" +[[deps.METIS_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "1fd0a97409e418b78c53fac671cf4622efdf0f21" +uuid = "d00139f3-1899-568f-a2f0-47f597d42d70" +version = "5.1.2+0" + [[deps.MKL_jll]] deps = ["Artifacts", "IntelOpenMP_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "Pkg"] git-tree-sha1 = "2ce8695e1e699b68702c03402672a69f54b8aca9" @@ -363,6 +381,12 @@ git-tree-sha1 = "b3f9e42685b4ad614eca0b44bd863cd41b1c86ea" uuid = 
"f1f71cc9-e9ae-5b93-9b94-4fe0e1ad3748" version = "5.0.2+1" +[[deps.MUMPS_jll]] +deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "METIS_jll", "MPICH_jll", "MPIPreferences", "MPItrampoline_jll", "MicrosoftMPI_jll", "OpenBLAS32_jll", "OpenMPI_jll", "PARMETIS_jll", "Pkg", "SCALAPACK32_jll", "SCOTCH_jll", "TOML"] +git-tree-sha1 = "99f5791b81e59aad952d0629b066cde87a1e81f9" +uuid = "ca64183c-ec4f-5579-95d5-17e128c21291" +version = "5.5.1+1" + [[deps.MacroTools]] deps = ["Markdown", "Random"] git-tree-sha1 = "42324d08725e200c23d4dfb549e0d5d89dede2d2" @@ -419,6 +443,12 @@ version = "0.4.12" uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908" version = "1.2.0" +[[deps.OpenBLAS32_jll]] +deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"] +git-tree-sha1 = "9c6c2ed4b7acd2137b878eb96c68e63b76199d0f" +uuid = "656ef2d0-ae68-5445-9ca0-591084a874a2" +version = "0.3.17+0" + [[deps.OpenBLAS_jll]] deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"] uuid = "4536629a-c528-5b80-bd46-f80d51c5b363" @@ -454,9 +484,21 @@ version = "2.8.1+0" [[deps.P4est_wrapper]] deps = ["CEnum", "Libdl", "MPI", "P4est_jll"] -git-tree-sha1 = "e3231d995bdaa7b00e34934c036551b66f25b4c3" +git-tree-sha1 = "9aaa64b061ef8f4ba4ef0095cad2e25305ee65cf" uuid = "3743d7c0-8adf-11ea-380b-7d33b0ecc1da" -version = "0.1.2" +version = "0.1.3" + +[[deps.PARMETIS_jll]] +deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl", "METIS_jll", "MPICH_jll", "MPIPreferences", "MPItrampoline_jll", "MicrosoftMPI_jll", "OpenMPI_jll", "Pkg", "TOML"] +git-tree-sha1 = "ac9f4ce9b7dd92575b1f0e1d6a6e0f0729597f5f" +uuid = "b247a4be-ddc1-5759-8008-7e02fe3dbdaa" +version = "4.0.6+0" + +[[deps.PETSc_jll]] +deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "METIS_jll", "MPICH_jll", "MPIPreferences", "MPItrampoline_jll", "MUMPS_jll", "MicrosoftMPI_jll", "OpenBLAS32_jll", "OpenMPI_jll", "PARMETIS_jll", "Pkg", "SCALAPACK32_jll", "SCOTCH_jll", "SuiteSparse_jll", "SuperLU_DIST_jll", "TOML"] +git-tree-sha1 = "a6182e59b4c174b847752c4cd70895cfe44fa7a7" +uuid = "8fa3689e-f0b9-5420-9873-adf6ccf46f2d" +version = "3.16.8+0" [[deps.Parameters]] deps = ["OrderedCollections", "UnPack"] @@ -466,9 +508,9 @@ version = "0.12.3" [[deps.Parsers]] deps = ["Dates", "SnoopPrecompile"] -git-tree-sha1 = "b64719e8b4504983c7fca6cc9db3ebc8acc2a4d6" +git-tree-sha1 = "6466e524967496866901a78fca3f2e9ea445a559" uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" -version = "2.5.1" +version = "2.5.2" [[deps.PartitionedArrays]] deps = ["Distances", "IterativeSolvers", "LinearAlgebra", "MPI", "Printf", "SparseArrays", "SparseMatricesCSR"] @@ -528,6 +570,18 @@ git-tree-sha1 = "838a3a4188e2ded87a4f9f184b4b0d78a1e91cb7" uuid = "ae029012-a4dd-5104-9daa-d747884805df" version = "1.3.0" +[[deps.SCALAPACK32_jll]] +deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPICH_jll", "MPIPreferences", "MPItrampoline_jll", "MicrosoftMPI_jll", "OpenBLAS32_jll", "OpenMPI_jll", "Pkg", "TOML"] +git-tree-sha1 = "36312ec64bf64a4120ac2a47438f5775ea28abc4" +uuid = "aabda75e-bfe4-5a37-92e3-ffe54af3c273" +version = "2.2.1+0" + +[[deps.SCOTCH_jll]] +deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"] +git-tree-sha1 = "7110b749766853054ce8a2afaa73325d72d32129" +uuid = "a8d0f55d-b80e-548d-aff6-1a04c175f0f9" +version = "6.1.3+0" + [[deps.SHA]] uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce" version = "0.7.0" @@ -590,6 +644,17 @@ version = "1.5.0" deps = ["Libdl", 
"LinearAlgebra", "Serialization", "SparseArrays"] uuid = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9" +[[deps.SuiteSparse_jll]] +deps = ["Artifacts", "Libdl", "Pkg", "libblastrampoline_jll"] +uuid = "bea87d4a-7f5b-5778-9afe-8cc45184846c" +version = "5.10.1+0" + +[[deps.SuperLU_DIST_jll]] +deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LLVMOpenMP_jll", "LazyArtifacts", "Libdl", "METIS_jll", "MPICH_jll", "MPIPreferences", "MPItrampoline_jll", "MicrosoftMPI_jll", "OpenBLAS32_jll", "OpenMPI_jll", "PARMETIS_jll", "Pkg", "TOML"] +git-tree-sha1 = "e156418856a6c1cc2f5418f0542d9cc43b24a1f9" +uuid = "9a1356b0-3c82-5da3-b77c-7c198e8bd7ab" +version = "8.0.2+0" + [[deps.TOML]] deps = ["Dates"] uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76" diff --git a/Project.toml b/Project.toml index e2b344e6..4fd2707f 100644 --- a/Project.toml +++ b/Project.toml @@ -9,6 +9,7 @@ FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b" Gridap = "56d4f2e9-7ea1-5844-9cf6-b9c51ca7ce8e" GridapDistributed = "f9701e48-63b3-45aa-9a63-9bc6c271f355" GridapP4est = "c2c8e14b-f5fd-423d-9666-1dd9ad120af9" +GridapPETSc = "bcdc36c2-0c3e-11ea-095a-c9dadae499f1" IterativeSolvers = "42fd0dbc-a981-5370-80f2-aaf504508153" LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195" diff --git a/test/mpi/GMGLinearSolversMUMPSTests.jl b/test/mpi/GMGLinearSolversMUMPSTests.jl new file mode 100644 index 00000000..fb4c0e7f --- /dev/null +++ b/test/mpi/GMGLinearSolversMUMPSTests.jl @@ -0,0 +1,117 @@ +module GMGLinearSolversMUMPSTests +using MPI +using Test +using LinearAlgebra +using IterativeSolvers +using FillArrays + +using Gridap +using Gridap.ReferenceFEs +using PartitionedArrays +using GridapDistributed +using GridapP4est + +using GridapSolvers +using GridapSolvers.LinearSolvers + +using GridapPETSc + +u(x) = x[1] + x[2] +f(x) = -Δ(u)(x) + +function set_ksp_options(ksp) + pc = Ref{GridapPETSc.PETSC.PC}() + mumpsmat = Ref{GridapPETSc.PETSC.Mat}() + @check_error_code GridapPETSc.PETSC.KSPView(ksp[],C_NULL) + @check_error_code GridapPETSc.PETSC.KSPSetType(ksp[],GridapPETSc.PETSC.KSPPREONLY) + @check_error_code GridapPETSc.PETSC.KSPGetPC(ksp[],pc) + @check_error_code GridapPETSc.PETSC.PCSetType(pc[],GridapPETSc.PETSC.PCLU) + @check_error_code GridapPETSc.PETSC.PCFactorSetMatSolverType(pc[],GridapPETSc.PETSC.MATSOLVERMUMPS) + @check_error_code GridapPETSc.PETSC.PCFactorSetUpMatSolverType(pc[]) + @check_error_code GridapPETSc.PETSC.PCFactorGetMatrix(pc[],mumpsmat) + @check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 4, 1) + # percentage increase in the estimated working space + @check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 14, 1000) + @check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 28, 2) + @check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 29, 2) + @check_error_code GridapPETSc.PETSC.MatMumpsSetCntl(mumpsmat[], 3, 1.0e-6) +end + +function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, order) + GridapPETSc.with() do + domain = (0,1,0,1) + num_levels = length(num_parts_x_level) + level_parts = generate_level_parts(parts,num_parts_x_level) + cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) + coarse_model = OctreeDistributedDiscreteModel(level_parts[num_levels],cmodel,num_refs_coarse) + mh = ModelHierarchy(coarse_model,level_parts) + + qdegree = 2*(order+1) + reffe = ReferenceFE(lagrangian,Float64,order) + tests = TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") + trials = 
TrialFESpace(tests,u) + + biform(u,v,dΩ) = ∫(∇(v)⋅∇(u))dΩ + liform(v,dΩ) = ∫(v*f)dΩ + smatrices, A, b = compute_hierarchy_matrices(trials,biform,liform,qdegree) + + # Preconditioner + smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0),num_levels-1) + restrictions, prolongations = setup_transfer_operators(trials,qdegree;mode=:residual) + mumps_solver = PETScLinearSolver(set_ksp_options) + + gmg = GMGLinearSolver(mh, + smatrices, + prolongations, + restrictions, + pre_smoothers=smoothers, + post_smoothers=smoothers, + coarsest_solver=mumps_solver, + rtol=1.0e-10, + verbose=false, + mode=:preconditioner) + ss = symbolic_setup(gmg,A) + ns = numerical_setup(ss,A) + + # Solve + x = PVector(0.0,A.cols) + x, history = IterativeSolvers.cg!(x,A,b; + verbose=GridapP4est.i_am_main(parts), + reltol=1.0e-12, + Pl=ns, + log=true) + + # Error norms and print solution + model = get_model(mh,1) + Uh = get_fe_space(trials,1) + Ω = Triangulation(model) + dΩ = Measure(Ω,qdegree) + uh = FEFunction(Uh,x) + e = u-uh + e_l2 = sum(∫(e⋅e)dΩ) + tol = 1.0e-9 + @test e_l2 < tol + if GridapP4est.i_am_main(parts) + println("L2 error = ", e_l2) + end + end +end + +############################################## + +if !MPI.Initialized() + MPI.Init() +end + +# Parameters +order = 2 +coarse_grid_partition = (2,2) +num_refs_coarse = 3 + +num_parts_x_level = [4,2] +ranks = num_parts_x_level[1] +prun(main,mpi,ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order) + + +MPI.Finalize() +end \ No newline at end of file From e68e1294bad2d7730cd2409c586aacd71767d694 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 20 Dec 2022 15:00:13 +0100 Subject: [PATCH 49/95] Fixed MUMPS tests --- .github/workflows/CI.yml | 30 +++++++++++-- test/mpi/MUMPSSolversTests.jl | 81 +++++++++++++++++++++++++++++++++++ test/runtests.jl | 4 +- 3 files changed, 111 insertions(+), 4 deletions(-) create mode 100644 test/mpi/MUMPSSolversTests.jl diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml index 64c74e48..26e3120b 100644 --- a/.github/workflows/CI.yml +++ b/.github/workflows/CI.yml @@ -17,6 +17,7 @@ jobs: env: JULIA_MPI_BINARY: "system" P4EST_ROOT_DIR: "/opt/p4est/2.2/" + JULIA_PETSC_LIBRARY: "/opt/petsc/3.15.4/lib/libpetsc" strategy: fail-fast: false matrix: @@ -68,10 +69,33 @@ jobs: make --quiet make --quiet install rm -rf $ROOT_DIR/$TAR_FILE $SOURCES_DIR - cd $CURR_DIR + cd $CURR_DIR + - name: Install petsc + ##if: steps.cache-petsc.outputs.cache-hit != 'true' + run: | + CURR_DIR=$(pwd) + PACKAGE=petsc + VERSION=3.18 + INSTALL_ROOT=/opt + PETSC_INSTALL=$INSTALL_ROOT/$PACKAGE/$VERSION + TAR_FILE=$PACKAGE-$VERSION.tar.gz + URL="https://ftp.mcs.anl.gov/pub/petsc/release-snapshots/" + ROOT_DIR=/tmp + SOURCES_DIR=$ROOT_DIR/$PACKAGE-$VERSION + BUILD_DIR=$SOURCES_DIR/build + wget -q $URL/$TAR_FILE -O $ROOT_DIR/$TAR_FILE + mkdir -p $SOURCES_DIR + tar xzf $ROOT_DIR/$TAR_FILE -C $SOURCES_DIR --strip-components=1 + cd $SOURCES_DIR + ./configure --prefix=$PETSC_INSTALL --with-cc=mpicc --with-cxx=mpicxx --with-fc=mpif90 \ + --download-mumps --download-scalapack --download-parmetis --download-metis \ + --download-ptscotch --with-debugging --with-x=0 --with-shared=1 \ + --with-mpi=1 --with-64-bit-indices + make + make install - uses: julia-actions/julia-buildpkg@latest - - run: echo $PWD - - run: julia --project=. --color=yes -e 'using Pkg; Pkg.instantiate()' + - run: echo $PWD + - run: julia --project=. -e 'using Pkg; Pkg.instantiate(); Pkg.build(); Pkg.precompile()' - run: julia --project=. 
--color=yes --check-bounds=yes test/runtests.jl
      - uses: codecov/codecov-action@v1
        with:
diff --git a/test/mpi/MUMPSSolversTests.jl b/test/mpi/MUMPSSolversTests.jl
new file mode 100644
index 00000000..df5f4c62
--- /dev/null
+++ b/test/mpi/MUMPSSolversTests.jl
@@ -0,0 +1,81 @@
+module MUMPSSolversTests
+
+using Test
+using MPI
+using Gridap
+using GridapDistributed
+using PartitionedArrays
+using GridapP4est
+using IterativeSolvers
+
+using GridapSolvers
+using GridapSolvers.LinearSolvers
+
+using GridapPETSc
+
+function set_ksp_options(ksp)
+  pc       = Ref{GridapPETSc.PETSC.PC}()
+  mumpsmat = Ref{GridapPETSc.PETSC.Mat}()
+  @check_error_code GridapPETSc.PETSC.KSPView(ksp[],C_NULL)
+  @check_error_code GridapPETSc.PETSC.KSPSetType(ksp[],GridapPETSc.PETSC.KSPPREONLY)
+  @check_error_code GridapPETSc.PETSC.KSPGetPC(ksp[],pc)
+  @check_error_code GridapPETSc.PETSC.PCSetType(pc[],GridapPETSc.PETSC.PCLU)
+  @check_error_code GridapPETSc.PETSC.PCFactorSetMatSolverType(pc[],GridapPETSc.PETSC.MATSOLVERMUMPS)
+  @check_error_code GridapPETSc.PETSC.PCFactorSetUpMatSolverType(pc[])
+  @check_error_code GridapPETSc.PETSC.PCFactorGetMatrix(pc[],mumpsmat)
+  @check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 4, 1)
+  # percentage increase in the estimated working space
+  @check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 14, 1000)
+  @check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 28, 2)
+  @check_error_code GridapPETSc.PETSC.MatMumpsSetIcntl(mumpsmat[], 29, 2)
+  @check_error_code GridapPETSc.PETSC.MatMumpsSetCntl(mumpsmat[], 3, 1.0e-6)
+end
+
+function main(parts,partition)
+  GridapPETSc.with() do
+    domain = (0,1,0,1)
+    model  = CartesianDiscreteModel(parts,domain,partition)
+
+    sol(x) = x[1] + x[2]
+    f(x)   = -Δ(sol)(x)
+
+    order  = 1
+    qorder = order*2 + 1
+    reffe  = ReferenceFE(lagrangian,Float64,order)
+    Vh = TestFESpace(model,reffe,conformity=:H1,dirichlet_tags="boundary")
+    Uh = TrialFESpace(Vh,sol)
+    u  = interpolate(sol,Uh)
+
+    Ω  = Triangulation(model)
+    dΩ = Measure(Ω,qorder)
+    a(u,v) = ∫(∇(v)⋅∇(u))*dΩ
+    l(v)   = ∫(v⋅f)*dΩ
+
+    op = AffineFEOperator(a,l,Uh,Vh)
+    A, b = get_matrix(op), get_vector(op)
+
+    P  = PETScLinearSolver(set_ksp_options)
+    ss = symbolic_setup(P,A)
+    ns = numerical_setup(ss,A)
+
+    x = PVector(1.0,A.cols)
+    solve!(x,ns,b)
+
+    u  = interpolate(sol,Uh)
+    uh = FEFunction(Uh,x)
+    eh = uh - u
+    E  = sum(∫(eh*eh)*dΩ)
+    if GridapP4est.i_am_main(parts)
+      println("L2 Error: ", E)
+    end
+
+    @test E < 1.e-8
+  end
+end
+
+partition = (32,32)
+ranks = (2,2)
+prun(main,mpi,ranks,partition)
+MPI.Finalize()
+
+end
\ No newline at end of file
diff --git a/test/runtests.jl b/test/runtests.jl
index a28f3c63..13825054 100644
--- a/test/runtests.jl
+++ b/test/runtests.jl
@@ -35,7 +35,9 @@ function run_tests(testdir)
            "GMGLinearSolversPoissonTests.jl",
            "GMGLinearSolversLaplacianTests.jl",
            "GMGLinearSolversVectorLaplacianTests.jl",
-           "GMGLinearSolversHDivRTTests.jl"]
+           "GMGLinearSolversHDivRTTests.jl",
+           "MUMPSSolversTests.jl",
+           "GMGLinearSolversMUMPSTests.jl"]
       np = 4
       extra_args = "-s 2 2 -r 2"
     elseif f in ["ModelHierarchiesTests.jl"]

From 42529532f3a05043a0b7d534ff2a40360028d631 Mon Sep 17 00:00:00 2001
From: JordiManyer
Date: Wed, 21 Dec 2022 00:17:33 +0100
Subject: [PATCH 50/95] Refactored ModelHierarchies constructor to include coarsening option

---
 .github/workflows/CI.yml | 2 +-
 Manifest.toml | 4 +-
 src/GridapSolvers.jl | 2 +-
 src/LinearSolvers/PMGLinearSolvers.jl | 181 ++++++++++++++++++
 src/MultilevelTools/ModelHierarchies.jl | 70 ++++++-
 src/MultilevelTools/MultilevelTools.jl | 1 +
 .../PartitionedArraysExtensions.jl | 39 ++--
 .../DistributedGridTransferOperatorsTests.jl | 14 +-
 test/mpi/GMGLinearSolversHDivRTTests.jl | 6 +-
 test/mpi/GMGLinearSolversLaplacianTests.jl | 6 +-
 test/mpi/GMGLinearSolversMUMPSTests.jl | 7 +-
 test/mpi/GMGLinearSolversPoissonTests.jl | 6 +-
 .../GMGLinearSolversVectorLaplacianTests.jl | 6 +-
 test/mpi/ModelHierarchiesTests.jl | 32 +++-
 test/mpi/RedistributeToolsTests.jl | 12 +-
 test/mpi/RefinementToolsTests.jl | 15 +-
 16 files changed, 331 insertions(+), 72 deletions(-)
 create mode 100644 src/LinearSolvers/PMGLinearSolvers.jl

diff --git a/.github/workflows/CI.yml b/.github/workflows/CI.yml
index 26e3120b..41bd4b39 100644
--- a/.github/workflows/CI.yml
+++ b/.github/workflows/CI.yml
@@ -17,7 +17,7 @@ jobs:
     env:
       JULIA_MPI_BINARY: "system"
       P4EST_ROOT_DIR: "/opt/p4est/2.2/"
-      JULIA_PETSC_LIBRARY: "/opt/petsc/3.15.4/lib/libpetsc"
+      JULIA_PETSC_LIBRARY: "/opt/petsc/3.18/lib/libpetsc"
     strategy:
       fail-fast: false
       matrix:
diff --git a/Manifest.toml b/Manifest.toml
index 8ce969f4..1e7082f7 100644
--- a/Manifest.toml
+++ b/Manifest.toml
@@ -209,7 +209,7 @@ uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820"

 [[deps.Gridap]]
 deps = ["AbstractTrees", "BSON", "BlockArrays", "Combinatorics", "DataStructures", "DocStringExtensions", "FastGaussQuadrature", "FileIO", "FillArrays", "ForwardDiff", "JLD2", "JSON", "LineSearches", "LinearAlgebra", "NLsolve", "NearestNeighbors", "PolynomialBases", "QuadGK", "Random", "SparseArrays", "SparseMatricesCSR", "StaticArrays", "Test", "WriteVTK"]
-git-tree-sha1 = "e29f6aa54e749cfe3f5cf903d7b1c4543c177ade"
+git-tree-sha1 = "fc037f1500908743c1447490173bd6c2290e069e"
 repo-rev = "refined-discrete-models"
 repo-url = "https://github.com/gridap/Gridap.jl.git"
 uuid = "56d4f2e9-7ea1-5844-9cf6-b9c51ca7ce8e"
@@ -225,7 +225,7 @@ version = "0.2.6"

 [[deps.GridapP4est]]
 deps = ["ArgParse", "FillArrays", "Gridap", "GridapDistributed", "IterativeSolvers", "Libdl", "LinearAlgebra", "MPI", "P4est_wrapper", "PartitionedArrays", "Printf", "Test"]
-git-tree-sha1 = "f8843e24925bd7ef3cd0f923316b2d806c65b258"
+git-tree-sha1 = "1af540ef833c94dcc557d6863833bfd506f321ab"
 repo-rev = "p4est-migration"
 repo-url = "https://github.com/gridap/GridapP4est.jl.git"
 uuid = "c2c8e14b-f5fd-423d-9666-1dd9ad120af9"
diff --git a/src/GridapSolvers.jl b/src/GridapSolvers.jl
index 4cd7d383..595cbe8e 100644
--- a/src/GridapSolvers.jl
+++ b/src/GridapSolvers.jl
@@ -7,7 +7,7 @@ module GridapSolvers
   using GridapSolvers.LinearSolvers

   # MultilevelTools
-  export get_parts, generate_level_parts
+  export get_parts, generate_level_parts, generate_subparts

   export ModelHierarchy
   export num_levels, get_level, get_level_parts
diff --git a/src/LinearSolvers/PMGLinearSolvers.jl b/src/LinearSolvers/PMGLinearSolvers.jl
new file mode 100644
index 00000000..db7c669c
--- /dev/null
+++ b/src/LinearSolvers/PMGLinearSolvers.jl
@@ -0,0 +1,181 @@
+"""
+  struct PMG{S1,S2,CS} <: Gridap.Algebra.LinearSolver
+
+Implementation of a P-MultiGrid solver.
+
+Through the constructor kwargs, one can specify the smoothers used for the
+pre- and post-smoothing steps, as well as the solver used for the
+coarsest-level solve.
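+
+Usage sketch (hypothetical; assumes a `FESpaceHierarchy` `sh`, a vector of
+system matrices `mats` ordered from finest to coarsest, and a fine-level
+right-hand side `b`; the names mirror the constructor and setups below):
+
+    solver = PMG(sh; pre_smoother=JacobiSmoother(5), rtol=1.0e-8, maxiter=100)
+    ss = symbolic_setup(solver, mats)
+    ns = numerical_setup(ss, mats)
+    x  = copy(b); fill!(x, 0.0)  # zero initial guess
+    solve!(x, ns, b)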
+""" +struct PMG{S1,S2,CS} <: Gridap.Algebra.LinearSolver + sh ::FESpaceHierarchy + pre_smoother ::S1 + post_smoother ::S2 + coarse_solver ::CS + rtol ::Float64 + maxiter ::Int + verbose ::Bool + mode ::Symbol +end + +function PMG( + sh::FESpaceHierarchy; + pre_smoother=JacobiSmoother(5), + post_smoother=pre_smoother, + coarse_solver=BackslashSolver(), + rtol=1.0e-6, + maxiter=1000, + verbose=false, + mode=:preconditioner) + Gridap.Helpers.@check mode==:preconditioner || mode==:solver + return PMG(sh,pre_smoother,post_smoother,coarse_solver,rtol,maxiter,verbose,mode) +end + +struct PMGSymbolicSetup{S1,S2,CS} <: Gridap.Algebra.SymbolicSetup + pmg :: PMG + ss_pre_smoothers :: Vector{S1} + ss_post_smoothers :: Vector{S2} + ss_coarse_solver :: CS +end + +function Gridap.Algebra.symbolic_setup(pmg::PMG, sysmats) + nlev = get_num_levels(pmg.sh) + + ss_pre_smoothers = map(mat -> symbolic_setup(pmg.pre_smoother,mat),sysmats[1:nlev-1]) + if pmg.post_smoother === pmg.pre_smoother + ss_post_smoothers = ss_pre_smoothers + else + ss_post_smoothers = map(mat -> symbolic_setup(pmg.post_smoother,mat),sysmats[1:nlev-1]) + end + ss_coarse_solver = symbolic_setup(pmg.coarse_solver,sysmats[nlev]) + + return PMGSymbolicSetup(pmg,ss_pre_smoothers,ss_post_smoothers,ss_coarse_solver) +end + +mutable struct PMGNumericalSetup{M,C,T,S1,S2,CS} <: Gridap.Algebra.NumericalSetup + pmg :: PMG + sysmats :: Vector{M} + caches :: Vector{C} + transfers :: Vector{T} + ns_pre_smoothers :: Vector{S1} + ns_post_smoothers :: Vector{S2} + ns_coarse_solver :: CS +end + +function get_pmg_caches(lev::Int, sysmats, sh::FESpaceHierarchy) + nlev = length(sysmats) + Adxh = fill(0.0,size(sysmats[lev],1)) + dxh = fill(0.0,size(sysmats[lev],2)) + if (lev != nlev) # Not the coarsest level + dxH = fill(0.0,size(sysmats[lev+1],2)) + rH = fill(0.0,size(sysmats[lev+1],2)) + else + dxH, rH = nothing, nothing + end + return Adxh, dxh, dxH, rH +end + +function get_pmg_caches(lev::Int, sysmats::Vector{T}, sh::FESpaceHierarchy) where T <: PSparseMatrix + nlev = length(sysmats) + Adxh = PVector(0.0,sysmats[lev].rows) + dxh = PVector(0.0,sysmats[lev].cols) + if (lev != nlev) # Not the coarsest level + dxH = PVector(0.0,sysmats[lev+1].cols) + rH = PVector(0.0,sysmats[lev+1].rows) + else + dxH, rH = nothing, nothing + end + return Adxh, dxh, dxH, rH +end + +function Gridap.Algebra.numerical_setup(ss::PMGSymbolicSetup, sysmats) + pmg = ss.pmg + nlev = get_num_levels(pmg.sh) + + # Caches + caches = map(k -> get_pmg_caches(k,sysmats,pmg.sh), collect(1:nlev)) + + # Transfer Operators + transfers = get_transfer_operators(pmg.sh) + + # Smoother/Solvers setups + ns_pre_smoothers = map((ss,mat) -> numerical_setup(ss,mat),ss.ss_pre_smoothers,sysmats[1:nlev-1]) + if pmg.post_smoother === pmg.pre_smoother + ns_post_smoothers = ns_pre_smoothers + else + ns_post_smoothers = map((ss,mat) -> numerical_setup(ss,mat),ss.ss_post_smoothers,sysmats[1:nlev-1]) + end + ns_coarse_solver = numerical_setup(ss.ss_coarse_solver,sysmats[nlev]) + + return PMGNumericalSetup(pmg,sysmats,caches,transfers,ns_pre_smoothers,ns_post_smoothers,ns_coarse_solver) +end + +function solve!(x::AbstractVector,ns::PMGNumericalSetup,b::AbstractVector) + maxiter = ns.pmg.maxiter + rtol = ns.pmg.rtol + verbose = ns.pmg.verbose + mode = ns.pmg.mode + + if mode == :preconditioner + fill!(x,0.0) + r = copy(b) + else + A = ns.sysmats[1] + r = similar(b); mul!(r,A,x); r .= b .- r + end + + iter = 0 + err = 1.0 + nrm_r0 = norm(r) + verbose && println("> PMG: Starting convergence loop.") + while err > rtol 
&& iter < maxiter + solve!(1,x,ns,r) + + nrm_r = norm(r) + err = nrm_r/nrm_r0 + verbose && println(" > Iteration ", iter, ": (eAbs, eRel) = (", nrm_r, " , ", err, ")") + iter = iter + 1 + end + + converged = (err < rtol) + return iter, converged +end + +function solve!(lev::Int,xh::AbstractVector,ns::PMGNumericalSetup,rh::AbstractVector) + nlev = get_num_levels(ns.pmg.sh) + + ### Coarsest level + if (lev == nlev) + solve!(xh,ns.ns_coarse_solver,rh) + return + end + + ### Fine levels + Ah = ns.sysmats[lev] + Adxh, dxh, dxH, rH = ns.caches[lev] + R, Rt = ns.transfers[lev] + + # Pre-smooth current solution + solve!(xh, ns.ns_pre_smoothers[lev], rh) + + # Restrict the residual + mul!(rH, R, rh) + + # Apply next level + fill!(dxH,0.0) + solve!(lev+1,dxH,ns,rH) + + # Interpolate dxH in finer space + mul!(dxh, Rt, dxH) + + # Update solution & residual + xh .= xh .+ dxh + mul!(Adxh, Ah, dxh) + rh .= rh .- Adxh + + # Post-smooth current solution + solve!(xh, ns.ns_post_smoothers[lev], rh) +end + +function LinearAlgebra.ldiv!(x::AbstractVector,ns::PMGNumericalSetup,b::AbstractVector) + solve!(x,ns,b) +end diff --git a/src/MultilevelTools/ModelHierarchies.jl b/src/MultilevelTools/ModelHierarchies.jl index 98b6f2a2..76cb3f8b 100644 --- a/src/MultilevelTools/ModelHierarchies.jl +++ b/src/MultilevelTools/ModelHierarchies.jl @@ -25,6 +25,7 @@ end num_levels(a::ModelHierarchy) = length(a.levels) get_level(a::ModelHierarchy,level::Integer) = a.levels[level] +get_level_parts(a::ModelHierarchy) = a.level_parts get_level_parts(a::ModelHierarchy,level::Integer) = a.level_parts[level] get_model(a::ModelHierarchy,level::Integer) = get_model(get_level(a,level)) @@ -45,11 +46,37 @@ has_redistribution(a::ModelHierarchyLevel{A,B,C,Nothing}) where {A,B,C} = false each level into. We need `num_procs_x_level[end]` to be equal to the number of parts of `model`. """ -function ModelHierarchy(coarsest_model::GridapDistributed.AbstractDistributedDiscreteModel,level_parts; num_refs_x_level=nothing) +function ModelHierarchy(root_parts ::AbstractPData, + model ::GridapDistributed.AbstractDistributedDiscreteModel, + num_procs_x_level ::Vector{<:Integer}; + kwargs...) + + # Request correct number of parts from MAIN + model_parts = get_parts(model) + my_num_parts = map_parts(root_parts) do _p + num_parts(model_parts) # == -1 if !i_am_in(my_parts) + end + main_num_parts = get_main_part(my_num_parts) + + if main_num_parts == num_procs_x_level[end] # Coarsest model + return create_model_hierarchy_by_refinement(root_parts,model,num_procs_x_level;kwargs...) + end + if main_num_parts == num_procs_x_level[1] # Finest model + return create_model_hierarchy_by_coarsening(root_parts,model,num_procs_x_level;kwargs...) + end + @error "Model parts do not correspond to coarsest or finest parts!" +end + +function create_model_hierarchy_by_refinement(root_parts::AbstractPData, + coarsest_model::GridapDistributed.AbstractDistributedDiscreteModel, + num_procs_x_level ::Vector{<:Integer}; + num_refs_x_level=nothing) # TODO: Implement support for num_refs_x_level? 
(future work) - num_levels = length(level_parts) - num_procs_x_level = map(num_parts,level_parts) + num_levels = length(num_procs_x_level) + level_parts = Vector{typeof(root_parts)}(undef,num_levels) meshes = Vector{ModelHierarchyLevel}(undef,num_levels) + + level_parts[num_levels] = get_parts(coarsest_model) meshes[num_levels] = ModelHierarchyLevel(num_levels,coarsest_model,nothing,nothing,nothing) for i = num_levels-1:-1:1 @@ -57,9 +84,11 @@ function ModelHierarchy(coarsest_model::GridapDistributed.AbstractDistributedDis if (num_procs_x_level[i] != num_procs_x_level[i+1]) # meshes[i+1].model is distributed among P processors # model_ref is distributed among Q processors, with P!=Q + level_parts[i] = generate_subparts(root_parts,num_procs_x_level[i]) model_ref,ref_glue = Gridap.Adaptivity.refine(modelH) - model_red,red_glue = redistribute(model_ref,level_parts[i]) + model_red,red_glue = GridapDistributed.redistribute(model_ref,level_parts[i]) else + level_parts[i] = level_parts[i+1] model_ref,ref_glue = Gridap.Adaptivity.refine(modelH) model_red,red_glue = nothing,nothing end @@ -70,6 +99,39 @@ function ModelHierarchy(coarsest_model::GridapDistributed.AbstractDistributedDis return convert_to_refined_models(mh) end +function create_model_hierarchy_by_coarsening(root_parts::AbstractPData, + finest_model::GridapDistributed.AbstractDistributedDiscreteModel, + num_procs_x_level ::Vector{<:Integer}; + num_refs_x_level=nothing) + # TODO: Implement support for num_refs_x_level? (future work) + num_levels = length(num_procs_x_level) + level_parts = Vector{typeof(root_parts)}(undef,num_levels) + meshes = Vector{ModelHierarchyLevel}(undef,num_levels) + + level_parts[1] = get_parts(finest_model) + model = finest_model + for i = 1:num_levels-1 + if (num_procs_x_level[i] != num_procs_x_level[i+1]) + level_parts[i+1] = generate_subparts(root_parts,num_procs_x_level[i]) + model_red = model + model_ref,red_glue = GridapDistributed.redistribute(model_red,level_parts[i+1]) + model_H ,ref_glue = Gridap.Adaptivity.coarsen(model_ref) + else + level_parts[i+1] = level_parts[i] + model_red = nothing + model_ref,red_glue = model, nothing + model_H ,ref_glue = Gridap.Adaptivity.coarsen(model_ref) + end + model = model_H + meshes[i] = ModelHierarchyLevel(i,model_ref,ref_glue,model_red,red_glue) + end + + meshes[num_levels] = ModelHierarchyLevel(num_levels,model,nothing,nothing,nothing) + + mh = ModelHierarchy(level_parts,meshes) + return convert_to_refined_models(mh) +end + function convert_to_refined_models(mh::ModelHierarchy) nlevs = num_levels(mh) levels = Vector{ModelHierarchyLevel}(undef,nlevs) diff --git a/src/MultilevelTools/MultilevelTools.jl b/src/MultilevelTools/MultilevelTools.jl index 9d4f2f32..ef15f0ba 100644 --- a/src/MultilevelTools/MultilevelTools.jl +++ b/src/MultilevelTools/MultilevelTools.jl @@ -21,6 +21,7 @@ import GridapP4est: i_am_in, i_am_main export change_parts export generate_level_parts +export generate_subparts export redistribute_fe_function export redistribute_free_values! 
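With this refactor, `ModelHierarchy(root_parts,model,num_procs_x_level)` decides the construction direction from the model it receives: a model distributed over `num_procs_x_level[end]` parts is refined upwards, while one distributed over `num_procs_x_level[1]` parts is redistributed and coarsened downwards. A minimal driver sketch of both entry points (hypothetical sizes; it mirrors the test updates further below and assumes the usual imports from Gridap, PartitionedArrays, GridapP4est and GridapSolvers.MultilevelTools):

    num_procs_x_level = [4,2]  # finest -> coarsest
    prun(mpi,num_procs_x_level[1]) do parts
      domain = (0,1,0,1)
      # (a) coarsest model in, hierarchy built by refinement
      cparts = generate_subparts(parts,num_procs_x_level[end])
      cmodel = OctreeDistributedDiscreteModel(cparts,CartesianDiscreteModel(domain,(2,2)),2)
      mh_by_refinement = ModelHierarchy(parts,cmodel,num_procs_x_level)
      # (b) finest model in, hierarchy built by coarsening
      fparts = generate_subparts(parts,num_procs_x_level[1])
      fmodel = OctreeDistributedDiscreteModel(fparts,CartesianDiscreteModel(domain,(2,2)),8)
      mh_by_coarsening = ModelHierarchy(parts,fmodel,num_procs_x_level)
    end
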
diff --git a/src/MultilevelTools/PartitionedArraysExtensions.jl b/src/MultilevelTools/PartitionedArraysExtensions.jl index 51c80e41..102212e0 100644 --- a/src/MultilevelTools/PartitionedArraysExtensions.jl +++ b/src/MultilevelTools/PartitionedArraysExtensions.jl @@ -80,27 +80,34 @@ function PartitionedArrays.prun(driver::Function,b::MPIBackend,nparts::Union{Int # MPI.jl, which registers MPI.Finalize() in atexit() end -function generate_level_parts(parts,num_procs_x_level) - root_comm = parts.comm +function generate_subparts(root_parts::AbstractPData,subpart_size::Integer) + root_comm = root_parts.comm rank = MPI.Comm_rank(root_comm) size = MPI.Comm_size(root_comm) - Gridap.Helpers.@check all(num_procs_x_level .<= size) - Gridap.Helpers.@check all(num_procs_x_level .>= 1) + Gridap.Helpers.@check all(subpart_size .<= size) + Gridap.Helpers.@check all(subpart_size .>= 1) + if rank < subpart_size + comm = MPI.Comm_split(root_comm, 0, 0) + else + comm = MPI.Comm_split(root_comm, MPI.MPI_UNDEFINED, MPI.MPI_UNDEFINED) + end + return get_part_ids(comm) +end + +function generate_level_parts(root_parts::AbstractPData,last_level_parts::AbstractPData,level_parts_size::Integer) + if level_parts_size == num_parts(last_level_parts) + return last_level_parts + end + return generate_subparts(root_parts,level_parts_size) +end + +function generate_level_parts(root_parts::AbstractPData,num_procs_x_level::Vector{<:Integer}) num_levels = length(num_procs_x_level) level_parts = Vector{typeof(parts)}(undef,num_levels) - for l = 1:num_levels - lsize = num_procs_x_level[l] - if l>1 && lsize==num_procs_x_level[l-1] - level_parts[l] = level_parts[l-1] - else - if rank < lsize - comm = MPI.Comm_split(root_comm, 0, 0) - else - comm = MPI.Comm_split(root_comm, MPI.MPI_UNDEFINED, MPI.MPI_UNDEFINED) - end - level_parts[l] = get_part_ids(comm) - end + level_parts[1] = generate_subparts(root_parts,num_procs_x_level[1]) + for l = 2:num_levels + level_parts[l] = generate_level_parts(root_parts,level_parts[l-1],num_procs_x_level[l]) end return level_parts end diff --git a/test/mpi/DistributedGridTransferOperatorsTests.jl b/test/mpi/DistributedGridTransferOperatorsTests.jl index 95199672..cc901768 100644 --- a/test/mpi/DistributedGridTransferOperatorsTests.jl +++ b/test/mpi/DistributedGridTransferOperatorsTests.jl @@ -17,17 +17,13 @@ function model_hierarchy_free!(mh::ModelHierarchy) end end -function run(parts,num_parts_x_level,num_trees,num_refs_coarse) +function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) domain = (0,1,0,1) - cmodel = CartesianDiscreteModel(domain,num_trees) - num_levels = length(num_parts_x_level) - level_parts = generate_level_parts(parts,num_parts_x_level) - coarse_model = OctreeDistributedDiscreteModel(level_parts[num_levels],cmodel,num_refs_coarse) - mh = ModelHierarchy(coarse_model,level_parts) - - old_parts = level_parts[2] - new_parts = level_parts[1] + cparts = generate_subparts(parts,num_parts_x_level[num_levels]) + cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) + coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) + mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) # Create Operators: order = 1 diff --git a/test/mpi/GMGLinearSolversHDivRTTests.jl b/test/mpi/GMGLinearSolversHDivRTTests.jl index 08eba8ea..158d094c 100644 --- a/test/mpi/GMGLinearSolversHDivRTTests.jl +++ b/test/mpi/GMGLinearSolversHDivRTTests.jl @@ -21,10 +21,10 @@ f(x) = VectorValue(2.0*x[2]*(1.0-x[1]*x[1]),2.0*x[1]*(1-x[2]*x[2])) function main(parts, 
coarse_grid_partition, num_parts_x_level, num_refs_coarse, order, α) domain = (0,1,0,1) num_levels = length(num_parts_x_level) - level_parts = generate_level_parts(parts,num_parts_x_level) + cparts = generate_subparts(parts,num_parts_x_level[num_levels]) cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) - coarse_model = OctreeDistributedDiscreteModel(level_parts[num_levels],cmodel,num_refs_coarse) - mh = ModelHierarchy(coarse_model,level_parts) + coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) + mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) qdegree = 2*(order+1) reffe = ReferenceFE(raviart_thomas,Float64,order) diff --git a/test/mpi/GMGLinearSolversLaplacianTests.jl b/test/mpi/GMGLinearSolversLaplacianTests.jl index b6ae704c..4e05d7a7 100644 --- a/test/mpi/GMGLinearSolversLaplacianTests.jl +++ b/test/mpi/GMGLinearSolversLaplacianTests.jl @@ -21,10 +21,10 @@ f(x) = -Δ(u)(x) function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, order, α) domain = (0,1,0,1) num_levels = length(num_parts_x_level) - level_parts = generate_level_parts(parts,num_parts_x_level) + cparts = generate_subparts(parts,num_parts_x_level[num_levels]) cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) - coarse_model = OctreeDistributedDiscreteModel(level_parts[num_levels],cmodel,num_refs_coarse) - mh = ModelHierarchy(coarse_model,level_parts) + coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) + mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) qdegree = 2*(order+1) reffe = ReferenceFE(lagrangian,Float64,order) diff --git a/test/mpi/GMGLinearSolversMUMPSTests.jl b/test/mpi/GMGLinearSolversMUMPSTests.jl index fb4c0e7f..970aeec2 100644 --- a/test/mpi/GMGLinearSolversMUMPSTests.jl +++ b/test/mpi/GMGLinearSolversMUMPSTests.jl @@ -41,10 +41,10 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, GridapPETSc.with() do domain = (0,1,0,1) num_levels = length(num_parts_x_level) - level_parts = generate_level_parts(parts,num_parts_x_level) + cparts = generate_subparts(parts,num_parts_x_level[num_levels]) cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) - coarse_model = OctreeDistributedDiscreteModel(level_parts[num_levels],cmodel,num_refs_coarse) - mh = ModelHierarchy(coarse_model,level_parts) + coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) + mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) qdegree = 2*(order+1) reffe = ReferenceFE(lagrangian,Float64,order) @@ -67,6 +67,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, pre_smoothers=smoothers, post_smoothers=smoothers, coarsest_solver=mumps_solver, + maxiter=1, rtol=1.0e-10, verbose=false, mode=:preconditioner) diff --git a/test/mpi/GMGLinearSolversPoissonTests.jl b/test/mpi/GMGLinearSolversPoissonTests.jl index 89347ad4..a28d6d7b 100644 --- a/test/mpi/GMGLinearSolversPoissonTests.jl +++ b/test/mpi/GMGLinearSolversPoissonTests.jl @@ -21,10 +21,10 @@ f(x) = -Δ(u)(x) function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, order) domain = (0,1,0,1) num_levels = length(num_parts_x_level) - level_parts = generate_level_parts(parts,num_parts_x_level) + cparts = generate_subparts(parts,num_parts_x_level[num_levels]) cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) - coarse_model = OctreeDistributedDiscreteModel(level_parts[num_levels],cmodel,num_refs_coarse) - mh = ModelHierarchy(coarse_model,level_parts) + 
coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) + mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) qdegree = 2*(order+1) reffe = ReferenceFE(lagrangian,Float64,order) diff --git a/test/mpi/GMGLinearSolversVectorLaplacianTests.jl b/test/mpi/GMGLinearSolversVectorLaplacianTests.jl index 184fcfd9..68c44cc7 100644 --- a/test/mpi/GMGLinearSolversVectorLaplacianTests.jl +++ b/test/mpi/GMGLinearSolversVectorLaplacianTests.jl @@ -21,10 +21,10 @@ f(x) = VectorValue(2.0*x[2]*(1.0-x[1]*x[1]),2.0*x[1]*(1-x[2]*x[2])) function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, order, α) domain = (0,1,0,1) num_levels = length(num_parts_x_level) - level_parts = generate_level_parts(parts,num_parts_x_level) + cparts = generate_subparts(parts,num_parts_x_level[num_levels]) cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) - coarse_model = OctreeDistributedDiscreteModel(level_parts[num_levels],cmodel,num_refs_coarse) - mh = ModelHierarchy(coarse_model,level_parts) + coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) + mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) qdegree = 2*(order+1) reffe = ReferenceFE(lagrangian,VectorValue{2,Float64},order) diff --git a/test/mpi/ModelHierarchiesTests.jl b/test/mpi/ModelHierarchiesTests.jl index d9e70677..8c54cfb5 100644 --- a/test/mpi/ModelHierarchiesTests.jl +++ b/test/mpi/ModelHierarchiesTests.jl @@ -18,14 +18,28 @@ function model_hierarchy_free!(mh::ModelHierarchy) end end -function main(parts,num_parts_x_level,num_trees,num_refs_coarse) - domain = (0,1,0,1) - cmodel = CartesianDiscreteModel(domain,num_trees) - +function main(parts,num_parts_x_level) + # Start from coarse, refine models + """ + domain = (0,1,0,1) num_levels = length(num_parts_x_level) - level_parts = generate_level_parts(parts,num_parts_x_level) - coarse_model = OctreeDistributedDiscreteModel(level_parts[num_levels],cmodel,num_refs_coarse) - mh = ModelHierarchy(coarse_model,level_parts) + cparts = generate_subparts(parts,num_parts_x_level[num_levels]) + cmodel = CartesianDiscreteModel(domain,(2,2)) + coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,2) + mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) + + sol(x) = x[1] + x[2] + reffe = ReferenceFE(lagrangian,Float64,1) + tests = TestFESpace(mh,reffe,conformity=:H1) + trials = TrialFESpace(tests,sol) + """ + + # Start from fine, coarsen models + domain = (0,1,0,1) + fparts = generate_subparts(parts,num_parts_x_level[1]) + fmodel = CartesianDiscreteModel(domain,(2^8,2^8)) + fine_model = OctreeDistributedDiscreteModel(fparts,fmodel) + mh = ModelHierarchy(parts,fine_model,num_parts_x_level) sol(x) = x[1] + x[2] reffe = ReferenceFE(lagrangian,Float64,1) @@ -36,11 +50,9 @@ function main(parts,num_parts_x_level,num_trees,num_refs_coarse) end num_parts_x_level = [4,4,2,2] # Procs in each refinement level -num_trees = (1,1) # Number of initial P4est trees -num_refs_coarse = 2 # Number of initial refinements ranks = num_parts_x_level[1] -prun(main,mpi,ranks,num_parts_x_level,num_trees,num_refs_coarse) +prun(main,mpi,ranks,num_parts_x_level) MPI.Finalize() end \ No newline at end of file diff --git a/test/mpi/RedistributeToolsTests.jl b/test/mpi/RedistributeToolsTests.jl index 971e4993..0e80d95a 100644 --- a/test/mpi/RedistributeToolsTests.jl +++ b/test/mpi/RedistributeToolsTests.jl @@ -17,15 +17,15 @@ function model_hierarchy_free!(mh::ModelHierarchy) end end -function run(parts,num_parts_x_level,num_trees,num_refs_coarse) +function 
run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) domain = (0,1,0,1) - cmodel = CartesianDiscreteModel(domain,num_trees) - num_levels = length(num_parts_x_level) - level_parts = generate_level_parts(parts,num_parts_x_level) - coarse_model = OctreeDistributedDiscreteModel(level_parts[num_levels],cmodel,num_refs_coarse) - mh = ModelHierarchy(coarse_model,level_parts) + cparts = generate_subparts(parts,num_parts_x_level[num_levels]) + cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) + coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) + mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) + level_parts = get_level_parts(mh) old_parts = level_parts[2] new_parts = level_parts[1] diff --git a/test/mpi/RefinementToolsTests.jl b/test/mpi/RefinementToolsTests.jl index 4e5d3b0b..a5ebc40e 100644 --- a/test/mpi/RefinementToolsTests.jl +++ b/test/mpi/RefinementToolsTests.jl @@ -10,14 +10,13 @@ using IterativeSolvers using GridapSolvers using GridapSolvers.MultilevelTools -function run(parts,num_parts_x_level,num_trees,num_refs_coarse) +function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) domain = (0,1,0,1) - cmodel = CartesianDiscreteModel(domain,num_trees) - - nlevs = length(num_parts_x_level) - level_parts = generate_level_parts(parts,num_parts_x_level) - coarse_model = OctreeDistributedDiscreteModel(level_parts[nlevs],cmodel,num_refs_coarse) - mh = ModelHierarchy(coarse_model,level_parts) + num_levels = length(num_parts_x_level) + cparts = generate_subparts(parts,num_parts_x_level[num_levels]) + cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) + coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) + mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) # FE Spaces order = 1 @@ -27,7 +26,7 @@ function run(parts,num_parts_x_level,num_trees,num_refs_coarse) trials = TrialFESpace(tests,sol) quad_order = 3*order+1 - for lev in 1:nlevs-1 + for lev in 1:num_levels-1 fparts = get_level_parts(mh,lev) cparts = get_level_parts(mh,lev+1) From 1f0e545345664d76869ff1c65a9581040093367b Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 21 Dec 2022 00:50:40 +0100 Subject: [PATCH 51/95] bugfix --- src/MultilevelTools/ModelHierarchies.jl | 2 +- test/mpi/ModelHierarchiesTests.jl | 6 ++---- 2 files changed, 3 insertions(+), 5 deletions(-) diff --git a/src/MultilevelTools/ModelHierarchies.jl b/src/MultilevelTools/ModelHierarchies.jl index 76cb3f8b..0ba050c2 100644 --- a/src/MultilevelTools/ModelHierarchies.jl +++ b/src/MultilevelTools/ModelHierarchies.jl @@ -112,7 +112,7 @@ function create_model_hierarchy_by_coarsening(root_parts::AbstractPData, model = finest_model for i = 1:num_levels-1 if (num_procs_x_level[i] != num_procs_x_level[i+1]) - level_parts[i+1] = generate_subparts(root_parts,num_procs_x_level[i]) + level_parts[i+1] = generate_subparts(root_parts,num_procs_x_level[i+1]) model_red = model model_ref,red_glue = GridapDistributed.redistribute(model_red,level_parts[i+1]) model_H ,ref_glue = Gridap.Adaptivity.coarsen(model_ref) diff --git a/test/mpi/ModelHierarchiesTests.jl b/test/mpi/ModelHierarchiesTests.jl index 8c54cfb5..7742b077 100644 --- a/test/mpi/ModelHierarchiesTests.jl +++ b/test/mpi/ModelHierarchiesTests.jl @@ -20,7 +20,6 @@ end function main(parts,num_parts_x_level) # Start from coarse, refine models - """ domain = (0,1,0,1) num_levels = length(num_parts_x_level) cparts = generate_subparts(parts,num_parts_x_level[num_levels]) @@ -32,13 +31,12 @@ function 
main(parts,num_parts_x_level)
   reffe = ReferenceFE(lagrangian,Float64,1)
   tests = TestFESpace(mh,reffe,conformity=:H1)
   trials = TrialFESpace(tests,sol)
-  """

   # Start from fine, coarsen models
   domain = (0,1,0,1)
   fparts = generate_subparts(parts,num_parts_x_level[1])
-  fmodel = CartesianDiscreteModel(domain,(2^8,2^8))
-  fine_model = OctreeDistributedDiscreteModel(fparts,fmodel)
+  fmodel = CartesianDiscreteModel(domain,(2,2))
+  fine_model = OctreeDistributedDiscreteModel(fparts,fmodel,8)
   mh = ModelHierarchy(parts,fine_model,num_parts_x_level)

   sol(x) = x[1] + x[2]

From 10bc579181e14c4864f7feeb4ed371acf418f12e Mon Sep 17 00:00:00 2001
From: JordiManyer
Date: Sat, 24 Dec 2022 19:56:12 +0100
Subject: [PATCH 52/95] Added option to interpolate in both directions

---
 Manifest.toml | 8 +-
 .../DistributedGridTransferOperators.jl | 72 ++++--
 .../GridapDistributedExtensions.jl | 1 -
 src/MultilevelTools/GridapFixes.jl | 206 +++---------------
 src/MultilevelTools/MultilevelTools.jl | 3 +-
 .../DistributedGridTransferOperatorsTests.jl | 10 +-
 6 files changed, 97 insertions(+), 203 deletions(-)

diff --git a/Manifest.toml b/Manifest.toml
index 1e7082f7..ff97b2f6 100644
--- a/Manifest.toml
+++ b/Manifest.toml
@@ -209,7 +209,7 @@ uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820"

 [[deps.Gridap]]
 deps = ["AbstractTrees", "BSON", "BlockArrays", "Combinatorics", "DataStructures", "DocStringExtensions", "FastGaussQuadrature", "FileIO", "FillArrays", "ForwardDiff", "JLD2", "JSON", "LineSearches", "LinearAlgebra", "NLsolve", "NearestNeighbors", "PolynomialBases", "QuadGK", "Random", "SparseArrays", "SparseMatricesCSR", "StaticArrays", "Test", "WriteVTK"]
-git-tree-sha1 = "fc037f1500908743c1447490173bd6c2290e069e"
+git-tree-sha1 = "6a8d79e9dd4596248b5a1236ec73b61234a109df"
 repo-rev = "refined-discrete-models"
 repo-url = "https://github.com/gridap/Gridap.jl.git"
 uuid = "56d4f2e9-7ea1-5844-9cf6-b9c51ca7ce8e"
@@ -225,7 +225,7 @@ version = "0.2.6"

 [[deps.GridapP4est]]
 deps = ["ArgParse", "FillArrays", "Gridap", "GridapDistributed", "IterativeSolvers", "Libdl", "LinearAlgebra", "MPI", "P4est_wrapper", "PartitionedArrays", "Printf", "Test"]
-git-tree-sha1 = "1af540ef833c94dcc557d6863833bfd506f321ab"
+git-tree-sha1 = "914f28a332b2122d61fb22f37d7e215e9b4a72a0"
 repo-rev = "p4est-migration"
 repo-url = "https://github.com/gridap/GridapP4est.jl.git"
 uuid = "c2c8e14b-f5fd-423d-9666-1dd9ad120af9"
@@ -435,9 +435,9 @@ version = "1.0.1"

 [[deps.NearestNeighbors]]
 deps = ["Distances", "StaticArrays"]
-git-tree-sha1 = "440165bf08bc500b8fe4a7be2dc83271a00c0716"
+git-tree-sha1 = "2c3726ceb3388917602169bed973dbc97f1b51a8"
 uuid = "b8a86587-4115-5ab1-83bc-aa920d37bbce"
-version = "0.4.12"
+version = "0.4.13"

 [[deps.NetworkOptions]]
 uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908"
diff --git a/src/MultilevelTools/DistributedGridTransferOperators.jl b/src/MultilevelTools/DistributedGridTransferOperators.jl
index 1eb11911..d48241ce 100644
--- a/src/MultilevelTools/DistributedGridTransferOperators.jl
+++ b/src/MultilevelTools/DistributedGridTransferOperators.jl
@@ -1,15 +1,16 @@
-struct DistributedGridTransferOperator{T,R,A,B}
+struct DistributedGridTransferOperator{T,R,M,A,B}
   sh    :: A
   cache :: B

-  function DistributedGridTransferOperator(op_type::Symbol,redist::Bool,sh::FESpaceHierarchy,cache)
+  function DistributedGridTransferOperator(op_type::Symbol,redist::Bool,restriction_method::Symbol,sh::FESpaceHierarchy,cache)
     T = typeof(Val(op_type))
     R = typeof(Val(redist))
+    M = typeof(Val(restriction_method))
     A = typeof(sh)
     B = typeof(cache)
-
new{T,R,A,B}(sh,cache) + new{T,R,M,A,B}(sh,cache) end end @@ -23,17 +24,19 @@ function ProlongationOperator(lev::Int,sh::FESpaceHierarchy,qdegree::Int;kwargs. return DistributedGridTransferOperator(lev,sh,qdegree,:prolongation;kwargs...) end -function DistributedGridTransferOperator(lev::Int,sh::FESpaceHierarchy,qdegree::Int,op_type::Symbol;mode=:solution) +function DistributedGridTransferOperator(lev::Int,sh::FESpaceHierarchy,qdegree::Int,op_type::Symbol; + mode::Symbol=:solution,restriction_method::Symbol=:projection) mh = sh.mh @check lev < num_levels(mh) @check op_type ∈ [:restriction, :prolongation] @check mode ∈ [:solution, :residual] + @check restriction_method ∈ [:projection, :interpolation] # Refinement - if (op_type == :restriction) - cache_refine = _get_restriction_cache(lev,sh,qdegree,mode) + if (op_type == :prolongation) || (restriction_method == :interpolation) + cache_refine = _get_interpolation_cache(lev,sh,qdegree,mode) else - cache_refine = _get_prolongation_cache(lev,sh,qdegree,mode) + cache_refine = _get_projection_cache(lev,sh,qdegree,mode) end # Redistribution @@ -41,10 +44,10 @@ function DistributedGridTransferOperator(lev::Int,sh::FESpaceHierarchy,qdegree:: cache_redist = _get_redistribution_cache(lev,sh,mode) cache = cache_refine, cache_redist - return DistributedGridTransferOperator(op_type,redist,sh,cache) + return DistributedGridTransferOperator(op_type,redist,restriction_method,sh,cache) end -function _get_prolongation_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mode) +function _get_interpolation_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mode::Symbol) mh = sh.mh cparts = get_level_parts(mh,lev+1) @@ -68,7 +71,7 @@ function _get_prolongation_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mode return cache_refine end -function _get_restriction_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mode) +function _get_projection_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mode::Symbol) mh = sh.mh cparts = get_level_parts(mh,lev+1) @@ -100,7 +103,7 @@ function _get_restriction_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mode) return cache_refine end -function _get_redistribution_cache(lev::Int,sh::FESpaceHierarchy,mode) +function _get_redistribution_cache(lev::Int,sh::FESpaceHierarchy,mode::Symbol) mh = sh.mh redist = has_redistribution(mh,lev) if redist @@ -131,7 +134,7 @@ function setup_transfer_operators(sh::FESpaceHierarchy,qdegree::Int;kwargs...) 
return restrictions, prolongations end -### Applying the operators: +### Applying the operators: # A) Prolongation, without redistribution function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:prolongation},Val{false}},x::PVector) @@ -146,8 +149,21 @@ function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:p return y end -# B) Restriction, without redistribution -function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:restriction},Val{false}},x::PVector) +# B.1) Restriction, without redistribution, by interpolation +function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:restriction},Val{false},Val{:interpolation}},x::PVector) + cache_refine, cache_redist = A.cache + model_h, Uh, fv_h, dv_h, UH, fv_H, dv_H = cache_refine + + copy!(fv_h,x) # Matrix layout -> FE layout + uh = FEFunction(Uh,fv_h,dv_h) + uH = interpolate!(uh,fv_H,UH) + copy!(y,fv_H) # FE layout -> Matrix layout + + return y +end + +# B.2) Restriction, without redistribution, by projection +function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:restriction},Val{false},Val{:projection}},x::PVector) cache_refine, cache_redist = A.cache model_h, Uh, fv_h, dv_h, VH, AH, lH, xH = cache_refine @@ -167,7 +183,7 @@ function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:p model_h, Uh, fv_h, dv_h, UH, fv_H, dv_H = cache_refine fv_h_red, dv_h_red, Uh_red, model_h_red, glue = cache_redist - # 1 - Solve c2f projection in coarse partition + # 1 - Interpolate in coarse partition if !isa(x,Nothing) copy!(fv_H,x) # Matrix layout -> FE layout uH = FEFunction(UH,fv_H,dv_H) @@ -181,8 +197,28 @@ function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:p return y end -# D) Restriction, with redistribution -function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransferOperator{Val{:restriction},Val{true}},x::PVector) +# D.1) Restriction, with redistribution, by interpolation +function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransferOperator{Val{:restriction},Val{true},Val{:interpolation}},x::PVector) + cache_refine, cache_redist = A.cache + model_h, Uh, fv_h, dv_h, UH, fv_H, dv_H = cache_refine + fv_h_red, dv_h_red, Uh_red, model_h_red, glue = cache_redist + + # 1 - Redistribute from fine partition to coarse partition + copy!(fv_h_red,x) + redistribute_free_values!(fv_h,Uh,fv_h_red,dv_h_red,Uh_red,model_h,glue;reverse=true) + + # 2 - Interpolate in coarse partition + if !isa(y,Nothing) + uh = FEFunction(Uh,fv_h,dv_h) + uH = interpolate!(uh,fv_H,UH) + copy!(y,fv_H) # FE layout -> Matrix layout + end + + return y +end + +# D.2) Restriction, with redistribution, by projection +function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransferOperator{Val{:restriction},Val{true},Val{:projection}},x::PVector) cache_refine, cache_redist = A.cache model_h, Uh, fv_h, dv_h, VH, AH, lH, xH = cache_refine fv_h_red, dv_h_red, Uh_red, model_h_red, glue = cache_redist @@ -191,7 +227,7 @@ function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransfer copy!(fv_h_red,x) redistribute_free_values!(fv_h,Uh,fv_h_red,dv_h_red,Uh_red,model_h,glue;reverse=true) - # 2 - Solve f2c projection in fine partition + # 2 - Solve f2c projection coarse partition if !isa(y,Nothing) uh = FEFunction(Uh,fv_h,dv_h) rhs(v) = lH(v,uh) diff --git a/src/MultilevelTools/GridapDistributedExtensions.jl b/src/MultilevelTools/GridapDistributedExtensions.jl index 
3b485714..b58015c3 100644 --- a/src/MultilevelTools/GridapDistributedExtensions.jl +++ b/src/MultilevelTools/GridapDistributedExtensions.jl @@ -166,7 +166,6 @@ function FESpaces.get_triangulation(f::VoidDistributedFESpace,model::VoidDistrib return VoidDistributedTriangulation(model) end - # Void Gridap structures function void(::Type{<:CartesianDiscreteModel{Dc,Dp}}) where {Dc,Dp} diff --git a/src/MultilevelTools/GridapFixes.jl b/src/MultilevelTools/GridapFixes.jl index 2d7cec18..2689cdd2 100644 --- a/src/MultilevelTools/GridapFixes.jl +++ b/src/MultilevelTools/GridapFixes.jl @@ -1,193 +1,44 @@ -function Base.map(::typeof(Gridap.Arrays.testitem), - a::Tuple{<:AbstractVector{<:AbstractVector{<:VectorValue}},<:AbstractVector{<:Gridap.Fields.LinearCombinationFieldVector}}) - a2=Gridap.Arrays.testitem(a[2]) - a1=Vector{eltype(eltype(a[1]))}(undef,size(a2,1)) - a1.=zero(Gridap.Arrays.testitem(a1)) - (a1,a2) -end - -# Fixes Err3 (see below) -function Gridap.Geometry.is_change_possible( -strian::Gridap.Geometry.Triangulation, -ttrian::Gridap.Geometry.Triangulation) - if strian === ttrian || num_cells(strian)==num_cells(ttrian)==0 - return true - end - Gridap.Helpers.@check get_background_model(strian) === get_background_model(ttrian) "Triangulations do not point to the same background discrete model!" - D = num_cell_dims(strian) - sglue = get_glue(strian,Val(D)) - tglue = get_glue(ttrian,Val(D)) - Gridap.Geometry.is_change_possible(sglue,tglue) # Fails here -end - -# Fixes Err3 (see below) -function Gridap.CellData.change_domain(a::CellField, - ::ReferenceDomain, - ttrian::Gridap.Geometry.Triangulation, - ::ReferenceDomain) - msg = """\n - We cannot move the given CellField to the reference domain of the requested triangulation. - Make sure that the given triangulation is either the same as the triangulation on which the - CellField is defined, or that the latter triangulation is the background of the former. 
- """ - strian = get_triangulation(a) - if strian === ttrian || num_cells(strian)==num_cells(ttrian)==0 - return a - end - @assert Gridap.Geometry.is_change_possible(strian,ttrian) msg - D = num_cell_dims(strian) - sglue = get_glue(strian,Val(D)) - tglue = get_glue(ttrian,Val(D)) - Gridap.CellData.change_domain_ref_ref(a,ttrian,sglue,tglue) -end - -function Gridap.FESpaces.get_cell_fe_data(fun,f,ttrian) - sface_to_data = fun(f) - strian = get_triangulation(f) - if strian === ttrian || num_cells(strian)==num_cells(ttrian)==0 - return sface_to_data - end - @assert Gridap.Geometry.is_change_possible(strian,ttrian) - D = num_cell_dims(strian) - sglue = get_glue(strian,Val(D)) - tglue = get_glue(ttrian,Val(D)) - Gridap.FESpaces.get_cell_fe_data(fun,sface_to_data,sglue,tglue) -end - -function Gridap.Geometry.best_target(trian1::Gridap.Geometry.Triangulation,trian2::Gridap.Geometry.Triangulation) - if (num_cells(trian1)==num_cells(trian2)==0) - return trian1 - end - Gridap.Helpers.@check Gridap.Geometry.is_change_possible(trian1,trian2) - Gridap.Helpers.@check Gridap.Geometry.is_change_possible(trian2,trian1) - D1 = num_cell_dims(trian1) - D2 = num_cell_dims(trian2) - glue1 = get_glue(trian1,Val(D2)) - glue2 = get_glue(trian2,Val(D1)) - Gridap.Geometry.best_target(trian1,trian2,glue1,glue2) -end - - -function Gridap.Geometry.is_change_possible(strian::Gridap.Adaptivity.AdaptedTriangulation,ttrian::Gridap.Adaptivity.AdaptedTriangulation) - (strian === ttrian) && (return true) - (num_cells(strian)==num_cells(ttrian)==0) && (return true) - if (get_background_model(strian) === get_background_model(ttrian)) - return Gridap.Geometry.is_change_possible(strian.trian,ttrian.trian) - end - if typeof(strian.trian) == typeof(ttrian.trian) - smodel = Gridap.Adaptivity.get_adapted_model(strian) - tmodel = Gridap.Adaptivity.get_adapted_model(ttrian) - a = Gridap.Adaptivity.get_parent(tmodel) === Gridap.Adaptivity.get_model(smodel) # tmodel = refine(smodel) - b = Gridap.Adaptivity.get_parent(smodel) === Gridap.Adaptivity.get_model(tmodel) # smodel = refine(tmodel) - return a || b - end - @notimplemented - return false -end - -function Gridap.Geometry.is_change_possible(strian::Gridap.Adaptivity.AdaptedTriangulation,ttrian::Gridap.Geometry.Triangulation) - (num_cells(strian)==num_cells(ttrian)==0) && (return true) - if (get_background_model(strian) === get_background_model(ttrian)) - return Gridap.Geometry.is_change_possible(strian.trian,ttrian) - end - if typeof(strian.trian) == typeof(ttrian) - smodel = Gridap.Adaptivity.get_adapted_model(strian) - tmodel = get_background_model(ttrian) - return get_parent(smodel) === tmodel # smodel = refine(tmodel) - end - @notimplemented - return false -end -function Gridap.Geometry.is_change_possible(strian::Gridap.Geometry.Triangulation,ttrian::Gridap.Adaptivity.AdaptedTriangulation) - (num_cells(strian)==num_cells(ttrian)==0) && (return true) - if (get_background_model(strian) === get_background_model(ttrian)) - return Gridap.Geometry.is_change_possible(strian,ttrian.trian) - end - if typeof(strian) == typeof(ttrian.trian) - smodel = get_background_model(strian) - tmodel = Gridap.Adaptivity.get_adapted_model(ttrian) - return Gridap.Adaptivity.get_parent(tmodel) === smodel # tmodel = refine(smodel) - end - @notimplemented - return false -end -function Gridap.Geometry.best_target(strian::Gridap.Adaptivity.AdaptedTriangulation,ttrian::Gridap.Adaptivity.AdaptedTriangulation) - @check Gridap.Geometry.is_change_possible(strian,ttrian) - (num_cells(strian)==num_cells(ttrian)==0) 
&& (return strian) +function Gridap.Adaptivity.change_domain_n2o(f_fine,ctrian::Gridap.Geometry.Triangulation{Dc},glue::Gridap.Adaptivity.AdaptivityGlue{<:Gridap.Adaptivity.RefinementGlue,Dc}) where Dc + @notimplementedif num_dims(ctrian) != Dc + msg = "Evaluating a fine CellField in the coarse mesh is costly! If you are using this feature + to integrate, consider using a CompositeMeasure instead (see test/AdaptivityTests/GridTransferTests.jl)." + @warn msg - (strian === ttrian) && (return ttrian) - if (get_background_model(strian) === get_background_model(ttrian)) - return Gridap.Geometry.best_target(strian.trian,ttrian.trian) - end - if typeof(strian.trian) == typeof(ttrian.trian) - smodel = Gridap.Adaptivity.get_adapted_model(strian) - tmodel = Gridap.Adaptivity.get_adapted_model(ttrian) - a = Gridap.Adaptivity.get_parent(tmodel) === Gridap.Adaptivity.get_model(smodel) # tmodel = refine(smodel) - a ? (return ttrian) : (return strian) - end - @notimplemented - return nothing -end - -function Gridap.Geometry.best_target(strian::Gridap.Adaptivity.AdaptedTriangulation,ttrian::Gridap.Geometry.Triangulation) - @check Gridap.Geometry.is_change_possible(strian,ttrian) - return strian -end + if (num_cells(ctrian) != 0) + # f_c2f[i_coarse] = [f_fine[i_fine_1], ..., f_fine[i_fine_nChildren]] + f_c2f = Gridap.Adaptivity.f2c_reindex(f_fine,glue) -function Gridap.Geometry.best_target(strian::Gridap.Geometry.Triangulation,ttrian::Gridap.Adaptivity.AdaptedTriangulation) - @check Gridap.Geometry.is_change_possible(strian,ttrian) - return ttrian -end - -function Gridap.CellData.change_domain(a::CellField,ttrian::Gridap.Adaptivity.AdaptedTriangulation,::ReferenceDomain) - strian = get_triangulation(a) - if (strian === ttrian) || (num_cells(strian)==num_cells(ttrian)==0) - return a - end - @assert Gridap.Geometry.is_change_possible(strian,ttrian) - if (get_background_model(strian) === get_background_model(ttrian)) - return Gridap.CellData.change_domain(a,ttrian.trian,ReferenceDomain()) + child_ids = Gridap.Adaptivity.f2c_reindex(glue.n2o_cell_to_child_id,glue) + rrules = Gridap.Adaptivity.get_old_cell_refinement_rules(glue) + f_coarse = lazy_map(Gridap.Adaptivity.FineToCoarseField,f_c2f,rrules,child_ids) + return Gridap.CellData.GenericCellField(f_coarse,ctrian,ReferenceDomain()) + else + f_coarse = Fill(Gridap.Fields.ConstantField(0.0),num_cells(ftrian)) + return Gridap.CellData.GenericCellField(f_coarse,ctrian,ReferenceDomain()) end - return Gridap.Adaptivity.change_domain_o2n(a,ttrian) end -function Gridap.CellData.change_domain(a::Gridap.CellData.OperationCellField,ttrian::Gridap.Adaptivity.AdaptedTriangulation,::ReferenceDomain) - strian = get_triangulation(a) - if (strian === ttrian) || (num_cells(strian)==num_cells(ttrian)==0) - return a - end - @assert Gridap.Geometry.is_change_possible(strian,ttrian) - if (get_background_model(strian) === get_background_model(ttrian)) - return Gridap.CellDatachange_domain(a,ttrian.trian,ReferenceDomain()) +function Gridap.Adaptivity.FineToCoarseField(fine_fields::AbstractArray{<:Gridap.Fields.Field},rrule::Gridap.Adaptivity.RefinementRule,child_ids::AbstractArray{<:Integer}) + fields = Vector{Gridap.Fields.Field}(undef,Gridap.Adaptivity.num_subcells(rrule)) + fields = fill!(fields,Gridap.Fields.ConstantField(0.0)) + for (k,id) in enumerate(child_ids) + fields[id] = fine_fields[k] end - return Gridap.Adaptivity.change_domain_o2n(a,ttrian) + return Gridap.Adaptivity.FineToCoarseField(fields,rrule) end -function 
Gridap.CellData.change_domain(a::CellField,ttrian::Gridap.Adaptivity.AdaptedTriangulation,::PhysicalDomain) - strian = get_triangulation(a) - if (strian === ttrian) || (num_cells(strian)==num_cells(ttrian)==0) - return a - end - @assert Gridap.Geometry.is_change_possible(strian,ttrian) - if (get_background_model(strian) === get_background_model(ttrian)) - return Gridap.Adaptivity.change_domain(a,ttrian.trian,PhysicalDomain()) - end - @notimplemented -end -function Gridap.Geometry.move_contributions(scell_to_val::AbstractArray, strian::Gridap.Adaptivity.AdaptedTriangulation, ttrian::Gridap.Geometry.Triangulation) - (num_cells(strian)==num_cells(ttrian)==0) && (return scell_to_val) - - smodel = Gridap.Adaptivity.get_adapted_model(strian) - @check Gridap.Adaptivity.get_parent(smodel) === get_background_model(ttrian) - tcell_to_val = Gridap.Geometry.move_contributions(scell_to_val,get_adaptivity_glue(smodel)) - return tcell_to_val +""" +function Base.map(::typeof(Gridap.Arrays.testitem), + a::Tuple{<:AbstractVector{<:AbstractVector{<:VectorValue}},<:AbstractVector{<:Gridap.Fields.LinearCombinationFieldVector}}) + a2=Gridap.Arrays.testitem(a[2]) + a1=Vector{eltype(eltype(a[1]))}(undef,size(a2,1)) + a1.=zero(Gridap.Arrays.testitem(a1)) + (a1,a2) end - - # This fix is required to be able to integrate in the overlapped mesh underlying patch smoothers function Gridap.Geometry.get_glue(trian::BodyFittedTriangulation{Dt},::Val{Dt}) where Dt tface_to_mface = trian.tface_to_mface @@ -201,3 +52,4 @@ function Gridap.Geometry.get_glue(trian::BodyFittedTriangulation{Dt},::Val{Dt}) end FaceToFaceGlue(tface_to_mface,tface_to_mface_map,mface_to_tface) end +""" \ No newline at end of file diff --git a/src/MultilevelTools/MultilevelTools.jl b/src/MultilevelTools/MultilevelTools.jl index ef15f0ba..6211d518 100644 --- a/src/MultilevelTools/MultilevelTools.jl +++ b/src/MultilevelTools/MultilevelTools.jl @@ -41,7 +41,7 @@ export mul! 
include("PartitionedArraysExtensions.jl") include("GridapDistributedExtensions.jl") -#include("GridapFixes.jl") +include("GridapFixes.jl") include("RefinementTools.jl") include("RedistributeTools.jl") include("ModelHierarchies.jl") @@ -49,6 +49,5 @@ include("FESpaceHierarchies.jl") include("DistributedGridTransferOperators.jl") - end diff --git a/test/mpi/DistributedGridTransferOperatorsTests.jl b/test/mpi/DistributedGridTransferOperatorsTests.jl index cc901768..ec5d86db 100644 --- a/test/mpi/DistributedGridTransferOperatorsTests.jl +++ b/test/mpi/DistributedGridTransferOperatorsTests.jl @@ -34,8 +34,10 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) trials = TrialFESpace(tests,u) qdegree = order*2+1 - ops = setup_transfer_operators(trials, qdegree) + ops = setup_transfer_operators(trials, qdegree; restriction_method=:projection) restrictions, prolongations = ops + ops2 = setup_transfer_operators(trials, qdegree; restriction_method=:interpolation) + restrictions2, prolongations2 = ops2 a(u,v,dΩ) = ∫(∇(v)⋅∇(u))*dΩ l(v,dΩ) = ∫(v⋅u)*dΩ @@ -64,9 +66,15 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) R = restrictions[lev] mul!(yH,R,xh) + R2 = restrictions2[lev] + mul!(yH,R2,xh) + GridapP4est.i_am_main(parts_h) && println(" > Prolongation") P = prolongations[lev] mul!(yh,P,xH) + + P2 = prolongations2[lev] + mul!(yh,P2,xH) end end From 11e9d534d3bf900b00f2d6c41ced83695ee97bc8 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 27 Dec 2022 00:41:55 +0100 Subject: [PATCH 53/95] Changed number of mpi procs in tests --- test/runtests.jl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/test/runtests.jl b/test/runtests.jl index 13825054..e6f203f1 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -30,8 +30,8 @@ function run_tests(testdir) MPI.mpiexec() do cmd if f in ["DistributedGridTransferOperatorsTests.jl", "RedistributeToolsTests.jl", - "RefinementToolsTests", - "RichardsonSmoothersTests", + "RefinementToolsTests.jl", + "RichardsonSmoothersTests.jl", "GMGLinearSolversPoissonTests.jl", "GMGLinearSolversLaplacianTests.jl", "GMGLinearSolversVectorLaplacianTests.jl", From effc1324781084b51001095c13005c84e6b3d07b Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 27 Dec 2022 16:33:37 +0100 Subject: [PATCH 54/95] Added caches for redistribute_cell_dofs() --- .../DistributedGridTransferOperators.jl | 44 ++++++---- src/MultilevelTools/RedistributeTools.jl | 83 +++++++++++++++---- 2 files changed, 95 insertions(+), 32 deletions(-) diff --git a/src/MultilevelTools/DistributedGridTransferOperators.jl b/src/MultilevelTools/DistributedGridTransferOperators.jl index d48241ce..ed136b8e 100644 --- a/src/MultilevelTools/DistributedGridTransferOperators.jl +++ b/src/MultilevelTools/DistributedGridTransferOperators.jl @@ -41,7 +41,7 @@ function DistributedGridTransferOperator(lev::Int,sh::FESpaceHierarchy,qdegree:: # Redistribution redist = has_redistribution(mh,lev) - cache_redist = _get_redistribution_cache(lev,sh,mode) + cache_redist = _get_redistribution_cache(lev,sh,mode,op_type,cache_refine) cache = cache_refine, cache_redist return DistributedGridTransferOperator(op_type,redist,restriction_method,sh,cache) @@ -103,20 +103,30 @@ function _get_projection_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mode:: return cache_refine end -function _get_redistribution_cache(lev::Int,sh::FESpaceHierarchy,mode::Symbol) +function _get_redistribution_cache(lev::Int,sh::FESpaceHierarchy,mode::Symbol,op_type::Symbol,cache_refine) 
mh = sh.mh redist = has_redistribution(mh,lev) - if redist - Uh_red = get_fe_space(sh,lev) - model_h_red = get_model(mh,lev) - fv_h_red = PVector(0.0,Uh_red.gids) - dv_h_red = (mode == :solution) ? get_dirichlet_dof_values(Uh_red) : zero_dirichlet_values(Uh_red) - glue = mh.levels[lev].red_glue - - cache_redist = fv_h_red, dv_h_red, Uh_red, model_h_red, glue - else + if !redist cache_redist = nothing + return cache_redist end + + Uh_red = get_fe_space(sh,lev) + model_h_red = get_model(mh,lev) + fv_h_red = PVector(0.0,Uh_red.gids) + dv_h_red = (mode == :solution) ? get_dirichlet_dof_values(Uh_red) : zero_dirichlet_values(Uh_red) + glue = mh.levels[lev].red_glue + + if op_type == :prolongation + model_h, Uh, fv_h, dv_h = cache_refine + cache_exchange = get_redistribute_free_values_cache(fv_h_red,Uh_red,fv_h,dv_h,Uh,model_h_red,glue;reverse=false) + else + model_h, Uh, fv_h, dv_h = cache_refine + cache_exchange = get_redistribute_free_values_cache(fv_h,Uh,fv_h_red,dv_h_red,Uh_red,model_h,glue;reverse=true) + end + + cache_redist = fv_h_red, dv_h_red, Uh_red, model_h_red, glue, cache_exchange + return cache_redist end @@ -181,7 +191,7 @@ end function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:prolongation},Val{true}},x::Union{PVector,Nothing}) cache_refine, cache_redist = A.cache model_h, Uh, fv_h, dv_h, UH, fv_H, dv_H = cache_refine - fv_h_red, dv_h_red, Uh_red, model_h_red, glue = cache_redist + fv_h_red, dv_h_red, Uh_red, model_h_red, glue, cache_exchange = cache_redist # 1 - Interpolate in coarse partition if !isa(x,Nothing) @@ -191,7 +201,7 @@ function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:p end # 2 - Redistribute from coarse partition to fine partition - redistribute_free_values!(fv_h_red,Uh_red,fv_h,dv_h,Uh,model_h_red,glue;reverse=false) + redistribute_free_values!(cache_exchange,fv_h_red,Uh_red,fv_h,dv_h,Uh,model_h_red,glue;reverse=false) copy!(y,fv_h_red) # FE layout -> Matrix layout return y @@ -201,11 +211,11 @@ end function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransferOperator{Val{:restriction},Val{true},Val{:interpolation}},x::PVector) cache_refine, cache_redist = A.cache model_h, Uh, fv_h, dv_h, UH, fv_H, dv_H = cache_refine - fv_h_red, dv_h_red, Uh_red, model_h_red, glue = cache_redist + fv_h_red, dv_h_red, Uh_red, model_h_red, glue, cache_exchange = cache_redist # 1 - Redistribute from fine partition to coarse partition copy!(fv_h_red,x) - redistribute_free_values!(fv_h,Uh,fv_h_red,dv_h_red,Uh_red,model_h,glue;reverse=true) + redistribute_free_values!(cache_exchange,fv_h,Uh,fv_h_red,dv_h_red,Uh_red,model_h,glue;reverse=true) # 2 - Interpolate in coarse partition if !isa(y,Nothing) @@ -221,11 +231,11 @@ end function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransferOperator{Val{:restriction},Val{true},Val{:projection}},x::PVector) cache_refine, cache_redist = A.cache model_h, Uh, fv_h, dv_h, VH, AH, lH, xH = cache_refine - fv_h_red, dv_h_red, Uh_red, model_h_red, glue = cache_redist + fv_h_red, dv_h_red, Uh_red, model_h_red, glue, cache_exchange = cache_redist # 1 - Redistribute from fine partition to coarse partition copy!(fv_h_red,x) - redistribute_free_values!(fv_h,Uh,fv_h_red,dv_h_red,Uh_red,model_h,glue;reverse=true) + redistribute_free_values!(cache_exchange,fv_h,Uh,fv_h_red,dv_h_red,Uh_red,model_h,glue;reverse=true) # 2 - Solve f2c projection coarse partition if !isa(y,Nothing) diff --git a/src/MultilevelTools/RedistributeTools.jl 
b/src/MultilevelTools/RedistributeTools.jl index 93c0adb7..3905edff 100644 --- a/src/MultilevelTools/RedistributeTools.jl +++ b/src/MultilevelTools/RedistributeTools.jl @@ -32,7 +32,7 @@ function _update_cell_dof_values_with_local_info!(cell_dof_values_new, end end -function allocate_comm_data(num_dofs_x_cell,lids) +function _allocate_comm_data(num_dofs_x_cell,lids) map_parts(num_dofs_x_cell,lids) do num_dofs_x_cell,lids n = length(lids) ptrs = Vector{Int32}(undef,n+1) @@ -49,7 +49,7 @@ function allocate_comm_data(num_dofs_x_cell,lids) end end -function pack_snd_data!(snd_data,cell_dof_values,snd_lids) +function _pack_snd_data!(snd_data,cell_dof_values,snd_lids) map_parts(snd_data,cell_dof_values,snd_lids) do snd_data,cell_dof_values,snd_lids cache = array_cache(cell_dof_values) s = 1 @@ -67,7 +67,7 @@ function pack_snd_data!(snd_data,cell_dof_values,snd_lids) end end -function unpack_rcv_data!(cell_dof_values,rcv_data,rcv_lids) +function _unpack_rcv_data!(cell_dof_values,rcv_data,rcv_lids) map_parts(cell_dof_values,rcv_data,rcv_lids) do cell_dof_values,rcv_data,rcv_lids s = 1 for i = 1:length(rcv_lids.ptrs)-1 @@ -92,31 +92,58 @@ function get_glue_components(glue::GridapDistributed.RedistributeGlue,reverse::V return glue.lids_snd, glue.lids_rcv, glue.parts_snd, glue.parts_rcv, glue.old2new end -function num_dofs_x_cell(cell_dofs_array,lids) +function _num_dofs_x_cell(cell_dofs_array,lids) map_parts(cell_dofs_array,lids) do cell_dofs_array, lids data = [length(cell_dofs_array[i]) for i = 1:length(cell_dofs_array) ] PartitionedArrays.Table(data,lids.ptrs) end end +function get_redistribute_cell_dofs_cache(cell_dof_values_old, + cell_dof_ids_new, + model_new, + glue::GridapDistributed.RedistributeGlue; + reverse=false) + + lids_rcv, lids_snd, parts_rcv, parts_snd, new2old = get_glue_components(glue,Val(reverse)) + + cell_dof_values_old = change_parts(cell_dof_values_old,get_parts(glue);default=[]) + cell_dof_ids_new = change_parts(cell_dof_ids_new,get_parts(glue);default=[[]]) + + num_dofs_x_cell_snd = _num_dofs_x_cell(cell_dof_values_old, lids_snd) + num_dofs_x_cell_rcv = _num_dofs_x_cell(cell_dof_ids_new, lids_rcv) + snd_data = _allocate_comm_data(num_dofs_x_cell_snd, lids_snd) + rcv_data = _allocate_comm_data(num_dofs_x_cell_rcv, lids_rcv) + + cell_dof_values_new = _allocate_cell_wise_dofs(cell_dof_ids_new) + + caches = snd_data, rcv_data, cell_dof_values_new + return caches +end function redistribute_cell_dofs(cell_dof_values_old, cell_dof_ids_new, model_new, glue::GridapDistributed.RedistributeGlue; reverse=false) + caches = get_redistribute_cell_dofs_cache(cell_dof_values_old,cell_dof_ids_new,model_new,glue;reverse=reverse) + return redistribute_cell_dofs!(caches,cell_dof_values_old,cell_dof_ids_new,model_new,glue;reverse=reverse) +end + +function redistribute_cell_dofs!(caches, + cell_dof_values_old, + cell_dof_ids_new, + model_new, + glue::GridapDistributed.RedistributeGlue; + reverse=false) + snd_data, rcv_data, cell_dof_values_new = caches lids_rcv, lids_snd, parts_rcv, parts_snd, new2old = get_glue_components(glue,Val(reverse)) cell_dof_values_old = change_parts(cell_dof_values_old,get_parts(glue);default=[]) cell_dof_ids_new = change_parts(cell_dof_ids_new,get_parts(glue);default=[[]]) - num_dofs_x_cell_snd = num_dofs_x_cell(cell_dof_values_old, lids_snd) - num_dofs_x_cell_rcv = num_dofs_x_cell(cell_dof_ids_new, lids_rcv) - snd_data = allocate_comm_data(num_dofs_x_cell_snd, lids_snd) - rcv_data = allocate_comm_data(num_dofs_x_cell_rcv, lids_rcv) - - 
pack_snd_data!(snd_data,cell_dof_values_old,lids_snd) + _pack_snd_data!(snd_data,cell_dof_values_old,lids_snd) tout = async_exchange!(rcv_data, snd_data, @@ -125,8 +152,6 @@ function redistribute_cell_dofs(cell_dof_values_old, PartitionedArrays._empty_tasks(parts_rcv)) map_parts(schedule,tout) - cell_dof_values_new = _allocate_cell_wise_dofs(cell_dof_ids_new) - # We have to build the owned part of "cell_dof_values_new" out of # 1. cell_dof_values_old (for those cells s.t. new2old[:]!=0) # 2. cell_dof_values_new_rcv (for those cells s.t. new2old[:]=0) @@ -135,7 +160,7 @@ function redistribute_cell_dofs(cell_dof_values_old, new2old) map_parts(wait,tout) - unpack_rcv_data!(cell_dof_values_new,rcv_data,lids_rcv) + _unpack_rcv_data!(cell_dof_values_new,rcv_data,lids_rcv) # Now that every part knows it's new owned dofs, exchange ghosts new_parts = get_parts(model_new) @@ -148,6 +173,21 @@ function redistribute_cell_dofs(cell_dof_values_old, return cell_dof_values_new end +function get_redistribute_free_values_cache(fv_new::Union{PVector,Nothing}, + Uh_new::Union{GridapDistributed.DistributedSingleFieldFESpace,VoidDistributedFESpace}, + fv_old::Union{PVector,Nothing}, + dv_old::Union{AbstractPData,Nothing}, + Uh_old::Union{GridapDistributed.DistributedSingleFieldFESpace,VoidDistributedFESpace}, + model_new, + glue::GridapDistributed.RedistributeGlue; + reverse=false) + cell_dof_values_old = !isa(fv_old,Nothing) ? map_parts(scatter_free_and_dirichlet_values,local_views(Uh_old),local_views(fv_old),dv_old) : nothing + cell_dof_ids_new = !isa(fv_new,Nothing) ? map_parts(get_cell_dof_ids, local_views(Uh_new)) : nothing + caches = get_redistribute_cell_dofs_cache(cell_dof_values_old,cell_dof_ids_new,model_new,glue;reverse=reverse) + + return caches +end + function redistribute_free_values!(fv_new::Union{PVector,Nothing}, Uh_new::Union{GridapDistributed.DistributedSingleFieldFESpace,VoidDistributedFESpace}, fv_old::Union{PVector,Nothing}, @@ -157,9 +197,23 @@ function redistribute_free_values!(fv_new::Union{PVector,Nothing}, glue::GridapDistributed.RedistributeGlue; reverse=false) + caches = get_redistribute_free_values_cache(fv_new,Uh_new,fv_old,dv_old,Uh_old,model_new,glue;reverse=reverse) + return redistribute_free_values!(caches,fv_new,Uh_new,fv_old,dv_old,Uh_old,model_new,glue;reverse=reverse) +end + +function redistribute_free_values!(caches, + fv_new::Union{PVector,Nothing}, + Uh_new::Union{GridapDistributed.DistributedSingleFieldFESpace,VoidDistributedFESpace}, + fv_old::Union{PVector,Nothing}, + dv_old::Union{AbstractPData,Nothing}, + Uh_old::Union{GridapDistributed.DistributedSingleFieldFESpace,VoidDistributedFESpace}, + model_new, + glue::GridapDistributed.RedistributeGlue; + reverse=false) + cell_dof_values_old = !isa(fv_old,Nothing) ? map_parts(scatter_free_and_dirichlet_values,local_views(Uh_old),local_views(fv_old),dv_old) : nothing cell_dof_ids_new = !isa(fv_new,Nothing) ? 
map_parts(get_cell_dof_ids, local_views(Uh_new)) : nothing - cell_dof_values_new = redistribute_cell_dofs(cell_dof_values_old,cell_dof_ids_new,model_new,glue;reverse=reverse) + cell_dof_values_new = redistribute_cell_dofs!(caches,cell_dof_values_old,cell_dof_ids_new,model_new,glue;reverse=reverse) # Gather the new free dofs if !isa(fv_new,Nothing) @@ -189,7 +243,6 @@ function redistribute_fe_function(uh_old::Union{GridapDistributed.DistributedSin end end - function Gridap.FESpaces.gather_free_and_dirichlet_values(f::GridapDistributed.DistributedFESpace,cv) free_values, dirichlet_values = map_parts(local_views(f),cv) do f, cv Gridap.FESpaces.gather_free_and_dirichlet_values(f,cv) From bcc7072c2388d8ee231855d34d0ff6a872475eee Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 27 Dec 2022 17:42:09 +0100 Subject: [PATCH 55/95] Added more caches for projection rhs --- .../DistributedGridTransferOperators.jl | 28 ++++++++++++------- 1 file changed, 18 insertions(+), 10 deletions(-) diff --git a/src/MultilevelTools/DistributedGridTransferOperators.jl b/src/MultilevelTools/DistributedGridTransferOperators.jl index ed136b8e..b904af91 100644 --- a/src/MultilevelTools/DistributedGridTransferOperators.jl +++ b/src/MultilevelTools/DistributedGridTransferOperators.jl @@ -90,14 +90,20 @@ function _get_projection_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mode:: aH(u,v) = ∫(v⋅u)*dΩH lH(v,uh) = ∫(v⋅uh)*dΩhH - AH = assemble_matrix(aH,UH,VH) + assem = SparseMatrixAssembler(UH,VH) + + AH = assemble_matrix(aH,assem,UH,VH) xH = PVector(0.0,AH.rows) - cache_refine = model_h, Uh, fv_h, dv_h, VH, AH, lH, xH + v = get_fe_basis(VH) + vec_data = collect_cell_vector(VH,lH(v,1.0)) + bH = allocate_vector(assem,vec_data) + + cache_refine = model_h, Uh, fv_h, dv_h, VH, AH, lH, xH, bH, assem else model_h = get_model_before_redist(mh,lev) Uh = get_fe_space_before_redist(sh,lev) - cache_refine = model_h, Uh, nothing, nothing, nothing, nothing, nothing, nothing + cache_refine = model_h, Uh, nothing, nothing, nothing, nothing, nothing, nothing, nothing, nothing end return cache_refine @@ -175,14 +181,15 @@ end # B.2) Restriction, without redistribution, by projection function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:restriction},Val{false},Val{:projection}},x::PVector) cache_refine, cache_redist = A.cache - model_h, Uh, fv_h, dv_h, VH, AH, lH, xH = cache_refine + model_h, Uh, fv_h, dv_h, VH, AH, lH, xH, bH, assem = cache_refine copy!(fv_h,x) # Matrix layout -> FE layout uh = FEFunction(Uh,fv_h,dv_h) - rhs(v) = lH(v,uh) - bH = assemble_vector(rhs,VH) # Matrix layout + v = get_fe_basis(VH) + vec_data = collect_cell_vector(VH,lH(v,uh)) + assemble_vector!(bH,assem,vec_data) # Matrix layout IterativeSolvers.cg!(xH,AH,bH;reltol=1.0e-06) - copy!(y,xH) # TO UNDERSTAND: Why can't we use directly y instead of xH? 
+ copy!(y,xH) return y end @@ -230,7 +237,7 @@ end # D.2) Restriction, with redistribution, by projection function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransferOperator{Val{:restriction},Val{true},Val{:projection}},x::PVector) cache_refine, cache_redist = A.cache - model_h, Uh, fv_h, dv_h, VH, AH, lH, xH = cache_refine + model_h, Uh, fv_h, dv_h, VH, AH, lH, xH, bH, assem = cache_refine fv_h_red, dv_h_red, Uh_red, model_h_red, glue, cache_exchange = cache_redist # 1 - Redistribute from fine partition to coarse partition @@ -240,8 +247,9 @@ function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransfer # 2 - Solve f2c projection coarse partition if !isa(y,Nothing) uh = FEFunction(Uh,fv_h,dv_h) - rhs(v) = lH(v,uh) - bH = assemble_vector(rhs,VH) # Matrix layout + v = get_fe_basis(VH) + vec_data = collect_cell_vector(VH,lH(v,uh)) + assemble_vector!(bH,assem,vec_data) # Matrix layout IterativeSolvers.cg!(xH,AH,bH;reltol=1.0e-06) copy!(y,xH) end From fa3c473ea07642eadb539f8c953de14d10afa02a Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 30 Dec 2022 12:23:32 +0100 Subject: [PATCH 56/95] Added dof restriction for transfer ops --- .../DistributedGridTransferOperators.jl | 35 ++++++- src/MultilevelTools/RefinementTools.jl | 57 ++++++++++- .../DistributedGridTransferOperatorsTests.jl | 8 ++ test/mpi/RestrictDofsTests.jl | 95 +++++++++++++++++++ test/runtests.jl | 3 +- 5 files changed, 194 insertions(+), 4 deletions(-) create mode 100644 test/mpi/RestrictDofsTests.jl diff --git a/src/MultilevelTools/DistributedGridTransferOperators.jl b/src/MultilevelTools/DistributedGridTransferOperators.jl index b904af91..5cef9aee 100644 --- a/src/MultilevelTools/DistributedGridTransferOperators.jl +++ b/src/MultilevelTools/DistributedGridTransferOperators.jl @@ -30,10 +30,10 @@ function DistributedGridTransferOperator(lev::Int,sh::FESpaceHierarchy,qdegree:: @check lev < num_levels(mh) @check op_type ∈ [:restriction, :prolongation] @check mode ∈ [:solution, :residual] - @check restriction_method ∈ [:projection, :interpolation] + @check restriction_method ∈ [:projection, :interpolation, :dof_mask] # Refinement - if (op_type == :prolongation) || (restriction_method == :interpolation) + if (op_type == :prolongation) || (restriction_method ∈ [:interpolation,:dof_mask]) cache_refine = _get_interpolation_cache(lev,sh,qdegree,mode) else cache_refine = _get_projection_cache(lev,sh,qdegree,mode) @@ -194,6 +194,18 @@ function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:r return y end +# B.3) Restriction, without redistribution, by dof selection (only nodal dofs) +function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:restriction},Val{false},Val{:dof_mask}},x::PVector) + cache_refine, cache_redist = A.cache + model_h, Uh, fv_h, dv_h, UH, fv_H, dv_H = cache_refine + + copy!(fv_h,x) # Matrix layout -> FE layout + restrict_dofs!(fv_H,fv_h,dv_h,Uh,UH,get_adaptivity_glue(model_h)) + copy!(y,fv_H) # FE layout -> Matrix layout + + return y +end + # C) Prolongation, with redistribution function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:prolongation},Val{true}},x::Union{PVector,Nothing}) cache_refine, cache_redist = A.cache @@ -256,3 +268,22 @@ function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransfer return y end + +# D.3) Restriction, with redistribution, by dof selection (only nodal dofs) +function 
LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransferOperator{Val{:restriction},Val{true},Val{:dof_mask}},x::PVector) + cache_refine, cache_redist = A.cache + model_h, Uh, fv_h, dv_h, UH, fv_H, dv_H = cache_refine + fv_h_red, dv_h_red, Uh_red, model_h_red, glue, cache_exchange = cache_redist + + # 1 - Redistribute from fine partition to coarse partition + copy!(fv_h_red,x) + redistribute_free_values!(cache_exchange,fv_h,Uh,fv_h_red,dv_h_red,Uh_red,model_h,glue;reverse=true) + + # 2 - Interpolate in coarse partition + if !isa(y,Nothing) + restrict_dofs!(fv_H,fv_h,dv_h,Uh,UH,get_adaptivity_glue(model_h)) + copy!(y,fv_H) # FE layout -> Matrix layout + end + + return y +end \ No newline at end of file diff --git a/src/MultilevelTools/RefinementTools.jl b/src/MultilevelTools/RefinementTools.jl index 9964eb5e..4625eed2 100644 --- a/src/MultilevelTools/RefinementTools.jl +++ b/src/MultilevelTools/RefinementTools.jl @@ -32,6 +32,9 @@ function DistributedAdaptedDiscreteModel(model::GridapDistributed.AbstractDistri return DistributedAdaptedDiscreteModel(model,parent_models,glues) end +function Gridap.Adaptivity.get_adaptivity_glue(model::DistributedAdaptedDiscreteModel) + return map_parts(Gridap.Adaptivity.get_adaptivity_glue,local_views(model)) +end # DistributedRefinedTriangulations @@ -43,7 +46,7 @@ function Gridap.Adaptivity.change_domain_o2n(c_cell_field, ftrian::GridapDistributed.DistributedTriangulation{Dc,Dp}, glue::AbstractPData{Gridap.Adaptivity.AdaptivityGlue}) where {Dc,Dp} - i_am_in_coarse = (c_cell_field != nothing) + i_am_in_coarse = !isa(c_cell_field, Nothing) fields = map_parts(local_views(ftrian)) do Ω if (i_am_in_coarse) @@ -58,3 +61,55 @@ function Gridap.Adaptivity.change_domain_o2n(c_cell_field, dfield = map_parts(Gridap.Adaptivity.change_domain_o2n,local_views(c_cell_field_fine),local_views(ftrian),glue) return GridapDistributed.DistributedCellField(dfield) end + +# Restriction of dofs + +function restrict_dofs!(fv_c::PVector, + fv_f::PVector, + dv_f::AbstractPData, + U_f ::GridapDistributed.DistributedSingleFieldFESpace, + U_c ::GridapDistributed.DistributedSingleFieldFESpace, + glue::AbstractPData{<:AdaptivityGlue}) + + map_parts(restrict_dofs!,local_views(fv_c),local_views(fv_f),dv_f,local_views(U_f),local_views(U_c),glue) + async_exchange!(fv_c) + + return fv_c +end + +function restrict_dofs!(fv_c::AbstractVector, + fv_f::AbstractVector, + dv_f::AbstractVector, + U_f ::FESpace, + U_c ::FESpace, + glue::AdaptivityGlue) + + fine_cell_ids = get_cell_dof_ids(U_f) + fine_cell_values = Gridap.Arrays.Table(lazy_map(Gridap.Arrays.PosNegReindex(fv_f,dv_f),fine_cell_ids.data),fine_cell_ids.ptrs) + coarse_rrules = Gridap.Adaptivity.get_old_cell_refinement_rules(glue) + f2c_cell_values = Gridap.Adaptivity.f2c_reindex(fine_cell_values,glue) + child_ids = Gridap.Adaptivity.f2c_reindex(glue.n2o_cell_to_child_id,glue) + + f2c_maps = lazy_map(FineToCoarseDofMap,coarse_rrules) + caches = lazy_map(Gridap.Arrays.return_cache,f2c_maps,f2c_cell_values,child_ids) + coarse_cell_values = lazy_map(Gridap.Arrays.evaluate!,caches,f2c_maps,f2c_cell_values,child_ids) + fv_c = gather_free_values!(fv_c,U_c,coarse_cell_values) + + return fv_c +end + +struct FineToCoarseDofMap{A} + rr::A +end + +function Gridap.Arrays.return_cache(m::FineToCoarseDofMap,fine_cell_vals,child_ids) + return fill(0.0,Gridap.Adaptivity.num_subcells(m.rr)) +end + +function Gridap.Arrays.evaluate!(cache,m::FineToCoarseDofMap,fine_cell_vals,child_ids) + fill!(cache,0.0) + for (k,i) in enumerate(child_ids) + 
cache[i] = fine_cell_vals[k][i] + end + return cache +end diff --git a/test/mpi/DistributedGridTransferOperatorsTests.jl b/test/mpi/DistributedGridTransferOperatorsTests.jl index ec5d86db..8c69bbfb 100644 --- a/test/mpi/DistributedGridTransferOperatorsTests.jl +++ b/test/mpi/DistributedGridTransferOperatorsTests.jl @@ -38,6 +38,8 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) restrictions, prolongations = ops ops2 = setup_transfer_operators(trials, qdegree; restriction_method=:interpolation) restrictions2, prolongations2 = ops2 + ops3 = setup_transfer_operators(trials, qdegree; restriction_method=:dof_mask) + restrictions3, prolongations3 = ops3 a(u,v,dΩ) = ∫(∇(v)⋅∇(u))*dΩ l(v,dΩ) = ∫(v⋅u)*dΩ @@ -69,12 +71,18 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) R2 = restrictions2[lev] mul!(yH,R2,xh) + R3 = restrictions3[lev] + mul!(yH,R3,xh) + GridapP4est.i_am_main(parts_h) && println(" > Prolongation") P = prolongations[lev] mul!(yh,P,xH) P2 = prolongations2[lev] mul!(yh,P2,xH) + + P3 = prolongations3[lev] + mul!(yh,P3,xH) end end diff --git a/test/mpi/RestrictDofsTests.jl b/test/mpi/RestrictDofsTests.jl new file mode 100644 index 00000000..ff71b9b2 --- /dev/null +++ b/test/mpi/RestrictDofsTests.jl @@ -0,0 +1,95 @@ +module RestrictDofsTests +using MPI +using Test +using LinearAlgebra +using IterativeSolvers +using FillArrays + +using Gridap +using Gridap.ReferenceFEs +using PartitionedArrays +using GridapDistributed +using GridapP4est + +using GridapSolvers +using GridapSolvers.LinearSolvers + + +u(x) = x[1] + x[2] +f(x) = -Δ(u)(x) + +function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, order) + domain = (0,1,0,1) + num_levels = length(num_parts_x_level) + cparts = generate_subparts(parts,num_parts_x_level[num_levels]) + cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) + coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) + mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) + + qdegree = 2*(order+1) + reffe = ReferenceFE(lagrangian,Float64,order) + tests = TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") + trials = TrialFESpace(tests,u) + + biform(u,v,dΩ) = ∫(∇(v)⋅∇(u))dΩ + liform(v,dΩ) = ∫(v*f)dΩ + smatrices, A, b = compute_hierarchy_matrices(trials,biform,liform,qdegree) + + # Preconditioner + smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0),num_levels-1) + restrictions, prolongations = setup_transfer_operators(trials,qdegree;mode=:residual,restriction_method=:dof_mask) + + gmg = GMGLinearSolver(mh, + smatrices, + prolongations, + restrictions, + pre_smoothers=smoothers, + post_smoothers=smoothers, + maxiter=1, + rtol=1.0e-10, + verbose=false, + mode=:preconditioner) + ss = symbolic_setup(gmg,A) + ns = numerical_setup(ss,A) + + # Solve + x = PVector(0.0,A.cols) + x, history = IterativeSolvers.cg!(x,A,b; + verbose=GridapP4est.i_am_main(parts), + reltol=1.0e-12, + Pl=ns, + log=true) + + # Error norms and print solution + model = get_model(mh,1) + Uh = get_fe_space(trials,1) + Ω = Triangulation(model) + dΩ = Measure(Ω,qdegree) + uh = FEFunction(Uh,x) + e = u-uh + e_l2 = sum(∫(e⋅e)dΩ) + tol = 1.0e-9 + @test e_l2 < tol + if GridapP4est.i_am_main(parts) + println("L2 error = ", e_l2) + end +end + +############################################## + +if !MPI.Initialized() + MPI.Init() +end + +# Parameters +order = 1 +coarse_grid_partition = (2,2) +num_refs_coarse = 2 + +num_parts_x_level = [4,2,1] +ranks = num_parts_x_level[1] 
+prun(main,mpi,ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order) + + +MPI.Finalize() +end diff --git a/test/runtests.jl b/test/runtests.jl index e6f203f1..869d393c 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -37,7 +37,8 @@ function run_tests(testdir) "GMGLinearSolversVectorLaplacianTests.jl", "GMGLinearSolversHDivRTTests.jl", "MUMPSSolversTests.jl", - "GMGLinearSolversMUMPSTests.jl"] + "GMGLinearSolversMUMPSTests.jl", + "RestrictDofsTests.jl"] np = 4 extra_args = "-s 2 2 -r 2" elseif f in ["ModelHierarchiesTests.jl"] From 396e5c34adad399d757eda8790591d763503181a Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 30 Dec 2022 13:26:20 +0100 Subject: [PATCH 57/95] Bugfix --- src/MultilevelTools/RefinementTools.jl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/MultilevelTools/RefinementTools.jl b/src/MultilevelTools/RefinementTools.jl index 4625eed2..4fe7a850 100644 --- a/src/MultilevelTools/RefinementTools.jl +++ b/src/MultilevelTools/RefinementTools.jl @@ -72,7 +72,7 @@ function restrict_dofs!(fv_c::PVector, glue::AbstractPData{<:AdaptivityGlue}) map_parts(restrict_dofs!,local_views(fv_c),local_views(fv_f),dv_f,local_views(U_f),local_views(U_c),glue) - async_exchange!(fv_c) + exchange!(fv_c) return fv_c end From 7ff613b1aa4a9165ea0749c26a8d1d550908bf41 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Sun, 8 Jan 2023 11:38:06 +0100 Subject: [PATCH 58/95] Updated Manifest.toml --- Manifest.toml | 24 ++++++++-------- .../GridapDistributedExtensions.jl | 6 ++-- src/MultilevelTools/ModelHierarchies.jl | 17 +++++++---- src/MultilevelTools/RefinementTools.jl | 28 +++---------------- 4 files changed, 29 insertions(+), 46 deletions(-) diff --git a/Manifest.toml b/Manifest.toml index ff97b2f6..ed8b7c6f 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -32,9 +32,9 @@ version = "1.1.1" [[deps.ArrayInterfaceCore]] deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"] -git-tree-sha1 = "badccc4459ffffb6bce5628461119b7057dec32c" +git-tree-sha1 = "14c3f84a763848906ac681f94cf469a851601d92" uuid = "30b0a656-2188-435a-8636-2ec0e6a096e2" -version = "0.1.27" +version = "0.1.28" [[deps.ArrayLayouts]] deps = ["FillArrays", "LinearAlgebra", "SparseArrays"] @@ -209,15 +209,15 @@ uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820" [[deps.Gridap]] deps = ["AbstractTrees", "BSON", "BlockArrays", "Combinatorics", "DataStructures", "DocStringExtensions", "FastGaussQuadrature", "FileIO", "FillArrays", "ForwardDiff", "JLD2", "JSON", "LineSearches", "LinearAlgebra", "NLsolve", "NearestNeighbors", "PolynomialBases", "QuadGK", "Random", "SparseArrays", "SparseMatricesCSR", "StaticArrays", "Test", "WriteVTK"] -git-tree-sha1 = "6a8d79e9dd4596248b5a1236ec73b61234a109df" +git-tree-sha1 = "b702fa4fbdc42d586f5ab6bbdd621f113f549511" repo-rev = "refined-discrete-models" repo-url = "https://github.com/gridap/Gridap.jl.git" uuid = "56d4f2e9-7ea1-5844-9cf6-b9c51ca7ce8e" -version = "0.17.15" +version = "0.17.16" [[deps.GridapDistributed]] deps = ["FillArrays", "Gridap", "LinearAlgebra", "MPI", "PartitionedArrays", "SparseArrays", "SparseMatricesCSR", "WriteVTK"] -git-tree-sha1 = "b247a9beb4d11d5931c878ffdfdab477bc0f2607" +git-tree-sha1 = "60a9432ee5958967d5401967bf34c04961627401" repo-rev = "p4est-migration" repo-url = "https://github.com/gridap/GridapDistributed.jl.git" uuid = "f9701e48-63b3-45aa-9a63-9bc6c271f355" @@ -225,7 +225,7 @@ version = "0.2.6" [[deps.GridapP4est]] deps = ["ArgParse", "FillArrays", "Gridap", "GridapDistributed", "IterativeSolvers", "Libdl", "LinearAlgebra", 
"MPI", "P4est_wrapper", "PartitionedArrays", "Printf", "Test"] -git-tree-sha1 = "914f28a332b2122d61fb22f37d7e215e9b4a72a0" +git-tree-sha1 = "df1d778b3f6eaba536b3b954de330e5cf2aa7cd6" repo-rev = "p4est-migration" repo-url = "https://github.com/gridap/GridapP4est.jl.git" uuid = "c2c8e14b-f5fd-423d-9666-1dd9ad120af9" @@ -555,9 +555,9 @@ uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" [[deps.RecipesBase]] deps = ["SnoopPrecompile"] -git-tree-sha1 = "18c35ed630d7229c5584b945641a73ca83fb5213" +git-tree-sha1 = "261dddd3b862bd2c940cf6ca4d1c8fe593e457c8" uuid = "3cdcf5f2-1ef4-517c-9805-6587b60abb01" -version = "1.3.2" +version = "1.3.3" [[deps.Reexport]] git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b" @@ -621,9 +621,9 @@ version = "2.1.7" [[deps.StaticArrays]] deps = ["LinearAlgebra", "Random", "StaticArraysCore", "Statistics"] -git-tree-sha1 = "ffc098086f35909741f71ce21d03dadf0d2bfa76" +git-tree-sha1 = "6954a456979f23d05085727adb17c4551c19ecd1" uuid = "90137ffa-7385-5640-81b9-e52037218182" -version = "1.5.11" +version = "1.5.12" [[deps.StaticArraysCore]] git-tree-sha1 = "6b7ba252635a5eff6a0b0664a41ee140a1c9e72a" @@ -700,9 +700,9 @@ version = "1.16.0" [[deps.XML2_jll]] deps = ["Artifacts", "JLLWrappers", "Libdl", "Libiconv_jll", "Pkg", "Zlib_jll"] -git-tree-sha1 = "58443b63fb7e465a8a7210828c91c08b92132dff" +git-tree-sha1 = "93c41695bc1c08c46c5899f4fe06d6ead504bb73" uuid = "02c8fc9c-b97f-50b9-bbe4-9be30ff0a78a" -version = "2.9.14+0" +version = "2.10.3+0" [[deps.Zlib_jll]] deps = ["Libdl"] diff --git a/src/MultilevelTools/GridapDistributedExtensions.jl b/src/MultilevelTools/GridapDistributedExtensions.jl index b58015c3..c12b62bc 100644 --- a/src/MultilevelTools/GridapDistributedExtensions.jl +++ b/src/MultilevelTools/GridapDistributedExtensions.jl @@ -98,14 +98,12 @@ function get_test_space(U::GridapDistributed.DistributedSingleFieldFESpace) spaces = map_parts(local_views(U)) do U U.space end - gids = U.gids - vector_type = U.vector_type - return GridapDistributed.DistributedSingleFieldFESpace(spaces,gids,vector_type) + return GridapDistributed.DistributedSingleFieldFESpace(spaces,U.gids,U.vector_type) end function FESpaces.get_triangulation(f::GridapDistributed.DistributedSingleFieldFESpace,model::GridapDistributed.AbstractDistributedDiscreteModel) trians = map_parts(get_triangulation,local_views(f)) - GridapDistributed.DistributedTriangulation(trians,model) + return GridapDistributed.DistributedTriangulation(trians,model) end # Void GridapDistributed structures diff --git a/src/MultilevelTools/ModelHierarchies.jl b/src/MultilevelTools/ModelHierarchies.jl index 0ba050c2..2d816211 100644 --- a/src/MultilevelTools/ModelHierarchies.jl +++ b/src/MultilevelTools/ModelHierarchies.jl @@ -136,13 +136,18 @@ function convert_to_refined_models(mh::ModelHierarchy) nlevs = num_levels(mh) levels = Vector{ModelHierarchyLevel}(undef,nlevs) for lev in 1:nlevs-1 - model = get_model_before_redist(mh,lev) - parent = get_model(mh,lev+1) - ref_glue = change_parts(mh.levels[lev].ref_glue,get_parts(model);default=void(AdaptivityGlue)) - #ref_glue = mh.levels[lev].ref_glue - model_ref = DistributedAdaptedDiscreteModel(model,parent,ref_glue) + parts = get_level_parts(mh,lev+1) + if i_am_in(parts) + model = get_model_before_redist(mh,lev) + parent = get_model(mh,lev+1) + ref_glue = mh.levels[lev].ref_glue + model_ref = DistributedAdaptedDiscreteModel(model,parent,ref_glue) + else + model = get_model_before_redist(mh,lev) + model_ref = VoidDistributedDiscreteModel(model) + end - levels[lev] = 
ModelHierarchyLevel(lev,model_ref,mh.levels[lev].ref_glue,mh.levels[lev].model_red,mh.levels[lev].red_glue)
   end
 
   levels[nlevs] = mh.levels[nlevs]
diff --git a/src/MultilevelTools/RefinementTools.jl b/src/MultilevelTools/RefinementTools.jl
index 4fe7a850..6b3051a0 100644
--- a/src/MultilevelTools/RefinementTools.jl
+++ b/src/MultilevelTools/RefinementTools.jl
@@ -4,41 +4,21 @@
 const DistributedAdaptedDiscreteModel{Dc,Dp} = GridapDistributed.DistributedDiscreteModel{Dc,Dp,<:AbstractPData{<:AdaptedDiscreteModel{Dc,Dp}}}
 
 function DistributedAdaptedDiscreteModel(model::GridapDistributed.AbstractDistributedDiscreteModel,
-                                         parent_models::AbstractPData{<:DiscreteModel},
+                                         parent::GridapDistributed.AbstractDistributedDiscreteModel,
                                          glue::AbstractPData{<:AdaptivityGlue})
-  models = map_parts(local_views(model),parent_models,glue) do model, parent, glue
+  models = map_parts(local_views(model),local_views(parent),glue) do model, parent, glue
     AdaptedDiscreteModel(model,parent,glue)
   end
   return GridapDistributed.DistributedDiscreteModel(models,get_cell_gids(model))
 end
 
-function DistributedAdaptedDiscreteModel(model::GridapDistributed.AbstractDistributedDiscreteModel,
-                                         parent::GridapDistributed.AbstractDistributedDiscreteModel,
-                                         glue::AbstractPData{<:Union{AdaptivityGlue,Nothing}})
-  mparts = get_parts(model)
-  pparts = get_parts(parent)
-
-  !i_am_in(mparts) && (return VoidDistributedDiscreteModel(model))
-  (mparts === pparts) && (return DistributedAdaptedDiscreteModel(model,local_views(parent),glue))
-
-  parent_models, glues = map_parts(local_views(model)) do m
-    if i_am_in(pparts)
-      parent_models = local_views(parent)
-      parent_models.part, glue.part
-    else
-      void(typeof(m)), void(AdaptivityGlue)
-    end
-  end
-  return DistributedAdaptedDiscreteModel(model,parent_models,glues)
-end
-
 function Gridap.Adaptivity.get_adaptivity_glue(model::DistributedAdaptedDiscreteModel)
   return map_parts(Gridap.Adaptivity.get_adaptivity_glue,local_views(model))
 end
 
-# DistributedRefinedTriangulations
+# DistributedAdaptedTriangulations
 
-const DistributedRefinedTriangulation{Dc,Dp} = GridapDistributed.DistributedTriangulation{Dc,Dp,<:AbstractPData{<:AdaptedTriangulation{Dc,Dp}}}
+const DistributedAdaptedTriangulation{Dc,Dp} = GridapDistributed.DistributedTriangulation{Dc,Dp,<:AbstractPData{<:AdaptedTriangulation{Dc,Dp}}}
 
 # ChangeDomain
 
From c3c18fe882517b01f13104c4883462f52d61fb46 Mon Sep 17 00:00:00 2001
From: JordiManyer
Date: Thu, 12 Jan 2023 12:13:46 +0100
Subject: [PATCH 59/95] Added GMG specialisation for PETScLinearSolvers. We now
 allocate and re-use PETSc vectors.
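
A hedged sketch of the re-use pattern (names as in the diff below; an
illustration, not the full implementation). The PETSc work vectors are
allocated once at setup time,

    rh_petsc = convert(PETScVector, PVector(0.0, mat.cols))
    xh_petsc = convert(PETScVector, PVector(0.0, mat.cols))

so that each application of the coarsest-level solver reduces to a
copy-in / solve / copy-out, with no further PETSc allocations:

    copy!(rh_petsc, rh)
    solve!(xh_petsc, solver_ns, rh_petsc)
    copy!(xh, xh_petsc)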
--- src/LinearSolvers/GMGLinearSolvers.jl | 217 +++++++++++++------------- src/LinearSolvers/LinearSolvers.jl | 1 + 2 files changed, 107 insertions(+), 111 deletions(-) diff --git a/src/LinearSolvers/GMGLinearSolvers.jl b/src/LinearSolvers/GMGLinearSolvers.jl index af0d0b1a..bdc19ceb 100644 --- a/src/LinearSolvers/GMGLinearSolvers.jl +++ b/src/LinearSolvers/GMGLinearSolvers.jl @@ -12,19 +12,16 @@ struct GMGLinearSolver{A,B,C,D,E,F,G,H} <: Gridap.Algebra.LinearSolver mode :: Symbol end -function GMGLinearSolver(mh, - smatrices, - interp, - restrict; - pre_smoothers=Fill(RichardsonSmoother(JacobiLinearSolver(),10),num_levels(mh)-1), - post_smoothers=pre_smoothers, - coarsest_solver=Gridap.Algebra.BackslashSolver(), - maxiter=100, - rtol=1.0e-06, - verbose::Bool=false, - mode=:preconditioner) - - Gridap.Helpers.@check mode ∈ [:preconditioner, :solver] +function GMGLinearSolver(mh,smatrices,interp,restrict; + pre_smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10),num_levels(mh)-1), + post_smoothers = pre_smoothers, + coarsest_solver = Gridap.Algebra.BackslashSolver(), + maxiter = 100, + rtol = 1.0e-06, + verbose::Bool = false, + mode = :preconditioner) + + Gridap.Helpers.@check mode ∈ [:preconditioner,:solver] Gridap.Helpers.@check isa(maxiter,Integer) Gridap.Helpers.@check isa(rtol,Real) @@ -36,17 +33,8 @@ function GMGLinearSolver(mh, F=typeof(coarsest_solver) G=typeof(maxiter) H=typeof(rtol) - GMGLinearSolver{A,B,C,D,E,F,G,H}(mh, - smatrices, - interp, - restrict, - pre_smoothers, - post_smoothers, - coarsest_solver, - maxiter, - rtol, - verbose, - mode) + return GMGLinearSolver{A,B,C,D,E,F,G,H}(mh,smatrices,interp,restrict,pre_smoothers,post_smoothers, + coarsest_solver,maxiter,rtol,verbose,mode) end struct GMGSymbolicSetup <: Gridap.Algebra.SymbolicSetup @@ -95,13 +83,12 @@ function Gridap.Algebra.numerical_setup(ss::GMGSymbolicSetup,mat::AbstractMatrix return GMGNumericalSetup(ss) end -function setup_smoothers_caches(mh,smoothers,smatrices) +function setup_smoothers_caches(mh::ModelHierarchy,smoothers::AbstractVector{<:LinearSolver},smatrices::Vector{<:AbstractMatrix}) Gridap.Helpers.@check length(smoothers) == num_levels(mh)-1 nlevs = num_levels(mh) # Last (i.e., coarsest) level does not need pre-/post-smoothing caches = Vector{Any}(undef,nlevs-1) for i = 1:nlevs-1 - model = get_model(mh,i) parts = get_level_parts(mh,i) if (GridapP4est.i_am_in(parts)) ss = symbolic_setup(smoothers[i], smatrices[i]) @@ -111,32 +98,58 @@ function setup_smoothers_caches(mh,smoothers,smatrices) return caches end -function setup_coarsest_solver_cache(mh,coarsest_solver,smatrices) +function setup_coarsest_solver_cache(mh::ModelHierarchy,coarsest_solver::LinearSolver,smatrices::Vector{<:AbstractMatrix}) cache = nothing nlevs = num_levels(mh) parts = get_level_parts(mh,nlevs) - model = get_model(mh,nlevs) if (GridapP4est.i_am_in(parts)) - if (num_parts(parts) == 1) - cache = map_parts(smatrices[nlevs].owned_owned_values) do Ah + mat = smatrices[nlevs] + if (num_parts(parts) == 1) # Serial + cache = map_parts(mat.owned_owned_values) do Ah ss = symbolic_setup(coarsest_solver, Ah) numerical_setup(ss, Ah) end cache = cache.part - else - ss = symbolic_setup(coarsest_solver, smatrices[nlevs]) - cache = numerical_setup(ss, smatrices[nlevs]) + else # Parallel + ss = symbolic_setup(coarsest_solver, mat) + cache = numerical_setup(ss, mat) + end + end + return cache +end + +function setup_coarsest_solver_cache(mh::ModelHierarchy,coarsest_solver::PETScLinearSolver,smatrices::Vector{<:AbstractMatrix}) + cache = 
nothing
+  nlevs = num_levels(mh)
+  parts = get_level_parts(mh,nlevs)
+  if (GridapP4est.i_am_in(parts))
+    mat   = smatrices[nlevs]
+    if (num_parts(parts) == 1) # Serial
+      cache = map_parts(mat.owned_owned_values) do Ah
+        rh = convert(PETScVector,fill(0.0,size(Ah,2)))
+        xh = convert(PETScVector,fill(0.0,size(Ah,2)))
+        ss = symbolic_setup(coarsest_solver, Ah)
+        ns = numerical_setup(ss, Ah)
+        return ns, xh, rh
+      end
+      cache = cache.part
+    else # Parallel
+      rh = convert(PETScVector,PVector(0.0,mat.cols))
+      xh = convert(PETScVector,PVector(0.0,mat.cols))
+      ss = symbolic_setup(coarsest_solver, mat)
+      ns = numerical_setup(ss, mat)
+      cache = ns, xh, rh
+    end
+  end
+  return cache
+end
+
 function allocate_level_work_vectors(mh::ModelHierarchy,smatrices::Vector{<:AbstractMatrix},lev::Integer)
-  parts = get_level_parts(mh,lev+1)
   dxh  = PVector(0.0, smatrices[lev].cols)
   Adxh = PVector(0.0, smatrices[lev].rows)
-  rh   = PVector(0.0, smatrices[lev].rows)
-  if (GridapP4est.i_am_in(parts))
+
+  cparts = get_level_parts(mh,lev+1)
+  if (GridapP4est.i_am_in(cparts))
     AH  = smatrices[lev+1]
     rH  = PVector(0.0,AH.cols)
     dxH = PVector(0.0,AH.cols)
@@ -147,7 +160,7 @@ function allocate_level_work_vectors(mh,smatrices,lev)
   return dxh, Adxh, dxH, rH
 end
 
-function allocate_work_vectors(mh,smatrices)
+function allocate_work_vectors(mh::ModelHierarchy,smatrices::Vector{<:AbstractMatrix})
   nlevs = num_levels(mh)
   work_vectors = Vector{Any}(undef,nlevs-1)
   for i = 1:nlevs-1
@@ -159,59 +172,58 @@ function allocate_work_vectors(mh,smatrices)
   return work_vectors
 end
 
-function apply_GMG_level!(xh,
-                          rh,
-                          lev,
-                          mh,
-                          smatrices,
-                          restrictions,
-                          interpolations,
-                          pre_smoothers_caches,
-                          post_smoothers_caches,
-                          coarsest_solver_cache,
-                          work_vectors;
-                          verbose=false)
+function solve_coarsest_level!(parts::AbstractPData,::LinearSolver,xh::PVector,rh::PVector,caches)
+  if (GridapP4est.num_parts(parts) == 1)
+    map_parts(xh.owned_values,rh.owned_values) do xh, rh
+      solve!(xh,caches,rh)
+    end
+  else
+    solve!(xh,caches,rh)
+  end
+end
+
+function solve_coarsest_level!(parts::AbstractPData,::PETScLinearSolver,xh::PVector,rh::PVector,caches)
+  solver_ns, xh_petsc, rh_petsc = caches
+  if (GridapP4est.num_parts(parts) == 1)
+    map_parts(xh.owned_values,rh.owned_values) do xh, rh
+      copy!(rh_petsc,rh)
+      solve!(xh_petsc,solver_ns,rh_petsc)
+      copy!(xh,xh_petsc)
+    end
+  else
+    copy!(rh_petsc,rh)
+    solve!(xh_petsc,solver_ns,rh_petsc)
+    copy!(xh,xh_petsc)
+  end
+end
+
+function apply_GMG_level!(lev::Integer,xh::Union{PVector,Nothing},rh::Union{PVector,Nothing},ns::GMGNumericalSetup;verbose=false)
+  mh = ns.solver.mh
   parts = get_level_parts(mh,lev)
   if GridapP4est.i_am_in(parts)
-    if (lev == num_levels(mh))
-      if (GridapP4est.num_parts(parts) == 1)
-        map_parts(xh.owned_values,rh.owned_values) do xh, rh
-          solve!(xh,coarsest_solver_cache,rh)
-        end
-      else
-        solve!(xh,coarsest_solver_cache,rh)
-      end
-    else
-      Ah = smatrices[lev]
-      dxh, Adxh, dxH, rH = work_vectors[lev]
+    if (lev == num_levels(mh))
+      ## Coarsest level
+      coarsest_solver = ns.solver.coarsest_solver
+      coarsest_solver_cache = ns.coarsest_solver_cache
+      solve_coarsest_level!(parts,coarsest_solver,xh,rh,coarsest_solver_cache)
+    else
+      ## General case
+      Ah = ns.solver.smatrices[lev]
+      restrict, interp = ns.solver.restrict[lev], ns.solver.interp[lev]
+      dxh, Adxh, dxH, rH = ns.work_vectors[lev]
 
       # Pre-smooth current solution
-      solve!(xh, pre_smoothers_caches[lev], rh)
+      solve!(xh, ns.pre_smoothers_caches[lev], rh)
 
       # Restrict the residual
mul!(rH,restrictions[lev],rh) - - if !isa(dxH,Nothing) - fill!(dxH,0.0) - end + mul!(rH,restrict,rh) # Apply next_level - apply_GMG_level!(dxH, - rH, - lev+1, - mh, - smatrices, - restrictions, - interpolations, - pre_smoothers_caches, - post_smoothers_caches, - coarsest_solver_cache, - work_vectors; - verbose=verbose) + !isa(dxH,Nothing) && fill!(dxH,0.0) + apply_GMG_level!(lev+1,dxH,rH,ns;verbose=verbose) # Interpolate dxH in finer space - mul!(dxh,interpolations[lev],dxH) + mul!(dxh,interp,dxH) # Update solution xh .= xh .+ dxh @@ -220,33 +232,26 @@ function apply_GMG_level!(xh, rh .= rh .- Adxh # Post-smooth current solution - solve!(xh, post_smoothers_caches[lev], rh) + solve!(xh, ns.post_smoothers_caches[lev], rh) end end end -function Gridap.Algebra.solve!( - x::AbstractVector,ns::GMGNumericalSetup,b::AbstractVector) - - smatrices = ns.solver.smatrices - mh = ns.solver.mh - maxiter = ns.solver.maxiter - rtol = ns.solver.rtol - restrictions = ns.solver.restrict - interpolations = ns.solver.interp - verbose = ns.solver.verbose - mode = ns.solver.mode +function Gridap.Algebra.solve!(x::AbstractVector,ns::GMGNumericalSetup,b::AbstractVector) - pre_smoothers_caches = ns.pre_smoothers_caches - post_smoothers_caches = ns.post_smoothers_caches - coarsest_solver_cache = ns.coarsest_solver_cache - work_vectors = ns.work_vectors + mh = ns.solver.mh + maxiter = ns.solver.maxiter + rtol = ns.solver.rtol + verbose = ns.solver.verbose + mode = ns.solver.mode - if (mode==:preconditioner) + # TODO: rh could definitely be cached + # TODO: When running in preconditioner mode, do we really need to compute the norm? It's a global com.... + if (mode == :preconditioner) fill!(x,0.0) rh = copy(b) else - Ah = smatrices[1] + Ah = ns.solver.smatrices[1] rh = PVector(0.0,Ah.rows) rh .= b .- Ah*x end @@ -263,19 +268,9 @@ function Gridap.Algebra.solve!( end while (current_iter < maxiter) && (rel_res > rtol) - apply_GMG_level!(x, - rh, - 1, - mh, - smatrices, - restrictions, - interpolations, - pre_smoothers_caches, - post_smoothers_caches, - coarsest_solver_cache, - work_vectors; - verbose=verbose) - nrm_r = norm(rh) + apply_GMG_level!(1,x,rh,ns;verbose=verbose) + + nrm_r = norm(rh) rel_res = nrm_r / nrm_r0 current_iter += 1 if GridapP4est.i_am_main(parts) diff --git a/src/LinearSolvers/LinearSolvers.jl b/src/LinearSolvers/LinearSolvers.jl index 33342a21..f8ba2dc2 100644 --- a/src/LinearSolvers/LinearSolvers.jl +++ b/src/LinearSolvers/LinearSolvers.jl @@ -6,6 +6,7 @@ using Gridap using Gridap.Algebra using PartitionedArrays using GridapP4est +using GridapPETSc using GridapSolvers.MultilevelTools From c3081399dfcf320363ebbe1366b2d4ab6a86e971 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 12 Jan 2023 13:35:34 +0100 Subject: [PATCH 60/95] Added finest level cache to reduce allocations --- src/LinearSolvers/GMGLinearSolvers.jl | 47 +++++++++++++++++---------- 1 file changed, 29 insertions(+), 18 deletions(-) diff --git a/src/LinearSolvers/GMGLinearSolvers.jl b/src/LinearSolvers/GMGLinearSolvers.jl index bdc19ceb..135ac1f9 100644 --- a/src/LinearSolvers/GMGLinearSolvers.jl +++ b/src/LinearSolvers/GMGLinearSolvers.jl @@ -45,12 +45,13 @@ function Gridap.Algebra.symbolic_setup(solver::GMGLinearSolver,mat::AbstractMatr return GMGSymbolicSetup(solver) end -struct GMGNumericalSetup{A,B,C,D} <: Gridap.Algebra.NumericalSetup +struct GMGNumericalSetup{A,B,C,D,E} <: Gridap.Algebra.NumericalSetup solver :: GMGLinearSolver - pre_smoothers_caches :: A - post_smoothers_caches :: B - coarsest_solver_cache :: C - work_vectors 
:: D + finest_level_cache :: A + pre_smoothers_caches :: B + post_smoothers_caches :: C + coarsest_solver_cache :: D + work_vectors :: E function GMGNumericalSetup(ss::GMGSymbolicSetup) mh = ss.solver.mh @@ -59,6 +60,7 @@ struct GMGNumericalSetup{A,B,C,D} <: Gridap.Algebra.NumericalSetup smatrices = ss.solver.smatrices coarsest_solver = ss.solver.coarsest_solver + finest_level_cache = setup_finest_level_cache(mh,smatrices) work_vectors = allocate_work_vectors(mh,smatrices) pre_smoothers_caches = setup_smoothers_caches(mh,pre_smoothers,smatrices) if (!(pre_smoothers === post_smoothers)) @@ -67,15 +69,13 @@ struct GMGNumericalSetup{A,B,C,D} <: Gridap.Algebra.NumericalSetup post_smoothers_caches = pre_smoothers_caches end coarsest_solver_cache = setup_coarsest_solver_cache(mh,coarsest_solver,smatrices) - A = typeof(pre_smoothers_caches) - B = typeof(post_smoothers_caches) - C = typeof(coarsest_solver_cache) - D = typeof(work_vectors) - new{A,B,C,D}(ss.solver, - pre_smoothers_caches, - post_smoothers_caches, - coarsest_solver_cache, - work_vectors) + + A = typeof(finest_level_cache) + B = typeof(pre_smoothers_caches) + C = typeof(post_smoothers_caches) + D = typeof(coarsest_solver_cache) + E = typeof(work_vectors) + return new{A,B,C,D,E}(ss.solver,finest_level_cache,pre_smoothers_caches,post_smoothers_caches,coarsest_solver_cache,work_vectors) end end @@ -83,6 +83,17 @@ function Gridap.Algebra.numerical_setup(ss::GMGSymbolicSetup,mat::AbstractMatrix return GMGNumericalSetup(ss) end +function setup_finest_level_cache(mh::ModelHierarchy,smatrices::Vector{<:AbstractMatrix}) + cache = nothing + parts = get_level_parts(mh,1) + if (GridapP4est.i_am_in(parts)) + Ah = smatrices[1] + rh = PVector(0.0, Ah.cols) + cache = rh + end + return cache +end + function setup_smoothers_caches(mh::ModelHierarchy,smoothers::AbstractVector{<:LinearSolver},smatrices::Vector{<:AbstractMatrix}) Gridap.Helpers.@check length(smoothers) == num_levels(mh)-1 nlevs = num_levels(mh) @@ -245,15 +256,15 @@ function Gridap.Algebra.solve!(x::AbstractVector,ns::GMGNumericalSetup,b::Abstra verbose = ns.solver.verbose mode = ns.solver.mode - # TODO: rh could definitely be cached # TODO: When running in preconditioner mode, do we really need to compute the norm? It's a global com.... 
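+  # Note: norm(rh) on a PVector implies a global reduction; in the usual
+  # GMG-as-preconditioner setting (mode == :preconditioner, maxiter == 1) the
+  # convergence check below is redundant, so that reduction could in principle be skipped.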
+ rh = ns.finest_level_cache if (mode == :preconditioner) fill!(x,0.0) - rh = copy(b) + copy!(rh,b) else Ah = ns.solver.smatrices[1] - rh = PVector(0.0,Ah.rows) - rh .= b .- Ah*x + mul!(rh,Ah,x) + rh .= b .- rh end nrm_r0 = norm(rh) From 512f24a69fe357359677bf4295f746bda8dcb7b2 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Sat, 14 Jan 2023 09:39:59 +0100 Subject: [PATCH 61/95] Copied over patch-based smoothers from GridapP4est --- src/LinearSolvers/GMGLinearSolvers.jl | 2 +- .../mpi/PatchDecompositions.jl | 33 ++ src/PatchBasedSmoothers/mpi/PatchFESpaces.jl | 76 ++++ .../seq/PatchBasedLinearSolvers.jl | 77 ++++ .../seq/PatchDecompositions.jl | 254 ++++++++++++++ src/PatchBasedSmoothers/seq/PatchFESpaces.jl | 331 ++++++++++++++++++ 6 files changed, 772 insertions(+), 1 deletion(-) create mode 100644 src/PatchBasedSmoothers/mpi/PatchDecompositions.jl create mode 100644 src/PatchBasedSmoothers/mpi/PatchFESpaces.jl create mode 100644 src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl create mode 100644 src/PatchBasedSmoothers/seq/PatchDecompositions.jl create mode 100644 src/PatchBasedSmoothers/seq/PatchFESpaces.jl diff --git a/src/LinearSolvers/GMGLinearSolvers.jl b/src/LinearSolvers/GMGLinearSolvers.jl index 135ac1f9..39dd132a 100644 --- a/src/LinearSolvers/GMGLinearSolvers.jl +++ b/src/LinearSolvers/GMGLinearSolvers.jl @@ -219,7 +219,7 @@ function apply_GMG_level!(lev::Integer,xh::Union{PVector,Nothing},rh::Union{PVec solve_coarsest_level!(parts,coarsest_solver,xh,rh,coarsest_solver_cache) else ## General case - Ah = ns.solver.smatrices[lev] + Ah = ns.solver.smatrices[lev] restrict, interp = ns.solver.restrict[lev], ns.solver.interp[lev] dxh, Adxh, dxH, rH = ns.work_vectors[lev] diff --git a/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl b/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl new file mode 100644 index 00000000..609b0028 --- /dev/null +++ b/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl @@ -0,0 +1,33 @@ + +struct DistributedPatchDecomposition{Dc,Dp,A,B} <: GridapType + patch_decompositions::A + model::B +end + +function PatchDecomposition(model::GridapDistributed.DistributedDiscreteModel{Dc,Dp}; + Dr=0, + patch_boundary_style::PatchBoundaryStyle=PatchBoundaryExclude()) where {Dc,Dp} + patch_decompositions=map_parts(model.models) do lmodel + PatchDecomposition(lmodel; + Dr=Dr, + patch_boundary_style=patch_boundary_style) + end + A=typeof(patch_decompositions) + B=typeof(model) + DistributedPatchDecomposition{Dc,Dp,A,B}(patch_decompositions,model) +end + +function Gridap.Geometry.Triangulation(a::DistributedPatchDecomposition) + trians=map_parts(a.patch_decompositions) do a + Triangulation(a) + end + GridapDistributed.DistributedTriangulation(trians,a.model) +end + +function get_patch_root_dim(a::DistributedPatchDecomposition) + patch_root_dim=0 + map_parts(a.patch_decompositions) do patch_decomposition + patch_root_dim=patch_decomposition.Dr + end + patch_root_dim +end diff --git a/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl new file mode 100644 index 00000000..608bdfdf --- /dev/null +++ b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl @@ -0,0 +1,76 @@ +# Rationale behind distributed PatchFESpace: +# 1. Patches have an owner. Only owners compute subspace correction. +# If am not owner of a patch, all dofs in my patch become -1. +# 2. Subspace correction on an owned patch may affect DoFs which +# are non-owned. These corrections should be sent to the owner +# process. I.e., NO -> O (reversed) communication. 
[PENDING]
+#    3. Each processor needs to know how many patches "touch" its owned DoFs.
+#       This requires NO->O communication as well. [PENDING]
+
+function PatchFESpace(model::GridapDistributed.DistributedDiscreteModel,
+                      reffe::Tuple{<:Gridap.FESpaces.ReferenceFEName,Any,Any},
+                      conformity::Gridap.FESpaces.Conformity,
+                      patch_decomposition::DistributedPatchDecomposition,
+                      Vh::GridapDistributed.DistributedSingleFieldFESpace)
+  root_gids=get_face_gids(model,get_patch_root_dim(patch_decomposition))
+
+  function f(model,patch_decomposition,Vh,partition)
+    patches_mask = fill(false,length(partition.lid_to_gid))
+    patches_mask[partition.hid_to_lid] .= true # Mask ghost patch roots
+    PatchFESpace(model,
+                 reffe,
+                 conformity,
+                 patch_decomposition,
+                 Vh;
+                 patches_mask=patches_mask)
+  end
+
+  spaces=map_parts(f,
+                   model.models,
+                   patch_decomposition.patch_decompositions,
+                   Vh.spaces,
+                   root_gids.partition)
+  parts=get_part_ids(model.models)
+  nodofs=map_parts(spaces) do space
+    num_free_dofs(space)
+  end
+  ngdofs=sum(nodofs)
+  first_gdof, _ = xscan(+,reduce,nodofs,init=1)
+  # This PRange has no ghost dofs
+  gids = PRange(parts,ngdofs,nodofs,first_gdof)
+  GridapDistributed.DistributedSingleFieldFESpace(spaces,gids,get_vector_type(Vh))
+end
+
+# x \in PatchFESpace
+# y \in SingleFESpace
+function prolongate!(x::PVector,
+                     Ph::GridapDistributed.DistributedSingleFieldFESpace,
+                     y::PVector)
+  parts=get_part_ids(x.owned_values)
+  Gridap.Helpers.@notimplementedif num_parts(parts)!=1
+  map_parts(x.owned_values,Ph.spaces,y.owned_values) do x,Ph,y
+    prolongate!(x,Ph,y)
+  end
+end
+
+function inject!(x::PVector,
+                 Ph::GridapDistributed.DistributedSingleFieldFESpace,
+                 y::PVector,
+                 w::PVector)
+  parts=get_part_ids(x.owned_values)
+  Gridap.Helpers.@notimplementedif num_parts(parts)!=1
+  map_parts(x.owned_values,Ph.spaces,y.owned_values,w.owned_values) do x,Ph,y,w
+    inject!(x,Ph,y,w)
+  end
+end
+
+function compute_weight_operators(Ph::GridapDistributed.DistributedSingleFieldFESpace)
+  parts=get_part_ids(Ph.spaces)
+  Gridap.Helpers.@notimplementedif num_parts(parts)!=1
+  w=PVector(0.0,Ph.gids)
+  map_parts(w.owned_values,Ph.spaces) do w,Ph
+    w .= compute_weight_operators(Ph)
+    #println(w)
+  end
+  w
+end
diff --git a/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl b/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl
new file mode 100644
index 00000000..d6381d46
--- /dev/null
+++ b/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl
@@ -0,0 +1,77 @@
+# On another note. Related to FE assembly. We are going to need:
+# "On the other hand, we can also have methods that receive a patch-cell array and
+# flatten it so that it looks like a cell array (although with repeated cells).
+# Combining the patch-cell local matrices with the flattened cell_dofs, you can use
+# the assembly machinery verbatim if you want to assemble the matrix."
+
+# Another note. During FE assembly we may end up computing the cell matrix of a given cell
+# more than once due to cell overlapping among patches (recall the computation of these
+# matrices is lazy, it occurs on first touch). Can we live with that or should we pay
+# attention to how to avoid this?
I think that Gridap already includes tools for
+# taking advantage of this, I think it is called MemoArray, but it might be something else
+# (not 100% sure, to investigate)
+
+
+struct PatchBasedLinearSolver{A} <: Gridap.Algebra.LinearSolver
+  bilinear_form :: Function
+  Ph            :: A
+  M             :: Gridap.Algebra.LinearSolver
+end
+
+struct PatchBasedSymbolicSetup <: Gridap.Algebra.SymbolicSetup
+  solver :: PatchBasedLinearSolver
+end
+
+function Gridap.Algebra.symbolic_setup(ls::PatchBasedLinearSolver,mat::AbstractMatrix)
+  PatchBasedSymbolicSetup(ls)
+end
+
+struct PatchBasedSmootherNumericalSetup{A,B,C,D,E} <: Gridap.Algebra.NumericalSetup
+  solver :: PatchBasedLinearSolver
+  Ap     :: A
+  nsAp   :: B
+  rp     :: C
+  dxp    :: D
+  w      :: E
+end
+
+function Gridap.Algebra.numerical_setup(ss::PatchBasedSymbolicSetup,A::AbstractMatrix)
+  Ph=ss.solver.Ph
+  assembler=SparseMatrixAssembler(Ph,Ph)
+  Ap=assemble_matrix(ss.solver.bilinear_form,assembler,Ph,Ph)
+  solver = ss.solver.M
+  ssAp = symbolic_setup(solver,Ap)
+  nsAp = numerical_setup(ssAp,Ap)
+  rp  = _allocate_row_vector(Ap)
+  dxp = _allocate_col_vector(Ap)
+  w   = compute_weight_operators(Ph)
+  PatchBasedSmootherNumericalSetup(ss.solver,Ap,nsAp,rp,dxp,w)
+end
+
+function _allocate_col_vector(A::AbstractMatrix)
+  zeros(size(A,2))
+end
+
+function _allocate_row_vector(A::AbstractMatrix)
+  zeros(size(A,1))
+end
+
+function _allocate_col_vector(A::PSparseMatrix)
+  PVector(0.0,A.cols)
+end
+
+function _allocate_row_vector(A::PSparseMatrix)
+  PVector(0.0,A.rows)
+end
+
+function Gridap.Algebra.numerical_setup!(ns::PatchBasedSmootherNumericalSetup, A::AbstractMatrix)
+  Gridap.Helpers.@notimplemented
+end
+
+function Gridap.Algebra.solve!(
+  x::AbstractVector,ns::PatchBasedSmootherNumericalSetup,r::AbstractVector)
+  Ap,nsAp,rp,dxp,w=ns.Ap,ns.nsAp,ns.rp,ns.dxp,ns.w
+  prolongate!(rp,ns.solver.Ph,r)
+  solve!(dxp,nsAp,rp)
+  inject!(x,ns.solver.Ph,dxp,w)
+end
diff --git a/src/PatchBasedSmoothers/seq/PatchDecompositions.jl b/src/PatchBasedSmoothers/seq/PatchDecompositions.jl
new file mode 100644
index 00000000..6517fa15
--- /dev/null
+++ b/src/PatchBasedSmoothers/seq/PatchDecompositions.jl
@@ -0,0 +1,254 @@
+abstract type PatchBoundaryStyle end ;
+struct PatchBoundaryExclude  <: PatchBoundaryStyle end ;
+struct PatchBoundaryInclude  <: PatchBoundaryStyle end ;
+
+# Question: might a patch decomposition involve patches
+# with roots of different topological dimension?
+# This is not currently supported.
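+
+# Illustrative sketch (a hedged 1D example, not used by the code): for a mesh
+# with 5 cells and vertex-rooted patches (Dr=0), get_faces(topology,0,1) yields
+# one patch per vertex, each collecting the cells around it:
+#
+#   patch_cells                 = [[1],[1,2],[2,3],[3,4],[4,5],[5]]
+#   patch_cells_overlapped_mesh = [[1],[2,3],[4,5],[6,7],[8,9],[10]]
+#
+# i.e. cells are duplicated across patches and renumbered consecutively, which
+# is what setup_patch_cells_overlapped_mesh below produces.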
+struct PatchDecomposition{Dc,Dp} <: GridapType + model :: DiscreteModel{Dc,Dp} + Dr :: Int # Topological dim of the face at the root of the patch + patch_cells :: AbstractVector{<:AbstractVector} # Patch+local cell -> cell + patch_cells_overlapped_mesh :: Gridap.Arrays.Table # Patch+local cell -> overlapped cell + patch_cells_faces_on_boundary :: Vector{Gridap.Arrays.Table} # Df + overlapped cell -> faces on +end + +num_patches(a::PatchDecomposition)= length(a.patch_cells_overlapped_mesh.ptrs)-1 +Gridap.Geometry.num_cells(a::PatchDecomposition) = a.patch_cells_overlapped_mesh.data[end] + + +function PatchDecomposition( + model::DiscreteModel{Dc,Dp}; + Dr=0, + patch_boundary_style::PatchBoundaryStyle=PatchBoundaryExclude()) where {Dc,Dp} + Gridap.Helpers.@check 0 <= Dr <= Dc-1 + + grid = get_grid(model) + ctype_reffe = get_reffes(grid) + cell_type = get_cell_type(grid) + d_ctype_num_dfaces = [ map(reffe->num_faces(Gridap.Geometry.get_polytope(reffe),d),ctype_reffe) for d in 0:Dc] + topology = get_grid_topology(model) + + patch_cells=Gridap.Geometry.get_faces(topology,Dr,Dc) + patch_facets=Gridap.Geometry.get_faces(topology,Dr,Dc-1) + patch_cells_overlapped_mesh= + setup_patch_cells_overlapped_mesh(patch_cells) + + patch_cells_faces_on_boundary = allocate_patch_cells_faces_on_boundary( + Dr, + model, + cell_type, + d_ctype_num_dfaces, + patch_cells, + patch_cells_overlapped_mesh) + + + generate_patch_boundary_faces!(model, + patch_cells_faces_on_boundary, + patch_cells, + patch_cells_overlapped_mesh, + patch_facets, + patch_boundary_style) + + PatchDecomposition{Dc,Dp}(model, + Dr, + patch_cells, + patch_cells_overlapped_mesh, + patch_cells_faces_on_boundary) +end + +function Gridap.Geometry.Triangulation(a::PatchDecomposition) + patch_cells=Gridap.Arrays.Table(a.patch_cells) + view(Triangulation(a.model),patch_cells.data) +end + +function setup_patch_cells_overlapped_mesh(patch_cells) + num_patches=length(patch_cells) + cache = array_cache(patch_cells) + ptrs=Vector{Int}(undef,num_patches+1) + ptrs[1]=1 + for patch_id=1:num_patches + cells_around_patch=getindex!(cache,patch_cells,patch_id) + ptrs[patch_id+1]=ptrs[patch_id]+length(cells_around_patch) + end + data=Gridap.Arrays.IdentityVector(ptrs[end]-1) + Gridap.Arrays.Table(data,ptrs) +end + + +function allocate_patch_cells_faces_on_boundary(Dr, + model::DiscreteModel{Dc}, + cell_type, + d_ctype_num_dfaces, + patch_cells, + patch_cells_overlapped_mesh) where {Dc} + patch_cells_faces_on_boundary = Vector{Gridap.Arrays.Table}(undef,Dc) + for d=0:Dc-1 + patch_cells_faces_on_boundary[d+1]= + allocate_cell_overlapped_mesh_lface(Bool, + patch_cells, + patch_cells_overlapped_mesh, + cell_type, + d_ctype_num_dfaces, + d) + end + patch_cells_faces_on_boundary +end + +# Table 2 +# position_of_cell_within_global_array -> sublist of entities associated to that +function allocate_cell_overlapped_mesh_lface(::Type{T}, + patch_cells, + patch_cells_overlapped_mesh, + cell_type, + d_ctype_num_dfaces, + dim) where T<:Number # dim=0,1,...,Dc-1 + n=length(patch_cells_overlapped_mesh.data) # number of cells in the overlapped mesh + ptrs=Vector{Int}(undef,n+1) + ptrs[1]=1 + n=1 + for patch=1:length(patch_cells) + cells_patch=patch_cells[patch] + for cell in cells_patch + ctype = cell_type[cell] + nfaces = d_ctype_num_dfaces[dim+1][ctype] + # To get the cell in the non overlapped mesh + ptrs[n+1]=ptrs[n]+nfaces + n=n+1 + end + end + data=zeros(T,ptrs[n]-1) + Gridap.Arrays.Table(data,ptrs) +end + +function generate_patch_boundary_faces!(model, + 
patch_cells_faces_on_boundary, + patch_cells, + patch_cells_overlapped_mesh, + patch_facets, + patch_boundary_style) + Dc=num_cell_dims(model) + topology=get_grid_topology(model) + labeling=get_face_labeling(model) + num_patches=length(patch_cells.ptrs)-1 + cache_patch_cells=array_cache(patch_cells) + cache_patch_facets=array_cache(patch_facets) + for patch=1:num_patches + current_patch_cells=getindex!(cache_patch_cells,patch_cells,patch) + current_patch_facets=getindex!(cache_patch_facets,patch_facets,patch) + generate_patch_boundary_faces!(patch_cells_faces_on_boundary, + Dc, + topology, + labeling, + patch, + current_patch_cells, + patch_cells_overlapped_mesh, + current_patch_facets, + patch_boundary_style) + end +end + +function generate_patch_boundary_faces!(patch_cells_faces_on_boundary, + Dc, + topology, + face_labeling, + patch, + patch_cells, + patch_cells_overlapped_mesh, + patch_facets, + patch_boundary_style) + + boundary_tag=findfirst(x->(x=="boundary"),face_labeling.tag_to_name) + Gridap.Helpers.@check boundary_tag != nothing + boundary_entities=face_labeling.tag_to_entities[boundary_tag] + + # Cells facets + Df=Dc-1 + cells_facets=Gridap.Geometry.get_faces(topology,Dc,Df) + cache_cells_facets=array_cache(cells_facets) + + # Cells around facets + cells_around_facets=Gridap.Geometry.get_faces(topology,Df,Dc) + cache_cells_around_facets=array_cache(cells_around_facets) + + # Go over all cells in the current patch + for (lpatch_cell,patch_cell) in enumerate(patch_cells) + cell_facets=getindex!(cache_cells_facets,cells_facets,patch_cell) + # Go over the facets (i.e., faces of dim D-1) in the current cell + for (lfacet,facet) in enumerate(cell_facets) + facet_entity=face_labeling.d_to_dface_to_entity[Df+1][facet] + + cells_around_facet=getindex!(cache_cells_around_facets, + cells_around_facets, + facet) + + # Go over the cells around facet + cell_not_in_patch_found=false + for cell_around_facet in cells_around_facet + if !(cell_around_facet in patch_cells) + cell_not_in_patch_found=true + break + end + end + + facet_at_global_boundary = facet_entity in boundary_entities + if (facet_at_global_boundary) + if (facet in patch_facets) + facet_at_patch_boundary = false + else + facet_at_patch_boundary = true + end + elseif (patch_boundary_style isa PatchBoundaryInclude) + facet_at_patch_boundary = false + elseif ((patch_boundary_style isa PatchBoundaryExclude) && cell_not_in_patch_found) + facet_at_patch_boundary = true + else + facet_at_patch_boundary = false + end + + # if (facet_at_neumann_boundary) + # println("XXX") + # println(facet) + # println(length(cells_around_facet)) + # @assert length(cells_around_facet)==1 + # println(cell_not_in_patch_found) + # @assert !cell_not_in_patch_found + # println("YYY") + # @assert !facet_at_boundary + # end + + if (facet_at_patch_boundary) + cell_overlapped_mesh = patch_cells_overlapped_mesh[patch][lpatch_cell] + position=patch_cells_faces_on_boundary[Df+1].ptrs[cell_overlapped_mesh]+lfacet-1 + patch_cells_faces_on_boundary[Df+1].data[position]=true + + # Go over the faces of the lower dimension on the boundary of + # the facet. And then propagate true to all cells around, and + # for each cell around, we need to identify which is the local + # face identifier within that cell + + # Go over the faces on the boundary of the current facet + for d=0:Df-1 + d_faces_on_boundary_of_current_facet=Gridap.Geometry.get_faces(topology,Df,d)[facet] + for f in d_faces_on_boundary_of_current_facet + # # TO-DO: to use caches!!! 
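+              # (Re: the TO-DO above) get_faces extracts a full connectivity table
+              # from the topology at every iteration of this loop; hoisting these
+              # tables and their array caches out of the loops would avoid the
+              # repeated work.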
+ # Locate the local position of f within the cell (lface) + cells_d_faces = Gridap.Geometry.get_faces(topology,Dc,d) + d_faces_cells = Gridap.Geometry.get_faces(topology,d,Dc) + for cell_around_face in d_faces_cells[f] + if (cell_around_face in patch_cells) + cell_d_face = cells_d_faces[cell_around_face] + lface = findfirst((x->x==f),cell_d_face) + lpatch_cell2 = findfirst((x->x==cell_around_face),patch_cells) + cell_overlapped_mesh = + patch_cells_overlapped_mesh[patch][lpatch_cell2] + position=patch_cells_faces_on_boundary[d+1].ptrs[cell_overlapped_mesh]+lface-1 + patch_cells_faces_on_boundary[d+1].data[position]=true + end + end + end + end + end + end + end +end diff --git a/src/PatchBasedSmoothers/seq/PatchFESpaces.jl b/src/PatchBasedSmoothers/seq/PatchFESpaces.jl new file mode 100644 index 00000000..77d0caa8 --- /dev/null +++ b/src/PatchBasedSmoothers/seq/PatchFESpaces.jl @@ -0,0 +1,331 @@ +struct PatchFESpace <: Gridap.FESpaces.SingleFieldFESpace + num_dofs::Int + patch_cell_dofs_ids::Gridap.Arrays.Table + Vh::Gridap.FESpaces.SingleFieldFESpace + patch_decomposition::PatchDecomposition +end + +# INPUT +# [[1, 2]] +# [[1, 2], [2, 3]] +# [[2, 3], [3, 4]] +# [[3, 4], [4, 5]] +# [[4, 5]] + +# OUTPUT +# [[1, 2]] +# [[3, 4], [4, 5]] +# [[6, 7], [7, 8]] +# [[9, 10], [10, 11]] +# [[12, 13]] + +# Negative numbers correspond to Dirichlet DoFs +# in the GLOBAL space. In these examples, we +# are neglecting Dirichlet DoFs in the boundary +# of the patches (assuming they are needed) + +# INPUT +# [[-1, 1]] +# [[-1, 1], [1, 2]] +# [[1, 2], [2, 3]] +# [[2, 3], [3, -2]] +# [[3, -2]] + +# OUTPUT +# [[-1, 1]] +# [[-1, 2], [2, 3]] +# [[4, 5], [5, 6]] +# [[6, 7], [7, -2]] +# [[8, -2]] + + +# Issue: I have to pass model, reffe, and conformity, so that I can +# build the cell_conformity instance. I would have liked to +# avoid that, given that these were already used in order to +# build Vh. However, I cannot extract this info out of Vh!!! 
:-( +function PatchFESpace(model::DiscreteModel, + reffe::Tuple{<:Gridap.FESpaces.ReferenceFEName,Any,Any}, + conformity::Gridap.FESpaces.Conformity, + patch_decomposition::PatchDecomposition, + Vh::Gridap.FESpaces.SingleFieldFESpace; + patches_mask=Fill(false,num_patches(patch_decomposition))) + + cell_reffe = setup_cell_reffe(model,reffe) + cell_conformity = CellConformity(cell_reffe,conformity) + + cell_dofs_ids=get_cell_dof_ids(Vh) + num_cells_overlapped_mesh=num_cells(patch_decomposition) + patch_cell_dofs_ids=allocate_patch_cell_dofs_ids(num_cells_overlapped_mesh, + patch_decomposition.patch_cells, + cell_dofs_ids) + + num_dofs=generate_patch_cell_dofs_ids!(patch_cell_dofs_ids, + get_grid_topology(model), + patch_decomposition.patch_cells, + patch_decomposition.patch_cells_overlapped_mesh, + patch_decomposition.patch_cells_faces_on_boundary, + cell_dofs_ids, + cell_conformity, + patches_mask) + + PatchFESpace(num_dofs,patch_cell_dofs_ids,Vh,patch_decomposition) +end + +Gridap.FESpaces.get_dof_value_type(a::PatchFESpace)=Gridap.FESpaces.get_dof_value_type(a.Vh) +Gridap.FESpaces.get_free_dof_ids(a::PatchFESpace)=Base.OneTo(a.num_dofs) +Gridap.FESpaces.get_cell_dof_ids(a::PatchFESpace)=a.patch_cell_dofs_ids +Gridap.FESpaces.get_cell_dof_ids(a::PatchFESpace,::Triangulation)=a.patch_cell_dofs_ids +Gridap.FESpaces.get_fe_basis(a::PatchFESpace)=get_fe_basis(a.Vh) +Gridap.FESpaces.ConstraintStyle(a::PatchFESpace)=Gridap.FESpaces.UnConstrained() +Gridap.FESpaces.get_vector_type(a::PatchFESpace)=get_vector_type(a.Vh) + +function Gridap.FESpaces.scatter_free_and_dirichlet_values(f::PatchFESpace, + free_values, + dirichlet_values) + lazy_map(Broadcasting(Gridap.Fields.PosNegReindex(free_values,dirichlet_values)), + f.patch_cell_dofs_ids) +end + +function setup_cell_reffe(model::DiscreteModel, + reffe::Tuple{<:Gridap.FESpaces.ReferenceFEName,Any,Any}; kwargs...) + basis, reffe_args,reffe_kwargs = reffe + cell_reffe = ReferenceFE(model,basis,reffe_args...;reffe_kwargs...) +end + +function allocate_patch_cell_dofs_ids(num_cells_overlapped_mesh, + cell_patches, + cell_dof_ids) + + ptrs=Vector{Int}(undef,num_cells_overlapped_mesh+1) + ptrs[1]=1 + cache=array_cache(cell_patches) + cache_cdofids=array_cache(cell_dof_ids) + gcell_overlapped_mesh=1 + for patch=1:length(cell_patches) + cells_patch=getindex!(cache,cell_patches,patch) + for cell in cells_patch + current_cell_dof_ids=getindex!(cache_cdofids,cell_dof_ids,cell) + ptrs[gcell_overlapped_mesh+1]=ptrs[gcell_overlapped_mesh]+length(current_cell_dof_ids) + gcell_overlapped_mesh+=1 + end + end + #println(num_cells_overlapped_mesh, " ", gcell_overlapped_mesh) + Gridap.Helpers.@check num_cells_overlapped_mesh+1 == gcell_overlapped_mesh + data=Vector{Int}(undef,ptrs[end]-1) + Gridap.Arrays.Table(data,ptrs) +end + +function generate_patch_cell_dofs_ids!(patch_cell_dofs_ids, + topology, + patch_cells, + patch_cells_overlapped_mesh, + patch_cells_faces_on_boundary, + cell_dofs_ids, + cell_conformity, + patches_mask) + + cache=array_cache(patch_cells) + num_patches=length(patch_cells) + current_dof=1 + for patch=1:num_patches + current_patch_cells=getindex!(cache,patch_cells,patch) + current_dof=generate_patch_cell_dofs_ids!(patch_cell_dofs_ids, + topology, + patch, + current_patch_cells, + patch_cells_overlapped_mesh, + patch_cells_faces_on_boundary, + cell_dofs_ids, + cell_conformity; + free_dofs_offset=current_dof, + mask=patches_mask[patch]) + end + return current_dof-1 +end + +# TO-THINK/STRESS: +# 1. MultiFieldFESpace case? +# 2. 
FESpaces which are directly defined on physical space? We think this case is covered by
+#    the fact that we are using a CellConformity instance to rely on ownership info.
+# free_dofs_offset : the ID from which we start to assign free DoF IDs upwards
+# Note: we do not actually need to generate a global numbering for Dirichlet DoFs. We can
+#       tag them all with -1, as we are always imposing homogeneous Dirichlet boundary
+#       conditions, and thus there is no need to address the result of interpolating Dirichlet
+#       data into the FE space.
+function generate_patch_cell_dofs_ids!(patch_cell_dofs_ids,
+                                       topology,
+                                       patch::Integer,
+                                       patch_cells::AbstractVector{<:Integer},
+                                       patch_cells_overlapped_mesh::Gridap.Arrays.Table,
+                                       patch_cells_faces_on_boundary,
+                                       global_space_cell_dofs_ids,
+                                       cell_conformity;
+                                       free_dofs_offset=1,
+                                       mask=false)
+
+  patch_global_space_cell_dofs_ids=
+    lazy_map(Broadcasting(Reindex(global_space_cell_dofs_ids)),patch_cells)
+
+  o = patch_cells_overlapped_mesh.ptrs[patch]
+  if mask
+    for lpatch_cell=1:length(patch_cells)
+      cell_overlapped_mesh=patch_cells_overlapped_mesh.data[o+lpatch_cell-1]
+      s,e=patch_cell_dofs_ids.ptrs[cell_overlapped_mesh],
+          patch_cell_dofs_ids.ptrs[cell_overlapped_mesh+1]-1
+      patch_cell_dofs_ids.data[s:e] .= -1
+    end
+  else
+    g2l=Dict{Int,Int}()
+    Dc = length(patch_cells_faces_on_boundary)
+
+    # Loop over cells of the patch (local_cell_id_within_patch)
+    for (lpatch_cell,patch_cell) in enumerate(patch_cells)
+      cell_overlapped_mesh=patch_cells_overlapped_mesh.data[o+lpatch_cell-1]
+      s,e=patch_cell_dofs_ids.ptrs[cell_overlapped_mesh],
+          patch_cell_dofs_ids.ptrs[cell_overlapped_mesh+1]-1
+      current_patch_cell_dofs_ids=view(patch_cell_dofs_ids.data,s:e)
+      face_offset=0
+      ctype = cell_conformity.cell_ctype[patch_cell]
+      for d=0:Dc-1
+        cells_d_faces = Gridap.Geometry.get_faces(topology,Dc,d)
+        cell_d_face = cells_d_faces[patch_cell]
+        #println(patch_cell, " ", patch_cells_faces_on_boundary[d+1][cell_overlapped_mesh])
+        #println(patch_cell, " ", cell_d_face, " ", s:e)
+
+        for (lf,f) in enumerate(cell_d_face)
+          # If current face is on the patch boundary
+          if (patch_cells_faces_on_boundary[d+1][cell_overlapped_mesh][lf])
+            # assign negative indices to DoFs owned by face
+            for ldof in cell_conformity.ctype_lface_own_ldofs[ctype][face_offset+lf]
+              gdof=global_space_cell_dofs_ids[patch_cell][ldof]
+              current_patch_cell_dofs_ids[ldof] = -1
+              # println(ldof)
+            end
+          else
+            # rely on the existing glued info (available at global_space_cell_dof_ids)
+            # (we will need a Dict{Int,Int} to hold the correspondence among global
+            # space and patch cell dofs IDs)
+            for ldof in cell_conformity.ctype_lface_own_ldofs[ctype][face_offset+lf]
+              gdof=global_space_cell_dofs_ids[patch_cell][ldof]
+              if (gdof>0)
+                if gdof in keys(g2l)
+                  current_patch_cell_dofs_ids[ldof] = g2l[gdof]
+                else
+                  g2l[gdof] = free_dofs_offset
+                  current_patch_cell_dofs_ids[ldof] = free_dofs_offset
+                  free_dofs_offset += 1
+                end
+              else
+                current_patch_cell_dofs_ids[ldof] = -1
+              end
+            end
+          end
+        end
+        face_offset += cell_conformity.d_ctype_num_dfaces[d+1][ctype]
+      end
+      # Interior DoFs
+      for ldof in cell_conformity.ctype_lface_own_ldofs[ctype][face_offset+1]
+        # println("ldof: $(ldof) $(length(current_patch_cell_dofs_ids))")
+        current_patch_cell_dofs_ids[ldof] = free_dofs_offset
+        free_dofs_offset += 1
+      end
+    end
+  end
+  return free_dofs_offset
+end
+
+
+# x \in PatchFESpace
+# y \in SingleFESpace
+# TO-DO: Replace PatchFESpace by a proper operator.
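+# A minimal round-trip sketch (hypothetical vectors; `w` is the weight vector
+# built by compute_weight_operators below):
+#
+#   w  = compute_weight_operators(Ph)
+#   yp = zeros(num_free_dofs(Ph))
+#   prolongate!(yp,Ph,y)   # global free values y -> overlapped patch values
+#   # ... patch-local solves update yp ...
+#   inject!(y,Ph,yp,w)     # weighted average of patch values back into y
+#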
+function prolongate!(x::AbstractVector{T},Ph::PatchFESpace,y::AbstractVector{T}) where T + Gridap.Helpers.@check num_free_dofs(Ph.Vh) == length(y) + Gridap.Helpers.@check num_free_dofs(Ph) == length(x) + + # Gather y cell-wise + y_cell_wise=scatter_free_and_dirichlet_values(Ph.Vh, + y, + get_dirichlet_dof_values(Ph.Vh)) + + # Gather y cell-wise in overlapped mesh + y_cell_wise_with_overlap=lazy_map(Broadcasting(Reindex(y_cell_wise)), + Ph.patch_decomposition.patch_cells.data) + + Gridap.FESpaces._free_and_dirichlet_values_fill!( + x, + [1.0], # We need an array of size 1 as we put -1 everywhere at the patch boundaries + array_cache(y_cell_wise_with_overlap), + array_cache(Ph.patch_cell_dofs_ids), + y_cell_wise_with_overlap, + Ph.patch_cell_dofs_ids, + Gridap.Arrays.IdentityVector(length(Ph.patch_cell_dofs_ids))) + +end + +# x \in SingleFESpace +# y \in PatchFESpace +function inject!(x,Ph::PatchFESpace,y) + w = compute_weight_operators(Ph) + inject!(x,Ph::PatchFESpace,y,w) +end + +function inject!(x,Ph::PatchFESpace,y,w) + touched=Dict{Int,Bool}() + cell_mesh_overlapped=1 + cache_patch_cells=array_cache(Ph.patch_decomposition.patch_cells) + cell_dof_ids=get_cell_dof_ids(Ph.Vh) + cache_cell_dof_ids=array_cache(cell_dof_ids) + fill!(x,0.0) + for patch=1:length(Ph.patch_decomposition.patch_cells) + current_patch_cells=getindex!(cache_patch_cells, + Ph.patch_decomposition.patch_cells, + patch) + for cell in current_patch_cells + current_cell_dof_ids=getindex!(cache_cell_dof_ids,cell_dof_ids,cell) + s = Ph.patch_cell_dofs_ids.ptrs[cell_mesh_overlapped] + e = Ph.patch_cell_dofs_ids.ptrs[cell_mesh_overlapped+1]-1 + current_patch_cell_dof_ids=view(Ph.patch_cell_dofs_ids.data,s:e) + for (dof,pdof) in zip(current_cell_dof_ids,current_patch_cell_dof_ids) + if pdof >0 && !(dof in keys(touched)) + touched[dof]=true + x[dof]+=y[pdof]*w[pdof] + end + end + cell_mesh_overlapped+=1 + end + empty!(touched) + end +end + +function compute_weight_operators(Ph::PatchFESpace) + cell_dof_ids=get_cell_dof_ids(Ph.Vh) + cache_cell_dof_ids=array_cache(cell_dof_ids) + cache_patch_cells=array_cache(Ph.patch_decomposition.patch_cells) + + w=zeros(num_free_dofs(Ph.Vh)) + touched=Dict{Int,Bool}() + cell_mesh_overlapped=1 + for patch=1:length(Ph.patch_decomposition.patch_cells) + current_patch_cells=getindex!(cache_patch_cells, + Ph.patch_decomposition.patch_cells, + patch) + for cell in current_patch_cells + current_cell_dof_ids=getindex!(cache_cell_dof_ids,cell_dof_ids,cell) + s = Ph.patch_cell_dofs_ids.ptrs[cell_mesh_overlapped] + e = Ph.patch_cell_dofs_ids.ptrs[cell_mesh_overlapped+1]-1 + current_patch_cell_dof_ids=view(Ph.patch_cell_dofs_ids.data,s:e) + for (dof,pdof) in zip(current_cell_dof_ids,current_patch_cell_dof_ids) + if pdof > 0 && !(dof in keys(touched)) + touched[dof]=true + w[dof]+=1.0 + end + end + cell_mesh_overlapped+=1 + end + empty!(touched) + end + w .= 1.0 ./ w + w_Ph=similar(w,num_free_dofs(Ph)) + prolongate!(w_Ph,Ph,w) + w_Ph +end From 8cf70df7e71b04d6d16899d1e6ab4a1d43fb6f50 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Sat, 14 Jan 2023 14:32:51 +0100 Subject: [PATCH 62/95] Added tests, everything working. 
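
The new sequential test (test/seq/PatchLinearSolverTests.jl, added below)
exercises the full chain. A condensed sketch of what it drives (names as in
the test; `a` stands for the patch-wise Poisson bilinear form):

    model = CartesianDiscreteModel((0.0,1.0,0.0,1.0),(2,3))
    reffe = ReferenceFE(lagrangian,Float64,1)
    Vh    = TestFESpace(model,reffe)
    PD    = PatchDecomposition(model)
    Ph    = PatchFESpace(model,reffe,H1Conformity(),PD,Vh)
    M     = PatchBasedLinearSolver(a,Ph,LUSolver())
    s     = RichardsonSmoother(M,10,1.0/3.0)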
--- src/GridapSolvers.jl | 1 + src/MultilevelTools/GridapFixes.jl | 2 - .../PatchBasedSmoothers.jl | 26 +++ src/PatchBasedSmoothers/mpi/PatchFESpaces.jl | 28 +-- .../seq/PatchBasedLinearSolvers.jl | 18 +- .../seq/PatchDecompositions.jl | 184 ++++++++---------- src/PatchBasedSmoothers/seq/PatchFESpaces.jl | 177 ++++++++--------- test/runtests.jl | 4 + test/seq/PatchLinearSolverTests.jl | 92 +++++++++ 9 files changed, 309 insertions(+), 223 deletions(-) create mode 100644 src/PatchBasedSmoothers/PatchBasedSmoothers.jl create mode 100644 test/seq/PatchLinearSolverTests.jl diff --git a/src/GridapSolvers.jl b/src/GridapSolvers.jl index 595cbe8e..0317137a 100644 --- a/src/GridapSolvers.jl +++ b/src/GridapSolvers.jl @@ -2,6 +2,7 @@ module GridapSolvers include("MultilevelTools/MultilevelTools.jl") include("LinearSolvers/LinearSolvers.jl") + include("PatchBasedSmoothers/PatchBasedSmoothers.jl") using GridapSolvers.MultilevelTools using GridapSolvers.LinearSolvers diff --git a/src/MultilevelTools/GridapFixes.jl b/src/MultilevelTools/GridapFixes.jl index 2689cdd2..1f78e4b5 100644 --- a/src/MultilevelTools/GridapFixes.jl +++ b/src/MultilevelTools/GridapFixes.jl @@ -30,7 +30,6 @@ function Gridap.Adaptivity.FineToCoarseField(fine_fields::AbstractArray{<:Gridap end -""" function Base.map(::typeof(Gridap.Arrays.testitem), a::Tuple{<:AbstractVector{<:AbstractVector{<:VectorValue}},<:AbstractVector{<:Gridap.Fields.LinearCombinationFieldVector}}) a2=Gridap.Arrays.testitem(a[2]) @@ -52,4 +51,3 @@ function Gridap.Geometry.get_glue(trian::BodyFittedTriangulation{Dt},::Val{Dt}) end FaceToFaceGlue(tface_to_mface,tface_to_mface_map,mface_to_tface) end -""" \ No newline at end of file diff --git a/src/PatchBasedSmoothers/PatchBasedSmoothers.jl b/src/PatchBasedSmoothers/PatchBasedSmoothers.jl new file mode 100644 index 00000000..dcaf0d6c --- /dev/null +++ b/src/PatchBasedSmoothers/PatchBasedSmoothers.jl @@ -0,0 +1,26 @@ +module PatchBasedSmoothers + +using FillArrays +using LinearAlgebra +using Gridap +using Gridap.Helpers +using Gridap.Algebra +using Gridap.Arrays +using Gridap.Geometry +using Gridap.FESpaces + +using PartitionedArrays +using GridapDistributed + +export PatchDecomposition +export PatchFESpace +export PatchBasedLinearSolver + +include("seq/PatchDecompositions.jl") +include("seq/PatchFESpaces.jl") +include("seq/PatchBasedLinearSolvers.jl") + +include("mpi/PatchDecompositions.jl") +include("mpi/PatchFESpaces.jl") + +end \ No newline at end of file diff --git a/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl index 608bdfdf..76924b8b 100644 --- a/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl +++ b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl @@ -25,20 +25,22 @@ function PatchFESpace(model::GridapDistributed.DistributedDiscreteModel, patches_mask=patches_mask) end - spaces=map_parts(f, + spaces = map_parts(f, model.models, patch_decomposition.patch_decompositions, Vh.spaces, root_gids.partition) - parts=get_part_ids(model.models) - nodofs=map_parts(spaces) do space + + parts = get_part_ids(model.models) + nodofs = map_parts(spaces) do space num_free_dofs(space) end - ngdofs=sum(nodofs) + ngdofs = sum(nodofs) + first_gdof, _ = xscan(+,reduce,nodofs,init=1) # This PRange has no ghost dofs gids = PRange(parts,ngdofs,nodofs,first_gdof) - GridapDistributed.DistributedSingleFieldFESpace(spaces,gids,get_vector_type(Vh)) + return GridapDistributed.DistributedSingleFieldFESpace(spaces,gids,get_vector_type(Vh)) end # x \in PatchFESpace @@ -48,6 +50,7 @@ function 
prolongate!(x::PVector, y::PVector) parts=get_part_ids(x.owned_values) Gridap.Helpers.@notimplementedif num_parts(parts)!=1 + map_parts(x.owned_values,Ph.spaces,y.owned_values) do x,Ph,y prolongate!(x,Ph,y) end @@ -57,20 +60,21 @@ function inject!(x::PVector, Ph::GridapDistributed.DistributedSingleFieldFESpace, y::PVector, w::PVector) - parts=get_part_ids(x.owned_values) + parts = get_part_ids(x.owned_values) Gridap.Helpers.@notimplementedif num_parts(parts)!=1 + map_parts(x.owned_values,Ph.spaces,y.owned_values,w.owned_values) do x,Ph,y,w inject!(x,Ph,y,w) end end function compute_weight_operators(Ph::GridapDistributed.DistributedSingleFieldFESpace) - parts=get_part_ids(Ph.spaces) - Gridap.Helpers.@notimplementedif num_parts(parts)!=1 - w=PVector(0.0,Ph.gids) - map_parts(w.owned_values,Ph.spaces) do w,Ph + parts = get_part_ids(Ph.spaces) + Gridap.Helpers.@notimplementedif num_parts(parts) != 1 + + w = PVector(0.0,Ph.gids) + map_parts(w.owned_values,Ph.spaces) do w, Ph w .= compute_weight_operators(Ph) - #println(w) end - w + return w end diff --git a/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl b/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl index d6381d46..a9d15761 100644 --- a/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl +++ b/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl @@ -36,15 +36,15 @@ struct PatchBasedSmootherNumericalSetup{A,B,C,D,E} <: Gridap.Algebra.NumericalSe end function Gridap.Algebra.numerical_setup(ss::PatchBasedSymbolicSetup,A::AbstractMatrix) - Ph=ss.solver.Ph - assembler=SparseMatrixAssembler(Ph,Ph) - Ap=assemble_matrix(ss.solver.bilinear_form,assembler,Ph,Ph) + Ph = ss.solver.Ph + assembler = SparseMatrixAssembler(Ph,Ph) + Ap = assemble_matrix(ss.solver.bilinear_form,assembler,Ph,Ph) solver = ss.solver.M ssAp = symbolic_setup(solver,Ap) nsAp = numerical_setup(ssAp,Ap) - rp = _allocate_row_vector(Ap) - dxp = _allocate_col_vector(Ap) - w = compute_weight_operators(Ph) + rp = _allocate_row_vector(Ap) + dxp = _allocate_col_vector(Ap) + w = compute_weight_operators(Ph) PatchBasedSmootherNumericalSetup(ss.solver,Ap,nsAp,rp,dxp,w) end @@ -68,9 +68,9 @@ function Gridap.Algebra.numerical_setup!(ns::PatchBasedSmootherNumericalSetup, A Gridap.Helpers.@notimplemented end -function Gridap.Algebra.solve!( - x::AbstractVector,ns::PatchBasedSmootherNumericalSetup,r::AbstractVector) - Ap,nsAp,rp,dxp,w=ns.Ap,ns.nsAp,ns.rp,ns.dxp,ns.w +function Gridap.Algebra.solve!(x::AbstractVector,ns::PatchBasedSmootherNumericalSetup,r::AbstractVector) + Ap, nsAp, rp, dxp, w = ns.Ap, ns.nsAp, ns.rp, ns.dxp, ns.w + prolongate!(rp,ns.solver.Ph,r) solve!(dxp,nsAp,rp) inject!(x,ns.solver.Ph,dxp,w) diff --git a/src/PatchBasedSmoothers/seq/PatchDecompositions.jl b/src/PatchBasedSmoothers/seq/PatchDecompositions.jl index 6517fa15..df093840 100644 --- a/src/PatchBasedSmoothers/seq/PatchDecompositions.jl +++ b/src/PatchBasedSmoothers/seq/PatchDecompositions.jl @@ -13,8 +13,8 @@ struct PatchDecomposition{Dc,Dp} <: GridapType patch_cells_faces_on_boundary :: Vector{Gridap.Arrays.Table} # Df + overlapped cell -> faces on end -num_patches(a::PatchDecomposition)= length(a.patch_cells_overlapped_mesh.ptrs)-1 -Gridap.Geometry.num_cells(a::PatchDecomposition) = a.patch_cells_overlapped_mesh.data[end] +num_patches(a::PatchDecomposition) = length(a.patch_cells_overlapped_mesh.ptrs)-1 +Gridap.Geometry.num_cells(a::PatchDecomposition) = a.patch_cells_overlapped_mesh.data[end] function PatchDecomposition( @@ -29,10 +29,9 @@ function PatchDecomposition( d_ctype_num_dfaces = [ 
map(reffe->num_faces(Gridap.Geometry.get_polytope(reffe),d),ctype_reffe) for d in 0:Dc] topology = get_grid_topology(model) - patch_cells=Gridap.Geometry.get_faces(topology,Dr,Dc) - patch_facets=Gridap.Geometry.get_faces(topology,Dr,Dc-1) - patch_cells_overlapped_mesh= - setup_patch_cells_overlapped_mesh(patch_cells) + patch_cells = Gridap.Geometry.get_faces(topology,Dr,Dc) + patch_facets = Gridap.Geometry.get_faces(topology,Dr,Dc-1) + patch_cells_overlapped_mesh = setup_patch_cells_overlapped_mesh(patch_cells) patch_cells_faces_on_boundary = allocate_patch_cells_faces_on_boundary( Dr, @@ -42,7 +41,6 @@ function PatchDecomposition( patch_cells, patch_cells_overlapped_mesh) - generate_patch_boundary_faces!(model, patch_cells_faces_on_boundary, patch_cells, @@ -50,32 +48,30 @@ function PatchDecomposition( patch_facets, patch_boundary_style) - PatchDecomposition{Dc,Dp}(model, - Dr, - patch_cells, - patch_cells_overlapped_mesh, - patch_cells_faces_on_boundary) + return PatchDecomposition{Dc,Dp}(model, Dr, + patch_cells, + patch_cells_overlapped_mesh, + patch_cells_faces_on_boundary) end function Gridap.Geometry.Triangulation(a::PatchDecomposition) - patch_cells=Gridap.Arrays.Table(a.patch_cells) - view(Triangulation(a.model),patch_cells.data) + patch_cells = Gridap.Arrays.Table(a.patch_cells) + return view(Triangulation(a.model),patch_cells.data) end function setup_patch_cells_overlapped_mesh(patch_cells) - num_patches=length(patch_cells) + num_patches = length(patch_cells) cache = array_cache(patch_cells) - ptrs=Vector{Int}(undef,num_patches+1) - ptrs[1]=1 - for patch_id=1:num_patches - cells_around_patch=getindex!(cache,patch_cells,patch_id) - ptrs[patch_id+1]=ptrs[patch_id]+length(cells_around_patch) + ptrs = Vector{Int}(undef,num_patches+1) + ptrs[1] = 1 + for patch_id = 1:num_patches + cells_around_patch = getindex!(cache,patch_cells,patch_id) + ptrs[patch_id+1] = ptrs[patch_id] + length(cells_around_patch) end - data=Gridap.Arrays.IdentityVector(ptrs[end]-1) - Gridap.Arrays.Table(data,ptrs) + data = Gridap.Arrays.IdentityVector(ptrs[end]-1) + return Gridap.Arrays.Table(data,ptrs) end - function allocate_patch_cells_faces_on_boundary(Dr, model::DiscreteModel{Dc}, cell_type, @@ -83,16 +79,12 @@ function allocate_patch_cells_faces_on_boundary(Dr, patch_cells, patch_cells_overlapped_mesh) where {Dc} patch_cells_faces_on_boundary = Vector{Gridap.Arrays.Table}(undef,Dc) - for d=0:Dc-1 - patch_cells_faces_on_boundary[d+1]= - allocate_cell_overlapped_mesh_lface(Bool, - patch_cells, - patch_cells_overlapped_mesh, - cell_type, - d_ctype_num_dfaces, - d) + for d = 0:Dc-1 + patch_cells_faces_on_boundary[d+1] = + allocate_cell_overlapped_mesh_lface(Bool, patch_cells, patch_cells_overlapped_mesh, + cell_type, d_ctype_num_dfaces, d) end - patch_cells_faces_on_boundary + return patch_cells_faces_on_boundary end # Table 2 @@ -103,22 +95,21 @@ function allocate_cell_overlapped_mesh_lface(::Type{T}, cell_type, d_ctype_num_dfaces, dim) where T<:Number # dim=0,1,...,Dc-1 - n=length(patch_cells_overlapped_mesh.data) # number of cells in the overlapped mesh - ptrs=Vector{Int}(undef,n+1) - ptrs[1]=1 - n=1 - for patch=1:length(patch_cells) - cells_patch=patch_cells[patch] + n = length(patch_cells_overlapped_mesh.data) # number of cells in the overlapped mesh + ptrs = Vector{Int}(undef,n+1) + + ptrs[1] = 1; n = 1 + for (patch,cells_patch) in enumerate(patch_cells) for cell in cells_patch - ctype = cell_type[cell] - nfaces = d_ctype_num_dfaces[dim+1][ctype] - # To get the cell in the non overlapped mesh - 
ptrs[n+1]=ptrs[n]+nfaces - n=n+1 + ctype = cell_type[cell] + nfaces = d_ctype_num_dfaces[dim+1][ctype] + # To get the cell in the non overlapped mesh + ptrs[n+1] = ptrs[n] + nfaces + n = n + 1 end end - data=zeros(T,ptrs[n]-1) - Gridap.Arrays.Table(data,ptrs) + data = zeros(T,ptrs[n]-1) + return Gridap.Arrays.Table(data,ptrs) end function generate_patch_boundary_faces!(model, @@ -127,15 +118,16 @@ function generate_patch_boundary_faces!(model, patch_cells_overlapped_mesh, patch_facets, patch_boundary_style) - Dc=num_cell_dims(model) - topology=get_grid_topology(model) - labeling=get_face_labeling(model) - num_patches=length(patch_cells.ptrs)-1 - cache_patch_cells=array_cache(patch_cells) - cache_patch_facets=array_cache(patch_facets) - for patch=1:num_patches - current_patch_cells=getindex!(cache_patch_cells,patch_cells,patch) - current_patch_facets=getindex!(cache_patch_facets,patch_facets,patch) + Dc = num_cell_dims(model) + topology = get_grid_topology(model) + labeling = get_face_labeling(model) + num_patches = length(patch_cells.ptrs)-1 + + cache_patch_cells = array_cache(patch_cells) + cache_patch_facets = array_cache(patch_facets) + for patch = 1:num_patches + current_patch_cells = getindex!(cache_patch_cells,patch_cells,patch) + current_patch_facets = getindex!(cache_patch_facets,patch_facets,patch) generate_patch_boundary_faces!(patch_cells_faces_on_boundary, Dc, topology, @@ -158,69 +150,47 @@ function generate_patch_boundary_faces!(patch_cells_faces_on_boundary, patch_facets, patch_boundary_style) - boundary_tag=findfirst(x->(x=="boundary"),face_labeling.tag_to_name) - Gridap.Helpers.@check boundary_tag != nothing - boundary_entities=face_labeling.tag_to_entities[boundary_tag] + boundary_tag = findfirst(x->(x=="boundary"),face_labeling.tag_to_name) + Gridap.Helpers.@check !isa(boundary_tag, Nothing) + boundary_entities = face_labeling.tag_to_entities[boundary_tag] # Cells facets - Df=Dc-1 - cells_facets=Gridap.Geometry.get_faces(topology,Dc,Df) - cache_cells_facets=array_cache(cells_facets) + Df = Dc-1 + cells_facets = Gridap.Geometry.get_faces(topology,Dc,Df) + cache_cells_facets = array_cache(cells_facets) # Cells around facets - cells_around_facets=Gridap.Geometry.get_faces(topology,Df,Dc) - cache_cells_around_facets=array_cache(cells_around_facets) + cells_around_facets = Gridap.Geometry.get_faces(topology,Df,Dc) + cache_cells_around_facets = array_cache(cells_around_facets) # Go over all cells in the current patch for (lpatch_cell,patch_cell) in enumerate(patch_cells) - cell_facets=getindex!(cache_cells_facets,cells_facets,patch_cell) + cell_facets = getindex!(cache_cells_facets,cells_facets,patch_cell) # Go over the facets (i.e., faces of dim D-1) in the current cell for (lfacet,facet) in enumerate(cell_facets) - facet_entity=face_labeling.d_to_dface_to_entity[Df+1][facet] + facet_entity = face_labeling.d_to_dface_to_entity[Df+1][facet] - cells_around_facet=getindex!(cache_cells_around_facets, - cells_around_facets, - facet) + cells_around_facet = getindex!(cache_cells_around_facets,cells_around_facets,facet) - # Go over the cells around facet - cell_not_in_patch_found=false - for cell_around_facet in cells_around_facet - if !(cell_around_facet in patch_cells) - cell_not_in_patch_found=true + # Check if facet has a neighboring cell that does not belong to the patch + cell_not_in_patch_found = false + for c in cells_around_facet + if c ∉ patch_cells + cell_not_in_patch_found = true break end end - facet_at_global_boundary = facet_entity in boundary_entities - if 
(facet_at_global_boundary) - if (facet in patch_facets) - facet_at_patch_boundary = false - else - facet_at_patch_boundary = true - end - elseif (patch_boundary_style isa PatchBoundaryInclude) - facet_at_patch_boundary = false - elseif ((patch_boundary_style isa PatchBoundaryExclude) && cell_not_in_patch_found) - facet_at_patch_boundary = true - else - facet_at_patch_boundary = false - end - - # if (facet_at_neumann_boundary) - # println("XXX") - # println(facet) - # println(length(cells_around_facet)) - # @assert length(cells_around_facet)==1 - # println(cell_not_in_patch_found) - # @assert !cell_not_in_patch_found - # println("YYY") - # @assert !facet_at_boundary - # end + facet_at_global_boundary = (facet_entity ∈ boundary_entities) + A = (facet_at_global_boundary) && (facet ∉ patch_facets) + B = (patch_boundary_style isa PatchBoundaryExclude) && cell_not_in_patch_found + facet_at_patch_boundary = A || B if (facet_at_patch_boundary) + cell_overlapped_mesh = patch_cells_overlapped_mesh[patch][lpatch_cell] - position=patch_cells_faces_on_boundary[Df+1].ptrs[cell_overlapped_mesh]+lfacet-1 - patch_cells_faces_on_boundary[Df+1].data[position]=true + position = patch_cells_faces_on_boundary[Df+1].ptrs[cell_overlapped_mesh]+lfacet-1 + patch_cells_faces_on_boundary[Df+1].data[position] = true # Go over the faces of the lower dimension on the boundary of # the facet. And then propagate true to all cells around, and @@ -228,8 +198,8 @@ function generate_patch_boundary_faces!(patch_cells_faces_on_boundary, # face identifier within that cell # Go over the faces on the boundary of the current facet - for d=0:Df-1 - d_faces_on_boundary_of_current_facet=Gridap.Geometry.get_faces(topology,Df,d)[facet] + for d = 0:Df-1 + d_faces_on_boundary_of_current_facet = Gridap.Geometry.get_faces(topology,Df,d)[facet] for f in d_faces_on_boundary_of_current_facet # # TO-DO: to use caches!!! 
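               # A cache-friendly variant (sketch only; `fcache` is a
               # hypothetical name) would hoist the face tables out of the
               # loops and reuse Gridap's array caches:
               #   faces_tbl   = Gridap.Geometry.get_faces(topology,Dc,d)
               #   fcache      = array_cache(faces_tbl)
               #   cell_d_face = getindex!(fcache,faces_tbl,cell_around_face)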
# Locate the local position of f within the cell (lface) @@ -238,12 +208,12 @@ function generate_patch_boundary_faces!(patch_cells_faces_on_boundary, for cell_around_face in d_faces_cells[f] if (cell_around_face in patch_cells) cell_d_face = cells_d_faces[cell_around_face] - lface = findfirst((x->x==f),cell_d_face) - lpatch_cell2 = findfirst((x->x==cell_around_face),patch_cells) - cell_overlapped_mesh = - patch_cells_overlapped_mesh[patch][lpatch_cell2] - position=patch_cells_faces_on_boundary[d+1].ptrs[cell_overlapped_mesh]+lface-1 - patch_cells_faces_on_boundary[d+1].data[position]=true + lface = findfirst(x -> x==f, cell_d_face) + lpatch_cell2 = findfirst(x -> x==cell_around_face, patch_cells) + + cell_overlapped_mesh = patch_cells_overlapped_mesh[patch][lpatch_cell2] + position = patch_cells_faces_on_boundary[d+1].ptrs[cell_overlapped_mesh]+lface-1 + patch_cells_faces_on_boundary[d+1].data[position] = true end end end diff --git a/src/PatchBasedSmoothers/seq/PatchFESpaces.jl b/src/PatchBasedSmoothers/seq/PatchFESpaces.jl index 77d0caa8..efaa1802 100644 --- a/src/PatchBasedSmoothers/seq/PatchFESpaces.jl +++ b/src/PatchBasedSmoothers/seq/PatchFESpaces.jl @@ -1,8 +1,8 @@ struct PatchFESpace <: Gridap.FESpaces.SingleFieldFESpace - num_dofs::Int - patch_cell_dofs_ids::Gridap.Arrays.Table - Vh::Gridap.FESpaces.SingleFieldFESpace - patch_decomposition::PatchDecomposition + num_dofs :: Int + patch_cell_dofs_ids :: Gridap.Arrays.Table + Vh :: Gridap.FESpaces.SingleFieldFESpace + patch_decomposition :: PatchDecomposition end # INPUT @@ -53,13 +53,13 @@ function PatchFESpace(model::DiscreteModel, cell_reffe = setup_cell_reffe(model,reffe) cell_conformity = CellConformity(cell_reffe,conformity) - cell_dofs_ids=get_cell_dof_ids(Vh) - num_cells_overlapped_mesh=num_cells(patch_decomposition) - patch_cell_dofs_ids=allocate_patch_cell_dofs_ids(num_cells_overlapped_mesh, + cell_dofs_ids = get_cell_dof_ids(Vh) + num_cells_overlapped_mesh = num_cells(patch_decomposition) + patch_cell_dofs_ids = allocate_patch_cell_dofs_ids(num_cells_overlapped_mesh, patch_decomposition.patch_cells, cell_dofs_ids) - num_dofs=generate_patch_cell_dofs_ids!(patch_cell_dofs_ids, + num_dofs = generate_patch_cell_dofs_ids!(patch_cell_dofs_ids, get_grid_topology(model), patch_decomposition.patch_cells, patch_decomposition.patch_cells_overlapped_mesh, @@ -68,7 +68,7 @@ function PatchFESpace(model::DiscreteModel, cell_conformity, patches_mask) - PatchFESpace(num_dofs,patch_cell_dofs_ids,Vh,patch_decomposition) + return PatchFESpace(num_dofs,patch_cell_dofs_ids,Vh,patch_decomposition) end Gridap.FESpaces.get_dof_value_type(a::PatchFESpace)=Gridap.FESpaces.get_dof_value_type(a.Vh) @@ -79,40 +79,35 @@ Gridap.FESpaces.get_fe_basis(a::PatchFESpace)=get_fe_basis(a.Vh) Gridap.FESpaces.ConstraintStyle(a::PatchFESpace)=Gridap.FESpaces.UnConstrained() Gridap.FESpaces.get_vector_type(a::PatchFESpace)=get_vector_type(a.Vh) -function Gridap.FESpaces.scatter_free_and_dirichlet_values(f::PatchFESpace, - free_values, - dirichlet_values) - lazy_map(Broadcasting(Gridap.Fields.PosNegReindex(free_values,dirichlet_values)), - f.patch_cell_dofs_ids) +function Gridap.FESpaces.scatter_free_and_dirichlet_values(f::PatchFESpace,free_values,dirichlet_values) + cell_vals = Gridap.Fields.PosNegReindex(free_values,dirichlet_values) + return lazy_map(Broadcasting(cell_vals),f.patch_cell_dofs_ids) end -function setup_cell_reffe(model::DiscreteModel, - reffe::Tuple{<:Gridap.FESpaces.ReferenceFEName,Any,Any}; kwargs...) 
+function setup_cell_reffe(model::DiscreteModel,reffe::Tuple{<:Gridap.FESpaces.ReferenceFEName,Any,Any}; kwargs...) basis, reffe_args,reffe_kwargs = reffe cell_reffe = ReferenceFE(model,basis,reffe_args...;reffe_kwargs...) + return cell_reffe end -function allocate_patch_cell_dofs_ids(num_cells_overlapped_mesh, - cell_patches, - cell_dof_ids) - - ptrs=Vector{Int}(undef,num_cells_overlapped_mesh+1) - ptrs[1]=1 - cache=array_cache(cell_patches) - cache_cdofids=array_cache(cell_dof_ids) - gcell_overlapped_mesh=1 - for patch=1:length(cell_patches) - cells_patch=getindex!(cache,cell_patches,patch) +function allocate_patch_cell_dofs_ids(num_cells_overlapped_mesh,cell_patches,cell_dof_ids) + cache = array_cache(cell_patches) + cache_cdofids = array_cache(cell_dof_ids) + + ptrs = Vector{Int}(undef,num_cells_overlapped_mesh+1) + ptrs[1] = 1; gcell_overlapped_mesh = 1 + for patch = 1:length(cell_patches) + cells_patch = getindex!(cache,cell_patches,patch) for cell in cells_patch - current_cell_dof_ids=getindex!(cache_cdofids,cell_dof_ids,cell) - ptrs[gcell_overlapped_mesh+1]=ptrs[gcell_overlapped_mesh]+length(current_cell_dof_ids) - gcell_overlapped_mesh+=1 + current_cell_dof_ids = getindex!(cache_cdofids,cell_dof_ids,cell) + ptrs[gcell_overlapped_mesh+1] = ptrs[gcell_overlapped_mesh]+length(current_cell_dof_ids) + gcell_overlapped_mesh += 1 end - end - #println(num_cells_overlapped_mesh, " ", gcell_overlapped_mesh) - Gridap.Helpers.@check num_cells_overlapped_mesh+1 == gcell_overlapped_mesh - data=Vector{Int}(undef,ptrs[end]-1) - Gridap.Arrays.Table(data,ptrs) + end + + Gridap.Helpers.@check num_cells_overlapped_mesh+1 == gcell_overlapped_mesh + data = Vector{Int}(undef,ptrs[end]-1) + return Gridap.Arrays.Table(data,ptrs) end function generate_patch_cell_dofs_ids!(patch_cell_dofs_ids, @@ -124,12 +119,12 @@ function generate_patch_cell_dofs_ids!(patch_cell_dofs_ids, cell_conformity, patches_mask) - cache=array_cache(patch_cells) - num_patches=length(patch_cells) - current_dof=1 - for patch=1:num_patches - current_patch_cells=getindex!(cache,patch_cells,patch) - current_dof=generate_patch_cell_dofs_ids!(patch_cell_dofs_ids, + cache = array_cache(patch_cells) + num_patches = length(patch_cells) + current_dof = 1 + for patch = 1:num_patches + current_patch_cells = getindex!(cache,patch_cells,patch) + current_dof = generate_patch_cell_dofs_ids!(patch_cell_dofs_ids, topology, patch, current_patch_cells, @@ -168,46 +163,43 @@ function generate_patch_cell_dofs_ids!(patch_cell_dofs_ids, o = patch_cells_overlapped_mesh.ptrs[patch] if mask - for lpatch_cell=1:length(patch_cells) - cell_overlapped_mesh=patch_cells_overlapped_mesh.data[o+lpatch_cell-1] - s,e=patch_cell_dofs_ids.ptrs[cell_overlapped_mesh], - patch_cell_dofs_ids.ptrs[cell_overlapped_mesh+1]-1 + for lpatch_cell = 1:length(patch_cells) + cell_overlapped_mesh = patch_cells_overlapped_mesh.data[o+lpatch_cell-1] + s = patch_cell_dofs_ids.ptrs[cell_overlapped_mesh] + e = patch_cell_dofs_ids.ptrs[cell_overlapped_mesh+1]-1 patch_cell_dofs_ids.data[s:e] .= -1 end else - g2l=Dict{Int,Int}() - Dc = length(patch_cells_faces_on_boundary) + g2l = Dict{Int,Int}() + Dc = length(patch_cells_faces_on_boundary) # Loop over cells of the patch (local_cell_id_within_patch) for (lpatch_cell,patch_cell) in enumerate(patch_cells) - cell_overlapped_mesh=patch_cells_overlapped_mesh.data[o+lpatch_cell-1] - s,e=patch_cell_dofs_ids.ptrs[cell_overlapped_mesh], - patch_cell_dofs_ids.ptrs[cell_overlapped_mesh+1]-1 - current_patch_cell_dofs_ids=view(patch_cell_dofs_ids.data,s:e) 
- face_offset=0 + cell_overlapped_mesh = patch_cells_overlapped_mesh.data[o+lpatch_cell-1] + s = patch_cell_dofs_ids.ptrs[cell_overlapped_mesh] + e = patch_cell_dofs_ids.ptrs[cell_overlapped_mesh+1]-1 + current_patch_cell_dofs_ids = view(patch_cell_dofs_ids.data,s:e) + face_offset = 0 ctype = cell_conformity.cell_ctype[patch_cell] - for d=0:Dc-1 + for d = 0:Dc-1 cells_d_faces = Gridap.Geometry.get_faces(topology,Dc,d) cell_d_face = cells_d_faces[patch_cell] - #println(patch_cell, " ", patch_cells_faces_on_boundary[d+1][cell_overlapped_mesh]) - #println(patch_cell, " ", cell_d_face, " ", s:e) for (lf,f) in enumerate(cell_d_face) # If current face is on the patch boundary if (patch_cells_faces_on_boundary[d+1][cell_overlapped_mesh][lf]) # assign negative indices to DoFs owned by face for ldof in cell_conformity.ctype_lface_own_ldofs[ctype][face_offset+lf] - gdof=global_space_cell_dofs_ids[patch_cell][ldof] + gdof = global_space_cell_dofs_ids[patch_cell][ldof] current_patch_cell_dofs_ids[ldof] = -1 - # println(ldof) end else # rely on the existing glued info (available at global_space_cell_dof_ids) # (we will need a Dict{Int,Int} to hold the correspondence among global # space and patch cell dofs IDs) for ldof in cell_conformity.ctype_lface_own_ldofs[ctype][face_offset+lf] - gdof=global_space_cell_dofs_ids[patch_cell][ldof] - if (gdof>0) + gdof = global_space_cell_dofs_ids[patch_cell][ldof] + if (gdof > 0) if gdof in keys(g2l) current_patch_cell_dofs_ids[ldof] = g2l[gdof] else @@ -223,9 +215,9 @@ function generate_patch_cell_dofs_ids!(patch_cell_dofs_ids, end face_offset += cell_conformity.d_ctype_num_dfaces[d+1][ctype] end + # Interior DoFs for ldof in cell_conformity.ctype_lface_own_ldofs[ctype][face_offset+1] - # println("ldof: $(ldof) $(length(current_patch_cell_dofs_ids))") current_patch_cell_dofs_ids[ldof] = free_dofs_offset free_dofs_offset += 1 end @@ -243,12 +235,11 @@ function prolongate!(x::AbstractVector{T},Ph::PatchFESpace,y::AbstractVector{T}) Gridap.Helpers.@check num_free_dofs(Ph) == length(x) # Gather y cell-wise - y_cell_wise=scatter_free_and_dirichlet_values(Ph.Vh, - y, - get_dirichlet_dof_values(Ph.Vh)) + dv = get_dirichlet_dof_values(Ph.Vh) + y_cell_wise = scatter_free_and_dirichlet_values(Ph.Vh,y,dv) # Gather y cell-wise in overlapped mesh - y_cell_wise_with_overlap=lazy_map(Broadcasting(Reindex(y_cell_wise)), + y_cell_wise_with_overlap = lazy_map(Broadcasting(Reindex(y_cell_wise)), Ph.patch_decomposition.patch_cells.data) Gridap.FESpaces._free_and_dirichlet_values_fill!( @@ -259,7 +250,6 @@ function prolongate!(x::AbstractVector{T},Ph::PatchFESpace,y::AbstractVector{T}) y_cell_wise_with_overlap, Ph.patch_cell_dofs_ids, Gridap.Arrays.IdentityVector(length(Ph.patch_cell_dofs_ids))) - end # x \in SingleFESpace @@ -270,54 +260,55 @@ function inject!(x,Ph::PatchFESpace,y) end function inject!(x,Ph::PatchFESpace,y,w) - touched=Dict{Int,Bool}() - cell_mesh_overlapped=1 - cache_patch_cells=array_cache(Ph.patch_decomposition.patch_cells) - cell_dof_ids=get_cell_dof_ids(Ph.Vh) - cache_cell_dof_ids=array_cache(cell_dof_ids) + touched = Dict{Int,Bool}() + cell_mesh_overlapped = 1 + cache_patch_cells = array_cache(Ph.patch_decomposition.patch_cells) + cell_dof_ids = get_cell_dof_ids(Ph.Vh) + cache_cell_dof_ids = array_cache(cell_dof_ids) + fill!(x,0.0) - for patch=1:length(Ph.patch_decomposition.patch_cells) - current_patch_cells=getindex!(cache_patch_cells, + for patch = 1:length(Ph.patch_decomposition.patch_cells) + current_patch_cells = getindex!(cache_patch_cells, 
Ph.patch_decomposition.patch_cells, patch) for cell in current_patch_cells - current_cell_dof_ids=getindex!(cache_cell_dof_ids,cell_dof_ids,cell) + current_cell_dof_ids = getindex!(cache_cell_dof_ids,cell_dof_ids,cell) s = Ph.patch_cell_dofs_ids.ptrs[cell_mesh_overlapped] e = Ph.patch_cell_dofs_ids.ptrs[cell_mesh_overlapped+1]-1 - current_patch_cell_dof_ids=view(Ph.patch_cell_dofs_ids.data,s:e) + current_patch_cell_dof_ids = view(Ph.patch_cell_dofs_ids.data,s:e) for (dof,pdof) in zip(current_cell_dof_ids,current_patch_cell_dof_ids) - if pdof >0 && !(dof in keys(touched)) - touched[dof]=true - x[dof]+=y[pdof]*w[pdof] + if pdof > 0 && !(dof ∈ keys(touched)) + touched[dof] = true + x[dof] += y[pdof] * w[pdof] end end - cell_mesh_overlapped+=1 + cell_mesh_overlapped += 1 end empty!(touched) end end function compute_weight_operators(Ph::PatchFESpace) - cell_dof_ids=get_cell_dof_ids(Ph.Vh) - cache_cell_dof_ids=array_cache(cell_dof_ids) - cache_patch_cells=array_cache(Ph.patch_decomposition.patch_cells) - - w=zeros(num_free_dofs(Ph.Vh)) - touched=Dict{Int,Bool}() - cell_mesh_overlapped=1 - for patch=1:length(Ph.patch_decomposition.patch_cells) - current_patch_cells=getindex!(cache_patch_cells, + cell_dof_ids = get_cell_dof_ids(Ph.Vh) + cache_cell_dof_ids = array_cache(cell_dof_ids) + cache_patch_cells = array_cache(Ph.patch_decomposition.patch_cells) + + w = zeros(num_free_dofs(Ph.Vh)) + touched = Dict{Int,Bool}() + cell_mesh_overlapped = 1 + for patch = 1:length(Ph.patch_decomposition.patch_cells) + current_patch_cells = getindex!(cache_patch_cells, Ph.patch_decomposition.patch_cells, patch) for cell in current_patch_cells - current_cell_dof_ids=getindex!(cache_cell_dof_ids,cell_dof_ids,cell) + current_cell_dof_ids = getindex!(cache_cell_dof_ids,cell_dof_ids,cell) s = Ph.patch_cell_dofs_ids.ptrs[cell_mesh_overlapped] e = Ph.patch_cell_dofs_ids.ptrs[cell_mesh_overlapped+1]-1 - current_patch_cell_dof_ids=view(Ph.patch_cell_dofs_ids.data,s:e) + current_patch_cell_dof_ids = view(Ph.patch_cell_dofs_ids.data,s:e) for (dof,pdof) in zip(current_cell_dof_ids,current_patch_cell_dof_ids) if pdof > 0 && !(dof in keys(touched)) - touched[dof]=true - w[dof]+=1.0 + touched[dof] = true + w[dof] += 1.0 end end cell_mesh_overlapped+=1 @@ -325,7 +316,7 @@ function compute_weight_operators(Ph::PatchFESpace) empty!(touched) end w .= 1.0 ./ w - w_Ph=similar(w,num_free_dofs(Ph)) + w_Ph = similar(w,num_free_dofs(Ph)) prolongate!(w_Ph,Ph,w) - w_Ph + return w_Ph end diff --git a/test/runtests.jl b/test/runtests.jl index 869d393c..6d1d4e4a 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -63,5 +63,9 @@ function run_tests(testdir) end end +# MPI tests run_tests(@__DIR__) run_tests(joinpath(@__DIR__, "mpi")) + +# Sequential tests +@time @testset "PatchLinearSolverTests" begin include("seq/PatchLinearSolverTests.jl") end diff --git a/test/seq/PatchLinearSolverTests.jl b/test/seq/PatchLinearSolverTests.jl new file mode 100644 index 00000000..9e38869f --- /dev/null +++ b/test/seq/PatchLinearSolverTests.jl @@ -0,0 +1,92 @@ +module PatchLinearSolverTests + using Gridap + using Gridap.Geometry + using Gridap.FESpaces + using Gridap.ReferenceFEs + using FillArrays + using PartitionedArrays + using Test + + using GridapSolvers + using GridapSolvers.PatchBasedSmoothers + + const order=1 + + function returns_PD_Ph_xh_Vh(model) + reffe = ReferenceFE(lagrangian,Float64,order) + # reffe=ReferenceFE(lagrangian,VectorValue{2,Float64},order) @santiagobadia: For Vector Laplacian + Vh = TestFESpace(model,reffe) + PD = 
PatchDecomposition(model)
+    Ph = PatchFESpace(model,reffe,H1Conformity(),PD,Vh)
+    assembler = SparseMatrixAssembler(Ph,Ph)
+    Ωₚ  = Triangulation(PD)
+    dΩₚ = Measure(Ωₚ,2*order+1)
+    a(u,v) = ∫(∇(v)⋅∇(u))*dΩₚ
+    l(v) = ∫(1*v)*dΩₚ
+    # α =1,0; a(u,v)=∫(v⋅u)dΩ+∫(α*∇(v)⊙∇(u))dΩ # @santiagobadia: For vector Laplacian
+    # f(x) = VectorValue(1.0,0.0)
+    # l(v)=∫(v⋅f)dΩ
+    Ah = assemble_matrix(a,assembler,Ph,Ph)
+    fh = assemble_vector(l,assembler,Ph)
+    return PD, Ph, Ah\fh, Vh
+  end
+
+  function compute_matrix_vector(model,Vh)
+    Ω  = Triangulation(model)
+    dΩ = Measure(Ω,2*order+1)
+    a(u,v) = ∫(∇(v)⋅∇(u))*dΩ
+    l(v) = ∫(1*v)*dΩ
+    # α =1,0; a(u,v)=∫(v⋅u)dΩ+∫(α*∇(v)⊙∇(u))dΩ # @santiagobadia: For vector Laplacian
+    # f(x) = VectorValue(1.0,0.0)
+    # l(v)=∫(v⋅f)dΩ
+    assembler = SparseMatrixAssembler(Vh,Vh)
+    Ah = assemble_matrix(a,assembler,Vh,Vh)
+    lh = assemble_vector(l,assembler,Vh)
+    return Ah,lh
+  end
+
+  function test_smoother(PD,Ph,Vh,A,b)
+    Ωₚ  = Triangulation(PD)
+    order = 1
+    dΩₚ = Measure(Ωₚ,2*order+1)
+    a(u,v) = ∫(∇(v)⋅∇(u))*dΩₚ
+    # α =1,0; a(u,v)=∫(v⋅u)dΩ+∫(α*∇(v)⊙∇(u))dΩ # @santiagobadia: For vector Laplacian
+    M = PatchBasedLinearSolver(a,Ph,LUSolver())
+    s = RichardsonSmoother(M,10,1.0/3.0)
+    x = GridapSolvers.PatchBasedSmoothers._allocate_col_vector(A)
+    r = b-A*x
+    solve!(x,s,A,r)
+    return x
+  end
+
+  ##################################################
+
+  domain = (0.0,1.0,0.0,1.0)
+  partition = (2,3)
+
+  model = CartesianDiscreteModel(domain,partition)
+  _,Ph,xh,Vh = returns_PD_Ph_xh_Vh(model)
+
+  parts = get_part_ids(sequential,(1,2))
+  dmodel = CartesianDiscreteModel(parts,domain,partition)
+  _,dPh,dxh,dVh = returns_PD_Ph_xh_Vh(dmodel);
+
+  @test num_free_dofs(Ph) == num_free_dofs(dPh)
+  @test all(dxh.owned_values.parts[1] .≈ xh[1:3])
+  @test all(dxh.owned_values.parts[2] .≈ xh[4:end])
+
+  #################################################
+
+  model = CartesianDiscreteModel(domain,partition)
+  PD,Ph,xh,Vh = returns_PD_Ph_xh_Vh(model)
+  A,b = compute_matrix_vector(model,Vh)
+  x = test_smoother(PD,Ph,Vh,A,b)
+
+  parts = get_part_ids(sequential,(1,1))
+  dmodel = CartesianDiscreteModel(parts,domain,partition)
+  dPD,dPh,dxh,dVh = returns_PD_Ph_xh_Vh(dmodel);
+  dA,db = compute_matrix_vector(dmodel,dVh);
+  dx = test_smoother(dPD,dPh,dVh,dA,db)
+
+  @test all(dx.owned_values.parts[1] .≈ x)
+end

From ef23790f439405e7af56fddd83a84b41e5dbea66 Mon Sep 17 00:00:00 2001
From: JordiManyer
Date: Fri, 20 Jan 2023 17:06:38 +0100
Subject: [PATCH 63/95] Updated with latest changes to PA, GP4est, GDist

- Removed PartitionedArraysExtensions.jl completely
- Removed dependence on GridapP4est within the package
- Removed some of the GridapDistributed extensions
- Added new environment for GridapP4est to tests
---
 Manifest.toml                                 |  43 +++---
 src/LinearSolvers/GMGLinearSolvers.jl         |  22 +--
 .../DistributedGridTransferOperators.jl       |   6 +-
 src/MultilevelTools/FESpaceHierarchies.jl     |   8 +-
 .../GridapDistributedExtensions.jl            |  32 +---
 src/MultilevelTools/MultilevelTools.jl        |   3 +-
 .../PartitionedArraysExtensions.jl            | 113 --------------
 src/MultilevelTools/RedistributeTools.jl      |   4 +-
 src/MultilevelTools/SubpartitioningTools.jl   |  32 ++++
 .../DistributedGridTransferOperatorsTests.jl  | 146 +++++++++---------
 test/mpi/GMGLinearSolversHDivRTTests.jl       | 120 +++++++-------
 test/mpi/GMGLinearSolversLaplacianTests.jl    | 120 +++++++-------
 test/mpi/GMGLinearSolversMUMPSTests.jl        | 114 +++++++-------
 test/mpi/GMGLinearSolversPoissonTests.jl      | 110 ++++++-------
 .../GMGLinearSolversVectorLaplacianTests.jl   | 120 +++++++-------
test/mpi/MUMPSSolversTests.jl | 5 +- test/mpi/ModelHierarchiesTests.jl | 62 ++++---- test/mpi/RedistributeToolsTests.jl | 122 +++++++-------- test/mpi/RefinementToolsTests.jl | 125 +++++++-------- test/mpi/RestrictDofsTests.jl | 110 ++++++------- test/mpi/RichardsonSmoothersTests.jl | 84 +++++----- test/runtests.jl | 4 +- 22 files changed, 696 insertions(+), 809 deletions(-) delete mode 100644 src/MultilevelTools/PartitionedArraysExtensions.jl create mode 100644 src/MultilevelTools/SubpartitioningTools.jl diff --git a/Manifest.toml b/Manifest.toml index ed8b7c6f..db3fd731 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -1,6 +1,6 @@ # This file is machine-generated - editing it directly is not advised -julia_version = "1.8.1" +julia_version = "1.8.5" manifest_format = "2.0" project_hash = "dbb1333b0ace488af82ab2035259c592777d3449" @@ -11,9 +11,9 @@ uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c" version = "1.2.1" [[deps.AbstractTrees]] -git-tree-sha1 = "52b3b436f8f73133d7bc3a6c71ee7ed6ab2ab754" +git-tree-sha1 = "faa260e4cb5aba097a73fab382dd4b5819d8ec8c" uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" -version = "0.4.3" +version = "0.4.4" [[deps.ArgCheck]] git-tree-sha1 = "a3a402a35a2f7e0b87828ccabbd5ebfbebe356b4" @@ -71,9 +71,9 @@ version = "0.4.2" [[deps.ChainRulesCore]] deps = ["Compat", "LinearAlgebra", "SparseArrays"] -git-tree-sha1 = "e7ff6cadf743c098e08fca25c91103ee4303c9bb" +git-tree-sha1 = "c6d890a52d2c4d55d326439580c3b8d0875a77d9" uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" -version = "1.15.6" +version = "1.15.7" [[deps.ChangesOfVariables]] deps = ["ChainRulesCore", "LinearAlgebra", "Test"] @@ -107,7 +107,7 @@ version = "4.5.0" [[deps.CompilerSupportLibraries_jll]] deps = ["Artifacts", "Libdl"] uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae" -version = "0.5.2+0" +version = "1.0.1+0" [[deps.ConstructionBase]] deps = ["LinearAlgebra"] @@ -209,7 +209,7 @@ uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820" [[deps.Gridap]] deps = ["AbstractTrees", "BSON", "BlockArrays", "Combinatorics", "DataStructures", "DocStringExtensions", "FastGaussQuadrature", "FileIO", "FillArrays", "ForwardDiff", "JLD2", "JSON", "LineSearches", "LinearAlgebra", "NLsolve", "NearestNeighbors", "PolynomialBases", "QuadGK", "Random", "SparseArrays", "SparseMatricesCSR", "StaticArrays", "Test", "WriteVTK"] -git-tree-sha1 = "b702fa4fbdc42d586f5ab6bbdd621f113f549511" +git-tree-sha1 = "f3f4a94f53fdfcda51e9cafc2d83ca9b229d6ea7" repo-rev = "refined-discrete-models" repo-url = "https://github.com/gridap/Gridap.jl.git" uuid = "56d4f2e9-7ea1-5844-9cf6-b9c51ca7ce8e" @@ -217,15 +217,15 @@ version = "0.17.16" [[deps.GridapDistributed]] deps = ["FillArrays", "Gridap", "LinearAlgebra", "MPI", "PartitionedArrays", "SparseArrays", "SparseMatricesCSR", "WriteVTK"] -git-tree-sha1 = "60a9432ee5958967d5401967bf34c04961627401" +git-tree-sha1 = "2f998a322a07e7c19312e617868a0c1589727828" repo-rev = "p4est-migration" repo-url = "https://github.com/gridap/GridapDistributed.jl.git" uuid = "f9701e48-63b3-45aa-9a63-9bc6c271f355" version = "0.2.6" [[deps.GridapP4est]] -deps = ["ArgParse", "FillArrays", "Gridap", "GridapDistributed", "IterativeSolvers", "Libdl", "LinearAlgebra", "MPI", "P4est_wrapper", "PartitionedArrays", "Printf", "Test"] -git-tree-sha1 = "df1d778b3f6eaba536b3b954de330e5cf2aa7cd6" +deps = ["ArgParse", "FillArrays", "Gridap", "GridapDistributed", "Libdl", "MPI", "P4est_wrapper", "PartitionedArrays", "Test"] +git-tree-sha1 = "d94678b1a1aa3a18608565b73d271a8a81ec088b" repo-rev = "p4est-migration" repo-url = 
"https://github.com/gridap/GridapP4est.jl.git" uuid = "c2c8e14b-f5fd-423d-9666-1dd9ad120af9" @@ -508,15 +508,15 @@ version = "0.12.3" [[deps.Parsers]] deps = ["Dates", "SnoopPrecompile"] -git-tree-sha1 = "6466e524967496866901a78fca3f2e9ea445a559" +git-tree-sha1 = "8175fc2b118a3755113c8e68084dc1a9e63c61ee" uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" -version = "2.5.2" +version = "2.5.3" [[deps.PartitionedArrays]] deps = ["Distances", "IterativeSolvers", "LinearAlgebra", "MPI", "Printf", "SparseArrays", "SparseMatricesCSR"] -git-tree-sha1 = "94291b7ddeac39816572660383055870b41bca64" +git-tree-sha1 = "7efbdad40c1f4a341d10db6c039495babc602a39" uuid = "5a9dfac6-5c52-46f7-8278-5e2210713be9" -version = "0.2.11" +version = "0.2.14" [[deps.Pkg]] deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"] @@ -541,9 +541,9 @@ uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" [[deps.QuadGK]] deps = ["DataStructures", "LinearAlgebra"] -git-tree-sha1 = "97aa253e65b784fd13e83774cadc95b38011d734" +git-tree-sha1 = "de191bc385072cc6c7ed3ffdc1caeed3f22c74d4" uuid = "1fd47b50-473d-5c70-9696-f719f8f3bcdc" -version = "2.6.0" +version = "2.7.0" [[deps.REPL]] deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"] @@ -596,9 +596,10 @@ uuid = "efcf1570-3423-57d1-acb7-fd33fddbac46" version = "1.1.1" [[deps.SnoopPrecompile]] -git-tree-sha1 = "f604441450a3c0569830946e5b33b78c928e1a85" +deps = ["Preferences"] +git-tree-sha1 = "e760a70afdcd461cf01a575947738d359234665c" uuid = "66db9d55-30c0-4569-8b51-7e840670fc0c" -version = "1.0.1" +version = "1.0.3" [[deps.Sockets]] uuid = "6462fe0b-24de-5631-8697-dd941f90decc" @@ -663,7 +664,7 @@ version = "1.0.0" [[deps.Tar]] deps = ["ArgTools", "SHA"] uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e" -version = "1.10.0" +version = "1.10.1" [[deps.Test]] deps = ["InteractiveUtils", "Logging", "Random", "Serialization"] @@ -676,9 +677,9 @@ version = "1.0.1" [[deps.TranscodingStreams]] deps = ["Random", "Test"] -git-tree-sha1 = "e4bdc63f5c6d62e80eb1c0043fcc0360d5950ff7" +git-tree-sha1 = "94f38103c984f89cf77c402f2a68dbd870f8165f" uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa" -version = "0.9.10" +version = "0.9.11" [[deps.UUIDs]] deps = ["Random", "SHA"] diff --git a/src/LinearSolvers/GMGLinearSolvers.jl b/src/LinearSolvers/GMGLinearSolvers.jl index 39dd132a..b0e7fb75 100644 --- a/src/LinearSolvers/GMGLinearSolvers.jl +++ b/src/LinearSolvers/GMGLinearSolvers.jl @@ -86,7 +86,7 @@ end function setup_finest_level_cache(mh::ModelHierarchy,smatrices::Vector{<:AbstractMatrix}) cache = nothing parts = get_level_parts(mh,1) - if (GridapP4est.i_am_in(parts)) + if i_am_in(parts) Ah = smatrices[1] rh = PVector(0.0, Ah.cols) cache = rh @@ -101,7 +101,7 @@ function setup_smoothers_caches(mh::ModelHierarchy,smoothers::AbstractVector{<:L caches = Vector{Any}(undef,nlevs-1) for i = 1:nlevs-1 parts = get_level_parts(mh,i) - if (GridapP4est.i_am_in(parts)) + if i_am_in(parts) ss = symbolic_setup(smoothers[i], smatrices[i]) caches[i] = numerical_setup(ss, smatrices[i]) end @@ -113,7 +113,7 @@ function setup_coarsest_solver_cache(mh::ModelHierarchy,coarsest_solver::LinearS cache = nothing nlevs = num_levels(mh) parts = get_level_parts(mh,nlevs) - if (GridapP4est.i_am_in(parts)) + if i_am_in(parts) mat = smatrices[nlevs] if (num_parts(parts) == 1) # Serial cache = map_parts(mat.owned_owned_values) do Ah @@ -133,7 +133,7 @@ function 
setup_coarsest_solver_cache(mh::ModelHierarchy,coarsest_solver::PETScLi cache = nothing nlevs = num_levels(mh) parts = get_level_parts(mh,nlevs) - if (GridapP4est.i_am_in(parts)) + if i_am_in(parts) mat = smatrices[nlevs] if (num_parts(parts) == 1) # Serial cache = map_parts(mat.owned_owned_values) do Ah @@ -160,7 +160,7 @@ function allocate_level_work_vectors(mh::ModelHierarchy,smatrices::Vector{<:Abst Adxh = PVector(0.0, smatrices[lev].rows) cparts = get_level_parts(mh,lev+1) - if (GridapP4est.i_am_in(cparts)) + if i_am_in(cparts) AH = smatrices[lev+1] rH = PVector(0.0,AH.cols) dxH = PVector(0.0,AH.cols) @@ -176,7 +176,7 @@ function allocate_work_vectors(mh::ModelHierarchy,smatrices::Vector{<:AbstractMa work_vectors = Vector{Any}(undef,nlevs-1) for i = 1:nlevs-1 parts = get_level_parts(mh,i) - if GridapP4est.i_am_in(parts) + if i_am_in(parts) work_vectors[i] = allocate_level_work_vectors(mh,smatrices,i) end end @@ -184,7 +184,7 @@ function allocate_work_vectors(mh::ModelHierarchy,smatrices::Vector{<:AbstractMa end function solve_coarsest_level!(parts::AbstractPData,::LinearSolver,xh::PVector,rh::PVector,caches) - if (GridapP4est.num_parts(parts) == 1) + if (num_parts(parts) == 1) map_parts(xh.owned_values,rh.owned_values) do xh, rh solve!(xh,caches,rh) end @@ -195,7 +195,7 @@ end function solve_coarsest_level!(parts::AbstractPData,::PETScLinearSolver,xh::PVector,rh::PVector,caches) solver_ns, xh_petsc, rh_petsc = caches - if (GridapP4est.num_parts(parts) == 1) + if (num_parts(parts) == 1) map_parts(xh.owned_values,rh.owned_values) do xh, rh copy!(rh_petsc,rh) solve!(xh_petsc,solver_ns,rh_petsc) @@ -211,7 +211,7 @@ end function apply_GMG_level!(lev::Integer,xh::Union{PVector,Nothing},rh::Union{PVector,Nothing},ns::GMGNumericalSetup;verbose=false) mh = ns.solver.mh parts = get_level_parts(mh,lev) - if GridapP4est.i_am_in(parts) + if i_am_in(parts) if (lev == num_levels(mh)) ## Coarsest level coarsest_solver = ns.solver.coarsest_solver @@ -273,7 +273,7 @@ function Gridap.Algebra.solve!(x::AbstractVector,ns::GMGNumericalSetup,b::Abstra rel_res = nrm_r / nrm_r0 parts = get_level_parts(mh,1) - if GridapP4est.i_am_main(parts) + if i_am_main(parts) @printf "%6s %12s" "Iter" "Rel res\n" @printf "%6i %12.4e\n" current_iter rel_res end @@ -284,7 +284,7 @@ function Gridap.Algebra.solve!(x::AbstractVector,ns::GMGNumericalSetup,b::Abstra nrm_r = norm(rh) rel_res = nrm_r / nrm_r0 current_iter += 1 - if GridapP4est.i_am_main(parts) + if i_am_main(parts) @printf "%6i %12.4e\n" current_iter rel_res end end diff --git a/src/MultilevelTools/DistributedGridTransferOperators.jl b/src/MultilevelTools/DistributedGridTransferOperators.jl index 5cef9aee..1c5d1518 100644 --- a/src/MultilevelTools/DistributedGridTransferOperators.jl +++ b/src/MultilevelTools/DistributedGridTransferOperators.jl @@ -51,7 +51,7 @@ function _get_interpolation_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mod mh = sh.mh cparts = get_level_parts(mh,lev+1) - if GridapP4est.i_am_in(cparts) + if i_am_in(cparts) model_h = get_model_before_redist(mh,lev) Uh = get_fe_space_before_redist(sh,lev) fv_h = PVector(0.0,Uh.gids) @@ -75,7 +75,7 @@ function _get_projection_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mode:: mh = sh.mh cparts = get_level_parts(mh,lev+1) - if GridapP4est.i_am_in(cparts) + if i_am_in(cparts) model_h = get_model_before_redist(mh,lev) Uh = get_fe_space_before_redist(sh,lev) Ωh = get_triangulation(Uh,get_model_before_redist(mh,lev)) @@ -142,7 +142,7 @@ function 
setup_transfer_operators(sh::FESpaceHierarchy,qdegree::Int;kwargs...) prolongations = Vector{DistributedGridTransferOperator}(undef,num_levels(sh)-1) for lev in 1:num_levels(sh)-1 parts = get_level_parts(mh,lev) - if GridapP4est.i_am_in(parts) + if i_am_in(parts) restrictions[lev] = RestrictionOperator(lev,sh,qdegree;kwargs...) prolongations[lev] = ProlongationOperator(lev,sh,qdegree;kwargs...) end diff --git a/src/MultilevelTools/FESpaceHierarchies.jl b/src/MultilevelTools/FESpaceHierarchies.jl index 15395b01..f6e750eb 100644 --- a/src/MultilevelTools/FESpaceHierarchies.jl +++ b/src/MultilevelTools/FESpaceHierarchies.jl @@ -70,7 +70,7 @@ function Gridap.FESpaces.TestFESpace(mh::ModelHierarchy,args...;kwargs...) where test_spaces = Vector{FESpaceHierarchyLevel}(undef,num_levels(mh)) for i = 1:num_levels(mh) parts = get_level_parts(mh,i) - if (GridapP4est.i_am_in(parts)) + if i_am_in(parts) Vh = TestFESpace(get_level(mh,i),args...;kwargs...) test_spaces[i] = Vh end @@ -82,7 +82,7 @@ function Gridap.FESpaces.TrialFESpace(a::FESpaceHierarchy,u) trial_spaces = Vector{FESpaceHierarchyLevel}(undef,num_levels(a.mh)) for i = 1:num_levels(a.mh) parts = get_level_parts(a.mh,i) - if (GridapP4est.i_am_in(parts)) + if i_am_in(parts) Uh = TrialFESpace(a[i],u) trial_spaces[i] = Uh end @@ -94,7 +94,7 @@ function Gridap.FESpaces.TrialFESpace(a::FESpaceHierarchy) trial_spaces = Vector{FESpaceHierarchyLevel}(undef,num_levels(a.mh)) for i = 1:num_levels(a.mh) parts = get_level_parts(a.mh,i) - if (GridapP4est.i_am_in(parts)) + if i_am_in(parts) Uh = TrialFESpace(a[i]) trial_spaces[i] = Uh end @@ -111,7 +111,7 @@ function compute_hierarchy_matrices(trials::FESpaceHierarchy,a::Function,l::Func mats = Vector{PSparseMatrix}(undef,nlevs) for lev in 1:nlevs parts = get_level_parts(mh,lev) - if GridapP4est.i_am_in(parts) + if i_am_in(parts) model = get_model(mh,lev) U = get_fe_space(trials,lev) V = get_test_space(U) diff --git a/src/MultilevelTools/GridapDistributedExtensions.jl b/src/MultilevelTools/GridapDistributedExtensions.jl index c12b62bc..f3e30a40 100644 --- a/src/MultilevelTools/GridapDistributedExtensions.jl +++ b/src/MultilevelTools/GridapDistributedExtensions.jl @@ -70,28 +70,6 @@ function void(::Type{IndexSet}) end """ -# get_parts - -function get_parts(x::GridapDistributed.DistributedDiscreteModel) - return PartitionedArrays.get_part_ids(x.models) -end - -function get_parts(x::GridapDistributed.DistributedTriangulation) - return PartitionedArrays.get_part_ids(x.trians) -end - -function get_parts(x::GridapP4est.OctreeDistributedDiscreteModel) - return x.parts -end - -function get_parts(x::GridapDistributed.RedistributeGlue) - return PartitionedArrays.get_part_ids(x.old2new) -end - -function get_parts(x::GridapDistributed.DistributedSingleFieldFESpace) - return PartitionedArrays.get_part_ids(x.spaces) -end - # DistributedFESpaces function get_test_space(U::GridapDistributed.DistributedSingleFieldFESpace) @@ -120,11 +98,11 @@ function VoidDistributedDiscreteModel(model::GridapDistributed.AbstractDistribut return VoidDistributedDiscreteModel(Dc,Dp,get_parts(model)) end -function get_parts(x::VoidDistributedDiscreteModel) +function GridapDistributed.get_parts(x::VoidDistributedDiscreteModel) return x.parts end -struct VoidDistributedTriangulation{Dc,Dp,A} <: GridapType +struct VoidDistributedTriangulation{Dc,Dp,A} <: GridapDistributed.DistributedGridapType parts::A function VoidDistributedTriangulation(Dc::Int,Dp::Int,parts) A = typeof(parts) @@ -132,7 +110,7 @@ struct VoidDistributedTriangulation{Dc,Dp,A} 
<: GridapType end end -function get_parts(x::VoidDistributedTriangulation) +function GridapDistributed.get_parts(x::VoidDistributedTriangulation) return x.parts end @@ -144,11 +122,11 @@ function Gridap.Geometry.Triangulation(model::VoidDistributedDiscreteModel{Dc,Dp return VoidDistributedTriangulation(Dc,Dp,get_parts(model)) end -struct VoidDistributedFESpace{A} <: GridapType +struct VoidDistributedFESpace{A} <: GridapDistributed.DistributedGridapType parts::A end -function get_parts(x::VoidDistributedFESpace) +function GridapDistributed.get_parts(x::VoidDistributedFESpace) return x.parts end diff --git a/src/MultilevelTools/MultilevelTools.jl b/src/MultilevelTools/MultilevelTools.jl index 6211d518..751605ed 100644 --- a/src/MultilevelTools/MultilevelTools.jl +++ b/src/MultilevelTools/MultilevelTools.jl @@ -12,7 +12,6 @@ using Gridap.FESpaces using Gridap.Adaptivity using PartitionedArrays using GridapDistributed -using GridapP4est import LinearAlgebra: mul! import GridapDistributed: local_views @@ -39,7 +38,7 @@ export RestrictionOperator, ProlongationOperator export setup_transfer_operators export mul! -include("PartitionedArraysExtensions.jl") +include("SubpartitioningTools.jl") include("GridapDistributedExtensions.jl") include("GridapFixes.jl") include("RefinementTools.jl") diff --git a/src/MultilevelTools/PartitionedArraysExtensions.jl b/src/MultilevelTools/PartitionedArraysExtensions.jl deleted file mode 100644 index 102212e0..00000000 --- a/src/MultilevelTools/PartitionedArraysExtensions.jl +++ /dev/null @@ -1,113 +0,0 @@ - -function PartitionedArrays.num_parts(parts::PartitionedArrays.MPIData) - num_parts(parts.comm) -end - -function PartitionedArrays.num_parts(comm::MPI.Comm) - if comm != MPI.COMM_NULL - nparts = MPI.Comm_size(comm) - else - nparts = -1 - end - nparts -end - -function PartitionedArrays.get_part_id(comm::MPI.Comm) - if comm != MPI.COMM_NULL - id = MPI.Comm_rank(comm)+1 - else - id = -1 - end - id -end - -function i_am_in(comm::MPI.Comm) - PartitionedArrays.get_part_id(comm) >=0 -end - -function i_am_in(parts::MPIData) - i_am_in(parts.comm) -end - -function PartitionedArrays.get_part_ids(comm::MPI.Comm) - rank = PartitionedArrays.get_part_id(comm) - nparts = PartitionedArrays.num_parts(comm) - PartitionedArrays.MPIData(rank,comm,(nparts,)) -end - -function PartitionedArrays.get_part_ids(b::MPIBackend,nparts::Union{Int,NTuple{N,Int} where N}) - root_comm = MPI.Comm_dup(MPI.COMM_WORLD) - size = MPI.Comm_size(root_comm) - rank = MPI.Comm_rank(root_comm) - need = prod(nparts) - if size < need - throw("Not enough MPI ranks, please run mpiexec with -n $need (at least)") - elseif size > need - if rank < need - comm = MPI.Comm_split(root_comm, 0, 0) - MPIData(PartitionedArrays.get_part_id(comm),comm,Tuple(nparts)) - else - comm = MPI.Comm_split(root_comm, MPI.MPI_UNDEFINED, MPI.MPI_UNDEFINED) - MPIData(PartitionedArrays.get_part_id(comm),comm,(-1,)) - end - else - comm = root_comm - MPIData(PartitionedArrays.get_part_id(comm),comm,Tuple(nparts)) - end -end - -function PartitionedArrays.prun(driver::Function,b::MPIBackend,nparts::Union{Int,NTuple{N,Int} where N},args...;kwargs...) - if !MPI.Initialized() - MPI.Init() - end - if MPI.Comm_size(MPI.COMM_WORLD) == 1 - part = get_part_ids(b,nparts) - driver(part,args...;kwargs...) - else - try - part = get_part_ids(b,nparts) - if i_am_in(part) - driver(part,args...;kwargs...) 
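# The helpers deleted from PartitionedArraysExtensions.jl above all build on
# one MPI idiom: MPI.Comm_split hands the excluded ranks MPI.COMM_NULL, so
# part ids are 1-based on member ranks and -1 elsewhere, and `i_am_in`
# reduces to a comparison against the null communicator. A condensed,
# standalone version (MPI.jl only; function names local to this sketch):

using MPI

part_id(comm::MPI.Comm) = comm != MPI.COMM_NULL ? MPI.Comm_rank(comm)+1 : -1
is_member(comm::MPI.Comm) = part_id(comm) > 0   # ids are 1,2,... or -1, never 0

function split_first_n(root::MPI.Comm,n::Integer)
  if MPI.Comm_rank(root) < n
    return MPI.Comm_split(root,0,0)             # first n ranks: new communicator
  else
    return MPI.Comm_split(root,MPI.MPI_UNDEFINED,MPI.MPI_UNDEFINED)  # rest: COMM_NULL
  end
end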
- end - catch e - @error "" exception=(e, catch_backtrace()) - if MPI.Initialized() && !MPI.Finalized() - MPI.Abort(MPI.COMM_WORLD,1) - end - end - end - # We are NOT invoking MPI.Finalize() here because we rely on - # MPI.jl, which registers MPI.Finalize() in atexit() -end - -function generate_subparts(root_parts::AbstractPData,subpart_size::Integer) - root_comm = root_parts.comm - rank = MPI.Comm_rank(root_comm) - size = MPI.Comm_size(root_comm) - Gridap.Helpers.@check all(subpart_size .<= size) - Gridap.Helpers.@check all(subpart_size .>= 1) - - if rank < subpart_size - comm = MPI.Comm_split(root_comm, 0, 0) - else - comm = MPI.Comm_split(root_comm, MPI.MPI_UNDEFINED, MPI.MPI_UNDEFINED) - end - return get_part_ids(comm) -end - -function generate_level_parts(root_parts::AbstractPData,last_level_parts::AbstractPData,level_parts_size::Integer) - if level_parts_size == num_parts(last_level_parts) - return last_level_parts - end - return generate_subparts(root_parts,level_parts_size) -end - -function generate_level_parts(root_parts::AbstractPData,num_procs_x_level::Vector{<:Integer}) - num_levels = length(num_procs_x_level) - level_parts = Vector{typeof(parts)}(undef,num_levels) - level_parts[1] = generate_subparts(root_parts,num_procs_x_level[1]) - for l = 2:num_levels - level_parts[l] = generate_level_parts(root_parts,level_parts[l-1],num_procs_x_level[l]) - end - return level_parts -end diff --git a/src/MultilevelTools/RedistributeTools.jl b/src/MultilevelTools/RedistributeTools.jl index 3905edff..54813a44 100644 --- a/src/MultilevelTools/RedistributeTools.jl +++ b/src/MultilevelTools/RedistributeTools.jl @@ -165,7 +165,7 @@ function redistribute_cell_dofs!(caches, # Now that every part knows it's new owned dofs, exchange ghosts new_parts = get_parts(model_new) cell_dof_values_new = change_parts(cell_dof_values_new,new_parts) - if GridapP4est.i_am_in(new_parts) + if i_am_in(new_parts) fgids = get_cell_gids(model_new) exchange!(cell_dof_values_new,fgids.exchanger) end @@ -233,7 +233,7 @@ function redistribute_fe_function(uh_old::Union{GridapDistributed.DistributedSin cell_dof_values_new = redistribute_cell_dofs(cell_dof_values_old,cell_dof_ids_new,model_new,glue;reverse=reverse) # Assemble the new FEFunction - if GridapP4est.i_am_in(get_parts(Uh_new)) + if i_am_in(get_parts(Uh_new)) free_values, dirichlet_values = Gridap.FESpaces.gather_free_and_dirichlet_values(Uh_new,cell_dof_values_new) free_values = PVector(free_values,Uh_new.gids) uh_new = FEFunction(Uh_new,free_values,dirichlet_values) diff --git a/src/MultilevelTools/SubpartitioningTools.jl b/src/MultilevelTools/SubpartitioningTools.jl new file mode 100644 index 00000000..00d80187 --- /dev/null +++ b/src/MultilevelTools/SubpartitioningTools.jl @@ -0,0 +1,32 @@ + +function generate_subparts(root_parts::AbstractPData,subpart_size::Integer) + root_comm = root_parts.comm + rank = MPI.Comm_rank(root_comm) + size = MPI.Comm_size(root_comm) + Gridap.Helpers.@check all(subpart_size .<= size) + Gridap.Helpers.@check all(subpart_size .>= 1) + + if rank < subpart_size + comm = MPI.Comm_split(root_comm, 0, 0) + else + comm = MPI.Comm_split(root_comm, MPI.MPI_UNDEFINED, MPI.MPI_UNDEFINED) + end + return get_part_ids(comm) +end + +function generate_level_parts(root_parts::AbstractPData,last_level_parts::AbstractPData,level_parts_size::Integer) + if level_parts_size == num_parts(last_level_parts) + return last_level_parts + end + return generate_subparts(root_parts,level_parts_size) +end + +function 
generate_level_parts(root_parts::AbstractPData,num_procs_x_level::Vector{<:Integer}) + num_levels = length(num_procs_x_level) + level_parts = Vector{typeof(parts)}(undef,num_levels) + level_parts[1] = generate_subparts(root_parts,num_procs_x_level[1]) + for l = 2:num_levels + level_parts[l] = generate_level_parts(root_parts,level_parts[l-1],num_procs_x_level[l]) + end + return level_parts +end \ No newline at end of file diff --git a/test/mpi/DistributedGridTransferOperatorsTests.jl b/test/mpi/DistributedGridTransferOperatorsTests.jl index 8c69bbfb..4c935dc2 100644 --- a/test/mpi/DistributedGridTransferOperatorsTests.jl +++ b/test/mpi/DistributedGridTransferOperatorsTests.jl @@ -9,92 +9,84 @@ using Test using GridapSolvers using GridapSolvers.MultilevelTools -function model_hierarchy_free!(mh::ModelHierarchy) - for lev in 1:num_levels(mh) - model = get_model(mh,lev) - isa(model,DistributedRefinedDiscreteModel) && (model = model.model) - octree_distributed_discrete_model_free!(model) - end -end - function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) - domain = (0,1,0,1) - num_levels = length(num_parts_x_level) - cparts = generate_subparts(parts,num_parts_x_level[num_levels]) - cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) - coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) - mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) - - # Create Operators: - order = 1 - u(x) = x[1] + x[2] - reffe = ReferenceFE(lagrangian,Float64,order) - - tests = TestFESpace(mh,reffe,dirichlet_tags="boundary") - trials = TrialFESpace(tests,u) - - qdegree = order*2+1 - ops = setup_transfer_operators(trials, qdegree; restriction_method=:projection) - restrictions, prolongations = ops - ops2 = setup_transfer_operators(trials, qdegree; restriction_method=:interpolation) - restrictions2, prolongations2 = ops2 - ops3 = setup_transfer_operators(trials, qdegree; restriction_method=:dof_mask) - restrictions3, prolongations3 = ops3 - - a(u,v,dΩ) = ∫(∇(v)⋅∇(u))*dΩ - l(v,dΩ) = ∫(v⋅u)*dΩ - mats, A, b = compute_hierarchy_matrices(trials,a,l,qdegree) - - for lev in 1:num_levels-1 - parts_h = get_level_parts(mh,lev) - parts_H = get_level_parts(mh,lev+1) - - if GridapP4est.i_am_in(parts_h) - GridapP4est.i_am_main(parts_h) && println("Lev : ", lev) - Ah = mats[lev] - xh = PVector(1.0,Ah.cols) - yh = PVector(0.0,Ah.rows) - - if GridapP4est.i_am_in(parts_H) - AH = mats[lev+1] - xH = PVector(1.0,AH.cols) - yH = PVector(0.0,AH.rows) - else - xH = nothing - yH = nothing + GridapP4est.with(parts) do + domain = (0,1,0,1) + num_levels = length(num_parts_x_level) + cparts = generate_subparts(parts,num_parts_x_level[num_levels]) + cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) + coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) + mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) + + # Create Operators: + order = 1 + u(x) = x[1] + x[2] + reffe = ReferenceFE(lagrangian,Float64,order) + + tests = TestFESpace(mh,reffe,dirichlet_tags="boundary") + trials = TrialFESpace(tests,u) + + qdegree = order*2+1 + ops = setup_transfer_operators(trials, qdegree; restriction_method=:projection) + restrictions, prolongations = ops + ops2 = setup_transfer_operators(trials, qdegree; restriction_method=:interpolation) + restrictions2, prolongations2 = ops2 + ops3 = setup_transfer_operators(trials, qdegree; restriction_method=:dof_mask) + restrictions3, prolongations3 = ops3 + + a(u,v,dΩ) = ∫(∇(v)⋅∇(u))*dΩ + l(v,dΩ) = ∫(v⋅u)*dΩ + mats, A, b = 
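# Intended usage of the helpers in SubpartitioningTools.jl above: with
# num_procs_x_level = [4,2,1], ranks 3-4 participate only in the finest level
# and rank 2 drops out at the coarsest one. Note that the vector constructor
# in the listing reads Vector{typeof(parts)}, but `parts` is not in scope
# there; this sketch assumes the element type was meant to come from
# `root_parts`:

function level_parts_sketch(root_parts::AbstractPData,num_procs_x_level::Vector{<:Integer})
  nlevs = length(num_procs_x_level)
  level_parts = Vector{typeof(root_parts)}(undef,nlevs)
  level_parts[1] = generate_subparts(root_parts,num_procs_x_level[1])
  for l in 2:nlevs
    level_parts[l] = generate_level_parts(root_parts,level_parts[l-1],num_procs_x_level[l])
  end
  return level_parts
end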
compute_hierarchy_matrices(trials,a,l,qdegree) + + for lev in 1:num_levels-1 + parts_h = get_level_parts(mh,lev) + parts_H = get_level_parts(mh,lev+1) + + if i_am_in(parts_h) + i_am_main(parts_h) && println("Lev : ", lev) + Ah = mats[lev] + xh = PVector(1.0,Ah.cols) + yh = PVector(0.0,Ah.rows) + + if i_am_in(parts_H) + AH = mats[lev+1] + xH = PVector(1.0,AH.cols) + yH = PVector(0.0,AH.rows) + else + xH = nothing + yH = nothing + end + + i_am_main(parts_h) && println(" > Restriction") + R = restrictions[lev] + mul!(yH,R,xh) + + R2 = restrictions2[lev] + mul!(yH,R2,xh) + + R3 = restrictions3[lev] + mul!(yH,R3,xh) + + i_am_main(parts_h) && println(" > Prolongation") + P = prolongations[lev] + mul!(yh,P,xH) + + P2 = prolongations2[lev] + mul!(yh,P2,xH) + + P3 = prolongations3[lev] + mul!(yh,P3,xH) end - - GridapP4est.i_am_main(parts_h) && println(" > Restriction") - R = restrictions[lev] - mul!(yH,R,xh) - - R2 = restrictions2[lev] - mul!(yH,R2,xh) - - R3 = restrictions3[lev] - mul!(yH,R3,xh) - - GridapP4est.i_am_main(parts_h) && println(" > Prolongation") - P = prolongations[lev] - mul!(yh,P,xH) - - P2 = prolongations2[lev] - mul!(yh,P2,xH) - - P3 = prolongations3[lev] - mul!(yh,P3,xH) end end - - #model_hierarchy_free!(mh) end - num_parts_x_level = [4,2,2] # Procs in each refinement level num_trees = (1,1) # Number of initial P4est trees num_refs_coarse = 2 # Number of initial refinements -ranks = num_parts_x_level[1] -prun(run,mpi,ranks,num_parts_x_level,num_trees,num_refs_coarse) +num_ranks = num_parts_x_level[1] +with_backend(run,MPIBackend(),num_ranks,num_parts_x_level,num_trees,num_refs_coarse) +println("AT THE END") MPI.Finalize() end diff --git a/test/mpi/GMGLinearSolversHDivRTTests.jl b/test/mpi/GMGLinearSolversHDivRTTests.jl index 158d094c..fbc0c2f9 100644 --- a/test/mpi/GMGLinearSolversHDivRTTests.jl +++ b/test/mpi/GMGLinearSolversHDivRTTests.jl @@ -19,62 +19,64 @@ u(x) = VectorValue(x[1],x[2]) f(x) = VectorValue(2.0*x[2]*(1.0-x[1]*x[1]),2.0*x[1]*(1-x[2]*x[2])) function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, order, α) - domain = (0,1,0,1) - num_levels = length(num_parts_x_level) - cparts = generate_subparts(parts,num_parts_x_level[num_levels]) - cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) - coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) - mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) - - qdegree = 2*(order+1) - reffe = ReferenceFE(raviart_thomas,Float64,order) - tests = TestFESpace(mh,reffe,dirichlet_tags="boundary") - trials = TrialFESpace(tests,u) - - biform(u,v,dΩ) = ∫(v⋅u)dΩ + ∫(α*divergence(v)⋅divergence(u))dΩ - liform(v,dΩ) = ∫(v⋅f)dΩ - smatrices, A, b = compute_hierarchy_matrices(trials,biform,liform,qdegree) - - # Preconditioner - smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0),num_levels-1) - restrictions, prolongations = setup_transfer_operators(trials,qdegree;mode=:residual) - - gmg = GMGLinearSolver(mh, - smatrices, - prolongations, - restrictions, - pre_smoothers=smoothers, - post_smoothers=smoothers, - maxiter=1, - rtol=1.0e-10, - verbose=false, - mode=:preconditioner) - ss = symbolic_setup(gmg,A) - ns = numerical_setup(ss,A) - - # Solve - x = PVector(0.0,A.cols) - x, history = IterativeSolvers.cg!(x,A,b; - verbose=GridapP4est.i_am_main(parts), - reltol=1.0e-12, - Pl=ns, - log=true) - - # Error norms and print solution - model = get_model(mh,1) - Uh = get_fe_space(trials,1) - Ω = Triangulation(model) - dΩ = Measure(Ω,qdegree) - uh = FEFunction(Uh,x) - e = 
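# Launch pattern adopted by every MPI test in this patch:
# `prun(driver,mpi,nranks,args...)` becomes
# `with_backend(driver,MPIBackend(),nranks,args...)`. A minimal driver in the
# same style (assuming the MPI backend of PartitionedArrays; run it with
# `mpiexec -n 4 julia driver.jl`):

using MPI
using PartitionedArrays

function hello(parts,msg)
  map_parts(parts) do p
    println("Part $p says: $msg")
  end
end

with_backend(hello,MPIBackend(),4,"hi")
MPI.Finalize()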
u-uh - e_l2 = sum(∫(e⋅e)dΩ) - tol = 1.0e-9 - #@test e_l2 < tol - if GridapP4est.i_am_main(parts) - println("L2 error = ", e_l2) + GridapP4est.with(parts) do + domain = (0,1,0,1) + num_levels = length(num_parts_x_level) + cparts = generate_subparts(parts,num_parts_x_level[num_levels]) + cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) + coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) + mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) + + qdegree = 2*(order+1) + reffe = ReferenceFE(raviart_thomas,Float64,order) + tests = TestFESpace(mh,reffe,dirichlet_tags="boundary") + trials = TrialFESpace(tests,u) + + biform(u,v,dΩ) = ∫(v⋅u)dΩ + ∫(α*divergence(v)⋅divergence(u))dΩ + liform(v,dΩ) = ∫(v⋅f)dΩ + smatrices, A, b = compute_hierarchy_matrices(trials,biform,liform,qdegree) + + # Preconditioner + smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0),num_levels-1) + restrictions, prolongations = setup_transfer_operators(trials,qdegree;mode=:residual) + + gmg = GMGLinearSolver(mh, + smatrices, + prolongations, + restrictions, + pre_smoothers=smoothers, + post_smoothers=smoothers, + maxiter=1, + rtol=1.0e-10, + verbose=false, + mode=:preconditioner) + ss = symbolic_setup(gmg,A) + ns = numerical_setup(ss,A) + + # Solve + x = PVector(0.0,A.cols) + x, history = IterativeSolvers.cg!(x,A,b; + verbose=i_am_main(parts), + reltol=1.0e-12, + Pl=ns, + log=true) + + # Error norms and print solution + model = get_model(mh,1) + Uh = get_fe_space(trials,1) + Ω = Triangulation(model) + dΩ = Measure(Ω,qdegree) + uh = FEFunction(Uh,x) + e = u-uh + e_l2 = sum(∫(e⋅e)dΩ) + tol = 1.0e-9 + #@test e_l2 < tol + if i_am_main(parts) + println("L2 error = ", e_l2) + end + + return history.iters, num_free_dofs(Vh) end - - return history.iters, num_free_dofs(Vh) end ############################################## @@ -91,7 +93,7 @@ num_refs_coarse = 2 α = 1.0 num_parts_x_level = [4,2,1] ranks = num_parts_x_level[1] -#num_iters, num_free_dofs2 = prun(main,mpi,ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order,α) +#num_iters, num_free_dofs2 = with_backend(main,MPIBackend(),ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order,α) """ @@ -108,19 +110,19 @@ for ref = 1:nr for alpha_exp = 1:na α = 10.0^alpha_exps[alpha_exp] - num_iters, num_free_dofs2 = prun(main,mpi,ranks,coarse_grid_partition,num_parts_x_level,order,α) + num_iters, num_free_dofs2 = with_backend(main,MPIBackend(),ranks,coarse_grid_partition,num_parts_x_level,order,α) free_dofs[ref] = num_free_dofs2 iter_matrix[ref,alpha_exp] = num_iters end end # Display results -if GridapP4est.i_am_main(parts) +if i_am_main(parts) println("> α = ", map(exp->10.0^exp,alpha_exp)) end for ref = 1:nr - if GridapP4est.i_am_main(parts) + if i_am_main(parts) println("> Num Refinements: ", num_refinements[ref]) println(" > Num free dofs : ", free_dofs[ref]) println(" > Num Refinements : ", num_refinements[ref]) diff --git a/test/mpi/GMGLinearSolversLaplacianTests.jl b/test/mpi/GMGLinearSolversLaplacianTests.jl index 4e05d7a7..c6eed0a4 100644 --- a/test/mpi/GMGLinearSolversLaplacianTests.jl +++ b/test/mpi/GMGLinearSolversLaplacianTests.jl @@ -19,62 +19,64 @@ u(x) = x[1] + x[2] f(x) = -Δ(u)(x) function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, order, α) - domain = (0,1,0,1) - num_levels = length(num_parts_x_level) - cparts = generate_subparts(parts,num_parts_x_level[num_levels]) - cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) - coarse_model = 
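# Note: the HDiv-RT `main` above ends with `return history.iters,
# num_free_dofs(Vh)`, but no `Vh` is in scope in that function; judging from
# GMGLinearSolversLaplacianTests.jl, the intended expression is presumably
# the finest trial space already bound to `Uh`:
#
#   return history.iters, num_free_dofs(Uh)   # Uh = get_fe_space(trials,1), defined above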
OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) - mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) - - qdegree = 2*(order+1) - reffe = ReferenceFE(lagrangian,Float64,order) - tests = TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") - trials = TrialFESpace(tests,u) - - biform(u,v,dΩ) = ∫(v*u)dΩ + ∫(α*∇(v)⋅∇(u))dΩ - liform(v,dΩ) = ∫(v*f)dΩ - smatrices, A, b = compute_hierarchy_matrices(trials,biform,liform,qdegree) - - # Preconditioner - smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0),num_levels-1) - restrictions, prolongations = setup_transfer_operators(trials,qdegree;mode=:residual) - - gmg = GMGLinearSolver(mh, - smatrices, - prolongations, - restrictions, - pre_smoothers=smoothers, - post_smoothers=smoothers, - maxiter=1, - rtol=1.0e-10, - verbose=false, - mode=:preconditioner) - ss = symbolic_setup(gmg,A) - ns = numerical_setup(ss,A) - - # Solve - x = PVector(0.0,A.cols) - x, history = IterativeSolvers.cg!(x,A,b; - verbose=GridapP4est.i_am_main(parts), - reltol=1.0e-12, - Pl=ns, - log=true) - - # Error norms and print solution - model = get_model(mh,1) - Uh = get_fe_space(trials,1) - Ω = Triangulation(model) - dΩ = Measure(Ω,qdegree) - uh = FEFunction(Uh,x) - e = u-uh - e_l2 = sum(∫(e⋅e)dΩ) - tol = 1.0e-9 - #@test e_l2 < tol - if GridapP4est.i_am_main(parts) - println("L2 error = ", e_l2) + GridapP4est.with(parts) do + domain = (0,1,0,1) + num_levels = length(num_parts_x_level) + cparts = generate_subparts(parts,num_parts_x_level[num_levels]) + cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) + coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) + mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) + + qdegree = 2*(order+1) + reffe = ReferenceFE(lagrangian,Float64,order) + tests = TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") + trials = TrialFESpace(tests,u) + + biform(u,v,dΩ) = ∫(v*u)dΩ + ∫(α*∇(v)⋅∇(u))dΩ + liform(v,dΩ) = ∫(v*f)dΩ + smatrices, A, b = compute_hierarchy_matrices(trials,biform,liform,qdegree) + + # Preconditioner + smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0),num_levels-1) + restrictions, prolongations = setup_transfer_operators(trials,qdegree;mode=:residual) + + gmg = GMGLinearSolver(mh, + smatrices, + prolongations, + restrictions, + pre_smoothers=smoothers, + post_smoothers=smoothers, + maxiter=1, + rtol=1.0e-10, + verbose=false, + mode=:preconditioner) + ss = symbolic_setup(gmg,A) + ns = numerical_setup(ss,A) + + # Solve + x = PVector(0.0,A.cols) + x, history = IterativeSolvers.cg!(x,A,b; + verbose=i_am_main(parts), + reltol=1.0e-12, + Pl=ns, + log=true) + + # Error norms and print solution + model = get_model(mh,1) + Uh = get_fe_space(trials,1) + Ω = Triangulation(model) + dΩ = Measure(Ω,qdegree) + uh = FEFunction(Uh,x) + e = u-uh + e_l2 = sum(∫(e⋅e)dΩ) + tol = 1.0e-9 + #@test e_l2 < tol + if i_am_main(parts) + println("L2 error = ", e_l2) + end + + return history.iters, num_free_dofs(Uh) end - - return history.iters, num_free_dofs(Uh) end ############################################## @@ -91,7 +93,7 @@ num_refs_coarse = 2 α = 1.0 num_parts_x_level = [4,2,1] ranks = num_parts_x_level[1] -num_iters, num_free_dofs2 = prun(main,mpi,ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order,α) +num_iters, num_free_dofs2 = with_backend(main,MPIBackend(),ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order,α) """ @@ -108,19 +110,19 @@ for ref = 1:nr for alpha_exp = 1:na α = 10.0^alpha_exps[alpha_exp] - 
num_iters, num_free_dofs2 = prun(main,mpi,ranks,coarse_grid_partition,num_parts_x_level,order,α) + num_iters, num_free_dofs2 = with_backend(main,MPIBackend(),ranks,coarse_grid_partition,num_parts_x_level,order,α) free_dofs[ref] = num_free_dofs2 iter_matrix[ref,alpha_exp] = num_iters end end # Display results -if GridapP4est.i_am_main(parts) +if i_am_main(parts) println("> α = ", map(exp->10.0^exp,alpha_exp)) end for ref = 1:nr - if GridapP4est.i_am_main(parts) + if i_am_main(parts) println("> Num Refinements: ", num_refinements[ref]) println(" > Num free dofs : ", free_dofs[ref]) println(" > Num Refinements : ", num_refinements[ref]) diff --git a/test/mpi/GMGLinearSolversMUMPSTests.jl b/test/mpi/GMGLinearSolversMUMPSTests.jl index 970aeec2..0ba9a28b 100644 --- a/test/mpi/GMGLinearSolversMUMPSTests.jl +++ b/test/mpi/GMGLinearSolversMUMPSTests.jl @@ -39,61 +39,63 @@ end function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, order) GridapPETSc.with() do - domain = (0,1,0,1) - num_levels = length(num_parts_x_level) - cparts = generate_subparts(parts,num_parts_x_level[num_levels]) - cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) - coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) - mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) - - qdegree = 2*(order+1) - reffe = ReferenceFE(lagrangian,Float64,order) - tests = TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") - trials = TrialFESpace(tests,u) - - biform(u,v,dΩ) = ∫(∇(v)⋅∇(u))dΩ - liform(v,dΩ) = ∫(v*f)dΩ - smatrices, A, b = compute_hierarchy_matrices(trials,biform,liform,qdegree) - - # Preconditioner - smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0),num_levels-1) - restrictions, prolongations = setup_transfer_operators(trials,qdegree;mode=:residual) - mumps_solver = PETScLinearSolver(set_ksp_options) - - gmg = GMGLinearSolver(mh, - smatrices, - prolongations, - restrictions, - pre_smoothers=smoothers, - post_smoothers=smoothers, - coarsest_solver=mumps_solver, - maxiter=1, - rtol=1.0e-10, - verbose=false, - mode=:preconditioner) - ss = symbolic_setup(gmg,A) - ns = numerical_setup(ss,A) - - # Solve - x = PVector(0.0,A.cols) - x, history = IterativeSolvers.cg!(x,A,b; - verbose=GridapP4est.i_am_main(parts), - reltol=1.0e-12, - Pl=ns, - log=true) - - # Error norms and print solution - model = get_model(mh,1) - Uh = get_fe_space(trials,1) - Ω = Triangulation(model) - dΩ = Measure(Ω,qdegree) - uh = FEFunction(Uh,x) - e = u-uh - e_l2 = sum(∫(e⋅e)dΩ) - tol = 1.0e-9 - @test e_l2 < tol - if GridapP4est.i_am_main(parts) - println("L2 error = ", e_l2) + GridapP4est.with(parts) do + domain = (0,1,0,1) + num_levels = length(num_parts_x_level) + cparts = generate_subparts(parts,num_parts_x_level[num_levels]) + cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) + coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) + mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) + + qdegree = 2*(order+1) + reffe = ReferenceFE(lagrangian,Float64,order) + tests = TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") + trials = TrialFESpace(tests,u) + + biform(u,v,dΩ) = ∫(∇(v)⋅∇(u))dΩ + liform(v,dΩ) = ∫(v*f)dΩ + smatrices, A, b = compute_hierarchy_matrices(trials,biform,liform,qdegree) + + # Preconditioner + smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0),num_levels-1) + restrictions, prolongations = setup_transfer_operators(trials,qdegree;mode=:residual) + mumps_solver = 
PETScLinearSolver(set_ksp_options) + + gmg = GMGLinearSolver(mh, + smatrices, + prolongations, + restrictions, + pre_smoothers=smoothers, + post_smoothers=smoothers, + coarsest_solver=mumps_solver, + maxiter=1, + rtol=1.0e-10, + verbose=false, + mode=:preconditioner) + ss = symbolic_setup(gmg,A) + ns = numerical_setup(ss,A) + + # Solve + x = PVector(0.0,A.cols) + x, history = IterativeSolvers.cg!(x,A,b; + verbose=i_am_main(parts), + reltol=1.0e-12, + Pl=ns, + log=true) + + # Error norms and print solution + model = get_model(mh,1) + Uh = get_fe_space(trials,1) + Ω = Triangulation(model) + dΩ = Measure(Ω,qdegree) + uh = FEFunction(Uh,x) + e = u-uh + e_l2 = sum(∫(e⋅e)dΩ) + tol = 1.0e-9 + @test e_l2 < tol + if i_am_main(parts) + println("L2 error = ", e_l2) + end end end end @@ -111,7 +113,7 @@ num_refs_coarse = 3 num_parts_x_level = [4,2] ranks = num_parts_x_level[1] -prun(main,mpi,ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order) +with_backend(main,MPIBackend(),ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order) MPI.Finalize() diff --git a/test/mpi/GMGLinearSolversPoissonTests.jl b/test/mpi/GMGLinearSolversPoissonTests.jl index a28d6d7b..17fbda4e 100644 --- a/test/mpi/GMGLinearSolversPoissonTests.jl +++ b/test/mpi/GMGLinearSolversPoissonTests.jl @@ -19,59 +19,61 @@ u(x) = x[1] + x[2] f(x) = -Δ(u)(x) function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, order) - domain = (0,1,0,1) - num_levels = length(num_parts_x_level) - cparts = generate_subparts(parts,num_parts_x_level[num_levels]) - cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) - coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) - mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) - - qdegree = 2*(order+1) - reffe = ReferenceFE(lagrangian,Float64,order) - tests = TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") - trials = TrialFESpace(tests,u) - - biform(u,v,dΩ) = ∫(∇(v)⋅∇(u))dΩ - liform(v,dΩ) = ∫(v*f)dΩ - smatrices, A, b = compute_hierarchy_matrices(trials,biform,liform,qdegree) - - # Preconditioner - smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0),num_levels-1) - restrictions, prolongations = setup_transfer_operators(trials,qdegree;mode=:residual) - - gmg = GMGLinearSolver(mh, - smatrices, - prolongations, - restrictions, - pre_smoothers=smoothers, - post_smoothers=smoothers, - maxiter=1, - rtol=1.0e-10, - verbose=false, - mode=:preconditioner) - ss = symbolic_setup(gmg,A) - ns = numerical_setup(ss,A) - - # Solve - x = PVector(0.0,A.cols) - x, history = IterativeSolvers.cg!(x,A,b; - verbose=GridapP4est.i_am_main(parts), - reltol=1.0e-12, - Pl=ns, - log=true) - - # Error norms and print solution - model = get_model(mh,1) - Uh = get_fe_space(trials,1) - Ω = Triangulation(model) - dΩ = Measure(Ω,qdegree) - uh = FEFunction(Uh,x) - e = u-uh - e_l2 = sum(∫(e⋅e)dΩ) - tol = 1.0e-9 - @test e_l2 < tol - if GridapP4est.i_am_main(parts) - println("L2 error = ", e_l2) + GridapP4est.with(parts) do + domain = (0,1,0,1) + num_levels = length(num_parts_x_level) + cparts = generate_subparts(parts,num_parts_x_level[num_levels]) + cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) + coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) + mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) + + qdegree = 2*(order+1) + reffe = ReferenceFE(lagrangian,Float64,order) + tests = TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") + trials = TrialFESpace(tests,u) + + 
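# `set_ksp_options` in GMGLinearSolversMUMPSTests.jl is defined earlier in
# that file, outside the hunks shown. For reference, a typical GridapPETSc
# configuration that turns the coarsest-level solve into a MUMPS LU
# factorization looks roughly as follows; this is illustrative only, and the
# exact options used in the file may differ:

using GridapPETSc

function mumps_ksp_options(ksp)
  pc = Ref{GridapPETSc.PETSC.PC}()
  GridapPETSc.PETSC.KSPSetType(ksp[],GridapPETSc.PETSC.KSPPREONLY)  # no outer Krylov iteration
  GridapPETSc.PETSC.KSPGetPC(ksp[],pc)
  GridapPETSc.PETSC.PCSetType(pc[],GridapPETSc.PETSC.PCLU)          # direct factorization
  GridapPETSc.PETSC.PCFactorSetMatSolverType(pc[],GridapPETSc.PETSC.MATSOLVERMUMPS)
end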
biform(u,v,dΩ) = ∫(∇(v)⋅∇(u))dΩ + liform(v,dΩ) = ∫(v*f)dΩ + smatrices, A, b = compute_hierarchy_matrices(trials,biform,liform,qdegree) + + # Preconditioner + smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0),num_levels-1) + restrictions, prolongations = setup_transfer_operators(trials,qdegree;mode=:residual) + + gmg = GMGLinearSolver(mh, + smatrices, + prolongations, + restrictions, + pre_smoothers=smoothers, + post_smoothers=smoothers, + maxiter=1, + rtol=1.0e-10, + verbose=false, + mode=:preconditioner) + ss = symbolic_setup(gmg,A) + ns = numerical_setup(ss,A) + + # Solve + x = PVector(0.0,A.cols) + x, history = IterativeSolvers.cg!(x,A,b; + verbose=i_am_main(parts), + reltol=1.0e-12, + Pl=ns, + log=true) + + # Error norms and print solution + model = get_model(mh,1) + Uh = get_fe_space(trials,1) + Ω = Triangulation(model) + dΩ = Measure(Ω,qdegree) + uh = FEFunction(Uh,x) + e = u-uh + e_l2 = sum(∫(e⋅e)dΩ) + tol = 1.0e-9 + @test e_l2 < tol + if i_am_main(parts) + println("L2 error = ", e_l2) + end end end @@ -88,7 +90,7 @@ num_refs_coarse = 2 num_parts_x_level = [4,2,1] ranks = num_parts_x_level[1] -prun(main,mpi,ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order) +with_backend(main,MPIBackend(),ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order) MPI.Finalize() diff --git a/test/mpi/GMGLinearSolversVectorLaplacianTests.jl b/test/mpi/GMGLinearSolversVectorLaplacianTests.jl index 68c44cc7..8517dc6c 100644 --- a/test/mpi/GMGLinearSolversVectorLaplacianTests.jl +++ b/test/mpi/GMGLinearSolversVectorLaplacianTests.jl @@ -19,62 +19,64 @@ u(x) = VectorValue(x[1],x[2]) f(x) = VectorValue(2.0*x[2]*(1.0-x[1]*x[1]),2.0*x[1]*(1-x[2]*x[2])) function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, order, α) - domain = (0,1,0,1) - num_levels = length(num_parts_x_level) - cparts = generate_subparts(parts,num_parts_x_level[num_levels]) - cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) - coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) - mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) - - qdegree = 2*(order+1) - reffe = ReferenceFE(lagrangian,VectorValue{2,Float64},order) - tests = TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") - trials = TrialFESpace(tests,u) - - biform(u,v,dΩ) = ∫(v⋅u)dΩ + ∫(α*∇(v)⊙∇(u))dΩ - liform(v,dΩ) = ∫(v⋅f)dΩ - smatrices, A, b = compute_hierarchy_matrices(trials,biform,liform,qdegree) - - # Preconditioner - smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0),num_levels-1) - restrictions, prolongations = setup_transfer_operators(trials,qdegree;mode=:residual) - - gmg = GMGLinearSolver(mh, - smatrices, - prolongations, - restrictions, - pre_smoothers=smoothers, - post_smoothers=smoothers, - maxiter=1, - rtol=1.0e-10, - verbose=false, - mode=:preconditioner) - ss = symbolic_setup(gmg,A) - ns = numerical_setup(ss,A) - - # Solve - x = PVector(0.0,A.cols) - x, history = IterativeSolvers.cg!(x,A,b; - verbose=GridapP4est.i_am_main(parts), - reltol=1.0e-12, - Pl=ns, - log=true) - - # Error norms and print solution - model = get_model(mh,1) - Uh = get_fe_space(trials,1) - Ω = Triangulation(model) - dΩ = Measure(Ω,qdegree) - uh = FEFunction(Uh,x) - e = u-uh - e_l2 = sum(∫(e⋅e)dΩ) - tol = 1.0e-9 - #@test e_l2 < tol - if GridapP4est.i_am_main(parts) - println("L2 error = ", e_l2) + GridapP4est.with(parts) do + domain = (0,1,0,1) + num_levels = length(num_parts_x_level) + cparts = generate_subparts(parts,num_parts_x_level[num_levels]) + 
cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) + coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) + mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) + + qdegree = 2*(order+1) + reffe = ReferenceFE(lagrangian,VectorValue{2,Float64},order) + tests = TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") + trials = TrialFESpace(tests,u) + + biform(u,v,dΩ) = ∫(v⋅u)dΩ + ∫(α*∇(v)⊙∇(u))dΩ + liform(v,dΩ) = ∫(v⋅f)dΩ + smatrices, A, b = compute_hierarchy_matrices(trials,biform,liform,qdegree) + + # Preconditioner + smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0),num_levels-1) + restrictions, prolongations = setup_transfer_operators(trials,qdegree;mode=:residual) + + gmg = GMGLinearSolver(mh, + smatrices, + prolongations, + restrictions, + pre_smoothers=smoothers, + post_smoothers=smoothers, + maxiter=1, + rtol=1.0e-10, + verbose=false, + mode=:preconditioner) + ss = symbolic_setup(gmg,A) + ns = numerical_setup(ss,A) + + # Solve + x = PVector(0.0,A.cols) + x, history = IterativeSolvers.cg!(x,A,b; + verbose=i_am_main(parts), + reltol=1.0e-12, + Pl=ns, + log=true) + + # Error norms and print solution + model = get_model(mh,1) + Uh = get_fe_space(trials,1) + Ω = Triangulation(model) + dΩ = Measure(Ω,qdegree) + uh = FEFunction(Uh,x) + e = u-uh + e_l2 = sum(∫(e⋅e)dΩ) + tol = 1.0e-9 + #@test e_l2 < tol + if i_am_main(parts) + println("L2 error = ", e_l2) + end + + return history.iters, num_free_dofs(Uh) end - - return history.iters, num_free_dofs(Uh) end ############################################## @@ -91,7 +93,7 @@ num_refs_coarse = 2 α = 1.0 num_parts_x_level = [4,2,1] ranks = num_parts_x_level[1] -num_iters, num_free_dofs2 = prun(main,mpi,ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order,α) +num_iters, num_free_dofs2 = with_backend(main,MPIBackend(),ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order,α) """ @@ -108,19 +110,19 @@ for ref = 1:nr for alpha_exp = 1:na α = 10.0^alpha_exps[alpha_exp] - num_iters, num_free_dofs2 = prun(main,mpi,ranks,coarse_grid_partition,num_parts_x_level,order,α) + num_iters, num_free_dofs2 = with_backend(main,MPIBackend(),ranks,coarse_grid_partition,num_parts_x_level,order,α) free_dofs[ref] = num_free_dofs2 iter_matrix[ref,alpha_exp] = num_iters end end # Display results -if GridapP4est.i_am_main(parts) +if i_am_main(parts) println("> α = ", map(exp->10.0^exp,alpha_exp)) end for ref = 1:nr - if GridapP4est.i_am_main(parts) + if i_am_main(parts) println("> Num Refinements: ", num_refinements[ref]) println(" > Num free dofs : ", free_dofs[ref]) println(" > Num Refinements : ", num_refinements[ref]) diff --git a/test/mpi/MUMPSSolversTests.jl b/test/mpi/MUMPSSolversTests.jl index df5f4c62..5be1501a 100644 --- a/test/mpi/MUMPSSolversTests.jl +++ b/test/mpi/MUMPSSolversTests.jl @@ -5,7 +5,6 @@ using MPI using Gridap using GridapDistributed using PartitionedArrays -using GridapP4est using IterativeSolvers using GridapSolvers @@ -65,7 +64,7 @@ function main(parts,partition) uh = FEFunction(Uh,x) eh = uh - u E = sum(∫(eh*eh)*dΩ) - if GridapP4est.i_am_main(parts) + if i_am_main(parts) println("L2 Error: ", E) end @@ -75,7 +74,7 @@ end partition = (32,32) ranks = (2,2) -prun(main,mpi,ranks,partition) +with_backend(main,MPIBackend(),ranks,partition) MPI.Finalize() end \ No newline at end of file diff --git a/test/mpi/ModelHierarchiesTests.jl b/test/mpi/ModelHierarchiesTests.jl index 7742b077..a7cc00e1 100644 --- a/test/mpi/ModelHierarchiesTests.jl +++ 
b/test/mpi/ModelHierarchiesTests.jl @@ -10,47 +10,39 @@ using GridapP4est using GridapSolvers using GridapSolvers.MultilevelTools -function model_hierarchy_free!(mh::ModelHierarchy) - for lev in 1:num_levels(mh) - model = get_model(mh,lev) - isa(model,DistributedAdaptedDiscreteModel) && (model = model.model) - octree_distributed_discrete_model_free!(model) - end -end - function main(parts,num_parts_x_level) - # Start from coarse, refine models - domain = (0,1,0,1) - num_levels = length(num_parts_x_level) - cparts = generate_subparts(parts,num_parts_x_level[num_levels]) - cmodel = CartesianDiscreteModel(domain,(2,2)) - coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,2) - mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) - - sol(x) = x[1] + x[2] - reffe = ReferenceFE(lagrangian,Float64,1) - tests = TestFESpace(mh,reffe,conformity=:H1) - trials = TrialFESpace(tests,sol) - - # Start from fine, coarsen models - domain = (0,1,0,1) - fparts = generate_subparts(parts,num_parts_x_level[1]) - fmodel = CartesianDiscreteModel(domain,(2,2)) - fine_model = OctreeDistributedDiscreteModel(fparts,fmodel,8) - mh = ModelHierarchy(parts,fine_model,num_parts_x_level) - - sol(x) = x[1] + x[2] - reffe = ReferenceFE(lagrangian,Float64,1) - tests = TestFESpace(mh,reffe,conformity=:H1) - trials = TrialFESpace(tests,sol) - - # model_hierarchy_free!(mh) + GridapP4est.with(parts) do + # Start from coarse, refine models + domain = (0,1,0,1) + num_levels = length(num_parts_x_level) + cparts = generate_subparts(parts,num_parts_x_level[num_levels]) + cmodel = CartesianDiscreteModel(domain,(2,2)) + coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,2) + mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) + + sol(x) = x[1] + x[2] + reffe = ReferenceFE(lagrangian,Float64,1) + tests = TestFESpace(mh,reffe,conformity=:H1) + trials = TrialFESpace(tests,sol) + + # Start from fine, coarsen models + domain = (0,1,0,1) + fparts = generate_subparts(parts,num_parts_x_level[1]) + fmodel = CartesianDiscreteModel(domain,(2,2)) + fine_model = OctreeDistributedDiscreteModel(fparts,fmodel,8) + mh = ModelHierarchy(parts,fine_model,num_parts_x_level) + + sol(x) = x[1] + x[2] + reffe = ReferenceFE(lagrangian,Float64,1) + tests = TestFESpace(mh,reffe,conformity=:H1) + trials = TrialFESpace(tests,sol) + end end num_parts_x_level = [4,4,2,2] # Procs in each refinement level ranks = num_parts_x_level[1] -prun(main,mpi,ranks,num_parts_x_level) +with_backend(main,MPIBackend(),ranks,num_parts_x_level) MPI.Finalize() end \ No newline at end of file diff --git a/test/mpi/RedistributeToolsTests.jl b/test/mpi/RedistributeToolsTests.jl index 0e80d95a..20f130b9 100644 --- a/test/mpi/RedistributeToolsTests.jl +++ b/test/mpi/RedistributeToolsTests.jl @@ -9,80 +9,72 @@ using Test using GridapSolvers using GridapSolvers.MultilevelTools -function model_hierarchy_free!(mh::ModelHierarchy) - for lev in 1:num_levels(mh) - model = get_model(mh,lev) - isa(model,DistributedRefinedDiscreteModel) && (model = model.model) - octree_distributed_discrete_model_free!(model) - end -end - function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) - domain = (0,1,0,1) - num_levels = length(num_parts_x_level) - cparts = generate_subparts(parts,num_parts_x_level[num_levels]) - cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) - coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) - mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) + GridapP4est.with(parts) do + domain = (0,1,0,1) + 
num_levels = length(num_parts_x_level) + cparts = generate_subparts(parts,num_parts_x_level[num_levels]) + cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) + coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) + mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) - level_parts = get_level_parts(mh) - old_parts = level_parts[2] - new_parts = level_parts[1] + level_parts = get_level_parts(mh) + old_parts = level_parts[2] + new_parts = level_parts[1] - # FE Spaces - order = 1 - u(x) = x[1] + x[2] - reffe = ReferenceFE(lagrangian,Float64,order) - glue = mh.levels[1].red_glue + # FE Spaces + order = 1 + u(x) = x[1] + x[2] + reffe = ReferenceFE(lagrangian,Float64,order) + glue = mh.levels[1].red_glue - model_old = get_model_before_redist(mh.levels[1]) - VOLD = TestFESpace(model_old,reffe,dirichlet_tags="boundary") - UOLD = TrialFESpace(VOLD,u) + model_old = get_model_before_redist(mh.levels[1]) + VOLD = TestFESpace(model_old,reffe,dirichlet_tags="boundary") + UOLD = TrialFESpace(VOLD,u) - model_new = get_model(mh.levels[1]) - VNEW = TestFESpace(model_new,reffe,dirichlet_tags="boundary") - UNEW = TrialFESpace(VNEW,u) + model_new = get_model(mh.levels[1]) + VNEW = TestFESpace(model_new,reffe,dirichlet_tags="boundary") + UNEW = TrialFESpace(VNEW,u) - # Triangulations - qdegree = 2*order+1 - Ω_new = Triangulation(model_new) - dΩ_new = Measure(Ω_new,qdegree) - uh_new = interpolate(u,UNEW) + # Triangulations + qdegree = 2*order+1 + Ω_new = Triangulation(model_new) + dΩ_new = Measure(Ω_new,qdegree) + uh_new = interpolate(u,UNEW) - if GridapP4est.i_am_in(old_parts) - Ω_old = Triangulation(model_old) - dΩ_old = Measure(Ω_old,qdegree) - uh_old = interpolate(u,UOLD) - else - Ω_old = nothing - dΩ_old = nothing - uh_old = nothing - end + if i_am_in(old_parts) + Ω_old = Triangulation(model_old) + dΩ_old = Measure(Ω_old,qdegree) + uh_old = interpolate(u,UOLD) + else + Ω_old = nothing + dΩ_old = nothing + uh_old = nothing + end - # Old -> New - uh_old_red = redistribute_fe_function(uh_old, - UNEW, - model_new, - glue) - n = sum(∫(uh_old_red)*dΩ_new) - if GridapP4est.i_am_in(old_parts) - o = sum(∫(uh_old)*dΩ_old) - @test o ≈ n - end + # Old -> New + uh_old_red = redistribute_fe_function(uh_old, + UNEW, + model_new, + glue) + n = sum(∫(uh_old_red)*dΩ_new) + if i_am_in(old_parts) + o = sum(∫(uh_old)*dΩ_old) + @test o ≈ n + end - # New -> Old - uh_new_red = redistribute_fe_function(uh_new, - UOLD, - model_old, - glue; - reverse=true) - n = sum(∫(uh_new)*dΩ_new) - if GridapP4est.i_am_in(old_parts) - o = sum(∫(uh_new_red)*dΩ_old) - @test o ≈ n + # New -> Old + uh_new_red = redistribute_fe_function(uh_new, + UOLD, + model_old, + glue; + reverse=true) + n = sum(∫(uh_new)*dΩ_new) + if i_am_in(old_parts) + o = sum(∫(uh_new_red)*dΩ_old) + @test o ≈ n + end end - - #model_hierarchy_free!(mh) end @@ -91,6 +83,6 @@ num_trees = (1,1) # Number of initial P4est trees num_refs_coarse = 2 # Number of initial refinements ranks = num_parts_x_level[1] -prun(run,mpi,ranks,num_parts_x_level,num_trees,num_refs_coarse) +with_backend(run,MPIBackend(),ranks,num_parts_x_level,num_trees,num_refs_coarse) MPI.Finalize() end diff --git a/test/mpi/RefinementToolsTests.jl b/test/mpi/RefinementToolsTests.jl index a5ebc40e..639990e0 100644 --- a/test/mpi/RefinementToolsTests.jl +++ b/test/mpi/RefinementToolsTests.jl @@ -11,69 +11,70 @@ using GridapSolvers using GridapSolvers.MultilevelTools function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) - domain = (0,1,0,1) - num_levels = 
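# What RedistributeToolsTests checks: `redistribute_fe_function` changes only
# the parallel layout of an FE function, not the function itself, so
# integrals computed on the old and new partitions must coincide up to
# round-off. Schematically, in both directions (names as in the test above):
#
#   uh_old_red = redistribute_fe_function(uh_old,UNEW,model_new,glue)
#   sum(∫(uh_old_red)*dΩ_new) ≈ sum(∫(uh_old)*dΩ_old)                       # old -> new
#   uh_new_red = redistribute_fe_function(uh_new,UOLD,model_old,glue;reverse=true)
#   sum(∫(uh_new_red)*dΩ_old) ≈ sum(∫(uh_new)*dΩ_new)                       # new -> old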
length(num_parts_x_level) - cparts = generate_subparts(parts,num_parts_x_level[num_levels]) - cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) - coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) - mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) - - # FE Spaces - order = 1 - sol(x) = x[1] + x[2] - reffe = ReferenceFE(lagrangian,Float64,order) - tests = TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") - trials = TrialFESpace(tests,sol) - - quad_order = 3*order+1 - for lev in 1:num_levels-1 - fparts = get_level_parts(mh,lev) - cparts = get_level_parts(mh,lev+1) - - if GridapP4est.i_am_in(cparts) - Vh = get_fe_space_before_redist(tests,lev) - Uh = get_fe_space_before_redist(trials,lev) - Ωh = get_triangulation(Uh,get_model_before_redist(mh,lev)) - dΩh = Measure(Ωh,quad_order) - uh = interpolate(sol,Uh) - - VH = get_fe_space(tests,lev+1) - UH = get_fe_space(trials,lev+1) - ΩH = get_triangulation(UH,get_model(mh,lev+1)) - dΩH = Measure(ΩH,quad_order) - uH = interpolate(sol,UH) - dΩhH = Measure(ΩH,Ωh,quad_order) - - # Coarse FEFunction -> Fine FEFunction, by projection - ah(u,v) = ∫(v⋅u)*dΩh - lh(v) = ∫(v⋅uH)*dΩh - Ah = assemble_matrix(ah,Uh,Vh) - bh = assemble_vector(lh,Vh) - - xh = PVector(0.0,Ah.cols) - IterativeSolvers.cg!(xh,Ah,bh;verbose=i_am_main(parts),reltol=1.0e-08) - uH_projected = FEFunction(Uh,xh) - - _eh = uh-uH_projected - eh = sum(∫(_eh⋅_eh)*dΩh) - i_am_main(parts) && println("Error H2h: ", eh) - - # Fine FEFunction -> Coarse FEFunction, by projection - aH(u,v) = ∫(v⋅u)*dΩH - lH(v) = ∫(v⋅uH_projected)*dΩhH - AH = assemble_matrix(aH,UH,VH) - bH = assemble_vector(lH,VH) - - xH = PVector(0.0,AH.cols) - IterativeSolvers.cg!(xH,AH,bH;verbose=i_am_main(parts),reltol=1.0e-08) - uh_projected = FEFunction(UH,xH) - - _eH = uH-uh_projected - eH = sum(∫(_eH⋅_eH)*dΩH) - i_am_main(parts) && println("Error h2H: ", eH) + GridapP4est.with(parts) do + domain = (0,1,0,1) + num_levels = length(num_parts_x_level) + cparts = generate_subparts(parts,num_parts_x_level[num_levels]) + cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) + coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) + mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) + + # FE Spaces + order = 1 + sol(x) = x[1] + x[2] + reffe = ReferenceFE(lagrangian,Float64,order) + tests = TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") + trials = TrialFESpace(tests,sol) + + quad_order = 3*order+1 + for lev in 1:num_levels-1 + fparts = get_level_parts(mh,lev) + cparts = get_level_parts(mh,lev+1) + + if i_am_in(cparts) + Vh = get_fe_space_before_redist(tests,lev) + Uh = get_fe_space_before_redist(trials,lev) + Ωh = get_triangulation(Uh,get_model_before_redist(mh,lev)) + dΩh = Measure(Ωh,quad_order) + uh = interpolate(sol,Uh) + + VH = get_fe_space(tests,lev+1) + UH = get_fe_space(trials,lev+1) + ΩH = get_triangulation(UH,get_model(mh,lev+1)) + dΩH = Measure(ΩH,quad_order) + uH = interpolate(sol,UH) + dΩhH = Measure(ΩH,Ωh,quad_order) + + # Coarse FEFunction -> Fine FEFunction, by projection + ah(u,v) = ∫(v⋅u)*dΩh + lh(v) = ∫(v⋅uH)*dΩh + Ah = assemble_matrix(ah,Uh,Vh) + bh = assemble_vector(lh,Vh) + + xh = PVector(0.0,Ah.cols) + IterativeSolvers.cg!(xh,Ah,bh;verbose=i_am_main(parts),reltol=1.0e-08) + uH_projected = FEFunction(Uh,xh) + + _eh = uh-uH_projected + eh = sum(∫(_eh⋅_eh)*dΩh) + i_am_main(parts) && println("Error H2h: ", eh) + + # Fine FEFunction -> Coarse FEFunction, by projection + aH(u,v) = ∫(v⋅u)*dΩH + lH(v) = 
∫(v⋅uH_projected)*dΩhH + AH = assemble_matrix(aH,UH,VH) + bH = assemble_vector(lH,VH) + + xH = PVector(0.0,AH.cols) + IterativeSolvers.cg!(xH,AH,bH;verbose=i_am_main(parts),reltol=1.0e-08) + uh_projected = FEFunction(UH,xH) + + _eH = uH-uh_projected + eH = sum(∫(_eH⋅_eH)*dΩH) + i_am_main(parts) && println("Error h2H: ", eH) + end end end - end @@ -82,6 +83,6 @@ num_trees = (1,1) # Number of initial P4est trees num_refs_coarse = 2 # Number of initial refinements ranks = num_parts_x_level[1] -prun(run,mpi,ranks,num_parts_x_level,num_trees,num_refs_coarse) +with_backend(run,MPIBackend(),ranks,num_parts_x_level,num_trees,num_refs_coarse) MPI.Finalize() end diff --git a/test/mpi/RestrictDofsTests.jl b/test/mpi/RestrictDofsTests.jl index ff71b9b2..48ff5721 100644 --- a/test/mpi/RestrictDofsTests.jl +++ b/test/mpi/RestrictDofsTests.jl @@ -19,59 +19,61 @@ u(x) = x[1] + x[2] f(x) = -Δ(u)(x) function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, order) - domain = (0,1,0,1) - num_levels = length(num_parts_x_level) - cparts = generate_subparts(parts,num_parts_x_level[num_levels]) - cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) - coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) - mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) - - qdegree = 2*(order+1) - reffe = ReferenceFE(lagrangian,Float64,order) - tests = TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") - trials = TrialFESpace(tests,u) - - biform(u,v,dΩ) = ∫(∇(v)⋅∇(u))dΩ - liform(v,dΩ) = ∫(v*f)dΩ - smatrices, A, b = compute_hierarchy_matrices(trials,biform,liform,qdegree) - - # Preconditioner - smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0),num_levels-1) - restrictions, prolongations = setup_transfer_operators(trials,qdegree;mode=:residual,restriction_method=:dof_mask) - - gmg = GMGLinearSolver(mh, - smatrices, - prolongations, - restrictions, - pre_smoothers=smoothers, - post_smoothers=smoothers, - maxiter=1, - rtol=1.0e-10, - verbose=false, - mode=:preconditioner) - ss = symbolic_setup(gmg,A) - ns = numerical_setup(ss,A) - - # Solve - x = PVector(0.0,A.cols) - x, history = IterativeSolvers.cg!(x,A,b; - verbose=GridapP4est.i_am_main(parts), - reltol=1.0e-12, - Pl=ns, - log=true) - - # Error norms and print solution - model = get_model(mh,1) - Uh = get_fe_space(trials,1) - Ω = Triangulation(model) - dΩ = Measure(Ω,qdegree) - uh = FEFunction(Uh,x) - e = u-uh - e_l2 = sum(∫(e⋅e)dΩ) - tol = 1.0e-9 - @test e_l2 < tol - if GridapP4est.i_am_main(parts) - println("L2 error = ", e_l2) + GridapP4est.with(parts) do + domain = (0,1,0,1) + num_levels = length(num_parts_x_level) + cparts = generate_subparts(parts,num_parts_x_level[num_levels]) + cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) + coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) + mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) + + qdegree = 2*(order+1) + reffe = ReferenceFE(lagrangian,Float64,order) + tests = TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") + trials = TrialFESpace(tests,u) + + biform(u,v,dΩ) = ∫(∇(v)⋅∇(u))dΩ + liform(v,dΩ) = ∫(v*f)dΩ + smatrices, A, b = compute_hierarchy_matrices(trials,biform,liform,qdegree) + + # Preconditioner + smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0),num_levels-1) + restrictions, prolongations = setup_transfer_operators(trials,qdegree;mode=:residual,restriction_method=:dof_mask) + + gmg = GMGLinearSolver(mh, + smatrices, + prolongations, + restrictions, 
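# Both transfers in RefinementToolsTests are L2 projections: find uh in Vh
# with ∫(v⋅uh)dΩ = ∫(v⋅u_src)dΩ for all v in Vh, i.e. a mass-matrix solve
# M x = b. The same structure in plain linear algebra, with CG as in the
# test (toy 1D P1 mass matrix; values are illustrative):

using LinearAlgebra
using IterativeSolvers

M = SymTridiagonal(fill(4.0,5),fill(1.0,4)) ./ 6   # toy mass matrix
b = M*ones(5)                                      # rhs produced by u_src ≡ 1
x = IterativeSolvers.cg(M,b;reltol=1.0e-8)         # recovers x ≈ ones(5)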
+ pre_smoothers=smoothers, + post_smoothers=smoothers, + maxiter=1, + rtol=1.0e-10, + verbose=false, + mode=:preconditioner) + ss = symbolic_setup(gmg,A) + ns = numerical_setup(ss,A) + + # Solve + x = PVector(0.0,A.cols) + x, history = IterativeSolvers.cg!(x,A,b; + verbose=i_am_main(parts), + reltol=1.0e-12, + Pl=ns, + log=true) + + # Error norms and print solution + model = get_model(mh,1) + Uh = get_fe_space(trials,1) + Ω = Triangulation(model) + dΩ = Measure(Ω,qdegree) + uh = FEFunction(Uh,x) + e = u-uh + e_l2 = sum(∫(e⋅e)dΩ) + tol = 1.0e-9 + @test e_l2 < tol + if i_am_main(parts) + println("L2 error = ", e_l2) + end end end @@ -88,7 +90,7 @@ num_refs_coarse = 2 num_parts_x_level = [4,2,1] ranks = num_parts_x_level[1] -prun(main,mpi,ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order) +with_backend(main,MPIBackend(),ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order) MPI.Finalize() diff --git a/test/mpi/RichardsonSmoothersTests.jl b/test/mpi/RichardsonSmoothersTests.jl index 9bc2b810..20fe79c8 100644 --- a/test/mpi/RichardsonSmoothersTests.jl +++ b/test/mpi/RichardsonSmoothersTests.jl @@ -12,52 +12,54 @@ using GridapSolvers using GridapSolvers.LinearSolvers function main(parts,partition) - domain = (0,1,0,1) - model = CartesianDiscreteModel(parts,domain,partition) - - sol(x) = x[1] + x[2] - f(x) = -Δ(sol)(x) - - order = 1 - qorder = order*2 + 1 - reffe = ReferenceFE(lagrangian,Float64,order) - Vh = TestFESpace(model,reffe,conformity=:H1,dirichlet_tags="boundary") - Uh = TrialFESpace(Vh,sol) - u = interpolate(sol,Uh) - - Ω = Triangulation(model) - dΩ = Measure(Ω,qorder) - a(u,v) = ∫(∇(v)⋅∇(u))*dΩ - l(v) = ∫(v⋅f)*dΩ - - op = AffineFEOperator(a,l,Uh,Vh) - A, b = get_matrix(op), get_vector(op) - - P = RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0) - ss = symbolic_setup(P,A) - ns = numerical_setup(ss,A) - - x = PVector(1.0,A.cols) - x, history = IterativeSolvers.cg!(x,A,b; - verbose=GridapP4est.i_am_main(parts), - reltol=1.0e-8, - Pl=ns, - log=true) - - u = interpolate(sol,Uh) - uh = FEFunction(Uh,x) - eh = uh - u - E = sum(∫(eh*eh)*dΩ) - if GridapP4est.i_am_main(parts) - println("L2 Error: ", E) + GridapP4est.with(parts) do + domain = (0,1,0,1) + model = CartesianDiscreteModel(parts,domain,partition) + + sol(x) = x[1] + x[2] + f(x) = -Δ(sol)(x) + + order = 1 + qorder = order*2 + 1 + reffe = ReferenceFE(lagrangian,Float64,order) + Vh = TestFESpace(model,reffe,conformity=:H1,dirichlet_tags="boundary") + Uh = TrialFESpace(Vh,sol) + u = interpolate(sol,Uh) + + Ω = Triangulation(model) + dΩ = Measure(Ω,qorder) + a(u,v) = ∫(∇(v)⋅∇(u))*dΩ + l(v) = ∫(v⋅f)*dΩ + + op = AffineFEOperator(a,l,Uh,Vh) + A, b = get_matrix(op), get_vector(op) + + P = RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0) + ss = symbolic_setup(P,A) + ns = numerical_setup(ss,A) + + x = PVector(1.0,A.cols) + x, history = IterativeSolvers.cg!(x,A,b; + verbose=i_am_main(parts), + reltol=1.0e-8, + Pl=ns, + log=true) + + u = interpolate(sol,Uh) + uh = FEFunction(Uh,x) + eh = uh - u + E = sum(∫(eh*eh)*dΩ) + if i_am_main(parts) + println("L2 Error: ", E) + end + + @test E < 1.e-8 end - - @test E < 1.e-8 end partition = (32,32) ranks = (2,2) -prun(main,mpi,ranks,partition) +with_backend(main,MPIBackend(),ranks,partition) MPI.Finalize() end \ No newline at end of file diff --git a/test/runtests.jl b/test/runtests.jl index 6d1d4e4a..a3dd1ab1 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -40,7 +40,8 @@ function run_tests(testdir) "GMGLinearSolversMUMPSTests.jl", "RestrictDofsTests.jl"] np = 4 
- extra_args = "-s 2 2 -r 2" + #extra_args = "-s 2 2 -r 2" + extra_args = "" elseif f in ["ModelHierarchiesTests.jl"] np = 6 extra_args = "" @@ -64,7 +65,6 @@ function run_tests(testdir) end # MPI tests -run_tests(@__DIR__) run_tests(joinpath(@__DIR__, "mpi")) # Sequential tests From 1316c04a080013def31f1366409b22369ca50997 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 3 Feb 2023 10:55:03 +0100 Subject: [PATCH 64/95] Added ModelHierarchies without adaptivity --- src/MultilevelTools/ModelHierarchies.jl | 116 ++++++++++++++++++++---- 1 file changed, 99 insertions(+), 17 deletions(-) diff --git a/src/MultilevelTools/ModelHierarchies.jl b/src/MultilevelTools/ModelHierarchies.jl index 2d816211..9403e189 100644 --- a/src/MultilevelTools/ModelHierarchies.jl +++ b/src/MultilevelTools/ModelHierarchies.jl @@ -39,6 +39,10 @@ has_redistribution(a::ModelHierarchy,level::Integer) = has_redistribution(a.leve has_redistribution(a::ModelHierarchyLevel{A,B,C,D}) where {A,B,C,D} = true has_redistribution(a::ModelHierarchyLevel{A,B,C,Nothing}) where {A,B,C} = false +has_refinement(a::ModelHierarchy,level::Integer) = has_refinement(a.levels[level]) +has_refinement(a::ModelHierarchyLevel{A,B}) where {A,B} = true +has_refinement(a::ModelHierarchyLevel{A,Nothing}) where A = false + """ ModelHierarchy(parts,model,num_procs_x_level;num_refs_x_level) - `model`: Initial refinable distributed model. Will be set as coarsest level. @@ -48,7 +52,8 @@ has_redistribution(a::ModelHierarchyLevel{A,B,C,Nothing}) where {A,B,C} = false """ function ModelHierarchy(root_parts ::AbstractPData, model ::GridapDistributed.AbstractDistributedDiscreteModel, - num_procs_x_level ::Vector{<:Integer}; + num_procs_x_level ::Vector{<:Integer}; + mesh_refinement = true, kwargs...) # Request correct number of parts from MAIN @@ -59,18 +64,78 @@ function ModelHierarchy(root_parts ::AbstractPData, main_num_parts = get_main_part(my_num_parts) if main_num_parts == num_procs_x_level[end] # Coarsest model - return create_model_hierarchy_by_refinement(root_parts,model,num_procs_x_level;kwargs...) + if mesh_refinement + return _model_hierarchy_by_refinement(root_parts,model,num_procs_x_level;kwargs...) + else + return _model_hierarchy_without_refinement_bottom_up(root_parts,model,num_procs_x_level;kwargs...) + end end if main_num_parts == num_procs_x_level[1] # Finest model - return create_model_hierarchy_by_coarsening(root_parts,model,num_procs_x_level;kwargs...) + if mesh_refinement + return _model_hierarchy_by_coarsening(root_parts,model,num_procs_x_level;kwargs...) + else + return _model_hierarchy_without_refinement_top_down(root_parts,model,num_procs_x_level;kwargs...) + end end @error "Model parts do not correspond to coarsest or finest parts!" 
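# How the extended constructor below decides what to build: a model living on
# num_procs_x_level[end] processors is taken as the coarsest level, one living
# on num_procs_x_level[1] as the finest, and the new `mesh_refinement` keyword
# switches between refinement/coarsening and pure redistribution. For example,
# reusing the variable names of the tests in this series:

coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse)
mh_adaptive  = ModelHierarchy(parts,coarse_model,[4,2,1])                         # refined octree levels
mh_redist    = ModelHierarchy(parts,coarse_model,[4,2,1];mesh_refinement=false)   # redistribution only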
end -function create_model_hierarchy_by_refinement(root_parts::AbstractPData, - coarsest_model::GridapDistributed.AbstractDistributedDiscreteModel, - num_procs_x_level ::Vector{<:Integer}; - num_refs_x_level=nothing) +function _model_hierarchy_without_refinement_bottom_up(root_parts::AbstractPData, + bottom_model::GridapDistributed.AbstractDistributedDiscreteModel, + num_procs_x_level::Vector{<:Integer}) + num_levels = length(num_procs_x_level) + level_parts = Vector{typeof(root_parts)}(undef,num_levels) + meshes = Vector{ModelHierarchyLevel}(undef,num_levels) + + level_parts[num_levels] = get_parts(bottom_model) + meshes[num_levels] = ModelHierarchyLevel(num_levels,bottom_model,nothing,nothing,nothing) + + for i = num_levels-1:-1:1 + model = get_model(meshes[i+1]) + if (num_procs_x_level[i] != num_procs_x_level[i+1]) + level_parts[i] = generate_subparts(root_parts,num_procs_x_level[i]) + model_red,red_glue = GridapDistributed.redistribute(model,level_parts[i]) + else + level_parts[i] = level_parts[i+1] + model_red,red_glue = nothing,nothing + end + meshes[i] = ModelHierarchyLevel(i,model,nothing,model_red,red_glue) + end + + mh = ModelHierarchy(level_parts,meshes) + return convert_to_void_models(mh) +end + +function _model_hierarchy_without_refinement_top_down(root_parts::AbstractPData, + top_model::GridapDistributed.AbstractDistributedDiscreteModel, + num_procs_x_level::Vector{<:Integer}) + num_levels = length(num_procs_x_level) + level_parts = Vector{typeof(root_parts)}(undef,num_levels) + meshes = Vector{ModelHierarchyLevel}(undef,num_levels) + + level_parts[1] = get_parts(top_model) + model = top_model + for i = 1:num_levels-1 + if (num_procs_x_level[i] != num_procs_x_level[i+1]) + level_parts[i+1] = generate_subparts(root_parts,num_procs_x_level[i+1]) + model_red = model + model,red_glue = GridapDistributed.redistribute(model_red,level_parts[i+1]) + else + level_parts[i+1] = level_parts[i] + model_red,red_glue = nothing, nothing + end + meshes[i] = ModelHierarchyLevel(i,model,nothing,model_red,red_glue) + end + meshes[num_levels] = ModelHierarchyLevel(num_levels,model,nothing,nothing,nothing) + + mh = ModelHierarchy(level_parts,meshes) + return convert_to_void_models(mh) +end + +function _model_hierarchy_by_refinement(root_parts::AbstractPData, + coarsest_model::GridapDistributed.AbstractDistributedDiscreteModel, + num_procs_x_level::Vector{<:Integer}; + num_refs_x_level=nothing) # TODO: Implement support for num_refs_x_level? (future work) num_levels = length(num_procs_x_level) level_parts = Vector{typeof(root_parts)}(undef,num_levels) @@ -96,13 +161,13 @@ function create_model_hierarchy_by_refinement(root_parts::AbstractPData, end mh = ModelHierarchy(level_parts,meshes) - return convert_to_refined_models(mh) + return convert_to_adapted_models(mh) end -function create_model_hierarchy_by_coarsening(root_parts::AbstractPData, - finest_model::GridapDistributed.AbstractDistributedDiscreteModel, - num_procs_x_level ::Vector{<:Integer}; - num_refs_x_level=nothing) +function _model_hierarchy_by_coarsening(root_parts::AbstractPData, + finest_model::GridapDistributed.AbstractDistributedDiscreteModel, + num_procs_x_level::Vector{<:Integer}; + num_refs_x_level=nothing) # TODO: Implement support for num_refs_x_level? 
(future work) num_levels = length(num_procs_x_level) level_parts = Vector{typeof(root_parts)}(undef,num_levels) @@ -129,24 +194,41 @@ function create_model_hierarchy_by_coarsening(root_parts::AbstractPData, meshes[num_levels] = ModelHierarchyLevel(num_levels,model,nothing,nothing,nothing) mh = ModelHierarchy(level_parts,meshes) - return convert_to_refined_models(mh) + return convert_to_adapted_models(mh) end -function convert_to_refined_models(mh::ModelHierarchy) +function convert_to_adapted_models(mh::ModelHierarchy) nlevs = num_levels(mh) levels = Vector{ModelHierarchyLevel}(undef,nlevs) for lev in 1:nlevs-1 - parts = get_level_parts(mh,lev+1) - if i_am_in(parts) + cparts = get_level_parts(mh,lev+1) + if i_am_in(cparts) model = get_model_before_redist(mh,lev) parent = get_model(mh,lev+1) ref_glue = mh.levels[lev].ref_glue - model_ref = DistributedAdaptedDiscreteModel(model,parent,ref_glue) + model_ref = GridapDistributed.DistributedAdaptedDiscreteModel(model,parent,ref_glue) else model = get_model_before_redist(mh,lev) model_ref = VoidDistributedDiscreteModel(model) end + levels[lev] = ModelHierarchyLevel(lev,model_ref,mh.levels[lev].ref_glue,mh.levels[lev].model_red,mh.levels[lev].red_glue) + end + levels[nlevs] = mh.levels[nlevs] + + return ModelHierarchy(mh.level_parts,levels) +end +function convert_to_void_models(mh::ModelHierarchy) + nlevs = num_levels(mh) + levels = Vector{ModelHierarchyLevel}(undef,nlevs) + for lev in 1:nlevs-1 + cparts = get_level_parts(mh,lev+1) + if i_am_in(cparts) + model_ref = get_model_before_redist(mh,lev) + else + model = get_model_before_redist(mh,lev) + model_ref = VoidDistributedDiscreteModel(model) + end levels[lev] = ModelHierarchyLevel(lev,model_ref,mh.levels[lev].ref_glue,mh.levels[lev].model_red,mh.levels[lev].red_glue) end levels[nlevs] = mh.levels[nlevs] From 252caaf78b476844b7c3f1f4abbe6547c9f07b35 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 3 Feb 2023 10:56:24 +0100 Subject: [PATCH 65/95] FESpaceHierarchies and TransferOps now accept different degrees for each level --- .../DistributedGridTransferOperators.jl | 9 +++- src/MultilevelTools/FESpaceHierarchies.jl | 41 +++++++++++++++---- 2 files changed, 40 insertions(+), 10 deletions(-) diff --git a/src/MultilevelTools/DistributedGridTransferOperators.jl b/src/MultilevelTools/DistributedGridTransferOperators.jl index 1c5d1518..6a17590e 100644 --- a/src/MultilevelTools/DistributedGridTransferOperators.jl +++ b/src/MultilevelTools/DistributedGridTransferOperators.jl @@ -136,13 +136,20 @@ function _get_redistribution_cache(lev::Int,sh::FESpaceHierarchy,mode::Symbol,op return cache_redist end -function setup_transfer_operators(sh::FESpaceHierarchy,qdegree::Int;kwargs...) +function setup_transfer_operators(sh::FESpaceHierarchy,qdegree::Integer;kwargs...) + qdegrees = Fill(qdegree,num_levels(sh)) + return setup_transfer_operators(sh,qdegrees;kwargs...) +end + +function setup_transfer_operators(sh::FESpaceHierarchy,qdegrees::AbstractArray{<:Integer};kwargs...) + @check length(qdegrees) == num_levels(sh) mh = sh.mh restrictions = Vector{DistributedGridTransferOperator}(undef,num_levels(sh)-1) prolongations = Vector{DistributedGridTransferOperator}(undef,num_levels(sh)-1) for lev in 1:num_levels(sh)-1 parts = get_level_parts(mh,lev) if i_am_in(parts) + qdegree = qdegrees[lev] restrictions[lev] = RestrictionOperator(lev,sh,qdegree;kwargs...) prolongations[lev] = ProlongationOperator(lev,sh,qdegree;kwargs...) 
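       # Usage sketch (degrees are illustrative): one quadrature degree per level
       # can now be passed, e.g. for a 3-level hierarchy
       #
       #   restrictions, prolongations = setup_transfer_operators(trials,[8,6,4];
       #                                   mode=:residual,restriction_method=:interpolation)
       #
       # The scalar method above recovers the old behaviour by wrapping qdegree
       # into Fill(qdegree,num_levels(sh)).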
end diff --git a/src/MultilevelTools/FESpaceHierarchies.jl b/src/MultilevelTools/FESpaceHierarchies.jl index f6e750eb..ef41ee1e 100644 --- a/src/MultilevelTools/FESpaceHierarchies.jl +++ b/src/MultilevelTools/FESpaceHierarchies.jl @@ -66,13 +66,30 @@ function Gridap.FESpaces.TrialFESpace(a::FESpaceHierarchyLevel{A,B},u) where {A, FESpaceHierarchyLevel(a.level,Uh,Uh_red) end -function Gridap.FESpaces.TestFESpace(mh::ModelHierarchy,args...;kwargs...) where {A,B} +function Gridap.FESpaces.TestFESpace(mh::ModelHierarchy,args...;kwargs...) test_spaces = Vector{FESpaceHierarchyLevel}(undef,num_levels(mh)) for i = 1:num_levels(mh) parts = get_level_parts(mh,i) if i_am_in(parts) - Vh = TestFESpace(get_level(mh,i),args...;kwargs...) - test_spaces[i] = Vh + Vh = TestFESpace(get_level(mh,i),args...;kwargs...) + test_spaces[i] = Vh + end + end + FESpaceHierarchy(mh,test_spaces) +end + +function Gridap.FESpaces.TestFESpace( + mh::ModelHierarchy, + arg_vector::AbstractVector{<:Union{ReferenceFE,Tuple{<:Gridap.ReferenceFEs.ReferenceFEName,Any,Any}}}; + kwargs...) + @check length(arg_vector) == num_levels(mh) + test_spaces = Vector{FESpaceHierarchyLevel}(undef,num_levels(mh)) + for i = 1:num_levels(mh) + parts = get_level_parts(mh,i) + if i_am_in(parts) + args = arg_vector[i] + Vh = TestFESpace(get_level(mh,i),args;kwargs...) + test_spaces[i] = Vh end end FESpaceHierarchy(mh,test_spaces) @@ -83,8 +100,8 @@ function Gridap.FESpaces.TrialFESpace(a::FESpaceHierarchy,u) for i = 1:num_levels(a.mh) parts = get_level_parts(a.mh,i) if i_am_in(parts) - Uh = TrialFESpace(a[i],u) - trial_spaces[i] = Uh + Uh = TrialFESpace(a[i],u) + trial_spaces[i] = Uh end end FESpaceHierarchy(a.mh,trial_spaces) @@ -95,17 +112,23 @@ function Gridap.FESpaces.TrialFESpace(a::FESpaceHierarchy) for i = 1:num_levels(a.mh) parts = get_level_parts(a.mh,i) if i_am_in(parts) - Uh = TrialFESpace(a[i]) - trial_spaces[i] = Uh + Uh = TrialFESpace(a[i]) + trial_spaces[i] = Uh end end FESpaceHierarchy(a.mh,trial_spaces) end -function compute_hierarchy_matrices(trials::FESpaceHierarchy,a::Function,l::Function,qdegree::Int) +function compute_hierarchy_matrices(trials::FESpaceHierarchy,a::Function,l::Function,qdegree::Integer) + return compute_hierarchy_matrices(trials,a,l,Fill(qdegree,num_levels(trials))) +end + +function compute_hierarchy_matrices(trials::FESpaceHierarchy,a::Function,l::Function,qdegree::AbstractArray{<:Integer}) nlevs = num_levels(trials) mh = trials.mh + @check length(qdegree) == nlevs + A = nothing b = nothing mats = Vector{PSparseMatrix}(undef,nlevs) @@ -116,7 +139,7 @@ function compute_hierarchy_matrices(trials::FESpaceHierarchy,a::Function,l::Func U = get_fe_space(trials,lev) V = get_test_space(U) Ω = Triangulation(model) - dΩ = Measure(Ω,qdegree) + dΩ = Measure(Ω,qdegree[lev]) ai(u,v) = a(u,v,dΩ) if lev == 1 li(v) = l(v,dΩ) From 1717f3f9802a7c212e176c9d4e0220c0091993db Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 3 Feb 2023 10:57:04 +0100 Subject: [PATCH 66/95] Removed PMGLinearSolvers --- src/LinearSolvers/PMGLinearSolvers.jl | 181 -------------------------- 1 file changed, 181 deletions(-) delete mode 100644 src/LinearSolvers/PMGLinearSolvers.jl diff --git a/src/LinearSolvers/PMGLinearSolvers.jl b/src/LinearSolvers/PMGLinearSolvers.jl deleted file mode 100644 index db7c669c..00000000 --- a/src/LinearSolvers/PMGLinearSolvers.jl +++ /dev/null @@ -1,181 +0,0 @@ -""" - struct PMG{S1,S2,CS} <: Gridap.Algebra.LinearSolver - -Implementation of a P-MultiGrid solver. 
- -Through the constructor kwargs, one can specify the smoothers and -solver used for the pre, post smoothing steps and the coarsest level solve. -""" -struct PMG{S1,S2,CS} <: Gridap.Algebra.LinearSolver - sh ::FESpaceHierarchy - pre_smoother ::S1 - post_smoother ::S2 - coarse_solver ::CS - rtol ::Float64 - maxiter ::Int - verbose ::Bool - mode ::Symbol -end - -function PMG( - sh::FESpaceHierarchy; - pre_smoother=JacobiSmoother(5), - post_smoother=pre_smoother, - coarse_solver=BackslashSolver(), - rtol=1.0e-6, - maxiter=1000, - verbose=false, - mode=:preconditioner) - Gridap.Helpers.@check mode==:preconditioner || mode==:solver - return PMG(sh,pre_smoother,post_smoother,coarse_solver,rtol,maxiter,verbose,mode) -end - -struct PMGSymbolicSetup{S1,S2,CS} <: Gridap.Algebra.SymbolicSetup - pmg :: PMG - ss_pre_smoothers :: Vector{S1} - ss_post_smoothers :: Vector{S2} - ss_coarse_solver :: CS -end - -function Gridap.Algebra.symbolic_setup(pmg::PMG, sysmats) - nlev = get_num_levels(pmg.sh) - - ss_pre_smoothers = map(mat -> symbolic_setup(pmg.pre_smoother,mat),sysmats[1:nlev-1]) - if pmg.post_smoother === pmg.pre_smoother - ss_post_smoothers = ss_pre_smoothers - else - ss_post_smoothers = map(mat -> symbolic_setup(pmg.post_smoother,mat),sysmats[1:nlev-1]) - end - ss_coarse_solver = symbolic_setup(pmg.coarse_solver,sysmats[nlev]) - - return PMGSymbolicSetup(pmg,ss_pre_smoothers,ss_post_smoothers,ss_coarse_solver) -end - -mutable struct PMGNumericalSetup{M,C,T,S1,S2,CS} <: Gridap.Algebra.NumericalSetup - pmg :: PMG - sysmats :: Vector{M} - caches :: Vector{C} - transfers :: Vector{T} - ns_pre_smoothers :: Vector{S1} - ns_post_smoothers :: Vector{S2} - ns_coarse_solver :: CS -end - -function get_pmg_caches(lev::Int, sysmats, sh::FESpaceHierarchy) - nlev = length(sysmats) - Adxh = fill(0.0,size(sysmats[lev],1)) - dxh = fill(0.0,size(sysmats[lev],2)) - if (lev != nlev) # Not the coarsest level - dxH = fill(0.0,size(sysmats[lev+1],2)) - rH = fill(0.0,size(sysmats[lev+1],2)) - else - dxH, rH = nothing, nothing - end - return Adxh, dxh, dxH, rH -end - -function get_pmg_caches(lev::Int, sysmats::Vector{T}, sh::FESpaceHierarchy) where T <: PSparseMatrix - nlev = length(sysmats) - Adxh = PVector(0.0,sysmats[lev].rows) - dxh = PVector(0.0,sysmats[lev].cols) - if (lev != nlev) # Not the coarsest level - dxH = PVector(0.0,sysmats[lev+1].cols) - rH = PVector(0.0,sysmats[lev+1].rows) - else - dxH, rH = nothing, nothing - end - return Adxh, dxh, dxH, rH -end - -function Gridap.Algebra.numerical_setup(ss::PMGSymbolicSetup, sysmats) - pmg = ss.pmg - nlev = get_num_levels(pmg.sh) - - # Caches - caches = map(k -> get_pmg_caches(k,sysmats,pmg.sh), collect(1:nlev)) - - # Transfer Operators - transfers = get_transfer_operators(pmg.sh) - - # Smoother/Solvers setups - ns_pre_smoothers = map((ss,mat) -> numerical_setup(ss,mat),ss.ss_pre_smoothers,sysmats[1:nlev-1]) - if pmg.post_smoother === pmg.pre_smoother - ns_post_smoothers = ns_pre_smoothers - else - ns_post_smoothers = map((ss,mat) -> numerical_setup(ss,mat),ss.ss_post_smoothers,sysmats[1:nlev-1]) - end - ns_coarse_solver = numerical_setup(ss.ss_coarse_solver,sysmats[nlev]) - - return PMGNumericalSetup(pmg,sysmats,caches,transfers,ns_pre_smoothers,ns_post_smoothers,ns_coarse_solver) -end - -function solve!(x::AbstractVector,ns::PMGNumericalSetup,b::AbstractVector) - maxiter = ns.pmg.maxiter - rtol = ns.pmg.rtol - verbose = ns.pmg.verbose - mode = ns.pmg.mode - - if mode == :preconditioner - fill!(x,0.0) - r = copy(b) - else - A = ns.sysmats[1] - r = similar(b); 
mul!(r,A,x); r .= b .- r - end - - iter = 0 - err = 1.0 - nrm_r0 = norm(r) - verbose && println("> PMG: Starting convergence loop.") - while err > rtol && iter < maxiter - solve!(1,x,ns,r) - - nrm_r = norm(r) - err = nrm_r/nrm_r0 - verbose && println(" > Iteration ", iter, ": (eAbs, eRel) = (", nrm_r, " , ", err, ")") - iter = iter + 1 - end - - converged = (err < rtol) - return iter, converged -end - -function solve!(lev::Int,xh::AbstractVector,ns::PMGNumericalSetup,rh::AbstractVector) - nlev = get_num_levels(ns.pmg.sh) - - ### Coarsest level - if (lev == nlev) - solve!(xh,ns.ns_coarse_solver,rh) - return - end - - ### Fine levels - Ah = ns.sysmats[lev] - Adxh, dxh, dxH, rH = ns.caches[lev] - R, Rt = ns.transfers[lev] - - # Pre-smooth current solution - solve!(xh, ns.ns_pre_smoothers[lev], rh) - - # Restrict the residual - mul!(rH, R, rh) - - # Apply next level - fill!(dxH,0.0) - solve!(lev+1,dxH,ns,rH) - - # Interpolate dxH in finer space - mul!(dxh, Rt, dxH) - - # Update solution & residual - xh .= xh .+ dxh - mul!(Adxh, Ah, dxh) - rh .= rh .- Adxh - - # Post-smooth current solution - solve!(xh, ns.ns_post_smoothers[lev], rh) -end - -function LinearAlgebra.ldiv!(x::AbstractVector,ns::PMGNumericalSetup,b::AbstractVector) - solve!(x,ns,b) -end From 67c97cb84f0eb2a714f1f6249143ece2168ab3cf Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 3 Feb 2023 10:58:20 +0100 Subject: [PATCH 67/95] Added tests for P-refinement GMG --- .../PRefinementGMGLinearSolversPoissonTest.jl | 101 ++++++++++++++++++ 1 file changed, 101 insertions(+) create mode 100644 test/mpi/PRefinementGMGLinearSolversPoissonTest.jl diff --git a/test/mpi/PRefinementGMGLinearSolversPoissonTest.jl b/test/mpi/PRefinementGMGLinearSolversPoissonTest.jl new file mode 100644 index 00000000..9a8bd92c --- /dev/null +++ b/test/mpi/PRefinementGMGLinearSolversPoissonTest.jl @@ -0,0 +1,101 @@ +module PRefinementGMGLinearSolverPoissonTests +using MPI +using Test +using LinearAlgebra +using IterativeSolvers +using FillArrays + +using Gridap +using Gridap.Helpers +using Gridap.ReferenceFEs +using PartitionedArrays +using GridapDistributed +using GridapP4est + +using GridapSolvers +using GridapSolvers.LinearSolvers +using GridapSolvers.MultilevelTools + +u(x) = x[1] + x[2] +f(x) = -Δ(u)(x) + +function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, max_order) + GridapP4est.with(parts) do + domain = (0,1,0,1) + num_levels = length(num_parts_x_level) + cparts = generate_subparts(parts,num_parts_x_level[num_levels]) + cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) + coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) + mh = ModelHierarchy(parts,coarse_model,num_parts_x_level;mesh_refinement=false) + + orders = collect(max_order:-1:1) + qdegrees = map(o->2*(o+1),orders) + reffes = map(o->ReferenceFE(lagrangian,Float64,o),orders) + tests = TestFESpace(mh,reffes;conformity=:H1,dirichlet_tags="boundary") + trials = TrialFESpace(tests,u) + + biform(u,v,dΩ) = ∫(∇(v)⋅∇(u))dΩ + liform(v,dΩ) = ∫(v*f)dΩ + smatrices, A, b = compute_hierarchy_matrices(trials,biform,liform,qdegrees) + + # Preconditioner + smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0),num_levels-1) + restrictions, prolongations = setup_transfer_operators(trials,qdegrees; + mode=:residual, + restriction_method=:interpolation) + + gmg = GMGLinearSolver(mh, + smatrices, + prolongations, + restrictions, + pre_smoothers=smoothers, + post_smoothers=smoothers, + maxiter=1, + rtol=1.0e-10, + verbose=false, + 
mode=:preconditioner) + ss = symbolic_setup(gmg,A) + ns = numerical_setup(ss,A) + + # Solve + x = PVector(0.0,A.cols) + x, history = IterativeSolvers.cg!(x,A,b; + verbose=i_am_main(parts), + reltol=1.0e-12, + Pl=ns, + log=true) + + # Error norms and print solution + model = get_model(mh,1) + Uh = get_fe_space(trials,1) + Ω = Triangulation(model) + dΩ = Measure(Ω,qdegrees[1]) + uh = FEFunction(Uh,x) + e = u-uh + e_l2 = sum(∫(e⋅e)dΩ) + tol = 1.0e-9 + @test e_l2 < tol + if i_am_main(parts) + println("L2 error = ", e_l2) + end + end +end + +############################################## + +if !MPI.Initialized() + MPI.Init() +end + +# Parameters +max_order = 3 +coarse_grid_partition = (2,2) +num_refs_coarse = 2 + +num_parts_x_level = [4,4,1] +ranks = num_parts_x_level[1] +with_backend(main,MPIBackend(),ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,max_order) + + +MPI.Finalize() +end From ddd0b690657cec00e11304f9cb56fe686dff1c6a Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 3 Feb 2023 10:58:45 +0100 Subject: [PATCH 68/95] Minor changes --- src/LinearSolvers/GMGLinearSolvers.jl | 3 +- src/LinearSolvers/LinearSolvers.jl | 1 - .../GridapDistributedExtensions.jl | 36 ------------------- src/MultilevelTools/MultilevelTools.jl | 1 - src/MultilevelTools/RefinementTools.jl | 17 --------- .../DistributedGridTransferOperatorsTests.jl | 2 +- test/mpi/GMGLinearSolversHDivRTTests.jl | 2 +- test/mpi/GMGLinearSolversLaplacianTests.jl | 2 +- test/mpi/GMGLinearSolversMUMPSTests.jl | 2 +- test/mpi/GMGLinearSolversPoissonTests.jl | 2 +- .../GMGLinearSolversVectorLaplacianTests.jl | 2 +- test/mpi/RefinementToolsTests.jl | 2 +- test/mpi/RestrictDofsTests.jl | 2 +- test/mpi/RichardsonSmoothersTests.jl | 2 +- 14 files changed, 10 insertions(+), 66 deletions(-) diff --git a/src/LinearSolvers/GMGLinearSolvers.jl b/src/LinearSolvers/GMGLinearSolvers.jl index b0e7fb75..f9a20e0f 100644 --- a/src/LinearSolvers/GMGLinearSolvers.jl +++ b/src/LinearSolvers/GMGLinearSolvers.jl @@ -236,9 +236,8 @@ function apply_GMG_level!(lev::Integer,xh::Union{PVector,Nothing},rh::Union{PVec # Interpolate dxH in finer space mul!(dxh,interp,dxH) - # Update solution + # Update solution & residual xh .= xh .+ dxh - # Update residual mul!(Adxh, Ah, dxh) rh .= rh .- Adxh diff --git a/src/LinearSolvers/LinearSolvers.jl b/src/LinearSolvers/LinearSolvers.jl index f8ba2dc2..4fe84ae9 100644 --- a/src/LinearSolvers/LinearSolvers.jl +++ b/src/LinearSolvers/LinearSolvers.jl @@ -5,7 +5,6 @@ using LinearAlgebra using Gridap using Gridap.Algebra using PartitionedArrays -using GridapP4est using GridapPETSc using GridapSolvers.MultilevelTools diff --git a/src/MultilevelTools/GridapDistributedExtensions.jl b/src/MultilevelTools/GridapDistributedExtensions.jl index f3e30a40..bc24aa19 100644 --- a/src/MultilevelTools/GridapDistributedExtensions.jl +++ b/src/MultilevelTools/GridapDistributedExtensions.jl @@ -34,42 +34,6 @@ function change_parts(::Type{<:GridapDistributed.DistributedCellField},x,new_par return GridapDistributed.DistributedCellField(fields) end -""" -function change_parts(::Type{<:GridapDistributed.DistributedSingleFieldFEFunction},x,new_parts) - if isa(x,GridapDistributed.DistributedSingleFieldFEFunction) - fields = change_parts(local_views(x),new_parts) - metadata = GridapDistributed.DistributedFEFunctionData(change_parts(x.metadata.free_values,new_parts)) - else - fields = change_parts(nothing,new_parts;default=void(CellField)) - metadata = 
GridapDistributed.DistributedFEFunctionData(change_parts(nothing,new_parts;default=Float64[])) - end - return GridapDistributed.DistributedCellField(fields,metadata) -end -""" - -""" -function change_parts(::Type{<:PRange},x::Union{PRange,Nothing}, new_parts) - if isa(x,PRange) - ngids = x.ngids - partition = change_parts(x.partition,new_parts;default=void(IndexSet)) - exchanger = x.exchanger - gid_to_part = x.gid_to_part - ghost = x.ghost - else - ngids = 0 - partition = change_parts(nothing,new_parts;default=void(IndexSet)) - exchanger = empty_exchanger(new_parts) - gid_to_part = nothing - ghost = false - end - return PRange(ngids,partition,exchanger,gid_to_part,ghost) -end - -function void(::Type{IndexSet}) - return IndexSet(0,Int[],Int32[],Int32[],Int32[],Int32[],Dict{Int,Int32}()) -end -""" - # DistributedFESpaces function get_test_space(U::GridapDistributed.DistributedSingleFieldFESpace) diff --git a/src/MultilevelTools/MultilevelTools.jl b/src/MultilevelTools/MultilevelTools.jl index 751605ed..447ae753 100644 --- a/src/MultilevelTools/MultilevelTools.jl +++ b/src/MultilevelTools/MultilevelTools.jl @@ -15,7 +15,6 @@ using GridapDistributed import LinearAlgebra: mul! import GridapDistributed: local_views -import GridapP4est: i_am_in, i_am_main export change_parts diff --git a/src/MultilevelTools/RefinementTools.jl b/src/MultilevelTools/RefinementTools.jl index 6b3051a0..b7dd250a 100644 --- a/src/MultilevelTools/RefinementTools.jl +++ b/src/MultilevelTools/RefinementTools.jl @@ -1,21 +1,4 @@ -# DistributedRefinedDiscreteModels - -const DistributedAdaptedDiscreteModel{Dc,Dp} = GridapDistributed.DistributedDiscreteModel{Dc,Dp,<:AbstractPData{<:AdaptedDiscreteModel{Dc,Dp}}} - -function DistributedAdaptedDiscreteModel(model::GridapDistributed.AbstractDistributedDiscreteModel, - parent::GridapDistributed.AbstractDistributedDiscreteModel, - glue::AbstractPData{<:AdaptivityGlue}) - models = map_parts(local_views(model),local_views(parent),glue) do model, parent, glue - AdaptedDiscreteModel(model,parent,glue) - end - return GridapDistributed.DistributedDiscreteModel(models,get_cell_gids(model)) -end - -function Gridap.Adaptivity.get_adaptivity_glue(model::DistributedAdaptedDiscreteModel) - return map_parts(Gridap.Adaptivity.get_adaptivity_glue,local_views(model)) -end - # DistributedAdaptedTriangulations const DistributedAdaptedTriangulation{Dc,Dp} = GridapDistributed.DistributedTriangulation{Dc,Dp,<:AbstractPData{<:AdaptedTriangulation{Dc,Dp}}} diff --git a/test/mpi/DistributedGridTransferOperatorsTests.jl b/test/mpi/DistributedGridTransferOperatorsTests.jl index 4c935dc2..b6fdc12b 100644 --- a/test/mpi/DistributedGridTransferOperatorsTests.jl +++ b/test/mpi/DistributedGridTransferOperatorsTests.jl @@ -23,7 +23,7 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) u(x) = x[1] + x[2] reffe = ReferenceFE(lagrangian,Float64,order) - tests = TestFESpace(mh,reffe,dirichlet_tags="boundary") + tests = TestFESpace(mh,reffe;dirichlet_tags="boundary") trials = TrialFESpace(tests,u) qdegree = order*2+1 diff --git a/test/mpi/GMGLinearSolversHDivRTTests.jl b/test/mpi/GMGLinearSolversHDivRTTests.jl index fbc0c2f9..1a9bff6f 100644 --- a/test/mpi/GMGLinearSolversHDivRTTests.jl +++ b/test/mpi/GMGLinearSolversHDivRTTests.jl @@ -29,7 +29,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, qdegree = 2*(order+1) reffe = ReferenceFE(raviart_thomas,Float64,order) - tests = TestFESpace(mh,reffe,dirichlet_tags="boundary") + tests = 
TestFESpace(mh,reffe;dirichlet_tags="boundary") trials = TrialFESpace(tests,u) biform(u,v,dΩ) = ∫(v⋅u)dΩ + ∫(α*divergence(v)⋅divergence(u))dΩ diff --git a/test/mpi/GMGLinearSolversLaplacianTests.jl b/test/mpi/GMGLinearSolversLaplacianTests.jl index c6eed0a4..b39377b9 100644 --- a/test/mpi/GMGLinearSolversLaplacianTests.jl +++ b/test/mpi/GMGLinearSolversLaplacianTests.jl @@ -29,7 +29,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, qdegree = 2*(order+1) reffe = ReferenceFE(lagrangian,Float64,order) - tests = TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") + tests = TestFESpace(mh,reffe;conformity=:H1,dirichlet_tags="boundary") trials = TrialFESpace(tests,u) biform(u,v,dΩ) = ∫(v*u)dΩ + ∫(α*∇(v)⋅∇(u))dΩ diff --git a/test/mpi/GMGLinearSolversMUMPSTests.jl b/test/mpi/GMGLinearSolversMUMPSTests.jl index 0ba9a28b..2d2cce68 100644 --- a/test/mpi/GMGLinearSolversMUMPSTests.jl +++ b/test/mpi/GMGLinearSolversMUMPSTests.jl @@ -49,7 +49,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, qdegree = 2*(order+1) reffe = ReferenceFE(lagrangian,Float64,order) - tests = TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") + tests = TestFESpace(mh,reffe;conformity=:H1,dirichlet_tags="boundary") trials = TrialFESpace(tests,u) biform(u,v,dΩ) = ∫(∇(v)⋅∇(u))dΩ diff --git a/test/mpi/GMGLinearSolversPoissonTests.jl b/test/mpi/GMGLinearSolversPoissonTests.jl index 17fbda4e..72dfc714 100644 --- a/test/mpi/GMGLinearSolversPoissonTests.jl +++ b/test/mpi/GMGLinearSolversPoissonTests.jl @@ -29,7 +29,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, qdegree = 2*(order+1) reffe = ReferenceFE(lagrangian,Float64,order) - tests = TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") + tests = TestFESpace(mh,reffe;conformity=:H1,dirichlet_tags="boundary") trials = TrialFESpace(tests,u) biform(u,v,dΩ) = ∫(∇(v)⋅∇(u))dΩ diff --git a/test/mpi/GMGLinearSolversVectorLaplacianTests.jl b/test/mpi/GMGLinearSolversVectorLaplacianTests.jl index 8517dc6c..86bd509d 100644 --- a/test/mpi/GMGLinearSolversVectorLaplacianTests.jl +++ b/test/mpi/GMGLinearSolversVectorLaplacianTests.jl @@ -29,7 +29,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, qdegree = 2*(order+1) reffe = ReferenceFE(lagrangian,VectorValue{2,Float64},order) - tests = TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") + tests = TestFESpace(mh,reffe;conformity=:H1,dirichlet_tags="boundary") trials = TrialFESpace(tests,u) biform(u,v,dΩ) = ∫(v⋅u)dΩ + ∫(α*∇(v)⊙∇(u))dΩ diff --git a/test/mpi/RefinementToolsTests.jl b/test/mpi/RefinementToolsTests.jl index 639990e0..235d9e6e 100644 --- a/test/mpi/RefinementToolsTests.jl +++ b/test/mpi/RefinementToolsTests.jl @@ -23,7 +23,7 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) order = 1 sol(x) = x[1] + x[2] reffe = ReferenceFE(lagrangian,Float64,order) - tests = TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") + tests = TestFESpace(mh,reffe;conformity=:H1,dirichlet_tags="boundary") trials = TrialFESpace(tests,sol) quad_order = 3*order+1 diff --git a/test/mpi/RestrictDofsTests.jl b/test/mpi/RestrictDofsTests.jl index 48ff5721..47612a06 100644 --- a/test/mpi/RestrictDofsTests.jl +++ b/test/mpi/RestrictDofsTests.jl @@ -29,7 +29,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, qdegree = 2*(order+1) reffe = ReferenceFE(lagrangian,Float64,order) - tests = 
TestFESpace(mh,reffe,conformity=:H1,dirichlet_tags="boundary") + tests = TestFESpace(mh,reffe;conformity=:H1,dirichlet_tags="boundary") trials = TrialFESpace(tests,u) biform(u,v,dΩ) = ∫(∇(v)⋅∇(u))dΩ diff --git a/test/mpi/RichardsonSmoothersTests.jl b/test/mpi/RichardsonSmoothersTests.jl index 20fe79c8..ca0d2835 100644 --- a/test/mpi/RichardsonSmoothersTests.jl +++ b/test/mpi/RichardsonSmoothersTests.jl @@ -22,7 +22,7 @@ function main(parts,partition) order = 1 qorder = order*2 + 1 reffe = ReferenceFE(lagrangian,Float64,order) - Vh = TestFESpace(model,reffe,conformity=:H1,dirichlet_tags="boundary") + Vh = TestFESpace(model,reffe;conformity=:H1,dirichlet_tags="boundary") Uh = TrialFESpace(Vh,sol) u = interpolate(sol,Uh) From b8b7fee128e4f8e9f9537a85c8547390aa16f3b2 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 3 Feb 2023 11:14:53 +0100 Subject: [PATCH 69/95] Limited PartitionedArrays compatibility to v0.2.15 --- Manifest.toml | 42 +++++++++++++++++++++--------------------- Project.toml | 1 + 2 files changed, 22 insertions(+), 21 deletions(-) diff --git a/Manifest.toml b/Manifest.toml index db3fd731..fbfd9512 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -2,7 +2,7 @@ julia_version = "1.8.5" manifest_format = "2.0" -project_hash = "dbb1333b0ace488af82ab2035259c592777d3449" +project_hash = "fd265c84f39675a275e4824c8107d86c61971692" [[deps.AbstractFFTs]] deps = ["ChainRulesCore", "LinearAlgebra"] @@ -31,10 +31,10 @@ uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f" version = "1.1.1" [[deps.ArrayInterfaceCore]] -deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"] -git-tree-sha1 = "14c3f84a763848906ac681f94cf469a851601d92" +deps = ["LinearAlgebra", "SnoopPrecompile", "SparseArrays", "SuiteSparse"] +git-tree-sha1 = "e5f08b5689b1aad068e01751889f2f615c7db36d" uuid = "30b0a656-2188-435a-8636-2ec0e6a096e2" -version = "0.1.28" +version = "0.1.29" [[deps.ArrayLayouts]] deps = ["FillArrays", "LinearAlgebra", "SparseArrays"] @@ -77,15 +77,15 @@ version = "1.15.7" [[deps.ChangesOfVariables]] deps = ["ChainRulesCore", "LinearAlgebra", "Test"] -git-tree-sha1 = "38f7a08f19d8810338d4f5085211c7dfa5d5bdd8" +git-tree-sha1 = "844b061c104c408b24537482469400af6075aae4" uuid = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0" -version = "0.1.4" +version = "0.1.5" [[deps.CodecZlib]] deps = ["TranscodingStreams", "Zlib_jll"] -git-tree-sha1 = "ded953804d019afa9a3f98981d99b33e3db7b6da" +git-tree-sha1 = "9c209fb7536406834aa938fb149964b985de6c83" uuid = "944b1d66-785c-5afd-91f1-9de20f533193" -version = "0.7.0" +version = "0.7.1" [[deps.Combinatorics]] git-tree-sha1 = "08c8b6831dc00bfea825826be0bc8336fc369860" @@ -100,9 +100,9 @@ version = "0.3.0" [[deps.Compat]] deps = ["Dates", "LinearAlgebra", "UUIDs"] -git-tree-sha1 = "00a2cccc7f098ff3b66806862d275ca3db9e6e5a" +git-tree-sha1 = "61fdd77467a5c3ad071ef8277ac6bd6af7dd4c04" uuid = "34da2185-b29b-5c13-b0c7-acf172513d20" -version = "4.5.0" +version = "4.6.0" [[deps.CompilerSupportLibraries_jll]] deps = ["Artifacts", "Libdl"] @@ -225,7 +225,7 @@ version = "0.2.6" [[deps.GridapP4est]] deps = ["ArgParse", "FillArrays", "Gridap", "GridapDistributed", "Libdl", "MPI", "P4est_wrapper", "PartitionedArrays", "Test"] -git-tree-sha1 = "d94678b1a1aa3a18608565b73d271a8a81ec088b" +git-tree-sha1 = "6bae326892fa48e44633cf948f8b7cba8d4a92b0" repo-rev = "p4est-migration" repo-url = "https://github.com/gridap/GridapP4est.jl.git" uuid = "c2c8e14b-f5fd-423d-9666-1dd9ad120af9" @@ -266,9 +266,9 @@ version = "0.9.2" [[deps.JLD2]] deps = ["FileIO", "MacroTools", "Mmap", "OrderedCollections", "Pkg", 
"Printf", "Reexport", "TranscodingStreams", "UUIDs"] -git-tree-sha1 = "ec8a9c9f0ecb1c687e34c1fda2699de4d054672a" +git-tree-sha1 = "c3244ef42b7d4508c638339df1bdbf4353e144db" uuid = "033835bb-8acc-5ee8-8aae-3f567f8a3819" -version = "0.4.29" +version = "0.4.30" [[deps.JLLWrappers]] deps = ["Preferences"] @@ -338,9 +338,9 @@ uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" [[deps.LogExpFunctions]] deps = ["ChainRulesCore", "ChangesOfVariables", "DocStringExtensions", "InverseFunctions", "IrrationalConstants", "LinearAlgebra"] -git-tree-sha1 = "946607f84feb96220f480e0422d3484c49c00239" +git-tree-sha1 = "45b288af6956e67e621c5cbb2d75a261ab58300b" uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688" -version = "0.3.19" +version = "0.3.20" [[deps.Logging]] uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" @@ -508,15 +508,15 @@ version = "0.12.3" [[deps.Parsers]] deps = ["Dates", "SnoopPrecompile"] -git-tree-sha1 = "8175fc2b118a3755113c8e68084dc1a9e63c61ee" +git-tree-sha1 = "151d91d63d8d6c1a5789ecb7de51547e00480f1b" uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" -version = "2.5.3" +version = "2.5.4" [[deps.PartitionedArrays]] deps = ["Distances", "IterativeSolvers", "LinearAlgebra", "MPI", "Printf", "SparseArrays", "SparseMatricesCSR"] -git-tree-sha1 = "7efbdad40c1f4a341d10db6c039495babc602a39" +git-tree-sha1 = "8a8a72723ffb62a395b0475b78b4695fb7090441" uuid = "5a9dfac6-5c52-46f7-8278-5e2210713be9" -version = "0.2.14" +version = "0.2.15" [[deps.Pkg]] deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"] @@ -610,9 +610,9 @@ uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" [[deps.SparseMatricesCSR]] deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"] -git-tree-sha1 = "4870b3e7db7063927b163fb981bd579410b68b2d" +git-tree-sha1 = "38677ca58e80b5cad2382e5a1848f93b054ad28d" uuid = "a0a7dd2c-ebf4-11e9-1f05-cf50bc540ca1" -version = "0.6.6" +version = "0.6.7" [[deps.SpecialFunctions]] deps = ["ChainRulesCore", "IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"] diff --git a/Project.toml b/Project.toml index 4fd2707f..50de03a2 100644 --- a/Project.toml +++ b/Project.toml @@ -17,6 +17,7 @@ PartitionedArrays = "5a9dfac6-5c52-46f7-8278-5e2210713be9" Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" [compat] +PartitionedArrays = "0.2.15" julia = "1.7" [extras] From 01a8a8a1e65be1f91cf6da13878f7fb66a1f0276 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 3 Feb 2023 12:27:50 +0100 Subject: [PATCH 70/95] Minor fix --- ...issonTest.jl => PRefinementGMGLinearSolversPoissonTests.jl} | 0 test/runtests.jl | 3 ++- 2 files changed, 2 insertions(+), 1 deletion(-) rename test/mpi/{PRefinementGMGLinearSolversPoissonTest.jl => PRefinementGMGLinearSolversPoissonTests.jl} (100%) diff --git a/test/mpi/PRefinementGMGLinearSolversPoissonTest.jl b/test/mpi/PRefinementGMGLinearSolversPoissonTests.jl similarity index 100% rename from test/mpi/PRefinementGMGLinearSolversPoissonTest.jl rename to test/mpi/PRefinementGMGLinearSolversPoissonTests.jl diff --git a/test/runtests.jl b/test/runtests.jl index a3dd1ab1..0f62f474 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -38,7 +38,8 @@ function run_tests(testdir) "GMGLinearSolversHDivRTTests.jl", "MUMPSSolversTests.jl", "GMGLinearSolversMUMPSTests.jl", - "RestrictDofsTests.jl"] + "RestrictDofsTests.jl", + "PRefinementGMGLinearSolversPoissonTests.jl"] np = 4 #extra_args = "-s 2 2 -r 2" extra_args = "" From 09db08df1b0694d31994c82a3051300655a24023 Mon Sep 
17 00:00:00 2001 From: JordiManyer Date: Tue, 14 Feb 2023 11:27:07 +1100 Subject: [PATCH 71/95] Minor changes --- src/PatchBasedSmoothers/mpi/PatchDecompositions.jl | 14 +++++++------- src/PatchBasedSmoothers/mpi/PatchFESpaces.jl | 2 +- src/PatchBasedSmoothers/seq/PatchFESpaces.jl | 14 +++++++------- 3 files changed, 15 insertions(+), 15 deletions(-) diff --git a/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl b/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl index 609b0028..c208ec4e 100644 --- a/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl +++ b/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl @@ -7,27 +7,27 @@ end function PatchDecomposition(model::GridapDistributed.DistributedDiscreteModel{Dc,Dp}; Dr=0, patch_boundary_style::PatchBoundaryStyle=PatchBoundaryExclude()) where {Dc,Dp} - patch_decompositions=map_parts(model.models) do lmodel + patch_decompositions = map_parts(model.models) do lmodel PatchDecomposition(lmodel; Dr=Dr, patch_boundary_style=patch_boundary_style) end - A=typeof(patch_decompositions) - B=typeof(model) + A = typeof(patch_decompositions) + B = typeof(model) DistributedPatchDecomposition{Dc,Dp,A,B}(patch_decompositions,model) end function Gridap.Geometry.Triangulation(a::DistributedPatchDecomposition) - trians=map_parts(a.patch_decompositions) do a + trians = map_parts(a.patch_decompositions) do a Triangulation(a) end GridapDistributed.DistributedTriangulation(trians,a.model) end function get_patch_root_dim(a::DistributedPatchDecomposition) - patch_root_dim=0 + patch_root_dim = 0 map_parts(a.patch_decompositions) do patch_decomposition - patch_root_dim=patch_decomposition.Dr + patch_root_dim = patch_decomposition.Dr end - patch_root_dim + return patch_root_dim end diff --git a/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl index 76924b8b..4778e82c 100644 --- a/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl +++ b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl @@ -12,7 +12,7 @@ function PatchFESpace(model::GridapDistributed.DistributedDiscreteModel, conformity::Gridap.FESpaces.Conformity, patch_decomposition::DistributedPatchDecomposition, Vh::GridapDistributed.DistributedSingleFieldFESpace) - root_gids=get_face_gids(model,get_patch_root_dim(patch_decomposition)) + root_gids = get_face_gids(model,get_patch_root_dim(patch_decomposition)) function f(model,patch_decomposition,Vh,partition) patches_mask = fill(false,length(partition.lid_to_gid)) diff --git a/src/PatchBasedSmoothers/seq/PatchFESpaces.jl b/src/PatchBasedSmoothers/seq/PatchFESpaces.jl index efaa1802..22235ee5 100644 --- a/src/PatchBasedSmoothers/seq/PatchFESpaces.jl +++ b/src/PatchBasedSmoothers/seq/PatchFESpaces.jl @@ -71,13 +71,13 @@ function PatchFESpace(model::DiscreteModel, return PatchFESpace(num_dofs,patch_cell_dofs_ids,Vh,patch_decomposition) end -Gridap.FESpaces.get_dof_value_type(a::PatchFESpace)=Gridap.FESpaces.get_dof_value_type(a.Vh) -Gridap.FESpaces.get_free_dof_ids(a::PatchFESpace)=Base.OneTo(a.num_dofs) -Gridap.FESpaces.get_cell_dof_ids(a::PatchFESpace)=a.patch_cell_dofs_ids -Gridap.FESpaces.get_cell_dof_ids(a::PatchFESpace,::Triangulation)=a.patch_cell_dofs_ids -Gridap.FESpaces.get_fe_basis(a::PatchFESpace)=get_fe_basis(a.Vh) -Gridap.FESpaces.ConstraintStyle(a::PatchFESpace)=Gridap.FESpaces.UnConstrained() -Gridap.FESpaces.get_vector_type(a::PatchFESpace)=get_vector_type(a.Vh) +Gridap.FESpaces.get_dof_value_type(a::PatchFESpace) = Gridap.FESpaces.get_dof_value_type(a.Vh) +Gridap.FESpaces.get_free_dof_ids(a::PatchFESpace) = 
Base.OneTo(a.num_dofs)
+Gridap.FESpaces.get_cell_dof_ids(a::PatchFESpace) = a.patch_cell_dofs_ids
+Gridap.FESpaces.get_cell_dof_ids(a::PatchFESpace,::Triangulation) = a.patch_cell_dofs_ids
+Gridap.FESpaces.get_fe_basis(a::PatchFESpace) = get_fe_basis(a.Vh)
+Gridap.FESpaces.ConstraintStyle(::PatchFESpace) = Gridap.FESpaces.UnConstrained()
+Gridap.FESpaces.get_vector_type(a::PatchFESpace) = get_vector_type(a.Vh)
 
 function Gridap.FESpaces.scatter_free_and_dirichlet_values(f::PatchFESpace,free_values,dirichlet_values)
   cell_vals = Gridap.Fields.PosNegReindex(free_values,dirichlet_values)

From 90e4db8eca85dc0f77338e93eb15008c873ac3df Mon Sep 17 00:00:00 2001
From: JordiManyer
Date: Wed, 15 Feb 2023 14:58:36 +1100
Subject: [PATCH 72/95] Weights and contributions are now redistributed

---
 src/PatchBasedSmoothers/mpi/PatchFESpaces.jl | 43 ++++++++++--------
 src/PatchBasedSmoothers/seq/PatchFESpaces.jl | 41 ++++++++++-------
 test/seq/DistributedPatchFESpacesTests.jl    | 48 ++++++++++++++++++++
 3 files changed, 98 insertions(+), 34 deletions(-)
 create mode 100644 test/seq/DistributedPatchFESpacesTests.jl

diff --git a/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl
index 4778e82c..be1f53ab 100644
--- a/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl
+++ b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl
@@ -1,6 +1,6 @@
 # Rationale behind distributed PatchFESpace:
 # 1. Patches have an owner. Only owners compute subspace correction.
-#    If am not owner of a patch, all dofs in my patch become -1.
+#    If I am not the owner of a patch, all DoFs in my patch become -1. [DONE]
 # 2. Subspace correction on an owned patch may affect DoFs which
 #    are non-owned. These corrections should be sent to the owner
 #    process. I.e., NO -> O (reversed) communication. [PENDING]
@@ -48,10 +48,7 @@ end
 function prolongate!(x::PVector,
                      Ph::GridapDistributed.DistributedSingleFieldFESpace,
                      y::PVector)
-  parts=get_part_ids(x.owned_values)
-  Gridap.Helpers.@notimplementedif num_parts(parts)!=1
-
-  map_parts(x.owned_values,Ph.spaces,y.owned_values) do x,Ph,y
+  map_parts(x.values,Ph.spaces,y.values) do x,Ph,y
     prolongate!(x,Ph,y)
   end
 end
@@ -59,22 +56,32 @@ end
 function inject!(x::PVector,
                  Ph::GridapDistributed.DistributedSingleFieldFESpace,
                  y::PVector,
-                 w::PVector)
-  parts = get_part_ids(x.owned_values)
-  Gridap.Helpers.@notimplementedif num_parts(parts)!=1
-
-  map_parts(x.owned_values,Ph.spaces,y.owned_values,w.owned_values) do x,Ph,y,w
-    inject!(x,Ph,y,w)
+                 w::PVector,
+                 w_sums::PVector)
+
+  map_parts(x.values,Ph.spaces,y.values,w.values,w_sums.values) do x,Ph,y,w,w_sums
+    inject!(x,Ph,y,w,w_sums)
   end
+
+  # Exchange local contributions
+  assemble!(x)
+  exchange!(x) # TO CONSIDER: Is this necessary? Do we need ghosts for later?
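+  # (assemble! accumulates the local contributions of each part into the owner
+  #  entries, and the exchange! afterwards copies the owner values back onto the
+  #  ghosts, mirroring what compute_weight_operators below does for w_sums.)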
+ return x end -function compute_weight_operators(Ph::GridapDistributed.DistributedSingleFieldFESpace) - parts = get_part_ids(Ph.spaces) - Gridap.Helpers.@notimplementedif num_parts(parts) != 1 - +function compute_weight_operators(Ph::GridapDistributed.DistributedSingleFieldFESpace,Vh) + # Local weights and partial sums w = PVector(0.0,Ph.gids) - map_parts(w.owned_values,Ph.spaces) do w, Ph - w .= compute_weight_operators(Ph) + w_sums = PVector(0.0,Vh.gids) + map_parts(w.values,w_sums.values,Ph.spaces) do w, w_sums, Ph + _w, _w_sums = compute_weight_operators(Ph) + w .= _w + w_sums .= _w_sums end - return w + + # partial sums -> global sums + assemble!(w_sums) # ghost -> owners + exchange!(w_sums) # repopulate ghosts with owner info + + return w, w_sums end diff --git a/src/PatchBasedSmoothers/seq/PatchFESpaces.jl b/src/PatchBasedSmoothers/seq/PatchFESpaces.jl index 22235ee5..852e4c22 100644 --- a/src/PatchBasedSmoothers/seq/PatchFESpaces.jl +++ b/src/PatchBasedSmoothers/seq/PatchFESpaces.jl @@ -78,6 +78,7 @@ Gridap.FESpaces.get_cell_dof_ids(a::PatchFESpace,::Triangulation) = a.patch_cell Gridap.FESpaces.get_fe_basis(a::PatchFESpace) = get_fe_basis(a.Vh) Gridap.FESpaces.ConstraintStyle(::PatchFESpace) = Gridap.FESpaces.UnConstrained() Gridap.FESpaces.get_vector_type(a::PatchFESpace) = get_vector_type(a.Vh) +Gridap.FESpaces.get_fe_dof_basis(a::PatchFESpace) = get_fe_dof_basis(a.Vh) function Gridap.FESpaces.scatter_free_and_dirichlet_values(f::PatchFESpace,free_values,dirichlet_values) cell_vals = Gridap.Fields.PosNegReindex(free_values,dirichlet_values) @@ -140,7 +141,7 @@ end # TO-THINK/STRESS: # 1. MultiFieldFESpace case? -# 2. FESpaces which are directly defined on physical space? We think this cased is covered by +# 2. FESpaces which are directly defined on physical space? We think this case is covered by # the fact that we are using a CellConformity instance to rely on ownership info. # free_dofs_offset : the ID from which we start to assign free DoF IDs upwards # Note: we do not actually need to generate a global numbering for Dirichlet DoFs. 
We can @@ -255,8 +256,8 @@ end # x \in SingleFESpace # y \in PatchFESpace function inject!(x,Ph::PatchFESpace,y) - w = compute_weight_operators(Ph) - inject!(x,Ph::PatchFESpace,y,w) + w, w_sums = compute_weight_operators(Ph) + inject!(x,Ph::PatchFESpace,y,w,w_sums) end function inject!(x,Ph::PatchFESpace,y,w) @@ -288,14 +289,14 @@ function inject!(x,Ph::PatchFESpace,y,w) end end -function compute_weight_operators(Ph::PatchFESpace) - cell_dof_ids = get_cell_dof_ids(Ph.Vh) - cache_cell_dof_ids = array_cache(cell_dof_ids) - cache_patch_cells = array_cache(Ph.patch_decomposition.patch_cells) - - w = zeros(num_free_dofs(Ph.Vh)) +function inject!(x,Ph::PatchFESpace,y,w,w_sums) touched = Dict{Int,Bool}() cell_mesh_overlapped = 1 + cache_patch_cells = array_cache(Ph.patch_decomposition.patch_cells) + cell_dof_ids = get_cell_dof_ids(Ph.Vh) + cache_cell_dof_ids = array_cache(cell_dof_ids) + + fill!(x,0.0) for patch = 1:length(Ph.patch_decomposition.patch_cells) current_patch_cells = getindex!(cache_patch_cells, Ph.patch_decomposition.patch_cells, @@ -306,17 +307,25 @@ function compute_weight_operators(Ph::PatchFESpace) e = Ph.patch_cell_dofs_ids.ptrs[cell_mesh_overlapped+1]-1 current_patch_cell_dof_ids = view(Ph.patch_cell_dofs_ids.data,s:e) for (dof,pdof) in zip(current_cell_dof_ids,current_patch_cell_dof_ids) - if pdof > 0 && !(dof in keys(touched)) + if pdof > 0 && !(dof ∈ keys(touched)) touched[dof] = true - w[dof] += 1.0 + x[dof] += y[pdof] * w[pdof] / w_sums[dof] end end - cell_mesh_overlapped+=1 + cell_mesh_overlapped += 1 end empty!(touched) end - w .= 1.0 ./ w - w_Ph = similar(w,num_free_dofs(Ph)) - prolongate!(w_Ph,Ph,w) - return w_Ph +end + +function compute_weight_operators(Ph::PatchFESpace) + w = Fill(1.0,num_free_dofs(Ph)) + w_sums = compute_partial_sums(Ph,w) + return w, w_sums +end + +function compute_partial_sums(Ph::PatchFESpace,x) + x_sums = zeros(num_free_dofs(Ph.Vh)) + inject!(x_sums,Ph,x,Fill(1.0,num_free_dofs(Ph))) + return x_sums end diff --git a/test/seq/DistributedPatchFESpacesTests.jl b/test/seq/DistributedPatchFESpacesTests.jl new file mode 100644 index 00000000..ee20d3f9 --- /dev/null +++ b/test/seq/DistributedPatchFESpacesTests.jl @@ -0,0 +1,48 @@ +module DistributedPatchFESpacesTests + +using Test +using Gridap +using GridapDistributed +using PartitionedArrays +using FillArrays + +include("../../src/PatchBasedSmoothers/PatchBasedSmoothers.jl") +import .PatchBasedSmoothers as PBS + +backend = SequentialBackend() +ranks = (1,2) +parts = get_part_ids(backend,ranks) + +domain = (0.0,1.0,0.0,1.0) +partition = (2,4) +model = CartesianDiscreteModel(parts,domain,partition) + +order = 1 +reffe = ReferenceFE(lagrangian,Float64,order) +Vh = TestFESpace(model,reffe) +PD = PBS.PatchDecomposition(model,patch_boundary_style=PBS.PatchBoundaryInclude()) +Ph = PBS.PatchFESpace(model,reffe,Gridap.ReferenceFEs.H1Conformity(),PD,Vh) + +w, w_sums = PBS.compute_weight_operators(Ph,Vh); + +xP = PVector(1.0,Ph.gids) +yP = PVector(0.0,Ph.gids) +x = PVector(1.0,Vh.gids) +y = PVector(0.0,Vh.gids) + +PBS.prolongate!(yP,Ph,x) +PBS.inject!(y,Ph,yP,w,w_sums) + + +assembler = SparseMatrixAssembler(Ph,Ph) +Ωₚ = Triangulation(PD) +dΩₚ = Measure(Ωₚ,2*order+1) +a(u,v) = ∫(∇(v)⋅∇(u))*dΩₚ +l(v) = ∫(1*v)*dΩₚ + +Ah = assemble_matrix(a,assembler,Ph,Ph) +fh = assemble_vector(l,assembler,Ph) + + + +end \ No newline at end of file From 13003bf79590160308ce9c18ef00b3e631fcfa2a Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 15 Feb 2023 17:49:43 +1100 Subject: [PATCH 73/95] Adapted the PatchBasedLinearSolvers 
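The patch-based solver now carries the original FE space Vh together with the
patch space Ph, so that the injection weights (w, w_sums) are computed once at
numerical-setup time. A minimal usage sketch, mirroring the updated
test/seq/DistributedPatchFESpacesTests.jl (a, Ph, Vh, Ah and fh as defined there;
the number of smoothing steps and the damping factor are illustrative):

    M = PatchBasedLinearSolver(a,Ph,Vh,LUSolver())
    s = RichardsonSmoother(M,10,1.0/3.0)
    x = _allocate_col_vector(Ah)
    r = fh - Ah*x
    solve!(x,s,Ah,r)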
--- .../mpi/PatchDecompositions.jl | 2 +- src/PatchBasedSmoothers/mpi/PatchFESpaces.jl | 2 +- .../seq/PatchBasedLinearSolvers.jl | 34 +++++++++++-------- src/PatchBasedSmoothers/seq/PatchFESpaces.jl | 4 +-- test/seq/DistributedPatchFESpacesTests.jl | 26 +++++++++++--- 5 files changed, 44 insertions(+), 24 deletions(-) diff --git a/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl b/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl index c208ec4e..5075c697 100644 --- a/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl +++ b/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl @@ -25,7 +25,7 @@ function Gridap.Geometry.Triangulation(a::DistributedPatchDecomposition) end function get_patch_root_dim(a::DistributedPatchDecomposition) - patch_root_dim = 0 + patch_root_dim = -1 map_parts(a.patch_decompositions) do patch_decomposition patch_root_dim = patch_decomposition.Dr end diff --git a/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl index be1f53ab..972a42f4 100644 --- a/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl +++ b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl @@ -74,7 +74,7 @@ function compute_weight_operators(Ph::GridapDistributed.DistributedSingleFieldFE w = PVector(0.0,Ph.gids) w_sums = PVector(0.0,Vh.gids) map_parts(w.values,w_sums.values,Ph.spaces) do w, w_sums, Ph - _w, _w_sums = compute_weight_operators(Ph) + _w, _w_sums = compute_weight_operators(Ph,Ph.Vh) w .= _w w_sums .= _w_sums end diff --git a/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl b/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl index a9d15761..1289b1b3 100644 --- a/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl +++ b/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl @@ -12,9 +12,10 @@ # (not 100% sure, to investigate) -struct PatchBasedLinearSolver{A} <: Gridap.Algebra.LinearSolver +struct PatchBasedLinearSolver{A,B} <: Gridap.Algebra.LinearSolver bilinear_form :: Function Ph :: A + Vh :: B M :: Gridap.Algebra.LinearSolver end @@ -23,29 +24,30 @@ struct PatchBasedSymbolicSetup <: Gridap.Algebra.SymbolicSetup end function Gridap.Algebra.symbolic_setup(ls::PatchBasedLinearSolver,mat::AbstractMatrix) - PatchBasedSymbolicSetup(ls) + return PatchBasedSymbolicSetup(ls) end -struct PatchBasedSmootherNumericalSetup{A,B,C,D,E} <: Gridap.Algebra.NumericalSetup +struct PatchBasedSmootherNumericalSetup{A,B,C,D,E,F} <: Gridap.Algebra.NumericalSetup solver :: PatchBasedLinearSolver Ap :: A nsAp :: B rp :: C dxp :: D w :: E + w_sums :: F end function Gridap.Algebra.numerical_setup(ss::PatchBasedSymbolicSetup,A::AbstractMatrix) - Ph = ss.solver.Ph + Ph, Vh = ss.solver.Ph, ss.solver.Vh assembler = SparseMatrixAssembler(Ph,Ph) - Ap = assemble_matrix(ss.solver.bilinear_form,assembler,Ph,Ph) - solver = ss.solver.M - ssAp = symbolic_setup(solver,Ap) - nsAp = numerical_setup(ssAp,Ap) - rp = _allocate_row_vector(Ap) - dxp = _allocate_col_vector(Ap) - w = compute_weight_operators(Ph) - PatchBasedSmootherNumericalSetup(ss.solver,Ap,nsAp,rp,dxp,w) + Ap = assemble_matrix(ss.solver.bilinear_form,assembler,Ph,Ph) + solver = ss.solver.M + ssAp = symbolic_setup(solver,Ap) + nsAp = numerical_setup(ssAp,Ap) + rp = _allocate_row_vector(Ap) + dxp = _allocate_col_vector(Ap) + w, w_sums = compute_weight_operators(Ph,Vh) + return PatchBasedSmootherNumericalSetup(ss.solver,Ap,nsAp,rp,dxp,w,w_sums) end function _allocate_col_vector(A::AbstractMatrix) @@ -69,9 +71,11 @@ function Gridap.Algebra.numerical_setup!(ns::PatchBasedSmootherNumericalSetup, A end function 
Gridap.Algebra.solve!(x::AbstractVector,ns::PatchBasedSmootherNumericalSetup,r::AbstractVector) - Ap, nsAp, rp, dxp, w = ns.Ap, ns.nsAp, ns.rp, ns.dxp, ns.w + Ap, nsAp, rp, dxp, w, w_sums = ns.Ap, ns.nsAp, ns.rp, ns.dxp, ns.w, ns.w_sums + Ph = ns.solver.Ph - prolongate!(rp,ns.solver.Ph,r) + prolongate!(rp,Ph,r) solve!(dxp,nsAp,rp) - inject!(x,ns.solver.Ph,dxp,w) + inject!(x,Ph,dxp,w,w_sums) + return x end diff --git a/src/PatchBasedSmoothers/seq/PatchFESpaces.jl b/src/PatchBasedSmoothers/seq/PatchFESpaces.jl index 852e4c22..36fc2395 100644 --- a/src/PatchBasedSmoothers/seq/PatchFESpaces.jl +++ b/src/PatchBasedSmoothers/seq/PatchFESpaces.jl @@ -256,7 +256,7 @@ end # x \in SingleFESpace # y \in PatchFESpace function inject!(x,Ph::PatchFESpace,y) - w, w_sums = compute_weight_operators(Ph) + w, w_sums = compute_weight_operators(Ph,Ph.Vh) inject!(x,Ph::PatchFESpace,y,w,w_sums) end @@ -318,7 +318,7 @@ function inject!(x,Ph::PatchFESpace,y,w,w_sums) end end -function compute_weight_operators(Ph::PatchFESpace) +function compute_weight_operators(Ph::PatchFESpace,Vh) w = Fill(1.0,num_free_dofs(Ph)) w_sums = compute_partial_sums(Ph,w) return w, w_sums diff --git a/test/seq/DistributedPatchFESpacesTests.jl b/test/seq/DistributedPatchFESpacesTests.jl index ee20d3f9..5b1ee651 100644 --- a/test/seq/DistributedPatchFESpacesTests.jl +++ b/test/seq/DistributedPatchFESpacesTests.jl @@ -1,14 +1,22 @@ module DistributedPatchFESpacesTests +using LinearAlgebra using Test +using PartitionedArrays using Gridap +using Gridap.Helpers +using Gridap.Geometry using GridapDistributed -using PartitionedArrays using FillArrays include("../../src/PatchBasedSmoothers/PatchBasedSmoothers.jl") import .PatchBasedSmoothers as PBS +# This is needed for assembly +include("../../src/MultilevelTools/GridapFixes.jl") + +include("../../src/LinearSolvers/RichardsonSmoothers.jl") + backend = SequentialBackend() ranks = (1,2) parts = get_part_ids(backend,ranks) @@ -32,17 +40,25 @@ y = PVector(0.0,Vh.gids) PBS.prolongate!(yP,Ph,x) PBS.inject!(y,Ph,yP,w,w_sums) +@test x ≈ y +PBS.inject!(x,Ph,xP,w,w_sums) +PBS.prolongate!(yP,Ph,x) +@test xP ≈ yP -assembler = SparseMatrixAssembler(Ph,Ph) Ωₚ = Triangulation(PD) dΩₚ = Measure(Ωₚ,2*order+1) a(u,v) = ∫(∇(v)⋅∇(u))*dΩₚ l(v) = ∫(1*v)*dΩₚ -Ah = assemble_matrix(a,assembler,Ph,Ph) -fh = assemble_vector(l,assembler,Ph) - +assembler = SparseMatrixAssembler(Vh,Vh) +Ah = assemble_matrix(a,assembler,Vh,Vh) +fh = assemble_vector(l,assembler,Vh) +M = PBS.PatchBasedLinearSolver(a,Ph,Vh,LUSolver()) +s = RichardsonSmoother(M,10,1.0/3.0) +x = PBS._allocate_col_vector(Ah) +r = fh-Ah*x +solve!(x,s,Ah,r) end \ No newline at end of file From 2e4f9e6c72018f11709006a282a73a85144bda53 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 16 Feb 2023 17:04:43 +1100 Subject: [PATCH 74/95] Modifications to solver --- Project.toml | 11 ++-- src/PatchBasedSmoothers/mpi/PatchFESpaces.jl | 2 + .../seq/PatchBasedLinearSolvers.jl | 66 +++++++++++++++---- test/seq/DistributedPatchFESpacesTests.jl | 40 ++++++++++- test/seq/PatchLinearSolverTests.jl | 8 +-- 5 files changed, 101 insertions(+), 26 deletions(-) diff --git a/Project.toml b/Project.toml index 50de03a2..62e5ddec 100644 --- a/Project.toml +++ b/Project.toml @@ -1,8 +1,12 @@ +authors = ["Santiago Badia ", "Jordi Manyer ", "Alberto F. Martin ", "Javier Principe "] name = "GridapSolvers" uuid = "6d3209ee-5e3c-4db7-a716-942eb12ed534" -authors = ["Santiago Badia ", "Jordi Manyer ", "Alberto F. 
Martin ", "Javier Principe "] version = "0.1.0" +[compat] +PartitionedArrays = "0.2.15" +julia = "1.7" + [deps] ArgParse = "c7e460c6-2fb9-53a9-8c5b-16f535851c63" FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b" @@ -16,11 +20,8 @@ MPI = "da04e1cc-30fd-572f-bb4f-1f8673147195" PartitionedArrays = "5a9dfac6-5c52-46f7-8278-5e2210713be9" Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" -[compat] -PartitionedArrays = "0.2.15" -julia = "1.7" - [extras] +MPIPreferences = "3da0fdf6-3ccc-4f1b-acd9-58baa6c99267" Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40" [targets] diff --git a/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl index 972a42f4..cdd0c40d 100644 --- a/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl +++ b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl @@ -51,6 +51,7 @@ function prolongate!(x::PVector, map_parts(x.values,Ph.spaces,y.values) do x,Ph,y prolongate!(x,Ph,y) end + exchange!(x) end function inject!(x::PVector, @@ -59,6 +60,7 @@ function inject!(x::PVector, w::PVector, w_sums::PVector) + exchange!(y) map_parts(x.values,Ph.spaces,y.values,w.values,w_sums.values) do x,Ph,y,w,w_sums inject!(x,Ph,y,w,w_sums) end diff --git a/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl b/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl index 1289b1b3..3758f434 100644 --- a/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl +++ b/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl @@ -27,27 +27,45 @@ function Gridap.Algebra.symbolic_setup(ls::PatchBasedLinearSolver,mat::AbstractM return PatchBasedSymbolicSetup(ls) end -struct PatchBasedSmootherNumericalSetup{A,B,C,D,E,F} <: Gridap.Algebra.NumericalSetup - solver :: PatchBasedLinearSolver - Ap :: A - nsAp :: B - rp :: C - dxp :: D - w :: E - w_sums :: F +struct PatchBasedSmootherNumericalSetup{A,B,C,D} <: Gridap.Algebra.NumericalSetup + solver :: PatchBasedLinearSolver + Ap :: A + Ap_ns :: B + weights :: C + caches :: D end function Gridap.Algebra.numerical_setup(ss::PatchBasedSymbolicSetup,A::AbstractMatrix) Ph, Vh = ss.solver.Ph, ss.solver.Vh + weights = compute_weight_operators(Ph,Vh) + + # Assemble patch system assembler = SparseMatrixAssembler(Ph,Ph) Ap = assemble_matrix(ss.solver.bilinear_form,assembler,Ph,Ph) - solver = ss.solver.M - ssAp = symbolic_setup(solver,Ap) - nsAp = numerical_setup(ssAp,Ap) + + # Patch system solver + Ap_solver = ss.solver.M + Ap_ss = symbolic_setup(Ap_solver,Ap) + Ap_ns = numerical_setup(Ap_ss,Ap) + + # Caches + caches = _patch_based_solver_caches(Ph,Ap) + + return PatchBasedSmootherNumericalSetup(ss.solver,Ap,Ap_ns,weights,caches) +end + +function _patch_based_solver_caches(Ph::PatchFESpace,Ap::AbstractMatrix) rp = _allocate_row_vector(Ap) dxp = _allocate_col_vector(Ap) - w, w_sums = compute_weight_operators(Ph,Vh) - return PatchBasedSmootherNumericalSetup(ss.solver,Ap,nsAp,rp,dxp,w,w_sums) + return rp, dxp +end + +function _patch_based_solver_caches(Ph::GridapDistributed.DistributedSingleFieldFESpace,Ap::PSparseMatrix) + rp_mat = _allocate_row_vector(Ap) + dxp_mat = _allocate_col_vector(Ap) + rp = PVector(0.0,Ph.gids) + dxp = PVector(0.0,Ph.gids) + return rp_mat, dxp_mat, rp, dxp end function _allocate_col_vector(A::AbstractMatrix) @@ -71,11 +89,31 @@ function Gridap.Algebra.numerical_setup!(ns::PatchBasedSmootherNumericalSetup, A end function Gridap.Algebra.solve!(x::AbstractVector,ns::PatchBasedSmootherNumericalSetup,r::AbstractVector) - Ap, nsAp, rp, dxp, w, w_sums = ns.Ap, ns.nsAp, ns.rp, ns.dxp, ns.w, ns.w_sums + Ap_ns, weights, caches = ns.Ap_ns, 
ns.weights, ns.caches + Ph = ns.solver.Ph + w, w_sums = weights + rp, dxp = caches prolongate!(rp,Ph,r) solve!(dxp,nsAp,rp) inject!(x,Ph,dxp,w,w_sums) + + return x +end + +function Gridap.Algebra.solve!(x::PVector,ns::PatchBasedSmootherNumericalSetup,r::PVector) + Ap_ns, weights, caches = ns.Ap_ns, ns.weights, ns.caches + + Ph = ns.solver.Ph + w, w_sums = weights + rp_mat, dxp_mat, rp, dxp = caches + + prolongate!(rp,Ph,r) + copy!(rp_mat,rp) + solve!(dxp_mat,Ap_ns,rp_mat) + copy!(dxp,dxp_mat) + inject!(x,Ph,dxp,w,w_sums) + return x end diff --git a/test/seq/DistributedPatchFESpacesTests.jl b/test/seq/DistributedPatchFESpacesTests.jl index 5b1ee651..c9ee8232 100644 --- a/test/seq/DistributedPatchFESpacesTests.jl +++ b/test/seq/DistributedPatchFESpacesTests.jl @@ -1,5 +1,8 @@ module DistributedPatchFESpacesTests +ENV["JULIA_MPI_BINARY"] = "system" +ENV["JULIA_MPI_PATH"] = "/usr/lib/x86_64-linux-gnu" + using LinearAlgebra using Test using PartitionedArrays @@ -28,7 +31,7 @@ model = CartesianDiscreteModel(parts,domain,partition) order = 1 reffe = ReferenceFE(lagrangian,Float64,order) Vh = TestFESpace(model,reffe) -PD = PBS.PatchDecomposition(model,patch_boundary_style=PBS.PatchBoundaryInclude()) +PD = PBS.PatchDecomposition(model)#,patch_boundary_style=PBS.PatchBoundaryInclude()) Ph = PBS.PatchFESpace(model,reffe,Gridap.ReferenceFEs.H1Conformity(),PD,Vh) w, w_sums = PBS.compute_weight_operators(Ph,Vh); @@ -56,9 +59,40 @@ Ah = assemble_matrix(a,assembler,Vh,Vh) fh = assemble_vector(l,assembler,Vh) M = PBS.PatchBasedLinearSolver(a,Ph,Vh,LUSolver()) -s = RichardsonSmoother(M,10,1.0/3.0) +R = RichardsonSmoother(M,10,1.0/3.0) +Rss = symbolic_setup(R,Ah) +Rns = numerical_setup(Rss,Ah) + x = PBS._allocate_col_vector(Ah) r = fh-Ah*x -solve!(x,s,Ah,r) +exchange!(r) +solve!(x,Rns,r) + +Mss = symbolic_setup(M,Ah) +Mns = numerical_setup(Mss,Ah) +solve!(x,Mns,r) + +assembler_P = SparseMatrixAssembler(Ph,Ph) +Ahp = assemble_matrix(a,assembler_P,Ph,Ph) +fhp = assemble_vector(l,assembler_P,Ph) + +lu = LUSolver() +luss = symbolic_setup(lu,Ahp) +luns = numerical_setup(luss,Ahp) + +rp = PVector(0.0,Ph.gids) +PBS.prolongate!(rp,Ph,r) + +rp_mat = PVector(0.0,Ahp.cols) +copy!(rp_mat,rp) +xp_mat = PVector(0.0,Ahp.cols) + +solve!(xp_mat,luns,rp_mat) + +xp = PVector(0.0,Ph.gids) +copy!(xp,xp_mat) + +w, w_sums = PBS.compute_weight_operators(Ph,Vh); +PBS.inject!(x,Ph,xp,w,w_sums) end \ No newline at end of file diff --git a/test/seq/PatchLinearSolverTests.jl b/test/seq/PatchLinearSolverTests.jl index 9e38869f..015e5134 100644 --- a/test/seq/PatchLinearSolverTests.jl +++ b/test/seq/PatchLinearSolverTests.jl @@ -10,13 +10,13 @@ module PatchLinearSolverTests using GridapSolvers using GridapSolvers.PatchBasedSmoothers - const order=1 + order=1 - function returns_PD_Ph_xh_Vh(model) + function returns_PD_Ph_xh_Vh(model;style=GridapSolvers.PatchBasedSmoothers.PatchBoundaryExclude()) reffe = ReferenceFE(lagrangian,Float64,order) # reffe=ReferenceFE(lagrangian,VectorValue{2,Float64},order) @santiagobadia: For Vector Laplacian Vh = TestFESpace(model,reffe) - PD = PatchDecomposition(model) + PD = PatchDecomposition(model;patch_boundary_style=style) Ph = PatchFESpace(model,reffe,H1Conformity(),PD,Vh) assembler = SparseMatrixAssembler(Ph,Ph) Ωₚ = Triangulation(PD) @@ -51,7 +51,7 @@ module PatchLinearSolverTests dΩₚ = Measure(Ωₚ,2*order+1) a(u,v) = ∫(∇(v)⋅∇(u))*dΩₚ # α =1,0; a(u,v)=∫(v⋅u)dΩ+∫(α*∇(v)⊙∇(u))dΩ # @santiagobadia: For vector Laplacian - M = PatchBasedLinearSolver(a,Ph,LUSolver()) + M = 
PatchBasedLinearSolver(a,Ph,Vh,LUSolver()) s = RichardsonSmoother(M,10,1.0/3.0) x = GridapSolvers.PatchBasedSmoothers._allocate_col_vector(A) r = b-A*x From 3d96def0ff76ed097c2c3e2e8c342004ae583cb8 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Mon, 20 Feb 2023 15:13:50 +1100 Subject: [PATCH 75/95] Bugfix when using TransferOperators in solution mode --- Manifest.toml | 48 ++++--- .../DistributedGridTransferOperators.jl | 35 +++-- test/mpi/Debugging.jl | 125 ++++++++++++++++++ test/seq/Debugging.jl | 74 +++++++++++ 4 files changed, 249 insertions(+), 33 deletions(-) create mode 100644 test/mpi/Debugging.jl create mode 100644 test/seq/Debugging.jl diff --git a/Manifest.toml b/Manifest.toml index fbfd9512..1547091f 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -15,6 +15,12 @@ git-tree-sha1 = "faa260e4cb5aba097a73fab382dd4b5819d8ec8c" uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" version = "0.4.4" +[[deps.Adapt]] +deps = ["LinearAlgebra"] +git-tree-sha1 = "0310e08cb19f5da31d08341c6120c047598f5b9c" +uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" +version = "3.5.0" + [[deps.ArgCheck]] git-tree-sha1 = "a3a402a35a2f7e0b87828ccabbd5ebfbebe356b4" uuid = "dce04be8-c92d-5529-be00-80e4d2c0e197" @@ -30,11 +36,11 @@ version = "1.1.4" uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f" version = "1.1.1" -[[deps.ArrayInterfaceCore]] -deps = ["LinearAlgebra", "SnoopPrecompile", "SparseArrays", "SuiteSparse"] -git-tree-sha1 = "e5f08b5689b1aad068e01751889f2f615c7db36d" -uuid = "30b0a656-2188-435a-8636-2ec0e6a096e2" -version = "0.1.29" +[[deps.ArrayInterface]] +deps = ["Adapt", "LinearAlgebra", "Requires", "SnoopPrecompile", "SparseArrays", "SuiteSparse"] +git-tree-sha1 = "1da9f7b4f41abece283e0fbeb7ed406e7905dcdd" +uuid = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9" +version = "7.0.0" [[deps.ArrayLayouts]] deps = ["FillArrays", "LinearAlgebra", "SparseArrays"] @@ -77,9 +83,9 @@ version = "1.15.7" [[deps.ChangesOfVariables]] deps = ["ChainRulesCore", "LinearAlgebra", "Test"] -git-tree-sha1 = "844b061c104c408b24537482469400af6075aae4" +git-tree-sha1 = "485193efd2176b88e6622a39a246f8c5b600e74e" uuid = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0" -version = "0.1.5" +version = "0.1.6" [[deps.CodecZlib]] deps = ["TranscodingStreams", "Zlib_jll"] @@ -192,10 +198,10 @@ uuid = "1a297f60-69ca-5386-bcde-b61e274b549b" version = "0.12.8" [[deps.FiniteDiff]] -deps = ["ArrayInterfaceCore", "LinearAlgebra", "Requires", "Setfield", "SparseArrays", "StaticArrays"] -git-tree-sha1 = "04ed1f0029b6b3af88343e439b995141cb0d0b8d" +deps = ["ArrayInterface", "LinearAlgebra", "Requires", "Setfield", "SparseArrays", "StaticArrays"] +git-tree-sha1 = "ed1b56934a2f7a65035976985da71b6a65b4f2cf" uuid = "6a86dc24-6348-571c-b903-95158fe2bd41" -version = "2.17.0" +version = "2.18.0" [[deps.ForwardDiff]] deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions", "StaticArrays"] @@ -217,7 +223,7 @@ version = "0.17.16" [[deps.GridapDistributed]] deps = ["FillArrays", "Gridap", "LinearAlgebra", "MPI", "PartitionedArrays", "SparseArrays", "SparseMatricesCSR", "WriteVTK"] -git-tree-sha1 = "2f998a322a07e7c19312e617868a0c1589727828" +git-tree-sha1 = "0cd6de7e550a07ae3a51aff738cc53ce3055753d" repo-rev = "p4est-migration" repo-url = "https://github.com/gridap/GridapDistributed.jl.git" uuid = "f9701e48-63b3-45aa-9a63-9bc6c271f355" @@ -338,9 +344,9 @@ uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" [[deps.LogExpFunctions]] deps = ["ChainRulesCore", "ChangesOfVariables", 
"DocStringExtensions", "InverseFunctions", "IrrationalConstants", "LinearAlgebra"] -git-tree-sha1 = "45b288af6956e67e621c5cbb2d75a261ab58300b" +git-tree-sha1 = "071602a0be5af779066df0d7ef4e14945a010818" uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688" -version = "0.3.20" +version = "0.3.22" [[deps.Logging]] uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" @@ -429,9 +435,9 @@ version = "4.5.1" [[deps.NaNMath]] deps = ["OpenLibm_jll"] -git-tree-sha1 = "a7c3d1da1189a1c2fe843a3bfa04d18d20eb3211" +git-tree-sha1 = "0877504529a3e5c3343c6f8b4c0381e57e4387e4" uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3" -version = "1.0.1" +version = "1.0.2" [[deps.NearestNeighbors]] deps = ["Distances", "StaticArrays"] @@ -508,9 +514,9 @@ version = "0.12.3" [[deps.Parsers]] deps = ["Dates", "SnoopPrecompile"] -git-tree-sha1 = "151d91d63d8d6c1a5789ecb7de51547e00480f1b" +git-tree-sha1 = "6f4fbcd1ad45905a5dee3f4256fabb49aa2110c6" uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" -version = "2.5.4" +version = "2.5.7" [[deps.PartitionedArrays]] deps = ["Distances", "IterativeSolvers", "LinearAlgebra", "MPI", "Printf", "SparseArrays", "SparseMatricesCSR"] @@ -541,9 +547,9 @@ uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" [[deps.QuadGK]] deps = ["DataStructures", "LinearAlgebra"] -git-tree-sha1 = "de191bc385072cc6c7ed3ffdc1caeed3f22c74d4" +git-tree-sha1 = "786efa36b7eff813723c4849c90456609cf06661" uuid = "1fd47b50-473d-5c70-9696-f719f8f3bcdc" -version = "2.7.0" +version = "2.8.1" [[deps.REPL]] deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"] @@ -622,9 +628,9 @@ version = "2.1.7" [[deps.StaticArrays]] deps = ["LinearAlgebra", "Random", "StaticArraysCore", "Statistics"] -git-tree-sha1 = "6954a456979f23d05085727adb17c4551c19ecd1" +git-tree-sha1 = "2d7d9e1ddadc8407ffd460e24218e37ef52dd9a3" uuid = "90137ffa-7385-5640-81b9-e52037218182" -version = "1.5.12" +version = "1.5.16" [[deps.StaticArraysCore]] git-tree-sha1 = "6b7ba252635a5eff6a0b0664a41ee140a1c9e72a" diff --git a/src/MultilevelTools/DistributedGridTransferOperators.jl b/src/MultilevelTools/DistributedGridTransferOperators.jl index 6a17590e..19d5cd3c 100644 --- a/src/MultilevelTools/DistributedGridTransferOperators.jl +++ b/src/MultilevelTools/DistributedGridTransferOperators.jl @@ -91,13 +91,12 @@ function _get_projection_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mode:: aH(u,v) = ∫(v⋅u)*dΩH lH(v,uh) = ∫(v⋅uh)*dΩhH assem = SparseMatrixAssembler(UH,VH) - - AH = assemble_matrix(aH,assem,UH,VH) - xH = PVector(0.0,AH.rows) - - v = get_fe_basis(VH) - vec_data = collect_cell_vector(VH,lH(v,1.0)) - bH = allocate_vector(assem,vec_data) + + u_dir = (mode == :solution) ? 
interpolate(0.0,UH) : interpolate_everywhere(0.0,UH) + u,v = get_trial_fe_basis(UH), get_fe_basis(VH) + data = collect_cell_matrix_and_vector(UH,VH,aH(u,v),lH(v,0.0),u_dir) + AH,bH = assemble_matrix_and_vector(assem,data) + xH = PVector(0.0,AH.rows) cache_refine = model_h, Uh, fv_h, dv_h, VH, AH, lH, xH, bH, assem else @@ -168,7 +167,7 @@ function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:p uH = FEFunction(UH,fv_H,dv_H) uh = interpolate!(uH,fv_h,Uh) copy!(y,fv_h) # FE layout -> Matrix layout - + exchange!(y) return y end @@ -181,7 +180,7 @@ function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:r uh = FEFunction(Uh,fv_h,dv_h) uH = interpolate!(uh,fv_H,UH) copy!(y,fv_H) # FE layout -> Matrix layout - + exchange!(y) return y end @@ -194,9 +193,11 @@ function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:r uh = FEFunction(Uh,fv_h,dv_h) v = get_fe_basis(VH) vec_data = collect_cell_vector(VH,lH(v,uh)) - assemble_vector!(bH,assem,vec_data) # Matrix layout + assemble_vector_add!(bH,assem,vec_data) # Matrix layout + display(bH.values) IterativeSolvers.cg!(xH,AH,bH;reltol=1.0e-06) copy!(y,xH) + exchange!(y) return y end @@ -207,9 +208,11 @@ function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:r model_h, Uh, fv_h, dv_h, UH, fv_H, dv_H = cache_refine copy!(fv_h,x) # Matrix layout -> FE layout + exchange!(fv_h) restrict_dofs!(fv_H,fv_h,dv_h,Uh,UH,get_adaptivity_glue(model_h)) copy!(y,fv_H) # FE layout -> Matrix layout - + exchange!(y) + return y end @@ -241,6 +244,7 @@ function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransfer # 1 - Redistribute from fine partition to coarse partition copy!(fv_h_red,x) + exchange!(fv_h_red) redistribute_free_values!(cache_exchange,fv_h,Uh,fv_h_red,dv_h_red,Uh_red,model_h,glue;reverse=true) # 2 - Interpolate in coarse partition @@ -248,6 +252,7 @@ function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransfer uh = FEFunction(Uh,fv_h,dv_h) uH = interpolate!(uh,fv_H,UH) copy!(y,fv_H) # FE layout -> Matrix layout + exchange!(y) end return y @@ -261,16 +266,20 @@ function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransfer # 1 - Redistribute from fine partition to coarse partition copy!(fv_h_red,x) + exchange!(fv_h_red) redistribute_free_values!(cache_exchange,fv_h,Uh,fv_h_red,dv_h_red,Uh_red,model_h,glue;reverse=true) # 2 - Solve f2c projection coarse partition if !isa(y,Nothing) + exchange!(fv_h) uh = FEFunction(Uh,fv_h,dv_h) v = get_fe_basis(VH) vec_data = collect_cell_vector(VH,lH(v,uh)) - assemble_vector!(bH,assem,vec_data) # Matrix layout + assemble_vector_add!(bH,assem,vec_data) # Matrix layout + display(bH.values) IterativeSolvers.cg!(xH,AH,bH;reltol=1.0e-06) copy!(y,xH) + exchange!(y) end return y @@ -284,12 +293,14 @@ function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransfer # 1 - Redistribute from fine partition to coarse partition copy!(fv_h_red,x) + exchange!(fv_h_red) redistribute_free_values!(cache_exchange,fv_h,Uh,fv_h_red,dv_h_red,Uh_red,model_h,glue;reverse=true) # 2 - Interpolate in coarse partition if !isa(y,Nothing) restrict_dofs!(fv_H,fv_h,dv_h,Uh,UH,get_adaptivity_glue(model_h)) copy!(y,fv_H) # FE layout -> Matrix layout + exchange!(y) end return y diff --git a/test/mpi/Debugging.jl b/test/mpi/Debugging.jl new file mode 100644 index 00000000..b720e9d1 --- /dev/null +++ b/test/mpi/Debugging.jl @@ -0,0 +1,125 @@ + +using MPI +using PartitionedArrays +using Gridap +using 
GridapDistributed +using GridapP4est +using Test + +using GridapSolvers +using GridapSolvers.MultilevelTools + +function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) + GridapP4est.with(parts) do + domain = (0,1,0,1) + num_levels = length(num_parts_x_level) + cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) + + cparts = generate_subparts(parts,num_parts_x_level[num_levels]) + coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) + + fparts = generate_subparts(parts,num_parts_x_level[1]) + fine_model = OctreeDistributedDiscreteModel(fparts,cmodel,num_refs_coarse + num_levels) + mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) + + # Create Operators: + order = 1 + u(x) = 1.0 + reffe = ReferenceFE(lagrangian,Float64,order) + + tests = TestFESpace(mh,reffe;dirichlet_tags="boundary") + trials = TrialFESpace(tests,u) + + qdegree = order*2+1 + ops = setup_transfer_operators(trials, qdegree; restriction_method=:projection, mode=:solution) + restrictions, prolongations = ops + + a(u,v,dΩ) = ∫(∇(v)⋅∇(u))*dΩ + l(v,dΩ) = ∫(v⋅u)*dΩ + mats, A, b = compute_hierarchy_matrices(trials,a,l,qdegree) + + for lev in 1:1#num_levels-1 + parts_h = get_level_parts(mh,lev) + parts_H = get_level_parts(mh,lev+1) + + if i_am_in(parts_h) + i_am_main(parts_h) && println("Lev : ", lev) + Ah = mats[lev] + xh = PVector(1.0,Ah.cols) + yh = PVector(0.0,Ah.cols) + + if i_am_in(parts_H) + AH = mats[lev+1] + xH = PVector(1.0,AH.cols) + yH = PVector(0.0,AH.cols) + + model_h = get_model_before_redist(mh,lev) + model_H = get_model(mh,lev+1) + + display(map_parts(num_cells,local_views(model_h))) + display(map_parts(num_cells,local_views(model_H))) + + Uh = get_fe_space_before_redist(trials,lev) + Ωh = GridapSolvers.MultilevelTools.get_triangulation(Uh,model_h) + + UH = get_fe_space(trials,lev+1) + VH = GridapSolvers.MultilevelTools.get_test_space(UH) + ΩH = GridapSolvers.MultilevelTools.get_triangulation(UH,model_H) + dΩH = Measure(ΩH,qdegree) + dΩhH = Measure(ΩH,Ωh,qdegree) + + uH = interpolate(u,UH) + uh = interpolate(u,Uh) + + aH(u,v) = ∫(v⋅u)*dΩH + lh(v) = ∫(v⋅uh)*dΩhH + lH(v) = ∫(v⋅uH)*dΩH + assem = SparseMatrixAssembler(UH,VH) + + u_dir = interpolate(0.0,UH) + u,v = get_trial_fe_basis(UH), get_fe_basis(VH) + data = Gridap.FESpaces.collect_cell_matrix_and_vector(UH,VH,aH(u,v),lh(v),u_dir) + AH,bH = Gridap.FESpaces.assemble_matrix_and_vector(assem,data) + + data2 = Gridap.FESpaces.collect_cell_matrix_and_vector(UH,VH,aH(u,v),lH(v),u_dir) + AH2,bH2 = Gridap.FESpaces.assemble_matrix_and_vector(assem,data2) + + vecdata = Gridap.FESpaces.collect_cell_vector(VH,lh(v)) + display(vecdata) + + display(bH.values) + display(bH2.values) + + else + xH = nothing + yH = nothing + end + + i_am_main(parts_h) && println(" > Restriction") + R = restrictions[lev] + mul!(yH,R,xh) + i_am_in(parts_H) && display(yH.values) + + i_am_main(parts_h) && println(" > Prolongation") + P = prolongations[lev] + mul!(yh,P,xH) + i_am_in(parts_h) && display(yh.values) + + end + end + end +end + +num_parts_x_level = [4,2,2] # Procs in each refinement level +#num_parts_x_level = [1,1,1] # Procs in each refinement level +num_trees = (1,1) # Number of initial P4est trees +num_refs_coarse = 1 # Number of initial refinements + +num_ranks = num_parts_x_level[1] +with_backend(run,MPIBackend(),num_ranks,num_parts_x_level,num_trees,num_refs_coarse) +println("AT THE END") +MPI.Finalize() + + + + diff --git a/test/seq/Debugging.jl b/test/seq/Debugging.jl new file mode 100644 index 00000000..fedf058c --- 
/dev/null +++ b/test/seq/Debugging.jl @@ -0,0 +1,74 @@ +module Debugging + +using IterativeSolvers +using FillArrays +using Gridap +using Gridap.Adaptivity +using Gridap.FESpaces + + +function assemble_matrix_and_vector_bis(a,l,U,V) + u_dir = zero(UH) + u = get_trial_fe_basis(U) + v = get_fe_basis(V) + + assem = SparseMatrixAssembler(U,V) + + matcontribs, veccontribs = a(u,v), l(v) + data = collect_cell_matrix_and_vector(U,V,matcontribs,veccontribs,u_dir) + A,b = assemble_matrix_and_vector(assem,data) + return A,b +end + +""" +function Gridap.Adaptivity.FineToCoarseField(fine_fields,rrule::RefinementRule) + return Gridap.Adaptivity.FineToCoarseField(collect(fine_fields),rrule) +end +""" + +domain = (0,1,0,1) +partition = Tuple(fill(4,2)) +model_H = CartesianDiscreteModel(domain,partition) +model_h = refine(model_H) + +order = 1 +u(x) = 1.0 +reffe = ReferenceFE(lagrangian,Float64,order) + +VH = TestFESpace(model_H,reffe;dirichlet_tags="boundary") +UH = TrialFESpace(VH,u) +Vh = TestFESpace(model_h,reffe;dirichlet_tags="boundary") +Uh = TrialFESpace(Vh,u) + +uh = interpolate(u,Uh) +uH = interpolate(u,UH) + +qorder = order*2+1 +ΩH = Triangulation(model_H) +dΩH = Measure(ΩH,qorder) +Ωh = Triangulation(model_h) +dΩh = Measure(Ωh,qorder) + +dΩHh = Measure(ΩH,Ωh,qorder) + +a(u,v) = ∫(v⋅u)*dΩH +lh(v) = ∫(v⋅uh)*dΩHh +lH(v) = ∫(v⋅uH)*dΩH + +op = AffineFEOperator(a,lH,UH,VH) + +AH, bH = assemble_matrix_and_vector_bis(a,lh,UH,VH) + +xH = zeros(size(bH)) +rH = AH*xH - bH +xH, hist = cg!(xH,AH,bH;log=true) +xH + +uH2 = FEFunction(UH,xH) + +pts = get_cell_points(dΩH) +uH(pts) +uH2(pts) + + +end \ No newline at end of file From 5b7fbf66259a7e390b8ad90494eb1e4e885d8539 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 21 Feb 2023 16:27:26 +1100 Subject: [PATCH 76/95] Introduced new changes from Gridap.Adaptivity --- Manifest.toml | 4 +- .../DistributedGridTransferOperators.jl | 22 +++---- .../GridapDistributedExtensions.jl | 5 -- src/MultilevelTools/GridapFixes.jl | 40 ++++++------ src/MultilevelTools/RefinementTools.jl | 22 ------- test/mpi/Debugging.jl | 40 +----------- .../DistributedGridTransferOperatorsTests.jl | 65 +++++++++++++------ test/seq/Debugging.jl | 54 ++++++--------- 8 files changed, 96 insertions(+), 156 deletions(-) diff --git a/Manifest.toml b/Manifest.toml index 1547091f..c6de6061 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -215,8 +215,8 @@ uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820" [[deps.Gridap]] deps = ["AbstractTrees", "BSON", "BlockArrays", "Combinatorics", "DataStructures", "DocStringExtensions", "FastGaussQuadrature", "FileIO", "FillArrays", "ForwardDiff", "JLD2", "JSON", "LineSearches", "LinearAlgebra", "NLsolve", "NearestNeighbors", "PolynomialBases", "QuadGK", "Random", "SparseArrays", "SparseMatricesCSR", "StaticArrays", "Test", "WriteVTK"] -git-tree-sha1 = "f3f4a94f53fdfcda51e9cafc2d83ca9b229d6ea7" -repo-rev = "refined-discrete-models" +git-tree-sha1 = "ddd11e52330f755829ccc36e40c50d98a400c42c" +repo-rev = "adaptivity" repo-url = "https://github.com/gridap/Gridap.jl.git" uuid = "56d4f2e9-7ea1-5844-9cf6-b9c51ca7ce8e" version = "0.17.16" diff --git a/src/MultilevelTools/DistributedGridTransferOperators.jl b/src/MultilevelTools/DistributedGridTransferOperators.jl index 19d5cd3c..544da23a 100644 --- a/src/MultilevelTools/DistributedGridTransferOperators.jl +++ b/src/MultilevelTools/DistributedGridTransferOperators.jl @@ -77,14 +77,15 @@ function _get_projection_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mode:: if i_am_in(cparts) model_h = 
get_model_before_redist(mh,lev) - Uh = get_fe_space_before_redist(sh,lev) - Ωh = get_triangulation(Uh,get_model_before_redist(mh,lev)) + Uh = get_fe_space_before_redist(sh,lev) + Ωh = Triangulation(model_h) fv_h = PVector(0.0,Uh.gids) dv_h = (mode == :solution) ? get_dirichlet_dof_values(Uh) : zero_dirichlet_values(Uh) + model_H = get_model(mh,lev+1) UH = get_fe_space(sh,lev+1) VH = get_test_space(UH) - ΩH = get_triangulation(UH,get_model(mh,lev+1)) + ΩH = Triangulation(model_H) dΩH = Measure(ΩH,qdegree) dΩhH = Measure(ΩH,Ωh,qdegree) @@ -96,7 +97,7 @@ function _get_projection_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mode:: u,v = get_trial_fe_basis(UH), get_fe_basis(VH) data = collect_cell_matrix_and_vector(UH,VH,aH(u,v),lH(v,0.0),u_dir) AH,bH = assemble_matrix_and_vector(assem,data) - xH = PVector(0.0,AH.rows) + xH = PVector(0.0,AH.cols) cache_refine = model_h, Uh, fv_h, dv_h, VH, AH, lH, xH, bH, assem else @@ -167,7 +168,7 @@ function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:p uH = FEFunction(UH,fv_H,dv_H) uh = interpolate!(uH,fv_h,Uh) copy!(y,fv_h) # FE layout -> Matrix layout - exchange!(y) + return y end @@ -180,7 +181,7 @@ function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:r uh = FEFunction(Uh,fv_h,dv_h) uH = interpolate!(uh,fv_H,UH) copy!(y,fv_H) # FE layout -> Matrix layout - exchange!(y) + return y end @@ -194,10 +195,8 @@ function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:r v = get_fe_basis(VH) vec_data = collect_cell_vector(VH,lH(v,uh)) assemble_vector_add!(bH,assem,vec_data) # Matrix layout - display(bH.values) IterativeSolvers.cg!(xH,AH,bH;reltol=1.0e-06) copy!(y,xH) - exchange!(y) return y end @@ -211,7 +210,6 @@ function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:r exchange!(fv_h) restrict_dofs!(fv_H,fv_h,dv_h,Uh,UH,get_adaptivity_glue(model_h)) copy!(y,fv_H) # FE layout -> Matrix layout - exchange!(y) return y end @@ -252,7 +250,6 @@ function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransfer uh = FEFunction(Uh,fv_h,dv_h) uH = interpolate!(uh,fv_H,UH) copy!(y,fv_H) # FE layout -> Matrix layout - exchange!(y) end return y @@ -271,15 +268,13 @@ function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransfer # 2 - Solve f2c projection coarse partition if !isa(y,Nothing) - exchange!(fv_h) + #exchange!(fv_h) uh = FEFunction(Uh,fv_h,dv_h) v = get_fe_basis(VH) vec_data = collect_cell_vector(VH,lH(v,uh)) assemble_vector_add!(bH,assem,vec_data) # Matrix layout - display(bH.values) IterativeSolvers.cg!(xH,AH,bH;reltol=1.0e-06) copy!(y,xH) - exchange!(y) end return y @@ -300,7 +295,6 @@ function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransfer if !isa(y,Nothing) restrict_dofs!(fv_H,fv_h,dv_h,Uh,UH,get_adaptivity_glue(model_h)) copy!(y,fv_H) # FE layout -> Matrix layout - exchange!(y) end return y diff --git a/src/MultilevelTools/GridapDistributedExtensions.jl b/src/MultilevelTools/GridapDistributedExtensions.jl index bc24aa19..0f3d58a1 100644 --- a/src/MultilevelTools/GridapDistributedExtensions.jl +++ b/src/MultilevelTools/GridapDistributedExtensions.jl @@ -43,11 +43,6 @@ function get_test_space(U::GridapDistributed.DistributedSingleFieldFESpace) return GridapDistributed.DistributedSingleFieldFESpace(spaces,U.gids,U.vector_type) end -function FESpaces.get_triangulation(f::GridapDistributed.DistributedSingleFieldFESpace,model::GridapDistributed.AbstractDistributedDiscreteModel) - trians = 
map_parts(get_triangulation,local_views(f))
-  return GridapDistributed.DistributedTriangulation(trians,model)
-end
-
 # Void GridapDistributed structures

 struct VoidDistributedDiscreteModel{Dc,Dp,A} <: GridapDistributed.AbstractDistributedDiscreteModel{Dc,Dp}
diff --git a/src/MultilevelTools/GridapFixes.jl b/src/MultilevelTools/GridapFixes.jl
index 1f78e4b5..e5ccac7d 100644
--- a/src/MultilevelTools/GridapFixes.jl
+++ b/src/MultilevelTools/GridapFixes.jl
@@ -1,35 +1,35 @@
+function Gridap.Adaptivity.change_domain_n2o(f_fine,ftrian::Gridap.Adaptivity.AdaptedTriangulation{Dc},ctrian::Gridap.Geometry.Triangulation,glue::Gridap.Adaptivity.AdaptivityGlue{<:Gridap.Adaptivity.RefinementGlue}) where Dc
+  fglue = Gridap.Geometry.get_glue(ftrian,Val(Dc))
+  cglue = Gridap.Geometry.get_glue(ctrian,Val(Dc))

-function Gridap.Adaptivity.change_domain_n2o(f_fine,ctrian::Gridap.Geometry.Triangulation{Dc},glue::Gridap.Adaptivity.AdaptivityGlue{<:Gridap.Adaptivity.RefinementGlue,Dc}) where Dc
-  @notimplementedif num_dims(ctrian) != Dc
-  msg = "Evaluating a fine CellField in the coarse mesh is costly! If you are using this feature
-         to integrate, consider using a CompositeMeasure instead (see test/AdaptivityTests/GridTransferTests.jl)."
-  @warn msg
+  @notimplementedif Gridap.Geometry.num_point_dims(ftrian) != Dc
+  @notimplementedif isa(cglue,Nothing)

   if (num_cells(ctrian) != 0)
+    ### New Triangulation -> New Model
+    fine_tface_to_field = Gridap.CellData.get_data(f_fine)
+    fine_mface_to_field = Gridap.Geometry.extend(fine_tface_to_field,fglue.mface_to_tface)
+
+    ### New Model -> Old Model
     # f_c2f[i_coarse] = [f_fine[i_fine_1], ..., f_fine[i_fine_nChildren]]
-    f_c2f = Gridap.Adaptivity.f2c_reindex(f_fine,glue)
+    f_c2f = Gridap.Adaptivity.f2c_reindex(fine_mface_to_field,glue)
     child_ids = Gridap.Adaptivity.f2c_reindex(glue.n2o_cell_to_child_id,glue)
     rrules = Gridap.Adaptivity.get_old_cell_refinement_rules(glue)
-    f_coarse = lazy_map(Gridap.Adaptivity.FineToCoarseField,f_c2f,rrules,child_ids)
-    return Gridap.CellData.GenericCellField(f_coarse,ctrian,ReferenceDomain())
-  else
-    f_coarse = Fill(Gridap.Fields.ConstantField(0.0),num_cells(ftrian))
-    return Gridap.CellData.GenericCellField(f_coarse,ctrian,ReferenceDomain())
-  end
-end
+    coarse_mface_to_field = lazy_map(Gridap.Adaptivity.FineToCoarseField,f_c2f,rrules,child_ids)

-function Gridap.Adaptivity.FineToCoarseField(fine_fields::AbstractArray{<:Gridap.Fields.Field},rrule::Gridap.Adaptivity.RefinementRule,child_ids::AbstractArray{<:Integer})
-  fields = Vector{Gridap.Fields.Field}(undef,Gridap.Adaptivity.num_subcells(rrule))
-  fields = fill!(fields,Gridap.Fields.ConstantField(0.0))
-  for (k,id) in enumerate(child_ids)
-    fields[id] = fine_fields[k]
+    ### Old Model -> Old Triangulation
+    coarse_tface_to_field = lazy_map(Reindex(coarse_mface_to_field),cglue.tface_to_mface)
+    f_coarse = lazy_map(Broadcasting(∘),coarse_tface_to_field,cglue.tface_to_mface_map)
+
+    return Gridap.CellData.similar_cell_field(f_fine,f_coarse,ctrian,ReferenceDomain())
+  else
+    f_coarse = Fill(Gridap.Fields.ConstantField(0.0),num_cells(ctrian))
+    return Gridap.CellData.similar_cell_field(f_fine,f_coarse,ctrian,ReferenceDomain())
   end
-  return Gridap.Adaptivity.FineToCoarseField(fields,rrule)
 end
-
 function Base.map(::typeof(Gridap.Arrays.testitem),
                   a::Tuple{<:AbstractVector{<:AbstractVector{<:VectorValue}},<:AbstractVector{<:Gridap.Fields.LinearCombinationFieldVector}})
   a2=Gridap.Arrays.testitem(a[2])
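The rewritten change_domain_n2o above is what backs the composite-measure pattern used throughout this series (dΩhH = Measure(ΩH,Ωh,qdegree)): instead of evaluating a fine CellField on the coarse mesh, its cell arrays are reindexed back to the coarse cells through the adaptivity glue. A minimal serial sketch of that pattern, following test/seq/Debugging.jl in this series; the mesh sizes and the constant function are illustrative only:

using Gridap, Gridap.Adaptivity

model_H = CartesianDiscreteModel((0,1,0,1),(4,4))
model_h = refine(model_H)                # fine model, carries the adaptivity glue
reffe   = ReferenceFE(lagrangian,Float64,1)
VH = TestFESpace(model_H,reffe)
Vh = TestFESpace(model_h,reffe)
uh = interpolate(x -> 1.0, Vh)           # a function living on the fine mesh

ΩH = Triangulation(model_H)
Ωh = Triangulation(model_h)
dΩHh  = Measure(ΩH,Ωh,3)                 # composite measure: coarse cells, fine-level quadrature
lH(v) = ∫(v⋅uh)*dΩHh                     # coarse v against fine uh triggers the n2o change of domain
bH = assemble_vector(lH,VH)              # coarse right-hand side, e.g. for an L2 projection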
diff --git a/src/MultilevelTools/RefinementTools.jl b/src/MultilevelTools/RefinementTools.jl
index b7dd250a..e2930cc5 100644
--- a/src/MultilevelTools/RefinementTools.jl
+++ b/src/MultilevelTools/RefinementTools.jl
@@ -3,28 +3,6 @@
 const DistributedAdaptedTriangulation{Dc,Dp} = GridapDistributed.DistributedTriangulation{Dc,Dp,<:AbstractPData{<:AdaptedTriangulation{Dc,Dp}}}

-# ChangeDomain
-
-function Gridap.Adaptivity.change_domain_o2n(c_cell_field,
-                                             ftrian::GridapDistributed.DistributedTriangulation{Dc,Dp},
-                                             glue::AbstractPData{Gridap.Adaptivity.AdaptivityGlue}) where {Dc,Dp}
-
-  i_am_in_coarse = !isa(c_cell_field, Nothing)
-
-  fields = map_parts(local_views(ftrian)) do Ω
-    if (i_am_in_coarse)
-      c_cell_field.fields.part
-    else
-      Gridap.Helpers.@check num_cells(Ω) == 0
-      Gridap.CellData.GenericCellField(Fill(Gridap.Fields.ConstantField(0.0),num_cells(Ω)),Ω,ReferenceDomain())
-    end
-  end
-  c_cell_field_fine = GridapDistributed.DistributedCellField(fields)
-
-  dfield = map_parts(Gridap.Adaptivity.change_domain_o2n,local_views(c_cell_field_fine),local_views(ftrian),glue)
-  return GridapDistributed.DistributedCellField(dfield)
-end
-
 # Restriction of dofs

 function restrict_dofs!(fv_c::PVector,
diff --git a/test/mpi/Debugging.jl b/test/mpi/Debugging.jl
index b720e9d1..8c4392a6 100644
--- a/test/mpi/Debugging.jl
+++ b/test/mpi/Debugging.jl
@@ -38,7 +38,7 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse)
     l(v,dΩ) = ∫(v⋅u)*dΩ
     mats, A, b = compute_hierarchy_matrices(trials,a,l,qdegree)

-    for lev in 1:1#num_levels-1
+    for lev in 1:num_levels-1
       parts_h = get_level_parts(mh,lev)
       parts_H = get_level_parts(mh,lev+1)

@@ -52,44 +52,6 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse)
         if i_am_in(parts_H)
           AH = mats[lev+1]
           xH = PVector(1.0,AH.cols)
          yH = PVector(0.0,AH.cols)
-
-          model_h = get_model_before_redist(mh,lev)
-          model_H = get_model(mh,lev+1)
-
-          display(map_parts(num_cells,local_views(model_h)))
-          display(map_parts(num_cells,local_views(model_H)))
-
-          Uh = get_fe_space_before_redist(trials,lev)
-          Ωh = GridapSolvers.MultilevelTools.get_triangulation(Uh,model_h)
-
-          UH = get_fe_space(trials,lev+1)
-          VH = GridapSolvers.MultilevelTools.get_test_space(UH)
-          ΩH = GridapSolvers.MultilevelTools.get_triangulation(UH,model_H)
-          dΩH = Measure(ΩH,qdegree)
-          dΩhH = Measure(ΩH,Ωh,qdegree)
-
-          uH = interpolate(u,UH)
-          uh = interpolate(u,Uh)
-
-          aH(u,v) = ∫(v⋅u)*dΩH
-          lh(v) = ∫(v⋅uh)*dΩhH
-          lH(v) = ∫(v⋅uH)*dΩH
-          assem = SparseMatrixAssembler(UH,VH)
-
-          u_dir = interpolate(0.0,UH)
-          u,v = get_trial_fe_basis(UH), get_fe_basis(VH)
-          data = Gridap.FESpaces.collect_cell_matrix_and_vector(UH,VH,aH(u,v),lh(v),u_dir)
-          AH,bH = Gridap.FESpaces.assemble_matrix_and_vector(assem,data)
-
-          data2 = Gridap.FESpaces.collect_cell_matrix_and_vector(UH,VH,aH(u,v),lH(v),u_dir)
-          AH2,bH2 = Gridap.FESpaces.assemble_matrix_and_vector(assem,data2)
-
-          vecdata = Gridap.FESpaces.collect_cell_vector(VH,lh(v))
-          display(vecdata)
-
-          display(bH.values)
-          display(bH2.values)
-
         else
           xH = nothing
           yH = nothing
diff --git a/test/mpi/DistributedGridTransferOperatorsTests.jl b/test/mpi/DistributedGridTransferOperatorsTests.jl
index b6fdc12b..276cffdf 100644
--- a/test/mpi/DistributedGridTransferOperatorsTests.jl
+++ b/test/mpi/DistributedGridTransferOperatorsTests.jl
@@ -20,18 +20,18 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse)

   # Create Operators:
   order = 1
-  u(x) = x[1] + x[2]
+  u(x) = 1.0
   reffe = ReferenceFE(lagrangian,Float64,order)

   tests = TestFESpace(mh,reffe;dirichlet_tags="boundary")
   trials = TrialFESpace(tests,u)

   qdegree = order*2+1
-  ops = setup_transfer_operators(trials, qdegree; restriction_method=:projection)
-  restrictions, prolongations = ops
-  ops2 = setup_transfer_operators(trials, qdegree; restriction_method=:interpolation)
+  ops1 = setup_transfer_operators(trials, qdegree; restriction_method=:projection, mode=:solution)
+  restrictions1, prolongations1 = ops1
+  ops2 = setup_transfer_operators(trials, qdegree; restriction_method=:interpolation, mode=:solution)
   restrictions2, prolongations2 = ops2
-  ops3 = setup_transfer_operators(trials, qdegree; restriction_method=:dof_mask)
+  ops3 = setup_transfer_operators(trials, qdegree; restriction_method=:dof_mask, mode=:solution)
   restrictions3, prolongations3 = ops3

   a(u,v,dΩ) = ∫(∇(v)⋅∇(u))*dΩ
@@ -44,38 +44,61 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse)

     if i_am_in(parts_h)
       i_am_main(parts_h) && println("Lev : ", lev)
-      Ah = mats[lev]
-      xh = PVector(1.0,Ah.cols)
-      yh = PVector(0.0,Ah.rows)
+      Ah  = mats[lev]
+      xh  = PVector(1.0,Ah.cols)
+      yh1 = PVector(0.0,Ah.cols)
+      yh2 = PVector(0.0,Ah.cols)
+      yh3 = PVector(0.0,Ah.cols)

       if i_am_in(parts_H)
-        AH = mats[lev+1]
-        xH = PVector(1.0,AH.cols)
-        yH = PVector(0.0,AH.rows)
+        AH  = mats[lev+1]
+        xH  = PVector(1.0,AH.cols)
+        yH1 = PVector(0.0,AH.cols)
+        yH2 = PVector(0.0,AH.cols)
+        yH3 = PVector(0.0,AH.cols)
       else
-        xH = nothing
-        yH = nothing
+        xH  = nothing
+        yH1 = nothing
+        yH2 = nothing
+        yH3 = nothing
       end

+      # ---- Restriction ----
       i_am_main(parts_h) && println(" > Restriction")
-      R = restrictions[lev]
-      mul!(yH,R,xh)
+      R1 = restrictions1[lev]
+      mul!(yH1,R1,xh)

       R2 = restrictions2[lev]
-      mul!(yH,R2,xh)
+      mul!(yH2,R2,xh)

       R3 = restrictions3[lev]
-      mul!(yH,R3,xh)
+      mul!(yH3,R3,xh)

+      if i_am_in(parts_H)
+        y_ref = PVector(1.0,AH.cols)
+        tests = map_parts(y_ref.owned_values,yH1.owned_values,yH2.owned_values,yH3.owned_values) do y_ref,y1,y2,y3
+          map(y -> norm(y-y_ref) < 1.e-3 ,[y1,y2,y3])
+        end
+        @test all(tests.part)
+      end
+
+      # ---- Prolongation ----
       i_am_main(parts_h) && println(" > Prolongation")
-      P = prolongations[lev]
-      mul!(yh,P,xH)
+      P1 = prolongations1[lev]
+      mul!(yh1,P1,xH)

       P2 = prolongations2[lev]
-      mul!(yh,P2,xH)
+      mul!(yh2,P2,xH)

       P3 = prolongations3[lev]
-      mul!(yh,P3,xH)
+      mul!(yh3,P3,xH)
+
+      y_ref = PVector(1.0,Ah.cols)
+      tests = map_parts(y_ref.owned_values,yh1.owned_values,yh2.owned_values,yh3.owned_values) do y_ref,y1,y2,y3
+        map(y -> norm(y-y_ref) < 1.e-3 ,[y1,y2,y3])
+      end
+      @test all(tests.part)
+
     end
   end
 end
diff --git a/test/seq/Debugging.jl b/test/seq/Debugging.jl
index fedf058c..32dc5eea 100644
--- a/test/seq/Debugging.jl
+++ b/test/seq/Debugging.jl
@@ -6,6 +6,8 @@ using Gridap
 using Gridap.Adaptivity
 using Gridap.FESpaces

+using GridapDistributed
+using PartitionedArrays

 function assemble_matrix_and_vector_bis(a,l,U,V)
   u_dir = zero(UH)
@@ -20,55 +22,41 @@
   return A,b
 end

-"""
-function Gridap.Adaptivity.FineToCoarseField(fine_fields,rrule::RefinementRule)
-  return Gridap.Adaptivity.FineToCoarseField(collect(fine_fields),rrule)
-end
-"""
+
+
+backend = SequentialBackend()
+parts = get_part_ids(backend,(1,2))

 domain = (0,1,0,1)
 partition = Tuple(fill(4,2))
-model_H = CartesianDiscreteModel(domain,partition)
-model_h = refine(model_H)
+model = CartesianDiscreteModel(parts,domain,partition)

 order = 1
 u(x) = 1.0
 reffe = ReferenceFE(lagrangian,Float64,order)

-VH = TestFESpace(model_H,reffe;dirichlet_tags="boundary")
-UH = TrialFESpace(VH,u)
-Vh = TestFESpace(model_h,reffe;dirichlet_tags="boundary")
-Uh = TrialFESpace(Vh,u)
+V = 
TestFESpace(model,reffe;dirichlet_tags="boundary") +U = TrialFESpace(V,u) -uh = interpolate(u,Uh) -uH = interpolate(u,UH) +uh = interpolate(u,U) qorder = order*2+1 -ΩH = Triangulation(model_H) -dΩH = Measure(ΩH,qorder) -Ωh = Triangulation(model_h) -dΩh = Measure(Ωh,qorder) - -dΩHh = Measure(ΩH,Ωh,qorder) - -a(u,v) = ∫(v⋅u)*dΩH -lh(v) = ∫(v⋅uh)*dΩHh -lH(v) = ∫(v⋅uH)*dΩH +Ω = Triangulation(model) +dΩ = Measure(Ω,qorder) -op = AffineFEOperator(a,lH,UH,VH) +a(u,v) = ∫(v⋅u)*dΩ +l(v) = ∫(v⋅uh)*dΩ +h(v) = ∫(v⋅v)*dΩ -AH, bH = assemble_matrix_and_vector_bis(a,lh,UH,VH) -xH = zeros(size(bH)) -rH = AH*xH - bH -xH, hist = cg!(xH,AH,bH;log=true) -xH +SAR = SparseMatrixAssembler(U,V) +FAR = SparseMatrixAssembler(U,V,FullyAssembledRows()) -uH2 = FEFunction(UH,xH) +v = get_fe_basis(V) +vecdata = collect_cell_vector(V,l(v)) -pts = get_cell_points(dΩH) -uH(pts) -uH2(pts) +v_sar = assemble_vector(SAR,vecdata) +v_far = assemble_vector(FAR,vecdata) end \ No newline at end of file From ffc047a7fb5ae2d4748c36710db8de6dcf8cf975 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 21 Feb 2023 16:28:30 +1100 Subject: [PATCH 77/95] Cleanup --- test/mpi/Debugging.jl | 87 ------------------------------------------- test/seq/Debugging.jl | 62 ------------------------------ 2 files changed, 149 deletions(-) delete mode 100644 test/mpi/Debugging.jl delete mode 100644 test/seq/Debugging.jl diff --git a/test/mpi/Debugging.jl b/test/mpi/Debugging.jl deleted file mode 100644 index 8c4392a6..00000000 --- a/test/mpi/Debugging.jl +++ /dev/null @@ -1,87 +0,0 @@ - -using MPI -using PartitionedArrays -using Gridap -using GridapDistributed -using GridapP4est -using Test - -using GridapSolvers -using GridapSolvers.MultilevelTools - -function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) - GridapP4est.with(parts) do - domain = (0,1,0,1) - num_levels = length(num_parts_x_level) - cmodel = CartesianDiscreteModel(domain,coarse_grid_partition) - - cparts = generate_subparts(parts,num_parts_x_level[num_levels]) - coarse_model = OctreeDistributedDiscreteModel(cparts,cmodel,num_refs_coarse) - - fparts = generate_subparts(parts,num_parts_x_level[1]) - fine_model = OctreeDistributedDiscreteModel(fparts,cmodel,num_refs_coarse + num_levels) - mh = ModelHierarchy(parts,coarse_model,num_parts_x_level) - - # Create Operators: - order = 1 - u(x) = 1.0 - reffe = ReferenceFE(lagrangian,Float64,order) - - tests = TestFESpace(mh,reffe;dirichlet_tags="boundary") - trials = TrialFESpace(tests,u) - - qdegree = order*2+1 - ops = setup_transfer_operators(trials, qdegree; restriction_method=:projection, mode=:solution) - restrictions, prolongations = ops - - a(u,v,dΩ) = ∫(∇(v)⋅∇(u))*dΩ - l(v,dΩ) = ∫(v⋅u)*dΩ - mats, A, b = compute_hierarchy_matrices(trials,a,l,qdegree) - - for lev in 1:num_levels-1 - parts_h = get_level_parts(mh,lev) - parts_H = get_level_parts(mh,lev+1) - - if i_am_in(parts_h) - i_am_main(parts_h) && println("Lev : ", lev) - Ah = mats[lev] - xh = PVector(1.0,Ah.cols) - yh = PVector(0.0,Ah.cols) - - if i_am_in(parts_H) - AH = mats[lev+1] - xH = PVector(1.0,AH.cols) - yH = PVector(0.0,AH.cols) - else - xH = nothing - yH = nothing - end - - i_am_main(parts_h) && println(" > Restriction") - R = restrictions[lev] - mul!(yH,R,xh) - i_am_in(parts_H) && display(yH.values) - - i_am_main(parts_h) && println(" > Prolongation") - P = prolongations[lev] - mul!(yh,P,xH) - i_am_in(parts_h) && display(yh.values) - - end - end - end -end - -num_parts_x_level = [4,2,2] # Procs in each refinement level -#num_parts_x_level = [1,1,1] # 
Procs in each refinement level -num_trees = (1,1) # Number of initial P4est trees -num_refs_coarse = 1 # Number of initial refinements - -num_ranks = num_parts_x_level[1] -with_backend(run,MPIBackend(),num_ranks,num_parts_x_level,num_trees,num_refs_coarse) -println("AT THE END") -MPI.Finalize() - - - - diff --git a/test/seq/Debugging.jl b/test/seq/Debugging.jl deleted file mode 100644 index 32dc5eea..00000000 --- a/test/seq/Debugging.jl +++ /dev/null @@ -1,62 +0,0 @@ -module Debugging - -using IterativeSolvers -using FillArrays -using Gridap -using Gridap.Adaptivity -using Gridap.FESpaces - -using GridapDistributed -using PartitionedArrays - -function assemble_matrix_and_vector_bis(a,l,U,V) - u_dir = zero(UH) - u = get_trial_fe_basis(U) - v = get_fe_basis(V) - - assem = SparseMatrixAssembler(U,V) - - matcontribs, veccontribs = a(u,v), l(v) - data = collect_cell_matrix_and_vector(U,V,matcontribs,veccontribs,u_dir) - A,b = assemble_matrix_and_vector(assem,data) - return A,b -end - - - -backend = SequentialBackend() -parts = get_part_ids(backend,(1,2)) - -domain = (0,1,0,1) -partition = Tuple(fill(4,2)) -model = CartesianDiscreteModel(parts,domain,partition) - -order = 1 -u(x) = 1.0 -reffe = ReferenceFE(lagrangian,Float64,order) - -V = TestFESpace(model,reffe;dirichlet_tags="boundary") -U = TrialFESpace(V,u) - -uh = interpolate(u,U) - -qorder = order*2+1 -Ω = Triangulation(model) -dΩ = Measure(Ω,qorder) - -a(u,v) = ∫(v⋅u)*dΩ -l(v) = ∫(v⋅uh)*dΩ -h(v) = ∫(v⋅v)*dΩ - - -SAR = SparseMatrixAssembler(U,V) -FAR = SparseMatrixAssembler(U,V,FullyAssembledRows()) - -v = get_fe_basis(V) -vecdata = collect_cell_vector(V,l(v)) - -v_sar = assemble_vector(SAR,vecdata) -v_far = assemble_vector(FAR,vecdata) - - -end \ No newline at end of file From 6725b13547c3806f832d06de514cfd601e1e1a79 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 21 Feb 2023 18:22:06 +1100 Subject: [PATCH 78/95] Small bugfix --- Manifest.toml | 8 +++---- .../DistributedGridTransferOperators.jl | 24 ++++++++++--------- 2 files changed, 17 insertions(+), 15 deletions(-) diff --git a/Manifest.toml b/Manifest.toml index c6de6061..84845db3 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -38,9 +38,9 @@ version = "1.1.1" [[deps.ArrayInterface]] deps = ["Adapt", "LinearAlgebra", "Requires", "SnoopPrecompile", "SparseArrays", "SuiteSparse"] -git-tree-sha1 = "1da9f7b4f41abece283e0fbeb7ed406e7905dcdd" +git-tree-sha1 = "4d9946e51e24f5e509779e3e2c06281a733914c2" uuid = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9" -version = "7.0.0" +version = "7.1.0" [[deps.ArrayLayouts]] deps = ["FillArrays", "LinearAlgebra", "SparseArrays"] @@ -344,9 +344,9 @@ uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" [[deps.LogExpFunctions]] deps = ["ChainRulesCore", "ChangesOfVariables", "DocStringExtensions", "InverseFunctions", "IrrationalConstants", "LinearAlgebra"] -git-tree-sha1 = "071602a0be5af779066df0d7ef4e14945a010818" +git-tree-sha1 = "0a1b7c2863e44523180fdb3146534e265a91870b" uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688" -version = "0.3.22" +version = "0.3.23" [[deps.Logging]] uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" diff --git a/src/MultilevelTools/DistributedGridTransferOperators.jl b/src/MultilevelTools/DistributedGridTransferOperators.jl index 544da23a..9521e614 100644 --- a/src/MultilevelTools/DistributedGridTransferOperators.jl +++ b/src/MultilevelTools/DistributedGridTransferOperators.jl @@ -93,17 +93,18 @@ function _get_projection_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mode:: lH(v,uh) = ∫(v⋅uh)*dΩhH assem = SparseMatrixAssembler(UH,VH) - 
u_dir = (mode == :solution) ? interpolate(0.0,UH) : interpolate_everywhere(0.0,UH) - u,v = get_trial_fe_basis(UH), get_fe_basis(VH) - data = collect_cell_matrix_and_vector(UH,VH,aH(u,v),lH(v,0.0),u_dir) - AH,bH = assemble_matrix_and_vector(assem,data) - xH = PVector(0.0,AH.cols) - - cache_refine = model_h, Uh, fv_h, dv_h, VH, AH, lH, xH, bH, assem + u_dir = (mode == :solution) ? interpolate(0.0,UH) : interpolate_everywhere(0.0,UH) + u,v = get_trial_fe_basis(UH), get_fe_basis(VH) + data = collect_cell_matrix_and_vector(UH,VH,aH(u,v),lH(v,0.0),u_dir) + AH,bH0 = assemble_matrix_and_vector(assem,data) + xH = PVector(0.0,AH.cols) + bH = copy(bH0) + + cache_refine = model_h, Uh, fv_h, dv_h, VH, AH, lH, xH, bH, bH0, assem else model_h = get_model_before_redist(mh,lev) Uh = get_fe_space_before_redist(sh,lev) - cache_refine = model_h, Uh, nothing, nothing, nothing, nothing, nothing, nothing, nothing, nothing + cache_refine = model_h, Uh, nothing, nothing, nothing, nothing, nothing, nothing, nothing, nothing, nothing end return cache_refine @@ -188,12 +189,13 @@ end # B.2) Restriction, without redistribution, by projection function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:restriction},Val{false},Val{:projection}},x::PVector) cache_refine, cache_redist = A.cache - model_h, Uh, fv_h, dv_h, VH, AH, lH, xH, bH, assem = cache_refine + model_h, Uh, fv_h, dv_h, VH, AH, lH, xH, bH, bH0, assem = cache_refine copy!(fv_h,x) # Matrix layout -> FE layout uh = FEFunction(Uh,fv_h,dv_h) v = get_fe_basis(VH) vec_data = collect_cell_vector(VH,lH(v,uh)) + copy!(bH,bH0) assemble_vector_add!(bH,assem,vec_data) # Matrix layout IterativeSolvers.cg!(xH,AH,bH;reltol=1.0e-06) copy!(y,xH) @@ -258,7 +260,7 @@ end # D.2) Restriction, with redistribution, by projection function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransferOperator{Val{:restriction},Val{true},Val{:projection}},x::PVector) cache_refine, cache_redist = A.cache - model_h, Uh, fv_h, dv_h, VH, AH, lH, xH, bH, assem = cache_refine + model_h, Uh, fv_h, dv_h, VH, AH, lH, xH, bH, bH0, assem = cache_refine fv_h_red, dv_h_red, Uh_red, model_h_red, glue, cache_exchange = cache_redist # 1 - Redistribute from fine partition to coarse partition @@ -268,10 +270,10 @@ function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransfer # 2 - Solve f2c projection coarse partition if !isa(y,Nothing) - #exchange!(fv_h) uh = FEFunction(Uh,fv_h,dv_h) v = get_fe_basis(VH) vec_data = collect_cell_vector(VH,lH(v,uh)) + copy!(bH,bH0) assemble_vector_add!(bH,assem,vec_data) # Matrix layout IterativeSolvers.cg!(xH,AH,bH;reltol=1.0e-06) copy!(y,xH) From bd6d8cca1228d1f5b83be8568d707ed51f335d9e Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Wed, 22 Feb 2023 15:04:08 +1100 Subject: [PATCH 79/95] Fixed distributed Patch-Based smoothers --- .../seq/PatchBasedLinearSolvers.jl | 21 +++-- test/seq/DistributedPatchFESpacesTests.jl | 90 +++++++++++-------- 2 files changed, 69 insertions(+), 42 deletions(-) diff --git a/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl b/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl index 3758f434..9178178d 100644 --- a/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl +++ b/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl @@ -49,23 +49,27 @@ function Gridap.Algebra.numerical_setup(ss::PatchBasedSymbolicSetup,A::AbstractM Ap_ns = numerical_setup(Ap_ss,Ap) # Caches - caches = _patch_based_solver_caches(Ph,Ap) + caches = _patch_based_solver_caches(Ph,Vh,Ap) return 
PatchBasedSmootherNumericalSetup(ss.solver,Ap,Ap_ns,weights,caches) end -function _patch_based_solver_caches(Ph::PatchFESpace,Ap::AbstractMatrix) +function _patch_based_solver_caches(Ph::PatchFESpace,Vh::FESpace,Ap::AbstractMatrix) rp = _allocate_row_vector(Ap) dxp = _allocate_col_vector(Ap) return rp, dxp end -function _patch_based_solver_caches(Ph::GridapDistributed.DistributedSingleFieldFESpace,Ap::PSparseMatrix) +function _patch_based_solver_caches(Ph::GridapDistributed.DistributedSingleFieldFESpace, + Vh::GridapDistributed.DistributedSingleFieldFESpace, + Ap::PSparseMatrix) rp_mat = _allocate_row_vector(Ap) dxp_mat = _allocate_col_vector(Ap) rp = PVector(0.0,Ph.gids) dxp = PVector(0.0,Ph.gids) - return rp_mat, dxp_mat, rp, dxp + r = PVector(0.0,Vh.gids) + x = PVector(0.0,Vh.gids) + return rp_mat, dxp_mat, rp, dxp, r, x end function _allocate_col_vector(A::AbstractMatrix) @@ -102,18 +106,21 @@ function Gridap.Algebra.solve!(x::AbstractVector,ns::PatchBasedSmootherNumerical return x end -function Gridap.Algebra.solve!(x::PVector,ns::PatchBasedSmootherNumericalSetup,r::PVector) +function Gridap.Algebra.solve!(x_mat::PVector,ns::PatchBasedSmootherNumericalSetup,r_mat::PVector) Ap_ns, weights, caches = ns.Ap_ns, ns.weights, ns.caches Ph = ns.solver.Ph w, w_sums = weights - rp_mat, dxp_mat, rp, dxp = caches + rp_mat, dxp_mat, rp, dxp, r, x = caches + copy!(r,r_mat) + exchange!(r) prolongate!(rp,Ph,r) copy!(rp_mat,rp) solve!(dxp_mat,Ap_ns,rp_mat) copy!(dxp,dxp_mat) inject!(x,Ph,dxp,w,w_sums) + copy!(x_mat,x) - return x + return x_mat end diff --git a/test/seq/DistributedPatchFESpacesTests.jl b/test/seq/DistributedPatchFESpacesTests.jl index c9ee8232..dc0a7dcc 100644 --- a/test/seq/DistributedPatchFESpacesTests.jl +++ b/test/seq/DistributedPatchFESpacesTests.jl @@ -1,8 +1,5 @@ module DistributedPatchFESpacesTests -ENV["JULIA_MPI_BINARY"] = "system" -ENV["JULIA_MPI_PATH"] = "/usr/lib/x86_64-linux-gnu" - using LinearAlgebra using Test using PartitionedArrays @@ -12,13 +9,8 @@ using Gridap.Geometry using GridapDistributed using FillArrays -include("../../src/PatchBasedSmoothers/PatchBasedSmoothers.jl") -import .PatchBasedSmoothers as PBS - -# This is needed for assembly -include("../../src/MultilevelTools/GridapFixes.jl") - -include("../../src/LinearSolvers/RichardsonSmoothers.jl") +using GridapSolvers +import GridapSolvers.PatchBasedSmoothers as PBS backend = SequentialBackend() ranks = (1,2) @@ -34,6 +26,7 @@ Vh = TestFESpace(model,reffe) PD = PBS.PatchDecomposition(model)#,patch_boundary_style=PBS.PatchBoundaryInclude()) Ph = PBS.PatchFESpace(model,reffe,Gridap.ReferenceFEs.H1Conformity(),PD,Vh) +# ---- Testing Prolongation and Injection ---- # w, w_sums = PBS.compute_weight_operators(Ph,Vh); xP = PVector(1.0,Ph.gids) @@ -49,50 +42,77 @@ PBS.inject!(x,Ph,xP,w,w_sums) PBS.prolongate!(yP,Ph,x) @test xP ≈ yP -Ωₚ = Triangulation(PD) -dΩₚ = Measure(Ωₚ,2*order+1) -a(u,v) = ∫(∇(v)⋅∇(u))*dΩₚ -l(v) = ∫(1*v)*dΩₚ +# ---- Assemble systems ---- # +Ω = Triangulation(model) +dΩ = Measure(Ω,2*order+1) +a(u,v) = ∫(v⋅u)*dΩ +l(v) = ∫(1*v)*dΩ assembler = SparseMatrixAssembler(Vh,Vh) Ah = assemble_matrix(a,assembler,Vh,Vh) fh = assemble_vector(l,assembler,Vh) -M = PBS.PatchBasedLinearSolver(a,Ph,Vh,LUSolver()) -R = RichardsonSmoother(M,10,1.0/3.0) -Rss = symbolic_setup(R,Ah) -Rns = numerical_setup(Rss,Ah) +sol_h = solve(LUSolver(),Ah,fh) -x = PBS._allocate_col_vector(Ah) -r = fh-Ah*x -exchange!(r) -solve!(x,Rns,r) +Ωₚ = Triangulation(PD) +dΩₚ = Measure(Ωₚ,2*order+1) +ap(u,v) = ∫(v⋅u)*dΩₚ +lp(v) = ∫(1*v)*dΩₚ + 
+assembler_P = SparseMatrixAssembler(Ph,Ph) +Ahp = assemble_matrix(ap,assembler_P,Ph,Ph) +fhp = assemble_vector(lp,assembler_P,Ph) +# ---- Define solvers ---- # +LU = LUSolver() +LUss = symbolic_setup(LU,Ahp) +LUns = numerical_setup(LUss,Ahp) + +M = PBS.PatchBasedLinearSolver(ap,Ph,Vh,LU) Mss = symbolic_setup(M,Ah) Mns = numerical_setup(Mss,Ah) -solve!(x,Mns,r) -assembler_P = SparseMatrixAssembler(Ph,Ph) -Ahp = assemble_matrix(a,assembler_P,Ph,Ph) -fhp = assemble_vector(l,assembler_P,Ph) +R = RichardsonSmoother(M,10,1.0/3.0) +Rss = symbolic_setup(R,Ah) +Rns = numerical_setup(Rss,Ah) -lu = LUSolver() -luss = symbolic_setup(lu,Ahp) -luns = numerical_setup(luss,Ahp) +# ---- Manual solve using LU ---- # -rp = PVector(0.0,Ph.gids) -PBS.prolongate!(rp,Ph,r) +x1_mat = PVector(0.5,Ah.cols) +r1_mat = fh-Ah*x1_mat +exchange!(r1_mat) +r1 = PVector(0.0,Vh.gids) +x1 = PVector(0.0,Vh.gids) +rp = PVector(0.0,Ph.gids) +xp = PVector(0.0,Ph.gids) rp_mat = PVector(0.0,Ahp.cols) -copy!(rp_mat,rp) xp_mat = PVector(0.0,Ahp.cols) -solve!(xp_mat,luns,rp_mat) +copy!(r1,r1_mat) +exchange!(r1) +PBS.prolongate!(rp,Ph,r1) -xp = PVector(0.0,Ph.gids) +copy!(rp_mat,rp) +solve!(xp_mat,LUns,rp_mat) copy!(xp,xp_mat) w, w_sums = PBS.compute_weight_operators(Ph,Vh); -PBS.inject!(x,Ph,xp,w,w_sums) +PBS.inject!(x1,Ph,xp,w,w_sums) +copy!(x1_mat,x1) + +# ---- Same using the PatchBasedSmoother ---- # + +x2_mat = PVector(0.5,Ah.cols) +r2_mat = fh-Ah*x2_mat +exchange!(r2_mat) +solve!(x2_mat,Mns,r2_mat) + +# ---- Smoother inside Richardson +x3_mat = PVector(0.5,Ah.cols) +r3_mat = fh-Ah*x3_mat +exchange!(r3_mat) +solve!(x3_mat,Rns,r3_mat) +exchange!(x3_mat) end \ No newline at end of file From 7b73cf44f57854ee13ca66f42f51842b43fd8737 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 23 Feb 2023 14:33:01 +1100 Subject: [PATCH 80/95] Bugfix: TransferOps not working for VectorValued FEFunctions --- .../DistributedGridTransferOperators.jl | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/src/MultilevelTools/DistributedGridTransferOperators.jl b/src/MultilevelTools/DistributedGridTransferOperators.jl index 9521e614..d991bed6 100644 --- a/src/MultilevelTools/DistributedGridTransferOperators.jl +++ b/src/MultilevelTools/DistributedGridTransferOperators.jl @@ -79,7 +79,7 @@ function _get_projection_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mode:: model_h = get_model_before_redist(mh,lev) Uh = get_fe_space_before_redist(sh,lev) Ωh = Triangulation(model_h) - fv_h = PVector(0.0,Uh.gids) + fv_h = zero_free_values(Uh) dv_h = (mode == :solution) ? get_dirichlet_dof_values(Uh) : zero_dirichlet_values(Uh) model_H = get_model(mh,lev+1) @@ -92,10 +92,15 @@ function _get_projection_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mode:: aH(u,v) = ∫(v⋅u)*dΩH lH(v,uh) = ∫(v⋅uh)*dΩhH assem = SparseMatrixAssembler(UH,VH) - - u_dir = (mode == :solution) ? interpolate(0.0,UH) : interpolate_everywhere(0.0,UH) + + fv_H = zero_free_values(UH) + dv_H = zero_dirichlet_values(UH) + u0 = FEFunction(UH,fv_H,true) # Zero at free dofs + u00 = FEFunction(UH,fv_H,dv_H,true) # Zero everywhere + + u_dir = (mode == :solution) ? 
u0 : u00 u,v = get_trial_fe_basis(UH), get_fe_basis(VH) - data = collect_cell_matrix_and_vector(UH,VH,aH(u,v),lH(v,0.0),u_dir) + data = collect_cell_matrix_and_vector(UH,VH,aH(u,v),lH(v,u00),u_dir) AH,bH0 = assemble_matrix_and_vector(assem,data) xH = PVector(0.0,AH.cols) bH = copy(bH0) From c64b505c5ea91a94b5ae44d400140625a620e98f Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 23 Feb 2023 14:33:36 +1100 Subject: [PATCH 81/95] Added PatchBased methods for multilevel --- .../PatchBasedSmoothers.jl | 2 ++ .../mpi/PatchDecompositions.jl | 23 +++++++++++--- src/PatchBasedSmoothers/mpi/PatchFESpaces.jl | 30 +++++++++++++++---- 3 files changed, 46 insertions(+), 9 deletions(-) diff --git a/src/PatchBasedSmoothers/PatchBasedSmoothers.jl b/src/PatchBasedSmoothers/PatchBasedSmoothers.jl index dcaf0d6c..e6a03299 100644 --- a/src/PatchBasedSmoothers/PatchBasedSmoothers.jl +++ b/src/PatchBasedSmoothers/PatchBasedSmoothers.jl @@ -12,6 +12,8 @@ using Gridap.FESpaces using PartitionedArrays using GridapDistributed +using GridapSolvers.MultilevelTools + export PatchDecomposition export PatchFESpace export PatchBasedLinearSolver diff --git a/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl b/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl index 5075c697..79a44488 100644 --- a/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl +++ b/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl @@ -4,24 +4,39 @@ struct DistributedPatchDecomposition{Dc,Dp,A,B} <: GridapType model::B end -function PatchDecomposition(model::GridapDistributed.DistributedDiscreteModel{Dc,Dp}; +GridapDistributed.local_views(a::DistributedPatchDecomposition) = a.patch_decompositions + +function PatchDecomposition(model::GridapDistributed.AbstractDistributedDiscreteModel{Dc,Dp}; Dr=0, patch_boundary_style::PatchBoundaryStyle=PatchBoundaryExclude()) where {Dc,Dp} - patch_decompositions = map_parts(model.models) do lmodel + patch_decompositions = map_parts(local_views(model)) do lmodel PatchDecomposition(lmodel; Dr=Dr, patch_boundary_style=patch_boundary_style) end A = typeof(patch_decompositions) B = typeof(model) - DistributedPatchDecomposition{Dc,Dp,A,B}(patch_decompositions,model) + return DistributedPatchDecomposition{Dc,Dp,A,B}(patch_decompositions,model) +end + +function PatchDecomposition(mh::ModelHierarchy;kwargs...) + nlevs = num_levels(mh) + decompositions = Vector{DistributedPatchDecomposition}(undef,nlevs-1) + for lev in 1:nlevs-1 + parts = get_level_parts(mh,lev) + if i_am_in(parts) + model = get_model(mh,lev) + decompositions[lev] = PatchDecomposition(model;kwargs...) + end + end + return decompositions end function Gridap.Geometry.Triangulation(a::DistributedPatchDecomposition) trians = map_parts(a.patch_decompositions) do a Triangulation(a) end - GridapDistributed.DistributedTriangulation(trians,a.model) + return GridapDistributed.DistributedTriangulation(trians,a.model) end function get_patch_root_dim(a::DistributedPatchDecomposition) diff --git a/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl index cdd0c40d..2278eb52 100644 --- a/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl +++ b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl @@ -7,7 +7,7 @@ # 3. Each processor needs to know how many patches "touch" its owned DoFs. # This requires NO->O communication as well. 
[PENDING] -function PatchFESpace(model::GridapDistributed.DistributedDiscreteModel, +function PatchFESpace(model::GridapDistributed.AbstractDistributedDiscreteModel, reffe::Tuple{<:Gridap.FESpaces.ReferenceFEName,Any,Any}, conformity::Gridap.FESpaces.Conformity, patch_decomposition::DistributedPatchDecomposition, @@ -26,12 +26,12 @@ function PatchFESpace(model::GridapDistributed.DistributedDiscreteModel, end spaces = map_parts(f, - model.models, - patch_decomposition.patch_decompositions, - Vh.spaces, + local_views(model), + local_views(patch_decomposition), + local_views(Vh), root_gids.partition) - parts = get_part_ids(model.models) + parts = get_parts(model) nodofs = map_parts(spaces) do space num_free_dofs(space) end @@ -43,6 +43,26 @@ function PatchFESpace(model::GridapDistributed.DistributedDiscreteModel, return GridapDistributed.DistributedSingleFieldFESpace(spaces,gids,get_vector_type(Vh)) end +function PatchFESpace(mh::ModelHierarchy, + reffe::Tuple{<:Gridap.FESpaces.ReferenceFEName,Any,Any}, + conformity::Gridap.FESpaces.Conformity, + patch_decompositions::AbstractArray{<:DistributedPatchDecomposition}, + sh::FESpaceHierarchy) + nlevs = num_levels(mh) + levels = Vector{MultilevelTools.FESpaceHierarchyLevel}(undef,nlevs) + for lev in 1:nlevs-1 + parts = get_level_parts(mh,lev) + if i_am_in(parts) + model = get_model(mh,lev) + space = MultilevelTools.get_fe_space(sh,lev) + decomp = patch_decompositions[lev] + patch_space = PatchFESpace(model,reffe,conformity,decomp,space) + levels[lev] = MultilevelTools.FESpaceHierarchyLevel(lev,nothing,patch_space) + end + end + return FESpaceHierarchy(mh,levels) +end + # x \in PatchFESpace # y \in SingleFESpace function prolongate!(x::PVector, From 17b2c132094b172ffec3806891cca4e5d5bbfad8 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Thu, 23 Feb 2023 14:34:30 +1100 Subject: [PATCH 82/95] Adapted HDiv test to new patch based solvers --- test/mpi/GMGLinearSolversHDivRTTests.jl | 34 ++++++++++++++++++++++--- 1 file changed, 30 insertions(+), 4 deletions(-) diff --git a/test/mpi/GMGLinearSolversHDivRTTests.jl b/test/mpi/GMGLinearSolversHDivRTTests.jl index 1a9bff6f..f9513dd7 100644 --- a/test/mpi/GMGLinearSolversHDivRTTests.jl +++ b/test/mpi/GMGLinearSolversHDivRTTests.jl @@ -13,11 +13,33 @@ using GridapP4est using GridapSolvers using GridapSolvers.LinearSolvers +using GridapSolvers.MultilevelTools +using GridapSolvers.PatchBasedSmoothers u(x) = VectorValue(x[1],x[2]) f(x) = VectorValue(2.0*x[2]*(1.0-x[1]*x[1]),2.0*x[1]*(1-x[2]*x[2])) +function get_patch_smoothers(tests,patch_spaces,patch_decompositions,biform,qdegree) + mh = tests.mh + nlevs = num_levels(mh) + smoothers = Vector{RichardsonSmoother}(undef,nlevs-1) + for lev in 1:nlevs-1 + parts = get_level_parts(mh,lev) + if i_am_in(parts) + PD = patch_decompositions[lev] + Ph = get_fe_space(patch_spaces,lev) + Vh = get_fe_space(tests,lev) + Ω = Triangulation(PD) + dΩ = Measure(Ω,qdegree) + a(u,v) = biform(u,v,dΩ) + patch_smoother = PatchBasedLinearSolver(a,Ph,Vh,LUSolver()) + smoothers[lev] = RichardsonSmoother(patch_smoother,1,1.0/3.0) + end + end + return smoothers +end + function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, order, α) GridapP4est.with(parts) do domain = (0,1,0,1) @@ -32,12 +54,15 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, tests = TestFESpace(mh,reffe;dirichlet_tags="boundary") trials = TrialFESpace(tests,u) + patch_decompositions = PatchDecomposition(mh) + patch_spaces = 
PatchFESpace(mh,reffe,DivConformity(),patch_decompositions,tests) + biform(u,v,dΩ) = ∫(v⋅u)dΩ + ∫(α*divergence(v)⋅divergence(u))dΩ liform(v,dΩ) = ∫(v⋅f)dΩ smatrices, A, b = compute_hierarchy_matrices(trials,biform,liform,qdegree) # Preconditioner - smoothers = Fill(RichardsonSmoother(JacobiLinearSolver(),10,2.0/3.0),num_levels-1) + smoothers = get_patch_smoothers(tests,patch_spaces,patch_decompositions,biform,qdegree) restrictions, prolongations = setup_transfer_operators(trials,qdegree;mode=:residual) gmg = GMGLinearSolver(mh, @@ -57,9 +82,10 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, x = PVector(0.0,A.cols) x, history = IterativeSolvers.cg!(x,A,b; verbose=i_am_main(parts), - reltol=1.0e-12, + reltol=1.0e-8, Pl=ns, - log=true) + log=true, + maxiter=10) # Error norms and print solution model = get_model(mh,1) @@ -93,7 +119,7 @@ num_refs_coarse = 2 α = 1.0 num_parts_x_level = [4,2,1] ranks = num_parts_x_level[1] -#num_iters, num_free_dofs2 = with_backend(main,MPIBackend(),ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order,α) +num_iters, num_free_dofs2 = with_backend(main,MPIBackend(),ranks,coarse_grid_partition,num_parts_x_level,num_refs_coarse,order,α) """ From 3f5077dfc669b860a4d4d8622a1c7a5d45313d00 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Mon, 27 Feb 2023 17:00:10 +1100 Subject: [PATCH 83/95] Updated to GridapDistributed v0.2.7 --- Manifest.toml | 36 +++++++++---------- Project.toml | 2 ++ .../GridapDistributedExtensions.jl | 4 +-- src/MultilevelTools/ModelHierarchies.jl | 10 +++--- 4 files changed, 25 insertions(+), 27 deletions(-) diff --git a/Manifest.toml b/Manifest.toml index 84845db3..98105560 100644 --- a/Manifest.toml +++ b/Manifest.toml @@ -2,7 +2,7 @@ julia_version = "1.8.5" manifest_format = "2.0" -project_hash = "fd265c84f39675a275e4824c8107d86c61971692" +project_hash = "6e1b2e88a25d8cf834bc00f13db13fe1d85e9c3d" [[deps.AbstractFFTs]] deps = ["ChainRulesCore", "LinearAlgebra"] @@ -117,9 +117,9 @@ version = "1.0.1+0" [[deps.ConstructionBase]] deps = ["LinearAlgebra"] -git-tree-sha1 = "fb21ddd70a051d882a1686a5a550990bbe371a95" +git-tree-sha1 = "89a9db8d28102b094992472d333674bd1a83ce2a" uuid = "187b0558-2788-49d3-abe0-74a17ed4e7c9" -version = "1.4.1" +version = "1.5.1" [[deps.DataStructures]] deps = ["Compat", "InteractiveUtils", "OrderedCollections"] @@ -139,9 +139,9 @@ version = "1.1.0" [[deps.DiffRules]] deps = ["IrrationalConstants", "LogExpFunctions", "NaNMath", "Random", "SpecialFunctions"] -git-tree-sha1 = "c5b6685d53f933c11404a3ae9822afe30d522494" +git-tree-sha1 = "a4ad7ef19d2cdc2eff57abbbe68032b1cd0bd8f8" uuid = "b552c78f-8df3-52c6-915a-8e097449b14b" -version = "1.12.2" +version = "1.13.0" [[deps.Distances]] deps = ["LinearAlgebra", "SparseArrays", "Statistics", "StatsAPI"] @@ -205,9 +205,9 @@ version = "2.18.0" [[deps.ForwardDiff]] deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions", "StaticArrays"] -git-tree-sha1 = "a69dd6db8a809f78846ff259298678f0d6212180" +git-tree-sha1 = "00e252f4d706b3d55a8863432e742bf5717b498d" uuid = "f6369f11-7733-5829-9624-2563aa707210" -version = "0.10.34" +version = "0.10.35" [[deps.Future]] deps = ["Random"] @@ -215,23 +215,19 @@ uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820" [[deps.Gridap]] deps = ["AbstractTrees", "BSON", "BlockArrays", "Combinatorics", "DataStructures", "DocStringExtensions", "FastGaussQuadrature", "FileIO", "FillArrays", "ForwardDiff", "JLD2", 
"JSON", "LineSearches", "LinearAlgebra", "NLsolve", "NearestNeighbors", "PolynomialBases", "QuadGK", "Random", "SparseArrays", "SparseMatricesCSR", "StaticArrays", "Test", "WriteVTK"] -git-tree-sha1 = "ddd11e52330f755829ccc36e40c50d98a400c42c" -repo-rev = "adaptivity" -repo-url = "https://github.com/gridap/Gridap.jl.git" +git-tree-sha1 = "31eb81cdfba7c5d1e00e70891b3257719a6237b4" uuid = "56d4f2e9-7ea1-5844-9cf6-b9c51ca7ce8e" -version = "0.17.16" +version = "0.17.17" [[deps.GridapDistributed]] deps = ["FillArrays", "Gridap", "LinearAlgebra", "MPI", "PartitionedArrays", "SparseArrays", "SparseMatricesCSR", "WriteVTK"] -git-tree-sha1 = "0cd6de7e550a07ae3a51aff738cc53ce3055753d" -repo-rev = "p4est-migration" -repo-url = "https://github.com/gridap/GridapDistributed.jl.git" +git-tree-sha1 = "180748d8c92eb66a2c67efab53b617e2dd154ca6" uuid = "f9701e48-63b3-45aa-9a63-9bc6c271f355" -version = "0.2.6" +version = "0.2.7" [[deps.GridapP4est]] deps = ["ArgParse", "FillArrays", "Gridap", "GridapDistributed", "Libdl", "MPI", "P4est_wrapper", "PartitionedArrays", "Test"] -git-tree-sha1 = "6bae326892fa48e44633cf948f8b7cba8d4a92b0" +git-tree-sha1 = "f3df9c0476d172c8fe0dda27ed71d87df03fec92" repo-rev = "p4est-migration" repo-url = "https://github.com/gridap/GridapP4est.jl.git" uuid = "c2c8e14b-f5fd-423d-9666-1dd9ad120af9" @@ -260,9 +256,9 @@ uuid = "3587e190-3f89-42d0-90ee-14403ec27112" version = "0.1.8" [[deps.IrrationalConstants]] -git-tree-sha1 = "7fd44fd4ff43fc60815f8e764c0f352b83c49151" +git-tree-sha1 = "630b497eafcc20001bba38a4651b327dcfc491d2" uuid = "92d709cd-6900-40b7-9082-c6be49f344b6" -version = "0.1.1" +version = "0.2.2" [[deps.IterativeSolvers]] deps = ["LinearAlgebra", "Printf", "Random", "RecipesBase", "SparseArrays"] @@ -622,9 +618,9 @@ version = "0.6.7" [[deps.SpecialFunctions]] deps = ["ChainRulesCore", "IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"] -git-tree-sha1 = "d75bda01f8c31ebb72df80a46c88b25d1c79c56d" +git-tree-sha1 = "ef28127915f4229c971eb43f3fc075dd3fe91880" uuid = "276daf66-3868-5448-9aa4-cd146d93841b" -version = "2.1.7" +version = "2.2.0" [[deps.StaticArrays]] deps = ["LinearAlgebra", "Random", "StaticArraysCore", "Statistics"] diff --git a/Project.toml b/Project.toml index 50de03a2..61de76a3 100644 --- a/Project.toml +++ b/Project.toml @@ -17,6 +17,8 @@ PartitionedArrays = "5a9dfac6-5c52-46f7-8278-5e2210713be9" Printf = "de0858da-6303-5e67-8744-51eddeeeb8d7" [compat] +Gridap = "0.17.17" +GridapDistributed = "0.2.7" PartitionedArrays = "0.2.15" julia = "1.7" diff --git a/src/MultilevelTools/GridapDistributedExtensions.jl b/src/MultilevelTools/GridapDistributedExtensions.jl index 0f3d58a1..b7e163a0 100644 --- a/src/MultilevelTools/GridapDistributedExtensions.jl +++ b/src/MultilevelTools/GridapDistributedExtensions.jl @@ -45,7 +45,7 @@ end # Void GridapDistributed structures -struct VoidDistributedDiscreteModel{Dc,Dp,A} <: GridapDistributed.AbstractDistributedDiscreteModel{Dc,Dp} +struct VoidDistributedDiscreteModel{Dc,Dp,A} <: GridapDistributed.DistributedDiscreteModel{Dc,Dp} parts::A function VoidDistributedDiscreteModel(Dc::Int,Dp::Int,parts) A = typeof(parts) @@ -53,7 +53,7 @@ struct VoidDistributedDiscreteModel{Dc,Dp,A} <: GridapDistributed.AbstractDistri end end -function VoidDistributedDiscreteModel(model::GridapDistributed.AbstractDistributedDiscreteModel{Dc,Dp}) where {Dc,Dp} +function VoidDistributedDiscreteModel(model::GridapDistributed.DistributedDiscreteModel{Dc,Dp}) where {Dc,Dp} return VoidDistributedDiscreteModel(Dc,Dp,get_parts(model)) 
end diff --git a/src/MultilevelTools/ModelHierarchies.jl b/src/MultilevelTools/ModelHierarchies.jl index 9403e189..8c50802c 100644 --- a/src/MultilevelTools/ModelHierarchies.jl +++ b/src/MultilevelTools/ModelHierarchies.jl @@ -51,7 +51,7 @@ has_refinement(a::ModelHierarchyLevel{A,Nothing}) where A = false the number of parts of `model`. """ function ModelHierarchy(root_parts ::AbstractPData, - model ::GridapDistributed.AbstractDistributedDiscreteModel, + model ::GridapDistributed.DistributedDiscreteModel, num_procs_x_level ::Vector{<:Integer}; mesh_refinement = true, kwargs...) @@ -81,7 +81,7 @@ function ModelHierarchy(root_parts ::AbstractPData, end function _model_hierarchy_without_refinement_bottom_up(root_parts::AbstractPData, - bottom_model::GridapDistributed.AbstractDistributedDiscreteModel, + bottom_model::GridapDistributed.DistributedDiscreteModel, num_procs_x_level::Vector{<:Integer}) num_levels = length(num_procs_x_level) level_parts = Vector{typeof(root_parts)}(undef,num_levels) @@ -107,7 +107,7 @@ function _model_hierarchy_without_refinement_bottom_up(root_parts::AbstractPData end function _model_hierarchy_without_refinement_top_down(root_parts::AbstractPData, - top_model::GridapDistributed.AbstractDistributedDiscreteModel, + top_model::GridapDistributed.DistributedDiscreteModel, num_procs_x_level::Vector{<:Integer}) num_levels = length(num_procs_x_level) level_parts = Vector{typeof(root_parts)}(undef,num_levels) @@ -133,7 +133,7 @@ function _model_hierarchy_without_refinement_top_down(root_parts::AbstractPData, end function _model_hierarchy_by_refinement(root_parts::AbstractPData, - coarsest_model::GridapDistributed.AbstractDistributedDiscreteModel, + coarsest_model::GridapDistributed.DistributedDiscreteModel, num_procs_x_level::Vector{<:Integer}; num_refs_x_level=nothing) # TODO: Implement support for num_refs_x_level? (future work) @@ -165,7 +165,7 @@ function _model_hierarchy_by_refinement(root_parts::AbstractPData, end function _model_hierarchy_by_coarsening(root_parts::AbstractPData, - finest_model::GridapDistributed.AbstractDistributedDiscreteModel, + finest_model::GridapDistributed.DistributedDiscreteModel, num_procs_x_level::Vector{<:Integer}; num_refs_x_level=nothing) # TODO: Implement support for num_refs_x_level? 
(future work) From 31f58b96566aceee40993a3e8086894e32806068 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 28 Feb 2023 12:23:11 +1100 Subject: [PATCH 84/95] Fix Project.toml --- Project.toml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/Project.toml b/Project.toml index 02bd1f70..c4c8c073 100644 --- a/Project.toml +++ b/Project.toml @@ -3,10 +3,6 @@ name = "GridapSolvers" uuid = "6d3209ee-5e3c-4db7-a716-942eb12ed534" version = "0.1.0" -[compat] -PartitionedArrays = "0.2.15" -julia = "1.7" - [deps] ArgParse = "c7e460c6-2fb9-53a9-8c5b-16f535851c63" FillArrays = "1a297f60-69ca-5386-bcde-b61e274b549b" From 0fb4b121ae4a71a11682a98ae7d73e5fe1c405da Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 28 Feb 2023 12:26:16 +1100 Subject: [PATCH 85/95] Updated to GridapDistributed 0.2.7 --- src/PatchBasedSmoothers/mpi/PatchDecompositions.jl | 2 +- src/PatchBasedSmoothers/mpi/PatchFESpaces.jl | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl b/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl index 79a44488..20b45e7c 100644 --- a/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl +++ b/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl @@ -6,7 +6,7 @@ end GridapDistributed.local_views(a::DistributedPatchDecomposition) = a.patch_decompositions -function PatchDecomposition(model::GridapDistributed.AbstractDistributedDiscreteModel{Dc,Dp}; +function PatchDecomposition(model::GridapDistributed.DistributedDiscreteModel{Dc,Dp}; Dr=0, patch_boundary_style::PatchBoundaryStyle=PatchBoundaryExclude()) where {Dc,Dp} patch_decompositions = map_parts(local_views(model)) do lmodel diff --git a/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl index 2278eb52..24232d87 100644 --- a/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl +++ b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl @@ -7,7 +7,7 @@ # 3. Each processor needs to know how many patches "touch" its owned DoFs. # This requires NO->O communication as well. 
-function PatchFESpace(model::GridapDistributed.AbstractDistributedDiscreteModel, +function PatchFESpace(model::GridapDistributed.DistributedDiscreteModel, reffe::Tuple{<:Gridap.FESpaces.ReferenceFEName,Any,Any}, conformity::Gridap.FESpaces.Conformity, patch_decomposition::DistributedPatchDecomposition, From ee6d28ec183f440b47b350bc0c463136989f24d7 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Tue, 28 Feb 2023 16:58:58 +1100 Subject: [PATCH 86/95] Removed Manifest.toml --- .gitignore | 1 + Manifest.toml | 728 -------------------------------------------------- 2 files changed, 1 insertion(+), 728 deletions(-) delete mode 100644 Manifest.toml diff --git a/.gitignore b/.gitignore index 722d5e71..f8509162 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,2 @@ .vscode +Manifest.toml diff --git a/Manifest.toml b/Manifest.toml deleted file mode 100644 index 98105560..00000000 --- a/Manifest.toml +++ /dev/null @@ -1,728 +0,0 @@ -# This file is machine-generated - editing it directly is not advised - -julia_version = "1.8.5" -manifest_format = "2.0" -project_hash = "6e1b2e88a25d8cf834bc00f13db13fe1d85e9c3d" - -[[deps.AbstractFFTs]] -deps = ["ChainRulesCore", "LinearAlgebra"] -git-tree-sha1 = "69f7020bd72f069c219b5e8c236c1fa90d2cb409" -uuid = "621f4979-c628-5d54-868e-fcf4e3e8185c" -version = "1.2.1" - -[[deps.AbstractTrees]] -git-tree-sha1 = "faa260e4cb5aba097a73fab382dd4b5819d8ec8c" -uuid = "1520ce14-60c1-5f80-bbc7-55ef81b5835c" -version = "0.4.4" - -[[deps.Adapt]] -deps = ["LinearAlgebra"] -git-tree-sha1 = "0310e08cb19f5da31d08341c6120c047598f5b9c" -uuid = "79e6a3ab-5dfb-504d-930d-738a2a938a0e" -version = "3.5.0" - -[[deps.ArgCheck]] -git-tree-sha1 = "a3a402a35a2f7e0b87828ccabbd5ebfbebe356b4" -uuid = "dce04be8-c92d-5529-be00-80e4d2c0e197" -version = "2.3.0" - -[[deps.ArgParse]] -deps = ["Logging", "TextWrap"] -git-tree-sha1 = "3102bce13da501c9104df33549f511cd25264d7d" -uuid = "c7e460c6-2fb9-53a9-8c5b-16f535851c63" -version = "1.1.4" - -[[deps.ArgTools]] -uuid = "0dad84c5-d112-42e6-8d28-ef12dabb789f" -version = "1.1.1" - -[[deps.ArrayInterface]] -deps = ["Adapt", "LinearAlgebra", "Requires", "SnoopPrecompile", "SparseArrays", "SuiteSparse"] -git-tree-sha1 = "4d9946e51e24f5e509779e3e2c06281a733914c2" -uuid = "4fba245c-0d91-5ea0-9b3e-6abc04ee57a9" -version = "7.1.0" - -[[deps.ArrayLayouts]] -deps = ["FillArrays", "LinearAlgebra", "SparseArrays"] -git-tree-sha1 = "56c347caf09ad8acb3e261fe75f8e09652b7b05b" -uuid = "4c555306-a7a7-4459-81d9-ec55ddd5c99a" -version = "0.7.10" - -[[deps.Artifacts]] -uuid = "56f22d72-fd6d-98f1-02f0-08ddc0907c33" - -[[deps.AutoHashEquals]] -git-tree-sha1 = "45bb6705d93be619b81451bb2006b7ee5d4e4453" -uuid = "15f4f7f2-30c1-5605-9d31-71845cf9641f" -version = "0.2.0" - -[[deps.BSON]] -git-tree-sha1 = "86e9781ac28f4e80e9b98f7f96eae21891332ac2" -uuid = "fbb218c0-5317-5bc6-957e-2ee96dd4b1f0" -version = "0.3.6" - -[[deps.Base64]] -uuid = "2a0f44e3-6c83-55bd-87e4-b1978d98bd5f" - -[[deps.BlockArrays]] -deps = ["ArrayLayouts", "FillArrays", "LinearAlgebra"] -git-tree-sha1 = "21490270d1fcf2efa9ddb2126d6958e9b72a4db0" -uuid = "8e7c35d0-a365-5155-bbbb-fb81a777f24e" -version = "0.16.11" - -[[deps.CEnum]] -git-tree-sha1 = "eb4cb44a499229b3b8426dcfb5dd85333951ff90" -uuid = "fa961155-64e5-5f13-b03f-caf6b980ea82" -version = "0.4.2" - -[[deps.ChainRulesCore]] -deps = ["Compat", "LinearAlgebra", "SparseArrays"] -git-tree-sha1 = "c6d890a52d2c4d55d326439580c3b8d0875a77d9" -uuid = "d360d2e6-b24c-11e9-a2a3-2a2ae2dbcce4" -version = "1.15.7" - -[[deps.ChangesOfVariables]] -deps = 
["ChainRulesCore", "LinearAlgebra", "Test"] -git-tree-sha1 = "485193efd2176b88e6622a39a246f8c5b600e74e" -uuid = "9e997f8a-9a97-42d5-a9f1-ce6bfc15e2c0" -version = "0.1.6" - -[[deps.CodecZlib]] -deps = ["TranscodingStreams", "Zlib_jll"] -git-tree-sha1 = "9c209fb7536406834aa938fb149964b985de6c83" -uuid = "944b1d66-785c-5afd-91f1-9de20f533193" -version = "0.7.1" - -[[deps.Combinatorics]] -git-tree-sha1 = "08c8b6831dc00bfea825826be0bc8336fc369860" -uuid = "861a8166-3701-5b0c-9a16-15d98fcdc6aa" -version = "1.0.2" - -[[deps.CommonSubexpressions]] -deps = ["MacroTools", "Test"] -git-tree-sha1 = "7b8a93dba8af7e3b42fecabf646260105ac373f7" -uuid = "bbf7d656-a473-5ed7-a52c-81e309532950" -version = "0.3.0" - -[[deps.Compat]] -deps = ["Dates", "LinearAlgebra", "UUIDs"] -git-tree-sha1 = "61fdd77467a5c3ad071ef8277ac6bd6af7dd4c04" -uuid = "34da2185-b29b-5c13-b0c7-acf172513d20" -version = "4.6.0" - -[[deps.CompilerSupportLibraries_jll]] -deps = ["Artifacts", "Libdl"] -uuid = "e66e0078-7015-5450-92f7-15fbd957f2ae" -version = "1.0.1+0" - -[[deps.ConstructionBase]] -deps = ["LinearAlgebra"] -git-tree-sha1 = "89a9db8d28102b094992472d333674bd1a83ce2a" -uuid = "187b0558-2788-49d3-abe0-74a17ed4e7c9" -version = "1.5.1" - -[[deps.DataStructures]] -deps = ["Compat", "InteractiveUtils", "OrderedCollections"] -git-tree-sha1 = "d1fff3a548102f48987a52a2e0d114fa97d730f0" -uuid = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8" -version = "0.18.13" - -[[deps.Dates]] -deps = ["Printf"] -uuid = "ade2ca70-3891-5945-98fb-dc099432e06a" - -[[deps.DiffResults]] -deps = ["StaticArraysCore"] -git-tree-sha1 = "782dd5f4561f5d267313f23853baaaa4c52ea621" -uuid = "163ba53b-c6d8-5494-b064-1a9d43ac40c5" -version = "1.1.0" - -[[deps.DiffRules]] -deps = ["IrrationalConstants", "LogExpFunctions", "NaNMath", "Random", "SpecialFunctions"] -git-tree-sha1 = "a4ad7ef19d2cdc2eff57abbbe68032b1cd0bd8f8" -uuid = "b552c78f-8df3-52c6-915a-8e097449b14b" -version = "1.13.0" - -[[deps.Distances]] -deps = ["LinearAlgebra", "SparseArrays", "Statistics", "StatsAPI"] -git-tree-sha1 = "3258d0659f812acde79e8a74b11f17ac06d0ca04" -uuid = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7" -version = "0.10.7" - -[[deps.Distributed]] -deps = ["Random", "Serialization", "Sockets"] -uuid = "8ba89e20-285c-5b6f-9357-94700520ee1b" - -[[deps.DocStringExtensions]] -deps = ["LibGit2"] -git-tree-sha1 = "b19534d1895d702889b219c382a6e18010797f0b" -uuid = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae" -version = "0.8.6" - -[[deps.Downloads]] -deps = ["ArgTools", "FileWatching", "LibCURL", "NetworkOptions"] -uuid = "f43a241f-c20a-4ad4-852c-f6b1247861c6" -version = "1.6.0" - -[[deps.FFTW]] -deps = ["AbstractFFTs", "FFTW_jll", "LinearAlgebra", "MKL_jll", "Preferences", "Reexport"] -git-tree-sha1 = "90630efff0894f8142308e334473eba54c433549" -uuid = "7a1cc6ca-52ef-59f5-83cd-3a7055c09341" -version = "1.5.0" - -[[deps.FFTW_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "c6033cc3892d0ef5bb9cd29b7f2f0331ea5184ea" -uuid = "f5851436-0d7a-5f13-b9de-f02708fd171a" -version = "3.3.10+0" - -[[deps.FastGaussQuadrature]] -deps = ["LinearAlgebra", "SpecialFunctions", "StaticArrays"] -git-tree-sha1 = "58d83dd5a78a36205bdfddb82b1bb67682e64487" -uuid = "442a2c76-b920-505d-bb47-c5924d526838" -version = "0.4.9" - -[[deps.FileIO]] -deps = ["Pkg", "Requires", "UUIDs"] -git-tree-sha1 = "7be5f99f7d15578798f338f5433b6c432ea8037b" -uuid = "5789e2e9-d7fb-5bc7-8068-2c6fae9b9549" -version = "1.16.0" - -[[deps.FileWatching]] -uuid = "7b1f6079-737a-58dc-b8bc-7a2ca5c1b5ee" - -[[deps.FillArrays]] -deps = 
["LinearAlgebra", "Random", "SparseArrays", "Statistics"] -git-tree-sha1 = "deed294cde3de20ae0b2e0355a6c4e1c6a5ceffc" -uuid = "1a297f60-69ca-5386-bcde-b61e274b549b" -version = "0.12.8" - -[[deps.FiniteDiff]] -deps = ["ArrayInterface", "LinearAlgebra", "Requires", "Setfield", "SparseArrays", "StaticArrays"] -git-tree-sha1 = "ed1b56934a2f7a65035976985da71b6a65b4f2cf" -uuid = "6a86dc24-6348-571c-b903-95158fe2bd41" -version = "2.18.0" - -[[deps.ForwardDiff]] -deps = ["CommonSubexpressions", "DiffResults", "DiffRules", "LinearAlgebra", "LogExpFunctions", "NaNMath", "Preferences", "Printf", "Random", "SpecialFunctions", "StaticArrays"] -git-tree-sha1 = "00e252f4d706b3d55a8863432e742bf5717b498d" -uuid = "f6369f11-7733-5829-9624-2563aa707210" -version = "0.10.35" - -[[deps.Future]] -deps = ["Random"] -uuid = "9fa8497b-333b-5362-9e8d-4d0656e87820" - -[[deps.Gridap]] -deps = ["AbstractTrees", "BSON", "BlockArrays", "Combinatorics", "DataStructures", "DocStringExtensions", "FastGaussQuadrature", "FileIO", "FillArrays", "ForwardDiff", "JLD2", "JSON", "LineSearches", "LinearAlgebra", "NLsolve", "NearestNeighbors", "PolynomialBases", "QuadGK", "Random", "SparseArrays", "SparseMatricesCSR", "StaticArrays", "Test", "WriteVTK"] -git-tree-sha1 = "31eb81cdfba7c5d1e00e70891b3257719a6237b4" -uuid = "56d4f2e9-7ea1-5844-9cf6-b9c51ca7ce8e" -version = "0.17.17" - -[[deps.GridapDistributed]] -deps = ["FillArrays", "Gridap", "LinearAlgebra", "MPI", "PartitionedArrays", "SparseArrays", "SparseMatricesCSR", "WriteVTK"] -git-tree-sha1 = "180748d8c92eb66a2c67efab53b617e2dd154ca6" -uuid = "f9701e48-63b3-45aa-9a63-9bc6c271f355" -version = "0.2.7" - -[[deps.GridapP4est]] -deps = ["ArgParse", "FillArrays", "Gridap", "GridapDistributed", "Libdl", "MPI", "P4est_wrapper", "PartitionedArrays", "Test"] -git-tree-sha1 = "f3df9c0476d172c8fe0dda27ed71d87df03fec92" -repo-rev = "p4est-migration" -repo-url = "https://github.com/gridap/GridapP4est.jl.git" -uuid = "c2c8e14b-f5fd-423d-9666-1dd9ad120af9" -version = "0.1.3" - -[[deps.GridapPETSc]] -deps = ["Gridap", "GridapDistributed", "Libdl", "LinearAlgebra", "MPI", "PETSc_jll", "PartitionedArrays", "Random", "SparseArrays", "SparseMatricesCSR"] -git-tree-sha1 = "e49b0ed48134534e8faf1ebfc30a62852cbaa00e" -uuid = "bcdc36c2-0c3e-11ea-095a-c9dadae499f1" -version = "0.4.4" - -[[deps.IntelOpenMP_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "d979e54b71da82f3a65b62553da4fc3d18c9004c" -uuid = "1d5cc7b8-4909-519e-a0f8-d0f5ad9712d0" -version = "2018.0.3+2" - -[[deps.InteractiveUtils]] -deps = ["Markdown"] -uuid = "b77e0a4c-d291-57a0-90e8-8db25a27a240" - -[[deps.InverseFunctions]] -deps = ["Test"] -git-tree-sha1 = "49510dfcb407e572524ba94aeae2fced1f3feb0f" -uuid = "3587e190-3f89-42d0-90ee-14403ec27112" -version = "0.1.8" - -[[deps.IrrationalConstants]] -git-tree-sha1 = "630b497eafcc20001bba38a4651b327dcfc491d2" -uuid = "92d709cd-6900-40b7-9082-c6be49f344b6" -version = "0.2.2" - -[[deps.IterativeSolvers]] -deps = ["LinearAlgebra", "Printf", "Random", "RecipesBase", "SparseArrays"] -git-tree-sha1 = "1169632f425f79429f245113b775a0e3d121457c" -uuid = "42fd0dbc-a981-5370-80f2-aaf504508153" -version = "0.9.2" - -[[deps.JLD2]] -deps = ["FileIO", "MacroTools", "Mmap", "OrderedCollections", "Pkg", "Printf", "Reexport", "TranscodingStreams", "UUIDs"] -git-tree-sha1 = "c3244ef42b7d4508c638339df1bdbf4353e144db" -uuid = "033835bb-8acc-5ee8-8aae-3f567f8a3819" -version = "0.4.30" - -[[deps.JLLWrappers]] -deps = ["Preferences"] -git-tree-sha1 = 
"abc9885a7ca2052a736a600f7fa66209f96506e1" -uuid = "692b3bcd-3c85-4b1f-b108-f13ce0eb3210" -version = "1.4.1" - -[[deps.JSON]] -deps = ["Dates", "Mmap", "Parsers", "Unicode"] -git-tree-sha1 = "3c837543ddb02250ef42f4738347454f95079d4e" -uuid = "682c06a0-de6a-54ab-a142-c8b1cf79cde6" -version = "0.21.3" - -[[deps.LLVMOpenMP_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "f689897ccbe049adb19a065c495e75f372ecd42b" -uuid = "1d63c593-3942-5779-bab2-d838dc0a180e" -version = "15.0.4+0" - -[[deps.LazyArtifacts]] -deps = ["Artifacts", "Pkg"] -uuid = "4af54fe1-eca0-43a8-85a7-787d91b784e3" - -[[deps.LibCURL]] -deps = ["LibCURL_jll", "MozillaCACerts_jll"] -uuid = "b27032c2-a3e7-50c8-80cd-2d36dbcbfd21" -version = "0.6.3" - -[[deps.LibCURL_jll]] -deps = ["Artifacts", "LibSSH2_jll", "Libdl", "MbedTLS_jll", "Zlib_jll", "nghttp2_jll"] -uuid = "deac9b47-8bc7-5906-a0fe-35ac56dc84c0" -version = "7.84.0+0" - -[[deps.LibGit2]] -deps = ["Base64", "NetworkOptions", "Printf", "SHA"] -uuid = "76f85450-5226-5b5a-8eaa-529ad045b433" - -[[deps.LibSSH2_jll]] -deps = ["Artifacts", "Libdl", "MbedTLS_jll"] -uuid = "29816b5a-b9ab-546f-933c-edad1886dfa8" -version = "1.10.2+0" - -[[deps.Libdl]] -uuid = "8f399da3-3557-5675-b5ff-fb832c97cbdb" - -[[deps.Libiconv_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "c7cb1f5d892775ba13767a87c7ada0b980ea0a71" -uuid = "94ce4f54-9a6c-5748-9c1c-f9c7231a4531" -version = "1.16.1+2" - -[[deps.LightXML]] -deps = ["Libdl", "XML2_jll"] -git-tree-sha1 = "e129d9391168c677cd4800f5c0abb1ed8cb3794f" -uuid = "9c8b4983-aa76-5018-a973-4c85ecc9e179" -version = "0.9.0" - -[[deps.LineSearches]] -deps = ["LinearAlgebra", "NLSolversBase", "NaNMath", "Parameters", "Printf"] -git-tree-sha1 = "7bbea35cec17305fc70a0e5b4641477dc0789d9d" -uuid = "d3d80556-e9d4-5f37-9878-2ab0fcc64255" -version = "7.2.0" - -[[deps.LinearAlgebra]] -deps = ["Libdl", "libblastrampoline_jll"] -uuid = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e" - -[[deps.LogExpFunctions]] -deps = ["ChainRulesCore", "ChangesOfVariables", "DocStringExtensions", "InverseFunctions", "IrrationalConstants", "LinearAlgebra"] -git-tree-sha1 = "0a1b7c2863e44523180fdb3146534e265a91870b" -uuid = "2ab3a3ac-af41-5b50-aa03-7779005ae688" -version = "0.3.23" - -[[deps.Logging]] -uuid = "56ddb016-857b-54e1-b83d-db4d58db5568" - -[[deps.METIS_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "1fd0a97409e418b78c53fac671cf4622efdf0f21" -uuid = "d00139f3-1899-568f-a2f0-47f597d42d70" -version = "5.1.2+0" - -[[deps.MKL_jll]] -deps = ["Artifacts", "IntelOpenMP_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "Pkg"] -git-tree-sha1 = "2ce8695e1e699b68702c03402672a69f54b8aca9" -uuid = "856f044c-d86e-5d09-b602-aeab76dc8ba7" -version = "2022.2.0+0" - -[[deps.MPI]] -deps = ["Distributed", "DocStringExtensions", "Libdl", "MPICH_jll", "MicrosoftMPI_jll", "OpenMPI_jll", "Pkg", "Random", "Requires", "Serialization", "Sockets"] -git-tree-sha1 = "d56a80d8cf8b9dc3050116346b3d83432b1912c0" -uuid = "da04e1cc-30fd-572f-bb4f-1f8673147195" -version = "0.19.2" - -[[deps.MPICH_jll]] -deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "Pkg", "TOML"] -git-tree-sha1 = "6d4fa43afab4611d090b11617ecea1a144b21d35" -uuid = "7cb0a576-ebde-5e09-9194-50597f1243b4" -version = "4.0.2+5" - -[[deps.MPIPreferences]] -deps = ["Libdl", "Preferences"] -git-tree-sha1 = "71f937129731a29eabe6969db2c90368a4408933" -uuid = "3da0fdf6-3ccc-4f1b-acd9-58baa6c99267" -version = "0.1.7" - 
-[[deps.MPItrampoline_jll]] -deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "Pkg", "TOML"] -git-tree-sha1 = "b3f9e42685b4ad614eca0b44bd863cd41b1c86ea" -uuid = "f1f71cc9-e9ae-5b93-9b94-4fe0e1ad3748" -version = "5.0.2+1" - -[[deps.MUMPS_jll]] -deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "METIS_jll", "MPICH_jll", "MPIPreferences", "MPItrampoline_jll", "MicrosoftMPI_jll", "OpenBLAS32_jll", "OpenMPI_jll", "PARMETIS_jll", "Pkg", "SCALAPACK32_jll", "SCOTCH_jll", "TOML"] -git-tree-sha1 = "99f5791b81e59aad952d0629b066cde87a1e81f9" -uuid = "ca64183c-ec4f-5579-95d5-17e128c21291" -version = "5.5.1+1" - -[[deps.MacroTools]] -deps = ["Markdown", "Random"] -git-tree-sha1 = "42324d08725e200c23d4dfb549e0d5d89dede2d2" -uuid = "1914dd2f-81c6-5fcd-8719-6d5c9610ff09" -version = "0.5.10" - -[[deps.Markdown]] -deps = ["Base64"] -uuid = "d6f4376e-aef5-505a-96c1-9c027394607a" - -[[deps.MbedTLS_jll]] -deps = ["Artifacts", "Libdl"] -uuid = "c8ffd9c3-330d-5841-b78e-0817d7145fa1" -version = "2.28.0+0" - -[[deps.MicrosoftMPI_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "a16aa086d335ed7e0170c5265247db29172af2f9" -uuid = "9237b28f-5490-5468-be7b-bb81f5f5e6cf" -version = "10.1.3+2" - -[[deps.Mmap]] -uuid = "a63ad114-7e13-5084-954f-fe012c677804" - -[[deps.MozillaCACerts_jll]] -uuid = "14a3606d-f60d-562e-9121-12d972cd8159" -version = "2022.2.1" - -[[deps.NLSolversBase]] -deps = ["DiffResults", "Distributed", "FiniteDiff", "ForwardDiff"] -git-tree-sha1 = "a0b464d183da839699f4c79e7606d9d186ec172c" -uuid = "d41bc354-129a-5804-8e4c-c37616107c6c" -version = "7.8.3" - -[[deps.NLsolve]] -deps = ["Distances", "LineSearches", "LinearAlgebra", "NLSolversBase", "Printf", "Reexport"] -git-tree-sha1 = "019f12e9a1a7880459d0173c182e6a99365d7ac1" -uuid = "2774e3e8-f4cf-5e23-947b-6d7e65073b56" -version = "4.5.1" - -[[deps.NaNMath]] -deps = ["OpenLibm_jll"] -git-tree-sha1 = "0877504529a3e5c3343c6f8b4c0381e57e4387e4" -uuid = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3" -version = "1.0.2" - -[[deps.NearestNeighbors]] -deps = ["Distances", "StaticArrays"] -git-tree-sha1 = "2c3726ceb3388917602169bed973dbc97f1b51a8" -uuid = "b8a86587-4115-5ab1-83bc-aa920d37bbce" -version = "0.4.13" - -[[deps.NetworkOptions]] -uuid = "ca575930-c2e3-43a9-ace4-1e988b2c1908" -version = "1.2.0" - -[[deps.OpenBLAS32_jll]] -deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "9c6c2ed4b7acd2137b878eb96c68e63b76199d0f" -uuid = "656ef2d0-ae68-5445-9ca0-591084a874a2" -version = "0.3.17+0" - -[[deps.OpenBLAS_jll]] -deps = ["Artifacts", "CompilerSupportLibraries_jll", "Libdl"] -uuid = "4536629a-c528-5b80-bd46-f80d51c5b363" -version = "0.3.20+0" - -[[deps.OpenLibm_jll]] -deps = ["Artifacts", "Libdl"] -uuid = "05823500-19ac-5b8b-9628-191a04bc5112" -version = "0.8.1+0" - -[[deps.OpenMPI_jll]] -deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPIPreferences", "Pkg", "TOML"] -git-tree-sha1 = "346d6b357a480300ed7854dbc70e746ac52e10fd" -uuid = "fe0851c0-eecd-5654-98d4-656369965a5c" -version = "4.1.3+3" - -[[deps.OpenSpecFun_jll]] -deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "Libdl", "Pkg"] -git-tree-sha1 = "13652491f6856acfd2db29360e1bbcd4565d04f1" -uuid = "efe28fd5-8261-553b-a9e1-b2916fc3738e" -version = "0.5.5+0" - -[[deps.OrderedCollections]] -git-tree-sha1 = "85f8e6578bf1f9ee0d11e7bb1b1456435479d47c" -uuid = 
"bac558e1-5e72-5ebc-8fee-abe8a469f55d" -version = "1.4.1" - -[[deps.P4est_jll]] -deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl", "MPICH_jll", "MPIPreferences", "MPItrampoline_jll", "MicrosoftMPI_jll", "OpenMPI_jll", "Pkg", "TOML", "Zlib_jll"] -git-tree-sha1 = "d4b48fd3ca75a398916c58c1e4628bf0ce11a7b6" -uuid = "6b5a15aa-cf52-5330-8376-5e5d90283449" -version = "2.8.1+0" - -[[deps.P4est_wrapper]] -deps = ["CEnum", "Libdl", "MPI", "P4est_jll"] -git-tree-sha1 = "9aaa64b061ef8f4ba4ef0095cad2e25305ee65cf" -uuid = "3743d7c0-8adf-11ea-380b-7d33b0ecc1da" -version = "0.1.3" - -[[deps.PARMETIS_jll]] -deps = ["Artifacts", "JLLWrappers", "LazyArtifacts", "Libdl", "METIS_jll", "MPICH_jll", "MPIPreferences", "MPItrampoline_jll", "MicrosoftMPI_jll", "OpenMPI_jll", "Pkg", "TOML"] -git-tree-sha1 = "ac9f4ce9b7dd92575b1f0e1d6a6e0f0729597f5f" -uuid = "b247a4be-ddc1-5759-8008-7e02fe3dbdaa" -version = "4.0.6+0" - -[[deps.PETSc_jll]] -deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "METIS_jll", "MPICH_jll", "MPIPreferences", "MPItrampoline_jll", "MUMPS_jll", "MicrosoftMPI_jll", "OpenBLAS32_jll", "OpenMPI_jll", "PARMETIS_jll", "Pkg", "SCALAPACK32_jll", "SCOTCH_jll", "SuiteSparse_jll", "SuperLU_DIST_jll", "TOML"] -git-tree-sha1 = "a6182e59b4c174b847752c4cd70895cfe44fa7a7" -uuid = "8fa3689e-f0b9-5420-9873-adf6ccf46f2d" -version = "3.16.8+0" - -[[deps.Parameters]] -deps = ["OrderedCollections", "UnPack"] -git-tree-sha1 = "34c0e9ad262e5f7fc75b10a9952ca7692cfc5fbe" -uuid = "d96e819e-fc66-5662-9728-84c9c7592b0a" -version = "0.12.3" - -[[deps.Parsers]] -deps = ["Dates", "SnoopPrecompile"] -git-tree-sha1 = "6f4fbcd1ad45905a5dee3f4256fabb49aa2110c6" -uuid = "69de0a69-1ddd-5017-9359-2bf0b02dc9f0" -version = "2.5.7" - -[[deps.PartitionedArrays]] -deps = ["Distances", "IterativeSolvers", "LinearAlgebra", "MPI", "Printf", "SparseArrays", "SparseMatricesCSR"] -git-tree-sha1 = "8a8a72723ffb62a395b0475b78b4695fb7090441" -uuid = "5a9dfac6-5c52-46f7-8278-5e2210713be9" -version = "0.2.15" - -[[deps.Pkg]] -deps = ["Artifacts", "Dates", "Downloads", "LibGit2", "Libdl", "Logging", "Markdown", "Printf", "REPL", "Random", "SHA", "Serialization", "TOML", "Tar", "UUIDs", "p7zip_jll"] -uuid = "44cfe95a-1eb2-52ea-b672-e2afdf69b78f" -version = "1.8.0" - -[[deps.PolynomialBases]] -deps = ["ArgCheck", "AutoHashEquals", "FFTW", "FastGaussQuadrature", "LinearAlgebra", "Requires", "SpecialFunctions", "UnPack"] -git-tree-sha1 = "38629c0a9cace7c6f51c103084589ff8a7a1c02f" -uuid = "c74db56a-226d-5e98-8bb0-a6049094aeea" -version = "0.4.15" - -[[deps.Preferences]] -deps = ["TOML"] -git-tree-sha1 = "47e5f437cc0e7ef2ce8406ce1e7e24d44915f88d" -uuid = "21216c6a-2e73-6563-6e65-726566657250" -version = "1.3.0" - -[[deps.Printf]] -deps = ["Unicode"] -uuid = "de0858da-6303-5e67-8744-51eddeeeb8d7" - -[[deps.QuadGK]] -deps = ["DataStructures", "LinearAlgebra"] -git-tree-sha1 = "786efa36b7eff813723c4849c90456609cf06661" -uuid = "1fd47b50-473d-5c70-9696-f719f8f3bcdc" -version = "2.8.1" - -[[deps.REPL]] -deps = ["InteractiveUtils", "Markdown", "Sockets", "Unicode"] -uuid = "3fa0cd96-eef1-5676-8a61-b3b8758bbffb" - -[[deps.Random]] -deps = ["SHA", "Serialization"] -uuid = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c" - -[[deps.RecipesBase]] -deps = ["SnoopPrecompile"] -git-tree-sha1 = "261dddd3b862bd2c940cf6ca4d1c8fe593e457c8" -uuid = "3cdcf5f2-1ef4-517c-9805-6587b60abb01" -version = "1.3.3" - -[[deps.Reexport]] -git-tree-sha1 = "45e428421666073eab6f2da5c9d310d99bb12f9b" -uuid = "189a3867-3050-52da-a836-e630ba90ab69" 
-version = "1.2.2" - -[[deps.Requires]] -deps = ["UUIDs"] -git-tree-sha1 = "838a3a4188e2ded87a4f9f184b4b0d78a1e91cb7" -uuid = "ae029012-a4dd-5104-9daa-d747884805df" -version = "1.3.0" - -[[deps.SCALAPACK32_jll]] -deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LazyArtifacts", "Libdl", "MPICH_jll", "MPIPreferences", "MPItrampoline_jll", "MicrosoftMPI_jll", "OpenBLAS32_jll", "OpenMPI_jll", "Pkg", "TOML"] -git-tree-sha1 = "36312ec64bf64a4120ac2a47438f5775ea28abc4" -uuid = "aabda75e-bfe4-5a37-92e3-ffe54af3c273" -version = "2.2.1+0" - -[[deps.SCOTCH_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Pkg", "Zlib_jll"] -git-tree-sha1 = "7110b749766853054ce8a2afaa73325d72d32129" -uuid = "a8d0f55d-b80e-548d-aff6-1a04c175f0f9" -version = "6.1.3+0" - -[[deps.SHA]] -uuid = "ea8e919c-243c-51af-8825-aaa63cd721ce" -version = "0.7.0" - -[[deps.Serialization]] -uuid = "9e88b42a-f829-5b0c-bbe9-9e923198166b" - -[[deps.Setfield]] -deps = ["ConstructionBase", "Future", "MacroTools", "StaticArraysCore"] -git-tree-sha1 = "e2cc6d8c88613c05e1defb55170bf5ff211fbeac" -uuid = "efcf1570-3423-57d1-acb7-fd33fddbac46" -version = "1.1.1" - -[[deps.SnoopPrecompile]] -deps = ["Preferences"] -git-tree-sha1 = "e760a70afdcd461cf01a575947738d359234665c" -uuid = "66db9d55-30c0-4569-8b51-7e840670fc0c" -version = "1.0.3" - -[[deps.Sockets]] -uuid = "6462fe0b-24de-5631-8697-dd941f90decc" - -[[deps.SparseArrays]] -deps = ["LinearAlgebra", "Random"] -uuid = "2f01184e-e22b-5df5-ae63-d93ebab69eaf" - -[[deps.SparseMatricesCSR]] -deps = ["LinearAlgebra", "SparseArrays", "SuiteSparse"] -git-tree-sha1 = "38677ca58e80b5cad2382e5a1848f93b054ad28d" -uuid = "a0a7dd2c-ebf4-11e9-1f05-cf50bc540ca1" -version = "0.6.7" - -[[deps.SpecialFunctions]] -deps = ["ChainRulesCore", "IrrationalConstants", "LogExpFunctions", "OpenLibm_jll", "OpenSpecFun_jll"] -git-tree-sha1 = "ef28127915f4229c971eb43f3fc075dd3fe91880" -uuid = "276daf66-3868-5448-9aa4-cd146d93841b" -version = "2.2.0" - -[[deps.StaticArrays]] -deps = ["LinearAlgebra", "Random", "StaticArraysCore", "Statistics"] -git-tree-sha1 = "2d7d9e1ddadc8407ffd460e24218e37ef52dd9a3" -uuid = "90137ffa-7385-5640-81b9-e52037218182" -version = "1.5.16" - -[[deps.StaticArraysCore]] -git-tree-sha1 = "6b7ba252635a5eff6a0b0664a41ee140a1c9e72a" -uuid = "1e83bf80-4336-4d27-bf5d-d5a4f845583c" -version = "1.4.0" - -[[deps.Statistics]] -deps = ["LinearAlgebra", "SparseArrays"] -uuid = "10745b16-79ce-11e8-11f9-7d13ad32a3b2" - -[[deps.StatsAPI]] -deps = ["LinearAlgebra"] -git-tree-sha1 = "f9af7f195fb13589dd2e2d57fdb401717d2eb1f6" -uuid = "82ae8749-77ed-4fe6-ae5f-f523153014b0" -version = "1.5.0" - -[[deps.SuiteSparse]] -deps = ["Libdl", "LinearAlgebra", "Serialization", "SparseArrays"] -uuid = "4607b0f0-06f3-5cda-b6b1-a6196a1729e9" - -[[deps.SuiteSparse_jll]] -deps = ["Artifacts", "Libdl", "Pkg", "libblastrampoline_jll"] -uuid = "bea87d4a-7f5b-5778-9afe-8cc45184846c" -version = "5.10.1+0" - -[[deps.SuperLU_DIST_jll]] -deps = ["Artifacts", "CompilerSupportLibraries_jll", "JLLWrappers", "LLVMOpenMP_jll", "LazyArtifacts", "Libdl", "METIS_jll", "MPICH_jll", "MPIPreferences", "MPItrampoline_jll", "MicrosoftMPI_jll", "OpenBLAS32_jll", "OpenMPI_jll", "PARMETIS_jll", "Pkg", "TOML"] -git-tree-sha1 = "e156418856a6c1cc2f5418f0542d9cc43b24a1f9" -uuid = "9a1356b0-3c82-5da3-b77c-7c198e8bd7ab" -version = "8.0.2+0" - -[[deps.TOML]] -deps = ["Dates"] -uuid = "fa267f1f-6049-4f14-aa54-33bafae1ed76" -version = "1.0.0" - -[[deps.Tar]] -deps = ["ArgTools", "SHA"] -uuid = "a4e569a6-e804-4fa4-b0f3-eef7a1d5b13e" -version = 
"1.10.1" - -[[deps.Test]] -deps = ["InteractiveUtils", "Logging", "Random", "Serialization"] -uuid = "8dfed614-e22c-5e08-85e1-65c5234f0b40" - -[[deps.TextWrap]] -git-tree-sha1 = "9250ef9b01b66667380cf3275b3f7488d0e25faf" -uuid = "b718987f-49a8-5099-9789-dcd902bef87d" -version = "1.0.1" - -[[deps.TranscodingStreams]] -deps = ["Random", "Test"] -git-tree-sha1 = "94f38103c984f89cf77c402f2a68dbd870f8165f" -uuid = "3bb67fe8-82b1-5028-8e26-92a6c54297fa" -version = "0.9.11" - -[[deps.UUIDs]] -deps = ["Random", "SHA"] -uuid = "cf7118a7-6976-5b1a-9a39-7adc72f591a4" - -[[deps.UnPack]] -git-tree-sha1 = "387c1f73762231e86e0c9c5443ce3b4a0a9a0c2b" -uuid = "3a884ed6-31ef-47d7-9d2a-63182c4928ed" -version = "1.0.2" - -[[deps.Unicode]] -uuid = "4ec0a83e-493e-50e2-b9ac-8f72acf5a8f5" - -[[deps.WriteVTK]] -deps = ["Base64", "CodecZlib", "FillArrays", "LightXML", "TranscodingStreams"] -git-tree-sha1 = "f50c47d715199601a54afdd5267f24c8174842ae" -uuid = "64499a7a-5c06-52f2-abe2-ccb03c286192" -version = "1.16.0" - -[[deps.XML2_jll]] -deps = ["Artifacts", "JLLWrappers", "Libdl", "Libiconv_jll", "Pkg", "Zlib_jll"] -git-tree-sha1 = "93c41695bc1c08c46c5899f4fe06d6ead504bb73" -uuid = "02c8fc9c-b97f-50b9-bbe4-9be30ff0a78a" -version = "2.10.3+0" - -[[deps.Zlib_jll]] -deps = ["Libdl"] -uuid = "83775a58-1f1d-513f-b197-d71354ab007a" -version = "1.2.12+3" - -[[deps.libblastrampoline_jll]] -deps = ["Artifacts", "Libdl", "OpenBLAS_jll"] -uuid = "8e850b90-86db-534c-a0d3-1478176c7d93" -version = "5.1.1+0" - -[[deps.nghttp2_jll]] -deps = ["Artifacts", "Libdl"] -uuid = "8e850ede-7688-5339-a07c-302acd2aaf8d" -version = "1.48.0+0" - -[[deps.p7zip_jll]] -deps = ["Artifacts", "Libdl"] -uuid = "3f19e933-33d8-53b3-aaab-bd5110c3b7a0" -version = "17.4.0+0" From ffa839aa5834931adeedf4c023ba0d70912cc21a Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 3 Mar 2023 12:44:03 +1100 Subject: [PATCH 87/95] Bugfix: Edge-based patch-based smoothers When deciding whether a facet belongs to the patch boundary, the serial version was not correctly treating interface facets (which are neither marked as boundary nor have any neighboring cell not belonging to the patch). 
--- .../mpi/PatchDecompositions.jl | 38 ++- src/PatchBasedSmoothers/mpi/PatchFESpaces.jl | 4 +- .../seq/PatchDecompositions.jl | 46 ++-- src/PatchBasedSmoothers/seq/PatchFESpaces.jl | 6 +- test/mpi/GMGLinearSolversHDivRTTests.jl | 4 +- .../DistributedPatchFESpacesDebuggingTests.jl | 223 ++++++++++++++++++ test/seq/DistributedPatchFESpacesTests.jl | 20 +- 7 files changed, 307 insertions(+), 34 deletions(-) create mode 100644 test/seq/DistributedPatchFESpacesDebuggingTests.jl diff --git a/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl b/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl index 20b45e7c..36c5dcba 100644 --- a/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl +++ b/src/PatchBasedSmoothers/mpi/PatchDecompositions.jl @@ -9,10 +9,12 @@ GridapDistributed.local_views(a::DistributedPatchDecomposition) = a.patch_decomp function PatchDecomposition(model::GridapDistributed.DistributedDiscreteModel{Dc,Dp}; Dr=0, patch_boundary_style::PatchBoundaryStyle=PatchBoundaryExclude()) where {Dc,Dp} + mark_interface_facets!(model) patch_decompositions = map_parts(local_views(model)) do lmodel PatchDecomposition(lmodel; Dr=Dr, - patch_boundary_style=patch_boundary_style) + patch_boundary_style=patch_boundary_style, + boundary_tag_names=["boundary","interface"]) end A = typeof(patch_decompositions) B = typeof(model) @@ -46,3 +48,37 @@ function get_patch_root_dim(a::DistributedPatchDecomposition) end return patch_root_dim end + +function mark_interface_facets!(model::GridapDistributed.DistributedDiscreteModel{Dc,Dp}) where {Dc,Dp} + face_labeling = get_face_labeling(model) + topo = get_grid_topology(model) + + map_parts(local_views(face_labeling),local_views(topo)) do face_labeling, topo + tag_to_name = face_labeling.tag_to_name + tag_to_entities = face_labeling.tag_to_entities + d_to_dface_to_entity = face_labeling.d_to_dface_to_entity + + # Create new tag & entity + interface_entity = maximum(map(maximum,tag_to_entities)) + 1 + push!(tag_to_entities,[interface_entity]) + push!(tag_to_name,"interface") + + # Interface faces should also be interior + interior_tag = findfirst(x->(x=="interior"),tag_to_name) + push!(tag_to_entities[interior_tag],interface_entity) + + # Select interface entities + boundary_tag = findfirst(x->(x=="boundary"),tag_to_name) + boundary_entities = tag_to_entities[boundary_tag] + + f2c_map = Geometry.get_faces(topo,Dc-1,Dc) + num_cells_around_facet = map(length,f2c_map) + mx = maximum(num_cells_around_facet) + for (f,nf) in enumerate(num_cells_around_facet) + is_boundary = (d_to_dface_to_entity[Dc][f] ∈ boundary_entities) + if !is_boundary && (nf != mx) + d_to_dface_to_entity[Dc][f] = interface_entity + end + end + end +end diff --git a/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl index 24232d87..1f6614e9 100644 --- a/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl +++ b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl @@ -74,13 +74,15 @@ function prolongate!(x::PVector, exchange!(x) end +# x \in SingleFESpace +# y \in PatchFESpace function inject!(x::PVector, Ph::GridapDistributed.DistributedSingleFieldFESpace, y::PVector, w::PVector, w_sums::PVector) - exchange!(y) + #exchange!(y) map_parts(x.values,Ph.spaces,y.values,w.values,w_sums.values) do x,Ph,y,w,w_sums inject!(x,Ph,y,w,w_sums) end diff --git a/src/PatchBasedSmoothers/seq/PatchDecompositions.jl b/src/PatchBasedSmoothers/seq/PatchDecompositions.jl index df093840..913f0c3d 100644 --- a/src/PatchBasedSmoothers/seq/PatchDecompositions.jl +++ 
b/src/PatchBasedSmoothers/seq/PatchDecompositions.jl @@ -20,7 +20,8 @@ Gridap.Geometry.num_cells(a::PatchDecomposition) = a.patch_cells_overlapped_mesh function PatchDecomposition( model::DiscreteModel{Dc,Dp}; Dr=0, - patch_boundary_style::PatchBoundaryStyle=PatchBoundaryExclude()) where {Dc,Dp} + patch_boundary_style::PatchBoundaryStyle=PatchBoundaryExclude(), + boundary_tag_names::AbstractArray{String}=["boundary"]) where {Dc,Dp} Gridap.Helpers.@check 0 <= Dr <= Dc-1 grid = get_grid(model) @@ -41,12 +42,13 @@ function PatchDecomposition( patch_cells, patch_cells_overlapped_mesh) - generate_patch_boundary_faces!(model, - patch_cells_faces_on_boundary, + generate_patch_boundary_faces!(patch_cells_faces_on_boundary, + model, patch_cells, patch_cells_overlapped_mesh, patch_facets, - patch_boundary_style) + patch_boundary_style, + boundary_tag_names) return PatchDecomposition{Dc,Dp}(model, Dr, patch_cells, @@ -112,47 +114,45 @@ function allocate_cell_overlapped_mesh_lface(::Type{T}, return Gridap.Arrays.Table(data,ptrs) end -function generate_patch_boundary_faces!(model, - patch_cells_faces_on_boundary, +function generate_patch_boundary_faces!(patch_cells_faces_on_boundary, + model::DiscreteModel, patch_cells, patch_cells_overlapped_mesh, patch_facets, - patch_boundary_style) - Dc = num_cell_dims(model) - topology = get_grid_topology(model) - labeling = get_face_labeling(model) - num_patches = length(patch_cells.ptrs)-1 + patch_boundary_style, + boundary_tag_names) + num_patches = length(patch_cells.ptrs)-1 cache_patch_cells = array_cache(patch_cells) cache_patch_facets = array_cache(patch_facets) for patch = 1:num_patches current_patch_cells = getindex!(cache_patch_cells,patch_cells,patch) current_patch_facets = getindex!(cache_patch_facets,patch_facets,patch) generate_patch_boundary_faces!(patch_cells_faces_on_boundary, - Dc, - topology, - labeling, + model, patch, current_patch_cells, patch_cells_overlapped_mesh, current_patch_facets, - patch_boundary_style) + patch_boundary_style, + boundary_tag_names) end end function generate_patch_boundary_faces!(patch_cells_faces_on_boundary, - Dc, - topology, - face_labeling, + model::DiscreteModel{Dc}, patch, patch_cells, patch_cells_overlapped_mesh, patch_facets, - patch_boundary_style) + patch_boundary_style, + boundary_tag_names) where Dc + face_labeling = get_face_labeling(model) + topology = get_grid_topology(model) - boundary_tag = findfirst(x->(x=="boundary"),face_labeling.tag_to_name) - Gridap.Helpers.@check !isa(boundary_tag, Nothing) - boundary_entities = face_labeling.tag_to_entities[boundary_tag] + boundary_tags = findall(x -> (x ∈ boundary_tag_names), face_labeling.tag_to_name) + Gridap.Helpers.@check !isempty(boundary_tags) + boundary_entities = vcat(face_labeling.tag_to_entities[boundary_tags]...) 
# Cells facets Df = Dc-1 @@ -184,7 +184,7 @@ function generate_patch_boundary_faces!(patch_cells_faces_on_boundary, facet_at_global_boundary = (facet_entity ∈ boundary_entities) A = (facet_at_global_boundary) && (facet ∉ patch_facets) B = (patch_boundary_style isa PatchBoundaryExclude) && cell_not_in_patch_found - facet_at_patch_boundary = A || B + facet_at_patch_boundary = (A || B) if (facet_at_patch_boundary) diff --git a/src/PatchBasedSmoothers/seq/PatchFESpaces.jl b/src/PatchBasedSmoothers/seq/PatchFESpaces.jl index 36fc2395..9758e813 100644 --- a/src/PatchBasedSmoothers/seq/PatchFESpaces.jl +++ b/src/PatchBasedSmoothers/seq/PatchFESpaces.jl @@ -320,12 +320,12 @@ end function compute_weight_operators(Ph::PatchFESpace,Vh) w = Fill(1.0,num_free_dofs(Ph)) - w_sums = compute_partial_sums(Ph,w) + w_sums = compute_partial_sums(Ph,Vh,w) return w, w_sums end -function compute_partial_sums(Ph::PatchFESpace,x) - x_sums = zeros(num_free_dofs(Ph.Vh)) +function compute_partial_sums(Ph::PatchFESpace,Vh,x) + x_sums = zeros(num_free_dofs(Vh)) inject!(x_sums,Ph,x,Fill(1.0,num_free_dofs(Ph))) return x_sums end diff --git a/test/mpi/GMGLinearSolversHDivRTTests.jl b/test/mpi/GMGLinearSolversHDivRTTests.jl index f9513dd7..a982d8da 100644 --- a/test/mpi/GMGLinearSolversHDivRTTests.jl +++ b/test/mpi/GMGLinearSolversHDivRTTests.jl @@ -101,7 +101,7 @@ function main(parts, coarse_grid_partition, num_parts_x_level, num_refs_coarse, println("L2 error = ", e_l2) end - return history.iters, num_free_dofs(Vh) + return history.iters, num_free_dofs(Uh) end end @@ -112,7 +112,7 @@ if !MPI.Initialized() end # Parameters -order = 2 +order = 0 coarse_grid_partition = (2,2) num_refs_coarse = 2 diff --git a/test/seq/DistributedPatchFESpacesDebuggingTests.jl b/test/seq/DistributedPatchFESpacesDebuggingTests.jl new file mode 100644 index 00000000..ecf2202a --- /dev/null +++ b/test/seq/DistributedPatchFESpacesDebuggingTests.jl @@ -0,0 +1,223 @@ +module DistributedPatchFESpacesHDivTests + +using LinearAlgebra +using Test +using PartitionedArrays +using Gridap +using Gridap.Helpers +using Gridap.Arrays +using Gridap.Geometry +using Gridap.ReferenceFEs +using GridapDistributed +using FillArrays + +using GridapSolvers +import GridapSolvers.PatchBasedSmoothers as PBS + +#= +backend = SequentialBackend() +ranks = (1,2) +parts = get_part_ids(backend,ranks) +=# + +function run(parts) + domain = (0.0,1.0,0.0,1.0) + partition = (2,4) + model = CartesianDiscreteModel(parts,domain,partition) + + order = 0 + reffe = ReferenceFE(raviart_thomas,Float64,order) + Vh = TestFESpace(model,reffe) + PD = PBS.PatchDecomposition(model) + Ph = PBS.PatchFESpace(model,reffe,DivConformity(),PD,Vh) + + + # ---- Testing Prolongation and Injection ---- # + + w, w_sums = PBS.compute_weight_operators(Ph,Vh); + + xP = PVector(1.0,Ph.gids) + yP = PVector(0.0,Ph.gids) + x = PVector(1.0,Vh.gids) + y = PVector(0.0,Vh.gids) + + PBS.prolongate!(yP,Ph,x) + PBS.inject!(y,Ph,yP,w,w_sums) + @test x ≈ y + + PBS.inject!(x,Ph,xP,w,w_sums) + PBS.prolongate!(yP,Ph,x) + @test xP ≈ yP + + # ---- Assemble systems ---- # + + sol(x) = VectorValue(x[1],x[2]) + f(x) = VectorValue(2.0*x[2]*(1.0-x[1]*x[1]),2.0*x[1]*(1-x[2]*x[2])) + + α = 1.0 + biform(u,v,dΩ) = ∫(v⋅u)dΩ + ∫(α*divergence(v)⋅divergence(u))dΩ + liform(v,dΩ) = ∫(v⋅f)dΩ + + Ω = Triangulation(model) + dΩ = Measure(Ω,2*order+1) + a(u,v) = biform(u,v,dΩ) + l(v) = liform(v,dΩ) + + assembler = SparseMatrixAssembler(Vh,Vh) + Ah = assemble_matrix(a,assembler,Vh,Vh) + fh = assemble_vector(l,assembler,Vh) + + sol_h = 
solve(LUSolver(),Ah,fh) + + Ωₚ = Triangulation(PD) + dΩₚ = Measure(Ωₚ,2*order+1) + ap(u,v) = biform(u,v,dΩₚ) + lp(v) = liform(v,dΩₚ) + + assembler_P = SparseMatrixAssembler(Ph,Ph,FullyAssembledRows()) + Ahp = assemble_matrix(ap,assembler_P,Ph,Ph) + fhp = assemble_vector(lp,assembler_P,Ph) + + sol_hp = solve(LUSolver(),Ahp,fhp) + + # ---- Define solvers ---- # + + LU = LUSolver() + LUss = symbolic_setup(LU,Ahp) + LUns = numerical_setup(LUss,Ahp) + + M = PBS.PatchBasedLinearSolver(ap,Ph,Vh,LU) + Mss = symbolic_setup(M,Ah) + Mns = numerical_setup(Mss,Ah) + + R = RichardsonSmoother(M,10,1.0/3.0) + Rss = symbolic_setup(R,Ah) + Rns = numerical_setup(Rss,Ah) + + # ---- Manual solve using LU ---- # + + x1_mat = PVector(0.5,Ah.cols) + r1_mat = fh-Ah*x1_mat + exchange!(r1_mat) + + r1 = PVector(0.0,Vh.gids) + x1 = PVector(0.0,Vh.gids) + rp = PVector(0.0,Ph.gids) + xp = PVector(0.0,Ph.gids) + rp_mat = PVector(0.0,Ahp.cols) + xp_mat = PVector(0.0,Ahp.cols) + + copy!(r1,r1_mat) + exchange!(r1) + PBS.prolongate!(rp,Ph,r1) # OK + + copy!(rp_mat,rp) + exchange!(rp_mat) + solve!(xp_mat,LUns,rp_mat) + copy!(xp,xp_mat) # Some big numbers appear here.... + + w, w_sums = PBS.compute_weight_operators(Ph,Vh); + PBS.inject!(x1,Ph,xp,w,w_sums) # Problem here!! + copy!(x1_mat,x1) + + # ---- Same using the PatchBasedSmoother ---- # + + x2_mat = PVector(0.5,Ah.cols) + r2_mat = fh-Ah*x2_mat + exchange!(r2_mat) + solve!(x2_mat,Mns,r2_mat) + + # ---- Smoother inside Richardson + + x3_mat = PVector(0.5,Ah.cols) + r3_mat = fh-Ah*x3_mat + exchange!(r3_mat) + solve!(x3_mat,Rns,r3_mat) + exchange!(x3_mat) + + # Outputs + res = Dict{String,Any}() + res["sol_h"] = sol_h + res["sol_hp"] = sol_hp + + res["r1"] = r1 + res["x1"] = x1 + res["r1_mat"] = r1_mat + res["x1_mat"] = x1_mat + res["rp"] = rp + res["xp"] = xp + res["rp_mat"] = rp_mat + res["xp_mat"] = xp_mat + + res["w"] = w + res["w_sums"] = w_sums + + return model,PD,Ph,Vh,res +end + +backend = SequentialBackend() + +parts = get_part_ids(backend,(1,1)) +Ms,PDs,Phs,Vhs,res_single = run(parts); + +parts = get_part_ids(backend,(1,2)) +Mm,PDm,Phm,Vhm,res_multi = run(parts); + +println(repeat('#',80)) + +map_parts(local_views(Ms)) do model + cell_ids = get_cell_node_ids(model) + cell_coords = get_cell_coordinates(model) + display(reshape(cell_ids,length(cell_ids))) + display(reshape(cell_coords,length(cell_coords))) +end; +println(repeat('-',80)) + +cell_gids = get_cell_gids(Mm) +vertex_gids = get_face_gids(Mm,0) +edge_gids = get_face_gids(Mm,1) + +println(">>> Cell gids") +map_parts(cell_gids.partition) do p + println(transpose(p.lid_to_ohid)) +end; +println(repeat('-',80)) + +println(">>> Vertex gids") +map_parts(vertex_gids.partition) do p + println(transpose(p.lid_to_ohid)) +end; +println(repeat('-',80)) + +println(">>> Edge gids") +map_parts(edge_gids.partition) do p + println(transpose(p.lid_to_ohid)) +end; + +println(repeat('#',80)) + +map_parts(local_views(Phs)) do Ph + display(Ph.patch_cell_dofs_ids) +end; + +map_parts(local_views(Phm)) do Ph + display(Ph.patch_cell_dofs_ids) +end; + +println(repeat('#',80)) + +for key in keys(res_single) + val_s = res_single[key] + val_m = res_multi[key] + + println(">>> ", key) + map_parts(val_s.values) do v + println(transpose(v)) + end; + map_parts(val_m.owned_values) do v + println(transpose(v)) + end; + println(repeat('-',80)) +end + +end \ No newline at end of file diff --git a/test/seq/DistributedPatchFESpacesTests.jl b/test/seq/DistributedPatchFESpacesTests.jl index dc0a7dcc..18dc37c3 100644 --- 
a/test/seq/DistributedPatchFESpacesTests.jl +++ b/test/seq/DistributedPatchFESpacesTests.jl @@ -6,6 +6,7 @@ using PartitionedArrays using Gridap using Gridap.Helpers using Gridap.Geometry +using Gridap.ReferenceFEs using GridapDistributed using FillArrays @@ -20,13 +21,16 @@ domain = (0.0,1.0,0.0,1.0) partition = (2,4) model = CartesianDiscreteModel(parts,domain,partition) -order = 1 -reffe = ReferenceFE(lagrangian,Float64,order) +# order = 1 +# reffe = ReferenceFE(lagrangian,Float64,order) +order = 0 +reffe = ReferenceFE(raviart_thomas,Float64,order) Vh = TestFESpace(model,reffe) -PD = PBS.PatchDecomposition(model)#,patch_boundary_style=PBS.PatchBoundaryInclude()) -Ph = PBS.PatchFESpace(model,reffe,Gridap.ReferenceFEs.H1Conformity(),PD,Vh) +PD = PBS.PatchDecomposition(model) +Ph = PBS.PatchFESpace(model,reffe,DivConformity(),PD,Vh) # ---- Testing Prolongation and Injection ---- # + w, w_sums = PBS.compute_weight_operators(Ph,Vh); xP = PVector(1.0,Ph.gids) @@ -42,7 +46,9 @@ PBS.inject!(x,Ph,xP,w,w_sums) PBS.prolongate!(yP,Ph,x) @test xP ≈ yP + # ---- Assemble systems ---- # + Ω = Triangulation(model) dΩ = Measure(Ω,2*order+1) a(u,v) = ∫(v⋅u)*dΩ @@ -63,7 +69,9 @@ assembler_P = SparseMatrixAssembler(Ph,Ph) Ahp = assemble_matrix(ap,assembler_P,Ph,Ph) fhp = assemble_vector(lp,assembler_P,Ph) + # ---- Define solvers ---- # + LU = LUSolver() LUss = symbolic_setup(LU,Ahp) LUns = numerical_setup(LUss,Ahp) @@ -76,6 +84,7 @@ R = RichardsonSmoother(M,10,1.0/3.0) Rss = symbolic_setup(R,Ah) Rns = numerical_setup(Rss,Ah) + # ---- Manual solve using LU ---- # x1_mat = PVector(0.5,Ah.cols) @@ -101,6 +110,7 @@ w, w_sums = PBS.compute_weight_operators(Ph,Vh); PBS.inject!(x1,Ph,xp,w,w_sums) copy!(x1_mat,x1) + # ---- Same using the PatchBasedSmoother ---- # x2_mat = PVector(0.5,Ah.cols) @@ -108,7 +118,9 @@ r2_mat = fh-Ah*x2_mat exchange!(r2_mat) solve!(x2_mat,Mns,r2_mat) + # ---- Smoother inside Richardson + x3_mat = PVector(0.5,Ah.cols) r3_mat = fh-Ah*x3_mat exchange!(r3_mat) From fffaeaf64c71f9b5ec687a3dca994b160a0ad558 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 3 Mar 2023 14:57:36 +1100 Subject: [PATCH 88/95] Small bugfix --- src/MultilevelTools/GridapDistributedExtensions.jl | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/MultilevelTools/GridapDistributedExtensions.jl b/src/MultilevelTools/GridapDistributedExtensions.jl index b7e163a0..56fa3264 100644 --- a/src/MultilevelTools/GridapDistributedExtensions.jl +++ b/src/MultilevelTools/GridapDistributedExtensions.jl @@ -12,6 +12,10 @@ function Gridap.CellData.Measure(tt::GridapDistributed.DistributedTriangulation{ return GridapDistributed.DistributedMeasure(measures) end +function GridapDistributed.get_parts(x::GridapDistributed.DistributedFESpace) + PartitionedArrays.get_part_ids(local_views(x)) +end + # change_parts function change_parts(x::Union{AbstractPData,Nothing}, new_parts; default=nothing) From c0134382a99d3d89f3a72b2cd02020e372c0fc26 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 3 Mar 2023 16:57:19 +1100 Subject: [PATCH 89/95] Small bugfix in tests --- test/mpi/RefinementToolsTests.jl | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/test/mpi/RefinementToolsTests.jl b/test/mpi/RefinementToolsTests.jl index 235d9e6e..06a02165 100644 --- a/test/mpi/RefinementToolsTests.jl +++ b/test/mpi/RefinementToolsTests.jl @@ -32,15 +32,17 @@ function run(parts,num_parts_x_level,coarse_grid_partition,num_refs_coarse) cparts = get_level_parts(mh,lev+1) if i_am_in(cparts) + model_h = 
get_model_before_redist(mh,lev) Vh = get_fe_space_before_redist(tests,lev) Uh = get_fe_space_before_redist(trials,lev) - Ωh = get_triangulation(Uh,get_model_before_redist(mh,lev)) + Ωh = get_triangulation(model_h) dΩh = Measure(Ωh,quad_order) uh = interpolate(sol,Uh) + model_H = get_model(mh,lev+1) VH = get_fe_space(tests,lev+1) UH = get_fe_space(trials,lev+1) - ΩH = get_triangulation(UH,get_model(mh,lev+1)) + ΩH = get_triangulation(model_H) dΩH = Measure(ΩH,quad_order) uH = interpolate(sol,UH) dΩhH = Measure(ΩH,Ωh,quad_order) From 5d7ce2101bc138cac0fd03f9da471dcead8b2c81 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 3 Mar 2023 22:29:39 +1030 Subject: [PATCH 90/95] Minor bugfix --- src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl | 2 +- test/seq/PatchLinearSolverTests.jl | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl b/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl index 9178178d..867f0f5d 100644 --- a/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl +++ b/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl @@ -100,7 +100,7 @@ function Gridap.Algebra.solve!(x::AbstractVector,ns::PatchBasedSmootherNumerical rp, dxp = caches prolongate!(rp,Ph,r) - solve!(dxp,nsAp,rp) + solve!(dxp,Ap_ns,rp) inject!(x,Ph,dxp,w,w_sums) return x diff --git a/test/seq/PatchLinearSolverTests.jl b/test/seq/PatchLinearSolverTests.jl index 015e5134..5df48824 100644 --- a/test/seq/PatchLinearSolverTests.jl +++ b/test/seq/PatchLinearSolverTests.jl @@ -82,7 +82,7 @@ module PatchLinearSolverTests A,b = compute_matrix_vector(model,Vh) x = test_smoother(PD,Ph,Vh,A,b) - parts = get_part_ids(sequential,(1,1)) + parts = get_part_ids(SequentialBackend(),(1,1)) dmodel = CartesianDiscreteModel(parts,domain,partition) dPD,dPh,dxh,dVh = returns_PD_Ph_xh_Vh(dmodel); dA,db = compute_matrix_vector(dmodel,dVh); From 226dda743bf4181f557eae0ad31dd8646a7e06bf Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 10 Mar 2023 12:31:08 +1100 Subject: [PATCH 91/95] Export PatchBasedSmoothers --- src/GridapSolvers.jl | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/src/GridapSolvers.jl b/src/GridapSolvers.jl index 0317137a..988c541f 100644 --- a/src/GridapSolvers.jl +++ b/src/GridapSolvers.jl @@ -6,6 +6,7 @@ module GridapSolvers using GridapSolvers.MultilevelTools using GridapSolvers.LinearSolvers + using GridapSolvers.PatchBasedSmoothers # MultilevelTools export get_parts, generate_level_parts, generate_subparts @@ -27,4 +28,9 @@ module GridapSolvers export RichardsonSmoother export GMGLinearSolver + # PatchBasedSmoothers + export PatchDecomposition + export PatchFESpace + export PatchBasedLinearSolver + end From f5f1808520b20ceed6fb0715fb1c74a9dafbd1d7 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 10 Mar 2023 15:17:48 +1100 Subject: [PATCH 92/95] Optimized injection for patch-based fespaces --- src/PatchBasedSmoothers/seq/PatchFESpaces.jl | 143 ++++++++++++------- 1 file changed, 88 insertions(+), 55 deletions(-) diff --git a/src/PatchBasedSmoothers/seq/PatchFESpaces.jl b/src/PatchBasedSmoothers/seq/PatchFESpaces.jl index 9758e813..55b6e734 100644 --- a/src/PatchBasedSmoothers/seq/PatchFESpaces.jl +++ b/src/PatchBasedSmoothers/seq/PatchFESpaces.jl @@ -3,6 +3,7 @@ struct PatchFESpace <: Gridap.FESpaces.SingleFieldFESpace patch_cell_dofs_ids :: Gridap.Arrays.Table Vh :: Gridap.FESpaces.SingleFieldFESpace patch_decomposition :: PatchDecomposition + dof_to_pdof :: Gridap.Arrays.Table end # INPUT @@ -68,7 +69,10 @@ function 
PatchFESpace(model::DiscreteModel, cell_conformity, patches_mask) - return PatchFESpace(num_dofs,patch_cell_dofs_ids,Vh,patch_decomposition) + dof_to_pdof = allocate_dof_to_pdof(Vh,patch_decomposition,patch_cell_dofs_ids) + generate_dof_to_pdof!(dof_to_pdof,Vh,patch_decomposition,patch_cell_dofs_ids) + + return PatchFESpace(num_dofs,patch_cell_dofs_ids,Vh,patch_decomposition,dof_to_pdof) end Gridap.FESpaces.get_dof_value_type(a::PatchFESpace) = Gridap.FESpaces.get_dof_value_type(a.Vh) @@ -106,7 +110,7 @@ function allocate_patch_cell_dofs_ids(num_cells_overlapped_mesh,cell_patches,cel end end - Gridap.Helpers.@check num_cells_overlapped_mesh+1 == gcell_overlapped_mesh + @check num_cells_overlapped_mesh+1 == gcell_overlapped_mesh data = Vector{Int}(undef,ptrs[end]-1) return Gridap.Arrays.Table(data,ptrs) end @@ -159,9 +163,6 @@ function generate_patch_cell_dofs_ids!(patch_cell_dofs_ids, free_dofs_offset=1, mask=false) - patch_global_space_cell_dofs_ids= - lazy_map(Broadcasting(Reindex(global_space_cell_dofs_ids)),patch_cells) - o = patch_cells_overlapped_mesh.ptrs[patch] if mask for lpatch_cell = 1:length(patch_cells) @@ -186,8 +187,9 @@ function generate_patch_cell_dofs_ids!(patch_cell_dofs_ids, cells_d_faces = Gridap.Geometry.get_faces(topology,Dc,d) cell_d_face = cells_d_faces[patch_cell] + # 1) DoFs belonging to faces (Df < Dc) for (lf,f) in enumerate(cell_d_face) - # If current face is on the patch boundary + # A) If current face is on the patch boundary if (patch_cells_faces_on_boundary[d+1][cell_overlapped_mesh][lf]) # assign negative indices to DoFs owned by face for ldof in cell_conformity.ctype_lface_own_ldofs[ctype][face_offset+lf] @@ -195,6 +197,7 @@ function generate_patch_cell_dofs_ids!(patch_cell_dofs_ids, current_patch_cell_dofs_ids[ldof] = -1 end else + # B) If current face is not in patch boundary, # rely on the existing glued info (available at global_space_cell_dof_ids) # (we will need a Dict{Int,Int} to hold the correspondence among global # space and patch cell dofs IDs) @@ -217,7 +220,7 @@ function generate_patch_cell_dofs_ids!(patch_cell_dofs_ids, face_offset += cell_conformity.d_ctype_num_dfaces[d+1][ctype] end - # Interior DoFs + # 2) Interior DoFs for ldof in cell_conformity.ctype_lface_own_ldofs[ctype][face_offset+1] current_patch_cell_dofs_ids[ldof] = free_dofs_offset free_dofs_offset += 1 @@ -227,6 +230,68 @@ function generate_patch_cell_dofs_ids!(patch_cell_dofs_ids, return free_dofs_offset end +function allocate_dof_to_pdof(Vh,PD,patch_cell_dofs_ids) + touched = Dict{Int,Bool}() + cell_mesh_overlapped = 1 + cache_patch_cells = array_cache(PD.patch_cells) + cell_dof_ids = get_cell_dof_ids(Vh) + cache_cell_dof_ids = array_cache(cell_dof_ids) + + ptrs = fill(0,num_free_dofs(Vh)+1) + for patch = 1:length(PD.patch_cells) + current_patch_cells = getindex!(cache_patch_cells,PD.patch_cells,patch) + for cell in current_patch_cells + current_cell_dof_ids = getindex!(cache_cell_dof_ids,cell_dof_ids,cell) + s = patch_cell_dofs_ids.ptrs[cell_mesh_overlapped] + e = patch_cell_dofs_ids.ptrs[cell_mesh_overlapped+1]-1 + current_patch_cell_dof_ids = view(patch_cell_dofs_ids.data,s:e) + for (dof,pdof) in zip(current_cell_dof_ids,current_patch_cell_dof_ids) + if pdof > 0 && !(dof ∈ keys(touched)) + touched[dof] = true + ptrs[dof+1] += 1 + end + end + cell_mesh_overlapped += 1 + end + empty!(touched) + end + PartitionedArrays.length_to_ptrs!(ptrs) + + data = fill(0,ptrs[end]-1) + return Gridap.Arrays.Table(data,ptrs) +end + +function 
generate_dof_to_pdof!(dof_to_pdof,Vh,PD,patch_cell_dofs_ids) + touched = Dict{Int,Bool}() + cell_mesh_overlapped = 1 + cache_patch_cells = array_cache(PD.patch_cells) + cell_dof_ids = get_cell_dof_ids(Vh) + cache_cell_dof_ids = array_cache(cell_dof_ids) + + ptrs = dof_to_pdof.ptrs + data = dof_to_pdof.data + local_ptrs = fill(Int32(0),num_free_dofs(Vh)) + for patch = 1:length(PD.patch_cells) + current_patch_cells = getindex!(cache_patch_cells,PD.patch_cells,patch) + for cell in current_patch_cells + current_cell_dof_ids = getindex!(cache_cell_dof_ids,cell_dof_ids,cell) + s = patch_cell_dofs_ids.ptrs[cell_mesh_overlapped] + e = patch_cell_dofs_ids.ptrs[cell_mesh_overlapped+1]-1 + current_patch_cell_dof_ids = view(patch_cell_dofs_ids.data,s:e) + for (dof,pdof) in zip(current_cell_dof_ids,current_patch_cell_dof_ids) + if pdof > 0 && !(dof ∈ keys(touched)) + touched[dof] = true + idx = ptrs[dof] + local_ptrs[dof] + @check idx < ptrs[dof+1] + data[idx] = pdof + local_ptrs[dof] += 1 + end + end + cell_mesh_overlapped += 1 + end + empty!(touched) + end +end # x \in PatchFESpace # y \in SingleFESpace @@ -261,60 +326,28 @@ function inject!(x,Ph::PatchFESpace,y) end function inject!(x,Ph::PatchFESpace,y,w) - touched = Dict{Int,Bool}() - cell_mesh_overlapped = 1 - cache_patch_cells = array_cache(Ph.patch_decomposition.patch_cells) - cell_dof_ids = get_cell_dof_ids(Ph.Vh) - cache_cell_dof_ids = array_cache(cell_dof_ids) - - fill!(x,0.0) - for patch = 1:length(Ph.patch_decomposition.patch_cells) - current_patch_cells = getindex!(cache_patch_cells, - Ph.patch_decomposition.patch_cells, - patch) - for cell in current_patch_cells - current_cell_dof_ids = getindex!(cache_cell_dof_ids,cell_dof_ids,cell) - s = Ph.patch_cell_dofs_ids.ptrs[cell_mesh_overlapped] - e = Ph.patch_cell_dofs_ids.ptrs[cell_mesh_overlapped+1]-1 - current_patch_cell_dof_ids = view(Ph.patch_cell_dofs_ids.data,s:e) - for (dof,pdof) in zip(current_cell_dof_ids,current_patch_cell_dof_ids) - if pdof > 0 && !(dof ∈ keys(touched)) - touched[dof] = true - x[dof] += y[pdof] * w[pdof] - end - end - cell_mesh_overlapped += 1 + dof_to_pdof = Ph.dof_to_pdof + cache = array_cache(dof_to_pdof) + + for dof in 1:length(dof_to_pdof) + x[dof] = 0.0 + pdofs = getindex!(cache,dof_to_pdof,dof) + for pdof in pdofs + x[dof] += y[pdof] * w[pdof] end - empty!(touched) end end function inject!(x,Ph::PatchFESpace,y,w,w_sums) - touched = Dict{Int,Bool}() - cell_mesh_overlapped = 1 - cache_patch_cells = array_cache(Ph.patch_decomposition.patch_cells) - cell_dof_ids = get_cell_dof_ids(Ph.Vh) - cache_cell_dof_ids = array_cache(cell_dof_ids) - - fill!(x,0.0) - for patch = 1:length(Ph.patch_decomposition.patch_cells) - current_patch_cells = getindex!(cache_patch_cells, - Ph.patch_decomposition.patch_cells, - patch) - for cell in current_patch_cells - current_cell_dof_ids = getindex!(cache_cell_dof_ids,cell_dof_ids,cell) - s = Ph.patch_cell_dofs_ids.ptrs[cell_mesh_overlapped] - e = Ph.patch_cell_dofs_ids.ptrs[cell_mesh_overlapped+1]-1 - current_patch_cell_dof_ids = view(Ph.patch_cell_dofs_ids.data,s:e) - for (dof,pdof) in zip(current_cell_dof_ids,current_patch_cell_dof_ids) - if pdof > 0 && !(dof ∈ keys(touched)) - touched[dof] = true - x[dof] += y[pdof] * w[pdof] / w_sums[dof] - end - end - cell_mesh_overlapped += 1 + dof_to_pdof = Ph.dof_to_pdof + cache = array_cache(dof_to_pdof) + + for dof in 1:length(dof_to_pdof) + x[dof] = 0.0 + pdofs = getindex!(cache,dof_to_pdof,dof) + for pdof in pdofs + x[dof] += y[pdof] * w[pdof] / w_sums[dof] end - empty!(touched) end end 
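The change above is worth a gloss: dof_to_pdof is a CSR-style Gridap.Arrays.Table mapping each free dof of Vh to the interior patch dofs that overlap it, so injection reduces to one linear sweep over the table's ptrs/data arrays instead of re-walking every patch cell with a Dict of touched dofs. What follows is a minimal standalone sketch with invented toy data — the names mirror the patch, but none of it is the actual Gridap API:

    # Toy CSR map: global dof 1 -> patch dofs (1,3), dof 2 -> (2,), dof 3 -> (4,5).
    ptrs = [1, 3, 4, 6]     # pdofs of `dof` live at data[ptrs[dof]:ptrs[dof+1]-1]
    data = [1, 3, 2, 4, 5]  # patch-dof ids, grouped by global dof

    # Weighted injection; semantically the same loop as the new inject! above.
    function inject_sketch!(x, ptrs, data, y, w, w_sums)
      for dof in 1:length(ptrs)-1
        x[dof] = 0.0
        for k in ptrs[dof]:ptrs[dof+1]-1
          pdof = data[k]
          x[dof] += y[pdof] * w[pdof] / w_sums[dof]
        end
      end
      return x
    end

    y      = [10.0, 20.0, 30.0, 40.0, 50.0]  # patch-space vector
    w      = ones(5)                          # per-pdof weights
    w_sums = [2.0, 1.0, 2.0]                  # overlap count per global dof
    x      = inject_sketch!(zeros(3), ptrs, data, y, w, w_sums)
    # x == [20.0, 20.0, 45.0]: overlapping patch contributions are averaged.

Building the transpose map costs one extra pass at PatchFESpace construction time, but that price is paid once, whereas inject! runs on every smoother application inside the multigrid cycle.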
From 2b18131d2bd545ad5d34f0f742ed969438132678 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 10 Mar 2023 15:36:28 +1100 Subject: [PATCH 93/95] Removed unnecessary caches in the solver The PatchFESpace does not have ghosts, which means that we do not have to worry about the ghost structure of the matrix vs fespace. We can thus remove some of the auxiliary vectors in the solver. --- src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl b/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl index 867f0f5d..171188dc 100644 --- a/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl +++ b/src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl @@ -63,13 +63,11 @@ end function _patch_based_solver_caches(Ph::GridapDistributed.DistributedSingleFieldFESpace, Vh::GridapDistributed.DistributedSingleFieldFESpace, Ap::PSparseMatrix) - rp_mat = _allocate_row_vector(Ap) - dxp_mat = _allocate_col_vector(Ap) rp = PVector(0.0,Ph.gids) dxp = PVector(0.0,Ph.gids) r = PVector(0.0,Vh.gids) x = PVector(0.0,Vh.gids) - return rp_mat, dxp_mat, rp, dxp, r, x + return rp, dxp, r, x end function _allocate_col_vector(A::AbstractMatrix) @@ -111,14 +109,12 @@ function Gridap.Algebra.solve!(x_mat::PVector,ns::PatchBasedSmootherNumericalSet Ph = ns.solver.Ph w, w_sums = weights - rp_mat, dxp_mat, rp, dxp, r, x = caches + rp, dxp, r, x = caches copy!(r,r_mat) exchange!(r) prolongate!(rp,Ph,r) - copy!(rp_mat,rp) - solve!(dxp_mat,Ap_ns,rp_mat) - copy!(dxp,dxp_mat) + solve!(dxp,Ap_ns,rp) inject!(x,Ph,dxp,w,w_sums) copy!(x_mat,x) From 442f008c543b6616a583eb100157451742591085 Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 10 Mar 2023 15:48:57 +1100 Subject: [PATCH 94/95] Further optimized patch-based injection --- src/PatchBasedSmoothers/seq/PatchFESpaces.jl | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/src/PatchBasedSmoothers/seq/PatchFESpaces.jl b/src/PatchBasedSmoothers/seq/PatchFESpaces.jl index 55b6e734..ef1ae92a 100644 --- a/src/PatchBasedSmoothers/seq/PatchFESpaces.jl +++ b/src/PatchBasedSmoothers/seq/PatchFESpaces.jl @@ -327,12 +327,13 @@ end function inject!(x,Ph::PatchFESpace,y,w) dof_to_pdof = Ph.dof_to_pdof - cache = array_cache(dof_to_pdof) + ptrs = dof_to_pdof.ptrs + data = dof_to_pdof.data for dof in 1:length(dof_to_pdof) x[dof] = 0.0 - pdofs = getindex!(cache,dof_to_pdof,dof) - for pdof in pdofs + for k in ptrs[dof]:ptrs[dof+1]-1 + pdof = data[k] x[dof] += y[pdof] * w[pdof] end end @@ -340,12 +341,13 @@ end function inject!(x,Ph::PatchFESpace,y,w,w_sums) dof_to_pdof = Ph.dof_to_pdof - cache = array_cache(dof_to_pdof) - + + ptrs = dof_to_pdof.ptrs + data = dof_to_pdof.data for dof in 1:length(dof_to_pdof) x[dof] = 0.0 - pdofs = getindex!(cache,dof_to_pdof,dof) - for pdof in pdofs + for k in ptrs[dof]:ptrs[dof+1]-1 + pdof = data[k] x[dof] += y[pdof] * w[pdof] / w_sums[dof] end end From 86a0af747a0972d96972c55db9557c6e7e6d180d Mon Sep 17 00:00:00 2001 From: JordiManyer Date: Fri, 10 Mar 2023 17:57:24 +1100 Subject: [PATCH 95/95] Optimized weights for distributed --- src/PatchBasedSmoothers/mpi/PatchFESpaces.jl | 33 ++++++-------------- src/PatchBasedSmoothers/seq/PatchFESpaces.jl | 12 +++---- 2 files changed, 16 insertions(+), 29 deletions(-) diff --git a/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl index 1f6614e9..5ba023b3 100644 --- a/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl +++ 
b/src/PatchBasedSmoothers/mpi/PatchFESpaces.jl @@ -14,32 +14,21 @@ function PatchFESpace(model::GridapDistributed.DistributedDiscreteModel, Vh::GridapDistributed.DistributedSingleFieldFESpace) root_gids = get_face_gids(model,get_patch_root_dim(patch_decomposition)) - function f(model,patch_decomposition,Vh,partition) + spaces = map_parts(local_views(model), + local_views(patch_decomposition), + local_views(Vh), + root_gids.partition) do model, patch_decomposition, Vh, partition patches_mask = fill(false,length(partition.lid_to_gid)) patches_mask[partition.hid_to_lid] .= true # Mask ghost patch roots - PatchFESpace(model, - reffe, - conformity, - patch_decomposition, - Vh; - patches_mask=patches_mask) + PatchFESpace(model,reffe,conformity,patch_decomposition,Vh;patches_mask=patches_mask) end - - spaces = map_parts(f, - local_views(model), - local_views(patch_decomposition), - local_views(Vh), - root_gids.partition) parts = get_parts(model) - nodofs = map_parts(spaces) do space - num_free_dofs(space) - end - ngdofs = sum(nodofs) - - first_gdof, _ = xscan(+,reduce,nodofs,init=1) + local_ndofs = map_parts(num_free_dofs,spaces) + global_ndofs = sum(local_ndofs) + first_gdof, _ = xscan(+,reduce,local_ndofs,init=1) # This PRange has no ghost dofs - gids = PRange(parts,ngdofs,nodofs,first_gdof) + gids = PRange(parts,global_ndofs,local_ndofs,first_gdof) return GridapDistributed.DistributedSingleFieldFESpace(spaces,gids,get_vector_type(Vh)) end @@ -98,9 +87,7 @@ function compute_weight_operators(Ph::GridapDistributed.DistributedSingleFieldFE w = PVector(0.0,Ph.gids) w_sums = PVector(0.0,Vh.gids) map_parts(w.values,w_sums.values,Ph.spaces) do w, w_sums, Ph - _w, _w_sums = compute_weight_operators(Ph,Ph.Vh) - w .= _w - w_sums .= _w_sums + compute_weight_operators!(Ph,Ph.Vh,w,w_sums) end # partial sums -> global sums diff --git a/src/PatchBasedSmoothers/seq/PatchFESpaces.jl b/src/PatchBasedSmoothers/seq/PatchFESpaces.jl index ef1ae92a..9c6358f6 100644 --- a/src/PatchBasedSmoothers/seq/PatchFESpaces.jl +++ b/src/PatchBasedSmoothers/seq/PatchFESpaces.jl @@ -355,12 +355,12 @@ end function compute_weight_operators(Ph::PatchFESpace,Vh) w = Fill(1.0,num_free_dofs(Ph)) - w_sums = compute_partial_sums(Ph,Vh,w) + w_sums = zeros(num_free_dofs(Vh)) + inject!(w_sums,Ph,w,Fill(1.0,num_free_dofs(Vh))) return w, w_sums end -function compute_partial_sums(Ph::PatchFESpace,Vh,x) - x_sums = zeros(num_free_dofs(Vh)) - inject!(x_sums,Ph,x,Fill(1.0,num_free_dofs(Ph))) - return x_sums -end +function compute_weight_operators!(Ph::PatchFESpace,Vh,w,w_sums) + fill!(w,1.0) + inject!(w_sums,Ph,w,Fill(1.0,num_free_dofs(Ph))) +end \ No newline at end of file
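With this last patch the weight machinery is fully precomputed: w is identically one on the patch dofs, and w_sums — obtained by injecting those unit weights — counts how many patch dofs overlap each global dof, so the weighted inject! makes the prolongate!/inject! pair act as a partition of unity. Below is a toy end-to-end check reusing the invented ptrs/data map from the sketch after patch 92; note that the library's prolongate! actually works cell-wise through patch_cell_dofs_ids, and is only emulated here through the transpose map:

    ptrs = [1, 3, 4, 6]
    data = [1, 3, 2, 4, 5]

    # Unit weights per patch dof; w_sums[dof] = overlap multiplicity of `dof`.
    function weights_sketch(ptrs, data, n_pdofs)
      w      = ones(n_pdofs)
      w_sums = zeros(length(ptrs)-1)
      for dof in 1:length(w_sums), k in ptrs[dof]:ptrs[dof+1]-1
        w_sums[dof] += w[data[k]]
      end
      return w, w_sums
    end

    # Scatter a global vector to all of its patch copies.
    function prolongate_sketch!(y, ptrs, data, x)
      for dof in 1:length(x), k in ptrs[dof]:ptrs[dof+1]-1
        y[data[k]] = x[dof]
      end
      return y
    end

    # Weighted injection, as in the optimized inject! of patch 94.
    function inject_sketch!(x, ptrs, data, y, w, w_sums)
      for dof in 1:length(x)
        x[dof] = 0.0
        for k in ptrs[dof]:ptrs[dof+1]-1
          x[dof] += y[data[k]] * w[data[k]] / w_sums[dof]
        end
      end
      return x
    end

    w, w_sums = weights_sketch(ptrs, data, 5)         # w_sums == [2.0, 1.0, 2.0]
    x  = [1.0, 2.0, 3.0]
    y  = prolongate_sketch!(zeros(5), ptrs, data, x)  # y == [1.0, 2.0, 1.0, 3.0, 3.0]
    x2 = inject_sketch!(zeros(3), ptrs, data, y, w, w_sums)
    @assert x2 == x   # injection averages the overlaps and recovers x exactly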