Distributed Patch based smoothers #7

Merged: 17 commits, Mar 3, 2023
3 changes: 2 additions & 1 deletion Project.toml
@@ -1,6 +1,6 @@
authors = ["Santiago Badia <[email protected]>", "Jordi Manyer <[email protected]>", "Alberto F. Martin <[email protected]>", "Javier Principe <[email protected]>"]
name = "GridapSolvers"
uuid = "6d3209ee-5e3c-4db7-a716-942eb12ed534"
authors = ["Santiago Badia <[email protected]>", "Jordi Manyer <[email protected]>", "Alberto F. Martin <[email protected]>", "Javier Principe <[email protected]>"]
version = "0.1.0"

[deps]
@@ -23,6 +23,7 @@ PartitionedArrays = "0.2.15"
julia = "1.7"

[extras]
MPIPreferences = "3da0fdf6-3ccc-4f1b-acd9-58baa6c99267"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"

[targets]
13 changes: 9 additions & 4 deletions src/MultilevelTools/DistributedGridTransferOperators.jl
@@ -79,7 +79,7 @@ function _get_projection_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mode::
model_h = get_model_before_redist(mh,lev)
Uh = get_fe_space_before_redist(sh,lev)
Ωh = Triangulation(model_h)
fv_h = PVector(0.0,Uh.gids)
fv_h = zero_free_values(Uh)
dv_h = (mode == :solution) ? get_dirichlet_dof_values(Uh) : zero_dirichlet_values(Uh)

model_H = get_model(mh,lev+1)
@@ -92,10 +92,15 @@ function _get_projection_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mode::
aH(u,v) = ∫(v⋅u)*dΩH
lH(v,uh) = ∫(v⋅uh)*dΩhH
assem = SparseMatrixAssembler(UH,VH)

u_dir = (mode == :solution) ? interpolate(0.0,UH) : interpolate_everywhere(0.0,UH)

fv_H = zero_free_values(UH)
dv_H = zero_dirichlet_values(UH)
u0 = FEFunction(UH,fv_H,true) # Zero at free dofs
u00 = FEFunction(UH,fv_H,dv_H,true) # Zero everywhere

u_dir = (mode == :solution) ? u0 : u00
u,v = get_trial_fe_basis(UH), get_fe_basis(VH)
data = collect_cell_matrix_and_vector(UH,VH,aH(u,v),lH(v,0.0),u_dir)
data = collect_cell_matrix_and_vector(UH,VH,aH(u,v),lH(v,u00),u_dir)
AH,bH0 = assemble_matrix_and_vector(assem,data)
xH = PVector(0.0,AH.cols)
bH = copy(bH0)
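For context on the change above: `interpolate(0.0,UH)` is replaced by two explicit `FEFunction` constructions, one that is zero only at the free dofs and one that is zero everywhere, including the Dirichlet dofs. Below is a minimal serial sketch of the distinction, using a hypothetical model and space; the trailing `true` in the distributed calls above is a GridapDistributed-specific flag and is omitted here.

using Gridap

# Minimal sketch (hypothetical setup), not part of the PR.
model = CartesianDiscreteModel((0,1,0,1),(4,4))
reffe = ReferenceFE(lagrangian,Float64,1)
VH = TestFESpace(model,reffe;dirichlet_tags="boundary")
UH = TrialFESpace(VH,1.0)              # non-homogeneous Dirichlet value

fv = zero_free_values(UH)              # free dof values, all zero
dv = zero_dirichlet_values(UH)         # Dirichlet dof values, all zero

u0  = FEFunction(UH,fv)                # zero at free dofs, Dirichlet dofs keep the value stored in UH
u00 = FEFunction(UH,fv,dv)             # zero everywhere, Dirichlet dofs forced to zero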
4 changes: 4 additions & 0 deletions src/MultilevelTools/GridapDistributedExtensions.jl
@@ -12,6 +12,10 @@ function Gridap.CellData.Measure(tt::GridapDistributed.DistributedTriangulation{
return GridapDistributed.DistributedMeasure(measures)
end

function GridapDistributed.get_parts(x::GridapDistributed.DistributedFESpace)
PartitionedArrays.get_part_ids(local_views(x))
end

# change_parts

function change_parts(x::Union{AbstractPData,Nothing}, new_parts; default=nothing)
2 changes: 2 additions & 0 deletions src/PatchBasedSmoothers/PatchBasedSmoothers.jl
@@ -12,6 +12,8 @@ using Gridap.FESpaces
using PartitionedArrays
using GridapDistributed

using GridapSolvers.MultilevelTools

export PatchDecomposition
export PatchFESpace
export PatchBasedLinearSolver
71 changes: 61 additions & 10 deletions src/PatchBasedSmoothers/mpi/PatchDecompositions.jl
@@ -4,30 +4,81 @@ struct DistributedPatchDecomposition{Dc,Dp,A,B} <: GridapType
model::B
end

GridapDistributed.local_views(a::DistributedPatchDecomposition) = a.patch_decompositions

function PatchDecomposition(model::GridapDistributed.DistributedDiscreteModel{Dc,Dp};
Dr=0,
patch_boundary_style::PatchBoundaryStyle=PatchBoundaryExclude()) where {Dc,Dp}
patch_decompositions=map_parts(model.models) do lmodel
mark_interface_facets!(model)
patch_decompositions = map_parts(local_views(model)) do lmodel
PatchDecomposition(lmodel;
Dr=Dr,
patch_boundary_style=patch_boundary_style)
patch_boundary_style=patch_boundary_style,
boundary_tag_names=["boundary","interface"])
end
A=typeof(patch_decompositions)
B=typeof(model)
DistributedPatchDecomposition{Dc,Dp,A,B}(patch_decompositions,model)
A = typeof(patch_decompositions)
B = typeof(model)
return DistributedPatchDecomposition{Dc,Dp,A,B}(patch_decompositions,model)
end

function PatchDecomposition(mh::ModelHierarchy;kwargs...)
nlevs = num_levels(mh)
decompositions = Vector{DistributedPatchDecomposition}(undef,nlevs-1)
for lev in 1:nlevs-1
parts = get_level_parts(mh,lev)
if i_am_in(parts)
model = get_model(mh,lev)
decompositions[lev] = PatchDecomposition(model;kwargs...)
end
end
return decompositions
end

function Gridap.Geometry.Triangulation(a::DistributedPatchDecomposition)
trians=map_parts(a.patch_decompositions) do a
trians = map_parts(a.patch_decompositions) do a
Triangulation(a)
end
GridapDistributed.DistributedTriangulation(trians,a.model)
return GridapDistributed.DistributedTriangulation(trians,a.model)
end

function get_patch_root_dim(a::DistributedPatchDecomposition)
patch_root_dim=0
patch_root_dim = -1
map_parts(a.patch_decompositions) do patch_decomposition
patch_root_dim=patch_decomposition.Dr
patch_root_dim = patch_decomposition.Dr
end
return patch_root_dim
end

function mark_interface_facets!(model::GridapDistributed.DistributedDiscreteModel{Dc,Dp}) where {Dc,Dp}
face_labeling = get_face_labeling(model)
topo = get_grid_topology(model)

map_parts(local_views(face_labeling),local_views(topo)) do face_labeling, topo
tag_to_name = face_labeling.tag_to_name
tag_to_entities = face_labeling.tag_to_entities
d_to_dface_to_entity = face_labeling.d_to_dface_to_entity

# Create new tag & entity
interface_entity = maximum(map(maximum,tag_to_entities)) + 1
push!(tag_to_entities,[interface_entity])
push!(tag_to_name,"interface")

# Interface faces should also be interior
interior_tag = findfirst(x->(x=="interior"),tag_to_name)
push!(tag_to_entities[interior_tag],interface_entity)

# Select interface entities
boundary_tag = findfirst(x->(x=="boundary"),tag_to_name)
boundary_entities = tag_to_entities[boundary_tag]

f2c_map = Geometry.get_faces(topo,Dc-1,Dc)
num_cells_around_facet = map(length,f2c_map)
mx = maximum(num_cells_around_facet)
for (f,nf) in enumerate(num_cells_around_facet)
is_boundary = (d_to_dface_to_entity[Dc][f] ∈ boundary_entities)
if !is_boundary && (nf != mx)
d_to_dface_to_entity[Dc][f] = interface_entity
end
end
end
patch_root_dim
end
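In short, `mark_interface_facets!` adds an "interface" tag to the facets that lie on the boundary of each local (ghost-extended) mesh but not on the physical boundary, so that the patch constructor can exclude them exactly like physical boundary facets. A hedged usage sketch with a hypothetical distributed model follows; `PatchBoundaryExclude()` is the default from the constructor above and may need to be qualified with the PatchBasedSmoothers module if it is not exported.

using Gridap, PartitionedArrays, GridapDistributed
using GridapSolvers.PatchBasedSmoothers

# Hypothetical setup: 2D Cartesian model distributed over a 2x2 grid of parts.
parts = get_part_ids(SequentialBackend(),(2,2))
model = CartesianDiscreteModel(parts,(0,1,0,1),(8,8))

# Vertex-rooted patches (Dr=0). The constructor above first calls
# mark_interface_facets!, so facets tagged "boundary" or "interface"
# are excluded from every patch.
PD = PatchDecomposition(model;Dr=0,patch_boundary_style=PatchBoundaryExclude())
Ωp = Triangulation(PD)   # triangulation covering all patch cells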
77 changes: 54 additions & 23 deletions src/PatchBasedSmoothers/mpi/PatchFESpaces.jl
@@ -1,6 +1,6 @@
# Rationale behind distributed PatchFESpace:
# 1. Patches have an owner. Only owners compute subspace correction.
# If I am not the owner of a patch, all dofs in my patch become -1.
# If I am not the owner of a patch, all dofs in my patch become -1. [DONE]
# 2. Subspace correction on an owned patch may affect DoFs which
# are non-owned. These corrections should be sent to the owner
# process. I.e., NO -> O (reversed) communication. [PENDING]
@@ -12,7 +12,7 @@ function PatchFESpace(model::GridapDistributed.DistributedDiscreteModel,
conformity::Gridap.FESpaces.Conformity,
patch_decomposition::DistributedPatchDecomposition,
Vh::GridapDistributed.DistributedSingleFieldFESpace)
root_gids=get_face_gids(model,get_patch_root_dim(patch_decomposition))
root_gids = get_face_gids(model,get_patch_root_dim(patch_decomposition))

function f(model,patch_decomposition,Vh,partition)
patches_mask = fill(false,length(partition.lid_to_gid))
@@ -26,12 +26,12 @@ function PatchFESpace(model::GridapDistributed.DistributedDiscreteModel,
end

spaces = map_parts(f,
model.models,
patch_decomposition.patch_decompositions,
Vh.spaces,
local_views(model),
local_views(patch_decomposition),
local_views(Vh),
root_gids.partition)

parts = get_part_ids(model.models)
parts = get_parts(model)
nodofs = map_parts(spaces) do space
num_free_dofs(space)
end
@@ -43,38 +43,69 @@ function PatchFESpace(model::GridapDistributed.DistributedDiscreteModel,
return GridapDistributed.DistributedSingleFieldFESpace(spaces,gids,get_vector_type(Vh))
end

function PatchFESpace(mh::ModelHierarchy,
reffe::Tuple{<:Gridap.FESpaces.ReferenceFEName,Any,Any},
conformity::Gridap.FESpaces.Conformity,
patch_decompositions::AbstractArray{<:DistributedPatchDecomposition},
sh::FESpaceHierarchy)
nlevs = num_levels(mh)
levels = Vector{MultilevelTools.FESpaceHierarchyLevel}(undef,nlevs)
for lev in 1:nlevs-1
parts = get_level_parts(mh,lev)
if i_am_in(parts)
model = get_model(mh,lev)
space = MultilevelTools.get_fe_space(sh,lev)
decomp = patch_decompositions[lev]
patch_space = PatchFESpace(model,reffe,conformity,decomp,space)
levels[lev] = MultilevelTools.FESpaceHierarchyLevel(lev,nothing,patch_space)
end
end
return FESpaceHierarchy(mh,levels)
end
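The two hierarchy-level constructors only build objects on the levels in which the calling processor participates (the `i_am_in(parts)` guard). A short sketch of how they combine, assuming a `ModelHierarchy` `mh` and a matching `FESpaceHierarchy` `sh` are already available (hypothetical variable names; `H1Conformity()` is assumed to be the Gridap conformity object for continuous Lagrangian spaces):

# Hedged sketch: one patch decomposition and one patch FE space per
# coarsening level (levels 1 .. nlevs-1), following the constructors above.
reffe = ReferenceFE(lagrangian,Float64,1)

patch_decompositions = PatchDecomposition(mh)   # Vector{DistributedPatchDecomposition}
patch_spaces = PatchFESpace(mh,reffe,H1Conformity(),patch_decompositions,sh)  # FESpaceHierarchy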

# x \in PatchFESpace
# y \in SingleFESpace
function prolongate!(x::PVector,
Ph::GridapDistributed.DistributedSingleFieldFESpace,
y::PVector)
parts=get_part_ids(x.owned_values)
Gridap.Helpers.@notimplementedif num_parts(parts)!=1

map_parts(x.owned_values,Ph.spaces,y.owned_values) do x,Ph,y
map_parts(x.values,Ph.spaces,y.values) do x,Ph,y
prolongate!(x,Ph,y)
end
exchange!(x)
end

# x \in SingleFESpace
# y \in PatchFESpace
function inject!(x::PVector,
Ph::GridapDistributed.DistributedSingleFieldFESpace,
y::PVector,
w::PVector)
parts = get_part_ids(x.owned_values)
Gridap.Helpers.@notimplementedif num_parts(parts)!=1

map_parts(x.owned_values,Ph.spaces,y.owned_values,w.owned_values) do x,Ph,y,w
inject!(x,Ph,y,w)
w::PVector,
w_sums::PVector)

#exchange!(y)
map_parts(x.values,Ph.spaces,y.values,w.values,w_sums.values) do x,Ph,y,w,w_sums
inject!(x,Ph,y,w,w_sums)
end

# Exchange local contributions
assemble!(x)
exchange!(x) # TO CONSIDER: Is this necessary? Do we need ghosts for later?
return x
end

function compute_weight_operators(Ph::GridapDistributed.DistributedSingleFieldFESpace)
parts = get_part_ids(Ph.spaces)
Gridap.Helpers.@notimplementedif num_parts(parts) != 1

function compute_weight_operators(Ph::GridapDistributed.DistributedSingleFieldFESpace,Vh)
# Local weights and partial sums
w = PVector(0.0,Ph.gids)
map_parts(w.owned_values,Ph.spaces) do w, Ph
w .= compute_weight_operators(Ph)
w_sums = PVector(0.0,Vh.gids)
map_parts(w.values,w_sums.values,Ph.spaces) do w, w_sums, Ph
_w, _w_sums = compute_weight_operators(Ph,Ph.Vh)
w .= _w
w_sums .= _w_sums
end
return w

# partial sums -> global sums
assemble!(w_sums) # ghost -> owners
exchange!(w_sums) # repopulate ghosts with owner info

return w, w_sums
end
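The pair `(w, w_sums)` is what turns overlapping patch corrections into a single global update: `w` holds a weight per patch dof and `w_sums` the per-global-dof sum of those weights, so the injected value becomes a weighted average of the contributions from all patches touching a dof. A scalar sketch of the operation `inject!` performs is shown below, where `patch_dof_to_dof` is a hypothetical stand-in for the patch-to-global dof map stored inside the PatchFESpace.

# Hedged serial sketch of the weighted injection; y and w live on patch dofs,
# x and w_sums on global dofs.
function inject_sketch!(x,patch_dof_to_dof,y,w,w_sums)
  fill!(x,0.0)
  for (pdof,dof) in enumerate(patch_dof_to_dof)
    dof > 0 || continue            # dofs of non-owned patches are marked -1
    x[dof] += w[pdof]*y[pdof]      # accumulate weighted patch contributions
  end
  x ./= w_sums                     # average overlapping contributions
  return x
end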
93 changes: 71 additions & 22 deletions src/PatchBasedSmoothers/seq/PatchBasedLinearSolvers.jl
@@ -12,9 +12,10 @@
# (not 100% sure, to investigate)


struct PatchBasedLinearSolver{A} <: Gridap.Algebra.LinearSolver
struct PatchBasedLinearSolver{A,B} <: Gridap.Algebra.LinearSolver
bilinear_form :: Function
Ph :: A
Vh :: B
M :: Gridap.Algebra.LinearSolver
end

Expand All @@ -23,29 +24,52 @@ struct PatchBasedSymbolicSetup <: Gridap.Algebra.SymbolicSetup
end

function Gridap.Algebra.symbolic_setup(ls::PatchBasedLinearSolver,mat::AbstractMatrix)
PatchBasedSymbolicSetup(ls)
return PatchBasedSymbolicSetup(ls)
end

struct PatchBasedSmootherNumericalSetup{A,B,C,D,E} <: Gridap.Algebra.NumericalSetup
solver :: PatchBasedLinearSolver
Ap :: A
nsAp :: B
rp :: C
dxp :: D
w :: E
struct PatchBasedSmootherNumericalSetup{A,B,C,D} <: Gridap.Algebra.NumericalSetup
solver :: PatchBasedLinearSolver
Ap :: A
Ap_ns :: B
weights :: C
caches :: D
end

function Gridap.Algebra.numerical_setup(ss::PatchBasedSymbolicSetup,A::AbstractMatrix)
Ph = ss.solver.Ph
Ph, Vh = ss.solver.Ph, ss.solver.Vh
weights = compute_weight_operators(Ph,Vh)

# Assemble patch system
assembler = SparseMatrixAssembler(Ph,Ph)
Ap = assemble_matrix(ss.solver.bilinear_form,assembler,Ph,Ph)
solver = ss.solver.M
ssAp = symbolic_setup(solver,Ap)
nsAp = numerical_setup(ssAp,Ap)
rp = _allocate_row_vector(Ap)
dxp = _allocate_col_vector(Ap)
w = compute_weight_operators(Ph)
PatchBasedSmootherNumericalSetup(ss.solver,Ap,nsAp,rp,dxp,w)
Ap = assemble_matrix(ss.solver.bilinear_form,assembler,Ph,Ph)

# Patch system solver
Ap_solver = ss.solver.M
Ap_ss = symbolic_setup(Ap_solver,Ap)
Ap_ns = numerical_setup(Ap_ss,Ap)

# Caches
caches = _patch_based_solver_caches(Ph,Vh,Ap)

return PatchBasedSmootherNumericalSetup(ss.solver,Ap,Ap_ns,weights,caches)
end

function _patch_based_solver_caches(Ph::PatchFESpace,Vh::FESpace,Ap::AbstractMatrix)
rp = _allocate_row_vector(Ap)
dxp = _allocate_col_vector(Ap)
return rp, dxp
end

function _patch_based_solver_caches(Ph::GridapDistributed.DistributedSingleFieldFESpace,
Vh::GridapDistributed.DistributedSingleFieldFESpace,
Ap::PSparseMatrix)
rp_mat = _allocate_row_vector(Ap)
dxp_mat = _allocate_col_vector(Ap)
rp = PVector(0.0,Ph.gids)
dxp = PVector(0.0,Ph.gids)
r = PVector(0.0,Vh.gids)
x = PVector(0.0,Vh.gids)
return rp_mat, dxp_mat, rp, dxp, r, x
end

function _allocate_col_vector(A::AbstractMatrix)
@@ -69,9 +93,34 @@ function Gridap.Algebra.numerical_setup!(ns::PatchBasedSmootherNumericalSetup, A
end

function Gridap.Algebra.solve!(x::AbstractVector,ns::PatchBasedSmootherNumericalSetup,r::AbstractVector)
Ap, nsAp, rp, dxp, w = ns.Ap, ns.nsAp, ns.rp, ns.dxp, ns.w
Ap_ns, weights, caches = ns.Ap_ns, ns.weights, ns.caches

Ph = ns.solver.Ph
w, w_sums = weights
rp, dxp = caches

prolongate!(rp,Ph,r)
solve!(dxp,Ap_ns,rp)
inject!(x,Ph,dxp,w,w_sums)

return x
end

function Gridap.Algebra.solve!(x_mat::PVector,ns::PatchBasedSmootherNumericalSetup,r_mat::PVector)
Ap_ns, weights, caches = ns.Ap_ns, ns.weights, ns.caches

Ph = ns.solver.Ph
w, w_sums = weights
rp_mat, dxp_mat, rp, dxp, r, x = caches

copy!(r,r_mat)
exchange!(r)
prolongate!(rp,Ph,r)
copy!(rp_mat,rp)
solve!(dxp_mat,Ap_ns,rp_mat)
copy!(dxp,dxp_mat)
inject!(x,Ph,dxp,w,w_sums)
copy!(x_mat,x)

prolongate!(rp,ns.solver.Ph,r)
solve!(dxp,nsAp,rp)
inject!(x,ns.solver.Ph,dxp,w)
return x_mat
end
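Taken together, the distributed `solve!` above prolongates the residual to the patch space, solves all patch problems at once through the factorized patch matrix, and injects the weighted corrections back into the global space. A hedged end-to-end sketch of how the smoother is assembled and applied on a single level follows; `model`, `Vh`, `A` and `b` are placeholders, `H1Conformity()` is assumed as in the sketch above, and the constructor argument order follows the struct fields `bilinear_form, Ph, Vh, M` defined in this PR.

using Gridap, Gridap.Algebra
using GridapSolvers.PatchBasedSmoothers

# Hypothetical single-level usage as a smoother (assumes model, Vh, A, b exist).
PD    = PatchDecomposition(model)
reffe = ReferenceFE(lagrangian,Float64,1)
Ph    = PatchFESpace(model,reffe,H1Conformity(),PD,Vh)   # mirrors the constructor above

Ωp  = Triangulation(PD)
dΩp = Measure(Ωp,2)
ap(u,v) = ∫(∇(v)⋅∇(u))*dΩp         # patch-local counterpart of the global bilinear form

M  = PatchBasedLinearSolver(ap,Ph,Vh,LUSolver())
ns = numerical_setup(symbolic_setup(M,A),A)

x = zero_free_values(Vh)           # a PVector in the distributed case
solve!(x,ns,b)                     # one application of the patch-based smoother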