On the other hand, the current implementation is such that:
amartinhuertas committed Jun 22, 2020
1 parent 7b265f1 commit e98d5f4
Showing 4 changed files with 135 additions and 53 deletions.
39 changes: 31 additions & 8 deletions src/DistributedFESpaces.jl
@@ -21,16 +21,26 @@ end

function Gridap.FESpaces.FEFunction(dV::DistributedFESpace,x)
dfree_vals = x[dV.gids]
# IMPORTANT NOTE: we need to call collect below in order to duplicate the
# local portion of dfree_vals. When dfree_vals is of
# type MPIPETScDistributedVector, Julia's GC can destroy
# the vector on which the entries of dfree_vals are ultimately
# stored when it goes out of scope.
funs = DistributedData(dV.spaces,dfree_vals) do part, V, free_vals
FEFunction(V,free_vals)
FEFunction(V,collect(free_vals))
end
DistributedFEFunction(funs,x,dV)
end

function Gridap.FESpaces.EvaluationFunction(dV::DistributedFESpace,x)
dfree_vals = x[dV.gids]
# IMPORTANT NOTE: we need to call collect below in order to duplicate the
# local portion of dfree_vals. When dfree_vals is of
# type MPIPETScDistributedVector, Julia's GC can destroy
# the vector on which the entries of dfree_vals are ultimately
# stored when it goes out of scope.
funs = DistributedData(dV.spaces,dfree_vals) do part, V, free_vals
Gridap.FESpaces.EvaluationFunction(V,free_vals)
Gridap.FESpaces.EvaluationFunction(V,collect(free_vals))
end
DistributedFEFunction(funs,x,dV)
end
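
For context on the notes above, a minimal stand-alone sketch (not part of this commit; the plain array is an illustrative stand-in for the PETSc-managed storage) of why the local values are duplicated with collect before being passed to FEFunction/EvaluationFunction:

free_vals_view = view([1.0, 2.0, 3.0, 4.0], 2:3)  # stand-in for the PETSc-backed view of free values
owned_vals = collect(free_vals_view)              # independent, Julia-owned copy
@assert owned_vals == [2.0, 3.0]

In the real code the parent of the view is a buffer whose lifetime is tied to a PETSc vector, so keeping only the view could leave the FE function pointing at memory that has already been released.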
@@ -133,15 +143,26 @@ function DistributedFESpace(::Type{V}; model::DistributedDiscreteModel,kwargs...
ngids=lngids
end

function init_cell_to_owners(part,lspace,lid_to_owner)
num_dofs_x_cell = 0
do_on_parts(comm,spaces) do part, lspace
cell_dofs=get_cell_dofs(lspace)
num_dofs_x_cell=length(cell_dofs[1])
end

function init_cell_to_owners(part,cell_to_owners,lspace,lid_to_owner)
cell_to_lids = get_cell_dofs(lspace)
dlid_to_zero = zeros(eltype(lid_to_owner),num_dirichlet_dofs(lspace))
cell_to_owners = collect(LocalToGlobalPosNegArray(cell_to_lids,lid_to_owner,dlid_to_zero))
cell_to_owners
cell_to_owners_from = LocalToGlobalPosNegArray(cell_to_lids,lid_to_owner,dlid_to_zero)
for i=1:length(cell_to_owners_from)
for j=1:length(cell_to_owners_from[i])
cell_to_owners[i][j]=cell_to_owners_from[i][j]
end
end
end

part_to_cell_to_owners = DistributedVector{Vector{Int}}(
init_cell_to_owners,model.gids,spaces,part_to_lid_to_owner)
part_to_cell_to_owners = DistributedVector{Vector{Int}}(model.gids, num_dofs_x_cell)

do_on_parts(init_cell_to_owners,part_to_cell_to_owners,spaces,part_to_lid_to_owner)

exchange!(part_to_cell_to_owners)

@@ -162,7 +183,9 @@ function DistributedFESpace(::Type{V}; model::DistributedDiscreteModel,kwargs...
init_lid_to_gids,comm,part_to_lid_to_owner,offsets)

part_to_cell_to_gids = DistributedVector{Vector{Int}}(
init_cell_to_owners,model.gids,spaces,part_to_lid_to_gid)
model.gids,num_dofs_x_cell)

do_on_parts(init_cell_to_owners,part_to_cell_to_gids,spaces,part_to_lid_to_gid)

exchange!(part_to_cell_to_gids)
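
As a rough plain-Julia sketch (illustrative names only, not the GridapDistributed API), the reason init_cell_to_owners now fills its output entry by entry instead of returning a freshly collected array is that the destination entries are assumed to be fixed-size views into preallocated, ghosted storage, so they must be written in place:

buffer = zeros(Int, 6)                          # stand-in for the ghosted storage behind the DistributedVector
dest   = [view(buffer, 1:3), view(buffer, 4:6)] # fixed-size entries backed by buffer
src    = [[1, 2, 3], [4, 5, 6]]                 # values computed lazily in the real code
for i in 1:length(src), j in 1:length(src[i])
    dest[i][j] = src[i][j]                      # in-place write keeps the backing buffer
end
@assert buffer == [1, 2, 3, 4, 5, 6]

Replacing dest[i] wholesale would break the link to the backing buffer that exchange! relies on.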

105 changes: 69 additions & 36 deletions src/MPIPETScDistributedVectors.jl
@@ -24,52 +24,83 @@ get_part(
a::PETSc.Vec{Float64},
part::Integer) = a

function DistributedVector(
initializer::Function, indices::MPIPETScDistributedIndexSet, args...)
comm = get_comm(indices)
data = DistributedData(initializer, comm, args...)
part = data.part
if (eltype(part) <: Number)
indices,vecghost = _create_eltype_number_indices_ghost(part,indices)
else
@assert eltype(part) <: AbstractVector{<:Number}
indices,vecghost = _create_eltype_vector_number_indices_ghost(part,indices)
end
# function DistributedVector(
# initializer::Function, indices::MPIPETScDistributedIndexSet, args...)
# comm = get_comm(indices)
# data = DistributedData(initializer, comm, args...)
# part = data.part
# if (eltype(part) <: Number)
# indices,vecghost = _create_eltype_number_indices_ghost(part,indices)
# else
# @assert eltype(part) <: AbstractVector{<:Number}
# indices,vecghost = _create_eltype_vector_number_indices_ghost(part,indices)
# end
# MPIPETScDistributedVector(part,indices,vecghost)
# end
#
# function DistributedVector{T}(
# initializer::Function, indices::MPIPETScDistributedIndexSet, args...) where T <: Union{Number,AbstractVector{<:Number}}
# comm = get_comm(indices)
# data = DistributedData(initializer, comm, args...)
# part = data.part
# if (T <: Number)
# indices,vecghost = _create_eltype_number_indices_ghost(part,indices)
# else
# indices,vecghost = _create_eltype_vector_number_indices_ghost(part,indices)
# end
# MPIPETScDistributedVector(part,indices,vecghost)
# end

function DistributedVector{T}(
indices::MPIPETScDistributedIndexSet) where T <: Number
indices,vecghost = _create_eltype_number_indices_ghost(T,indices)
lvecghost = PETSc.LocalVector(vecghost, length(indices.parts.part.lid_to_owner))
a_reint=reinterpret(T,lvecghost.a)
part=reindex(a_reint,indices.app_to_petsc_locidx)
PETSc.restore(lvecghost)
MPIPETScDistributedVector(part,indices,vecghost)
end
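
The constructor above wraps the PETSc ghost vector without copying: it reinterprets the raw Float64 local storage as T and reindexes it into the application's local ordering, so the returned part aliases the PETSc buffer. A plain-Julia sketch of that idea, using view in place of Gridap's reindex and an ordinary array in place of the PETSc local vector:

raw    = zeros(Float64, 4)             # stand-in for the PETSc local array
as_int = reinterpret(Int64, raw)       # same memory, viewed as Int64 entries
perm   = [3, 1, 4, 2]                  # stand-in for app_to_petsc_locidx
part   = view(as_int, perm)            # application ordering, still no copy
part[1] = 7                            # writes directly into raw's memory
@assert as_int[3] == 7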

function DistributedVector{T}(
initializer::Function, indices::MPIPETScDistributedIndexSet, args...) where T <: Union{Number,AbstractVector{<:Number}}
comm = get_comm(indices)
data = DistributedData(initializer, comm, args...)
part = data.part
if (T <: Number)
indices,vecghost = _create_eltype_number_indices_ghost(part,indices)
else
indices,vecghost = _create_eltype_vector_number_indices_ghost(part,indices)
indices::MPIPETScDistributedIndexSet, length_entry :: Int ) where T <: AbstractVector{<:Number}
num_entries = length(indices.parts.part.lid_to_owner)
block_indices = indices
indices,vecghost = _create_eltype_vector_number_indices_ghost(T,length_entry,indices)
lvecghost = PETSc.LocalVector(vecghost, length(indices.parts.part.lid_to_owner))
a_reint=reinterpret(eltype(T), lvecghost.a)
ptrs=Vector{Int32}(undef,num_entries+1)
ptrs[1]=1
for i=1:num_entries
ptrs[i+1]=ptrs[i]+length_entry
end
TSUB=SubArray{eltype(T),1,typeof(a_reint),Tuple{UnitRange{Int64}},true}
part=TSUB[ view(a_reint,ptrs[i]:ptrs[i+1]-1) for i=1:num_entries ]
part=reindex(part,block_indices.app_to_petsc_locidx)
PETSc.restore(lvecghost)
MPIPETScDistributedVector(part,indices,vecghost)
end
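
In the vector-valued case the reinterpreted buffer is sliced into fixed-length SubArray entries through a ptrs offset table. A self-contained sketch of that slicing with a plain Julia array standing in for the PETSc local vector (names are illustrative):

flat = collect(1.0:6.0)        # stand-in for the reinterpreted local array
len  = 3                       # length_entry
n    = length(flat) ÷ len      # number of entries
ptrs = collect(1:len:(n * len + 1))
entries = [view(flat, ptrs[i]:ptrs[i+1]-1) for i in 1:n]
entries[2][1] = 42.0           # writes straight through to flat
@assert flat[4] == 42.0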


function _create_eltype_number_indices_ghost(
part::Vector{T},
eltype::Type{T},
indices::MPIPETScDistributedIndexSet,
) where {T<:Number}
@assert sizeof(eltype(part)) == sizeof(Float64)
@assert length(part) == length(indices.parts.part.lid_to_owner)
@assert sizeof(T) == sizeof(Float64)
vecghost = create_ghost_vector(indices)
indices, vecghost
end

function _create_eltype_vector_number_indices_ghost(
local_part::Vector{T},
eltype::Type{T},
length_entry::Int,
indices::MPIPETScDistributedIndexSet,
) where {T<:AbstractVector{<:Number}}
) where T<:AbstractVector{<:Number}

@assert sizeof(eltype(T)) == sizeof(Float64)
#println(T)
#println(eltype(T))
#@assert sizeof(eltype(T)) == sizeof(Float64)

l = length(local_part[1])
l = length_entry
n = l * indices.ngids

indices = DistributedIndexSet(get_comm(indices),n,indices,l,n) do part, indices, l, n
@@ -98,12 +129,9 @@ function unpack_all_entries!(a::MPIPETScDistributedVector{T}) where T
end

function Base.getindex(a::PETSc.Vec{Float64},indices::MPIPETScDistributedIndexSet)
result= DistributedVector(indices,indices) do part, indices
local_part=Vector{Float64}(undef,length(indices.lid_to_owner))
end
result= DistributedVector{Float64}(indices)
copy!(result.vecghost,a)
exchange!(result.vecghost)
unpack_all_entries!(result)
result
end

@@ -122,15 +150,15 @@ function exchange!(a::MPIPETScDistributedVector{T}) where T
comm_rank = MPI.Comm_rank(comm.comm)

# Pack data
_pack_local_entries!(a.vecghost, local_part, lid_to_owner, comm_rank)
#_pack_local_entries!(a.vecghost, local_part, lid_to_owner, comm_rank)

exchange!(a.vecghost)

# Unpack data
num_local = length(lid_to_owner)
lvec = PETSc.LocalVector(a.vecghost,num_local)
_unpack_ghost_entries!(eltype(local_part), local_part, lid_to_owner, comm_rank, app_to_petsc_locidx, lvec)
PETSc.restore(lvec)
# Unpack data
# num_local = length(lid_to_owner)
# lvec = PETSc.LocalVector(a.vecghost,num_local)
# _unpack_ghost_entries!(eltype(local_part), local_part, lid_to_owner, comm_rank, app_to_petsc_locidx, lvec)
# PETSc.restore(lvec)
end

function _pack_local_entries!(vecghost, local_part, lid_to_owner, comm_rank)
@@ -235,3 +263,8 @@ function _unpack_all_entries!(
end
end
end

function Base.setindex!(a::Gridap.Arrays.Reindexed,v,j::Integer)
i = a.j_to_i[j]
a.i_to_v[i]=v
end
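
The new setindex! overload makes a Gridap.Arrays.Reindexed writable by forwarding the assignment through its index map, so code that writes into the aliased local part (as in the updated tests) reaches the underlying PETSc storage. A tiny stand-alone illustration of the same indirection with plain vectors (hypothetical names):

i_to_v = zeros(Int, 5)                            # underlying values
j_to_i = [4, 2, 5]                                # reindexing map
setindex_through!(v, j) = (i_to_v[j_to_i[j]] = v)
setindex_through!(9, 1)                           # a[1] = 9 in the reindexed view
@assert i_to_v[4] == 9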
24 changes: 24 additions & 0 deletions src/SequentialDistributedVectors.jl
@@ -32,6 +32,30 @@ function DistributedVector(
SequentialDistributedVector(parts,indices)
end

function DistributedVector{T}(
indices::SequentialDistributedIndexSet) where T <: Number
comm = get_comm(indices)
data = DistributedData(comm,indices) do part, lindices
Vector{T}(undef, length(lindices.lid_to_owner))
end
parts = data.parts
SequentialDistributedVector(parts,indices)
end

function DistributedVector{T}(
indices::SequentialDistributedIndexSet, length_entry :: Int ) where T <: AbstractVector{<:Number}
comm = get_comm(indices)
data = DistributedData(comm,indices) do part, lindices
Vector{eltype(T)}[ Vector{eltype(T)}(undef,length_entry) for i=1:length(lindices.lid_to_owner) ]
end
parts = data.parts
SequentialDistributedVector(parts,indices)
end





function Base.getindex(a::SequentialDistributedVector,indices::SequentialDistributedIndexSet)
@notimplementedif a.indices !== indices
exchange!(a)
20 changes: 11 additions & 9 deletions test/MPIPETScDistributedVectors.jl
@@ -3,6 +3,7 @@ module MPIPETScDistributedVectors
using GridapDistributed
using Test
using MPI
using PETSc

comm = MPIPETScCommunicator()
@test num_parts(comm) == 2
@@ -16,31 +17,32 @@ indices = DistributedIndexSet(comm,n) do part
end
end

vec = DistributedVector(indices,indices) do part, indices
local_part=Vector{Int64}(undef,length(indices.lid_to_owner))
vec = DistributedVector{Int64}(indices)
do_on_parts(vec,indices) do part, local_part, indices
for i=1:length(local_part)
if (indices.lid_to_owner[i] == part)
local_part[i]=part
end
end
local_part
end
exchange!(vec)
@test vec.part == indices.parts.part.lid_to_owner

vec = DistributedVector(indices,indices) do part, indices
local_part=Vector{Vector{Int64}}(undef,length(indices.lid_to_owner))
vec = DistributedVector{Vector{Int64}}(indices,4)
do_on_parts(vec,indices) do part, local_part, indices
for i=1:length(local_part)
if (indices.lid_to_owner[i] == part)
local_part[i]=[part for j=1:4]
for j=1:length(local_part[i])
local_part[i][j] = part
end
else
local_part[i]=[0 for j=1:4]
for j=1:length(local_part[i])
local_part[i][j] = 0
end
end
end
local_part
end
exchange!(vec)

test_result = true
for i = 1:length(vec.part)
for j = 1:length(vec.part[i])
