Skip to content

Commit

Permalink
Merge pull request #45 from gridap/residual-projectors
Browse files Browse the repository at this point in the history
Residual projectors
  • Loading branch information
JordiManyer authored Nov 20, 2023
2 parents 1f1c6d3 + 2beee76 commit b7b50a4
Show file tree
Hide file tree
Showing 15 changed files with 215 additions and 115 deletions.
21 changes: 17 additions & 4 deletions src/LinearSolvers/GMGLinearSolvers.jl
Original file line number Diff line number Diff line change
Expand Up @@ -68,7 +68,8 @@ struct GMGNumericalSetup{A,B,C,D,E} <: Gridap.Algebra.NumericalSetup
else
post_smoothers_caches = pre_smoothers_caches
end
coarsest_solver_cache = setup_coarsest_solver_cache(mh,coarsest_solver,smatrices)
#coarsest_solver_cache = setup_coarsest_solver_cache(mh,coarsest_solver,smatrices)
coarsest_solver_cache = coarse_solver_caches(mh,coarsest_solver,smatrices)

A = typeof(finest_level_cache)
B = typeof(pre_smoothers_caches)
Expand Down Expand Up @@ -109,6 +110,17 @@ function setup_smoothers_caches(mh::ModelHierarchy,smoothers::AbstractVector{<:L
return caches
end

# Build the numerical setup for the coarsest-level solver `s`, using the
# coarsest system matrix from `mats`. Only the processors that belong to the
# coarsest level's communicator build a cache; everyone else returns `nothing`.
function coarse_solver_caches(mh,s,mats)
  nlevs = num_levels(mh)
  parts = get_level_parts(mh,nlevs)
  # Processors outside the coarsest partition hold no matrix and get no cache.
  !i_am_in(parts) && return nothing
  Ac = mats[nlevs]
  return numerical_setup(symbolic_setup(s,Ac),Ac)
end

function setup_coarsest_solver_cache(mh::ModelHierarchy,coarsest_solver::LinearSolver,smatrices::Vector{<:AbstractMatrix})
cache = nothing
nlevs = num_levels(mh)
Expand Down Expand Up @@ -214,9 +226,10 @@ function apply_GMG_level!(lev::Integer,xh::Union{PVector,Nothing},rh::Union{PVec
if i_am_in(parts)
if (lev == num_levels(mh))
## Coarsest level
coarsest_solver = ns.solver.coarsest_solver
coarsest_solver_cache = ns.coarsest_solver_cache
solve_coarsest_level!(parts,coarsest_solver,xh,rh,coarsest_solver_cache)
#coarsest_solver = ns.solver.coarsest_solver
#coarsest_solver_cache = ns.coarsest_solver_cache
#solve_coarsest_level!(parts,coarsest_solver,xh,rh,coarsest_solver_cache)
solve!(xh, ns.coarsest_solver_cache, rh)
else
## General case
Ah = ns.solver.smatrices[lev]
Expand Down
12 changes: 6 additions & 6 deletions src/LinearSolvers/RichardsonSmoothers.jl
Original file line number Diff line number Diff line change
Expand Up @@ -48,12 +48,12 @@ function Gridap.Algebra.solve!(x::AbstractVector,ns::RichardsonSmootherNumerical

iter = 1
while iter <= ns.smoother.num_smooth_steps
solve!(dx,Mns,r)
dx .= ns.smoother.damping_factor .* dx
x .= x .+ dx
mul!(Adx, ns.A, dx)
r .= r .- Adx
iter += 1
solve!(dx,Mns,r)
dx .= ns.smoother.damping_factor .* dx
x .= x .+ dx
mul!(Adx, ns.A, dx)
r .= r .- Adx
iter += 1
end
end

Expand Down
29 changes: 0 additions & 29 deletions src/MultilevelTools/Algebra.jl

This file was deleted.

90 changes: 84 additions & 6 deletions src/MultilevelTools/DistributedGridTransferOperators.jl
Original file line number Diff line number Diff line change
Expand Up @@ -25,7 +25,8 @@ function ProlongationOperator(lev::Int,sh::FESpaceHierarchy,qdegree::Int;kwargs.
end

function DistributedGridTransferOperator(lev::Int,sh::FESpaceHierarchy,qdegree::Int,op_type::Symbol;
mode::Symbol=:solution,restriction_method::Symbol=:projection)
mode::Symbol=:solution,restriction_method::Symbol=:projection,
solver=LUSolver())
mh = sh.mh
@check lev < num_levels(mh)
@check op_type [:restriction, :prolongation]
Expand All @@ -35,13 +36,16 @@ function DistributedGridTransferOperator(lev::Int,sh::FESpaceHierarchy,qdegree::
# Refinement
if (op_type == :prolongation) || (restriction_method [:interpolation,:dof_mask])
cache_refine = _get_interpolation_cache(lev,sh,qdegree,mode)
else
elseif mode == :solution
cache_refine = _get_projection_cache(lev,sh,qdegree,mode)
else
cache_refine = _get_dual_projection_cache(lev,sh,qdegree,solver)
restriction_method = :dual_projection
end

# Redistribution
redist = has_redistribution(mh,lev)
cache_redist = _get_redistribution_cache(lev,sh,mode,op_type,cache_refine)
cache_redist = _get_redistribution_cache(lev,sh,mode,op_type,restriction_method,cache_refine)

cache = cache_refine, cache_redist
return DistributedGridTransferOperator(op_type,redist,restriction_method,sh,cache)
Expand Down Expand Up @@ -115,7 +119,38 @@ function _get_projection_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,mode::
return cache_refine
end

function _get_redistribution_cache(lev::Int,sh::FESpaceHierarchy,mode::Symbol,op_type::Symbol,cache_refine)
# Setup for the :dual_projection restriction method at level `lev`.
# On the processors belonging to the coarse level (`lev+1`), assembles the
# fine-level mass matrix Mh, prepares a solver cache for it via `solver`,
# and allocates the work vectors/measures needed to project fine residuals
# onto the coarse space. Processors outside the coarse partition receive a
# cache with trailing `nothing` entries (they only own the fine-level data).
function _get_dual_projection_cache(lev::Int,sh::FESpaceHierarchy,qdegree::Int,solver)
  mh = sh.mh
  cparts = get_level_parts(mh,lev+1)

  if i_am_in(cparts)
    model_h = get_model_before_redist(mh,lev)
    Uh = get_fe_space_before_redist(sh,lev)
    Ωh = Triangulation(model_h)
    dΩh = Measure(Ωh,qdegree)
    uh = FEFunction(Uh,zero_free_values(Uh),zero_dirichlet_values(Uh))

    model_H = get_model(mh,lev+1)
    UH = get_fe_space(sh,lev+1)
    ΩH = Triangulation(model_H)
    # Composite measure: integrate coarse-level cells against the fine triangulation.
    dΩhH = Measure(ΩH,Ωh,qdegree)

    # Fine-level mass matrix; NOTE(review): the scraped diff showed `(vu)` here —
    # restored to the standard Gridap mass bilinear form ∫(v⋅u)dΩ.
    Mh = assemble_matrix((u,v)->∫(v⋅u)*dΩh,Uh,Uh)
    Mh_ns = numerical_setup(symbolic_setup(solver,Mh),Mh)

    assem = SparseMatrixAssembler(UH,UH)
    rh = allocate_col_vector(Mh)
    cache_refine = model_h, Uh, UH, Mh_ns, rh, uh, assem, dΩhH
  else
    model_h = get_model_before_redist(mh,lev)
    Uh = get_fe_space_before_redist(sh,lev)
    cache_refine = model_h, Uh, nothing, nothing, nothing, nothing, nothing, nothing
  end

  return cache_refine
end

function _get_redistribution_cache(lev::Int,sh::FESpaceHierarchy,mode::Symbol,op_type::Symbol,restriction_method::Symbol,cache_refine)
mh = sh.mh
redist = has_redistribution(mh,lev)
if !redist
Expand All @@ -130,10 +165,14 @@ function _get_redistribution_cache(lev::Int,sh::FESpaceHierarchy,mode::Symbol,op
glue = mh.levels[lev].red_glue

if op_type == :prolongation
model_h, Uh, fv_h, dv_h = cache_refine
model_h, Uh, fv_h, dv_h, UH, fv_H, dv_H = cache_refine
cache_exchange = get_redistribute_free_values_cache(fv_h_red,Uh_red,fv_h,dv_h,Uh,model_h_red,glue;reverse=false)
elseif restriction_method == :projection
model_h, Uh, fv_h, dv_h, VH, AH, lH, xH, bH, bH0, assem = cache_refine
cache_exchange = get_redistribute_free_values_cache(fv_h,Uh,fv_h_red,dv_h_red,Uh_red,model_h,glue;reverse=true)
else
model_h, Uh, fv_h, dv_h = cache_refine
model_h, Uh, UH, Mh, rh, uh, assem, dΩhH = cache_refine
fv_h = isa(uh,Nothing) ? nothing : get_free_dof_values(uh)
cache_exchange = get_redistribute_free_values_cache(fv_h,Uh,fv_h_red,dv_h_red,Uh_red,model_h,glue;reverse=true)
end

Expand Down Expand Up @@ -307,4 +346,43 @@ function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransfer
end

return y
end

###############################################################

# Dual-projection restriction without redistribution: y = R*x.
# Solves the fine mass-matrix system Mh*rh = x, interprets the result as a
# fine FE function, and assembles its dual pairing against the coarse test
# basis into `y`.
function LinearAlgebra.mul!(y::PVector,A::DistributedGridTransferOperator{Val{:restriction},Val{false},Val{:dual_projection}},x::PVector)
  cache_refine, cache_redist = A.cache
  model_h, Uh, VH, Mh_ns, rh, uh, assem, dΩhH = cache_refine
  fv_h = get_free_dof_values(uh)

  # rh = Mh \ x, then load into the FE function `uh` and make ghosts consistent.
  solve!(rh,Mh_ns,x)
  copy!(fv_h,rh)
  consistent!(fv_h) |> fetch
  v = get_fe_basis(VH)
  # NOTE(review): the scraped diff showed `(vuh)` — restored to ∫(v⋅uh)*dΩhH,
  # the coarse-vs-fine dual pairing this operator assembles.
  assemble_vector!(y,assem,collect_cell_vector(VH,∫(v⋅uh)*dΩhH))

  return y
end

# Dual-projection restriction with redistribution: y = R*x.
# First redistributes the fine vector `x` from the fine partition to the
# (smaller) coarse partition, then — only on processors that own coarse data
# (`y !== nothing`) — performs the mass solve and assembles the dual pairing
# against the coarse test basis.
function LinearAlgebra.mul!(y::Union{PVector,Nothing},A::DistributedGridTransferOperator{Val{:restriction},Val{true},Val{:dual_projection}},x::PVector)
  cache_refine, cache_redist = A.cache
  model_h, Uh, VH, Mh_ns, rh, uh, assem, dΩhH = cache_refine
  fv_h_red, dv_h_red, Uh_red, model_h_red, glue, cache_exchange = cache_redist
  # `uh` is `nothing` on processors outside the coarse partition.
  fv_h = isa(uh,Nothing) ? nothing : get_free_dof_values(uh)

  # 1 - Redistribute from fine partition to coarse partition
  copy!(fv_h_red,x)
  consistent!(fv_h_red) |> fetch
  redistribute_free_values!(cache_exchange,fv_h,Uh,fv_h_red,dv_h_red,Uh_red,model_h,glue;reverse=true)

  # 2 - Solve f2c projection coarse partition
  if !isa(y,Nothing)
    solve!(rh,Mh_ns,fv_h)
    copy!(fv_h,rh)
    consistent!(fv_h) |> fetch
    v = get_fe_basis(VH)
    # NOTE(review): the scraped diff showed `(vuh)` — restored to ∫(v⋅uh)*dΩhH,
    # matching the non-redistributed dual-projection method above.
    assemble_vector!(y,assem,collect_cell_vector(VH,∫(v⋅uh)*dΩhH))
  end

  return y
end
2 changes: 0 additions & 2 deletions src/MultilevelTools/MultilevelTools.jl
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,6 @@ using GridapDistributed: redistribute_fe_function
using GridapDistributed: get_old_and_new_parts
using GridapDistributed: generate_subparts, local_views

export allocate_col_vector, allocate_row_vector
export change_parts, num_parts, i_am_in
export generate_level_parts, generate_subparts

Expand All @@ -38,7 +37,6 @@ export RestrictionOperator, ProlongationOperator
export setup_transfer_operators
export mul!

include("Algebra.jl")
include("SubpartitioningTools.jl")
include("GridapFixes.jl")
include("RefinementTools.jl")
Expand Down
4 changes: 2 additions & 2 deletions test/LinearSolvers/BlockDiagonalSmoothersTests.jl
Original file line number Diff line number Diff line change
Expand Up @@ -118,7 +118,7 @@ function main_driver(D,model,solvers)
BDSss = symbolic_setup(BDS,A)
BDSns = numerical_setup(BDSss,A)

x = GridapSolvers.allocate_col_vector(A)
x = allocate_col_vector(A)
x = cg!(x,A,b;verbose=true,Pl=BDSns,reltol=1.0e-12)
@test is_same_vector(x,x_star,Xb,X)

Expand All @@ -127,7 +127,7 @@ function main_driver(D,model,solvers)
BDSss = symbolic_setup(BDS,A)
BDSns = numerical_setup(BDSss,A)

x = GridapSolvers.allocate_col_vector(A)
x = allocate_col_vector(A)
x = cg!(x,A,b;verbose=true,Pl=BDSns,reltol=1.0e-12)
@test is_same_vector(x,x_star,Xb,X)
end
Expand Down
Loading

0 comments on commit b7b50a4

Please sign in to comment.