Add script to benchmark KKT solution time for OPF #70

Merged · 1 commit · Nov 18, 2023
125 changes: 125 additions & 0 deletions scripts/kkt/benchmark_kkt.jl
@@ -0,0 +1,125 @@
include("common.jl")

#=
CONFIG
=#

# Number of trial runs to estimate running time.
ntrials = 3
# Save results on disk?
save_results = true
# Should we use the GPU to evaluate the derivatives?
use_gpu = true
# Verbose level
verbose = true
print_level = if verbose
    MadNLP.DEBUG
else
    MadNLP.ERROR
end

# OPF instances
cases = [
    "case118.m",
    "case1354pegase.m",
    "case2869pegase.m",
    "case9241pegase.m",
]


function benchmark_kkt(model, kkt; use_gpu=false, ntrials=3, options...)
    use_gpu && refresh_memory()
    blk = build_opf_model(model; use_gpu=use_gpu)

    ## Warm-up
    solver = build_madnlp(blk, kkt; max_iter=1, options...)
    MadNLP.solve!(solver)

    ## Benchmark
    t_build, t_factorization, t_backsolve = (0.0, 0.0, 0.0)
    delta_err = 0.0
    for _ in 1:ntrials
        # Time the three phases of one KKT solve: assembly, factorization and backsolve.
        t_build += CUDA.@elapsed begin
            MadNLP.build_kkt!(solver.kkt)
        end
        t_factorization += CUDA.@elapsed begin
            MadNLP.factorize!(solver.linear_solver)
        end
        t_backsolve += CUDA.@elapsed begin
            MadNLP.solve_refine_wrapper!(solver, solver.d, solver.p)
        end

        # Accuracy check: infinity-norm residual of K*d - p after the backsolve.
        dsol = MadNLP.primal_dual(solver.d)
        n = length(dsol)
        psol = zeros(n)

        mul!(psol, solver.kkt, dsol)

        delta_err += norm(psol .- MadNLP.primal_dual(solver.p), Inf)
    end

    # Average timings and residual over the trials.
    return (
        build=t_build / ntrials,
        factorization=t_factorization / ntrials,
        backsolve=t_backsolve / ntrials,
        accuracy=delta_err / ntrials,
    )
end
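# Illustrative single-case call (commented out, not executed by the sweep below);
# the settings are only an example, mirroring the full-space CPU configuration:
#
#     model = ExaPF.PolarForm(joinpath(DATA, "case118.m"))
#     stats = benchmark_kkt(model, Argos.FullSpace();
#                           ntrials=3, use_gpu=false, linear_solver=Ma27Solver)
#     @printf("build %.3fs  factorization %.3fs  backsolve %.3fs  accuracy %.2e\n",
#             stats.build, stats.factorization, stats.backsolve, stats.accuracy)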

function benchmark_kkt(cases, kkt, ntrials, save_results; use_gpu=false, options...)
    # Setup
    dev = use_gpu ? :cuda : :cpu
    form = isa(kkt, Argos.BieglerReduction) ? :biegler : :full

    nexp = length(cases)
    # One row per case: nbus, build, factorization, backsolve, accuracy.
    results = zeros(nexp, 5)

    for (i, case) in enumerate(cases)
        datafile = joinpath(DATA, case)
        model = ExaPF.PolarForm(datafile)
        nbus = PS.get(model, PS.NumberOfBuses())

        r = benchmark_kkt(model, kkt; ntrials=ntrials, use_gpu=use_gpu, options...)
        results[i, :] .= (nbus, r.build, r.factorization, r.backsolve, r.accuracy)
    end

    if save_results
        output_dir = joinpath(dirname(@__FILE__), RESULTS_DIR)
        if !isdir(output_dir)
            mkdir(output_dir)
        end
        output_file = joinpath(output_dir, "benchmark_kkt_$(form)_$(dev).txt")
        writedlm(output_file, results)
    end
    return results
end

#=
Benchmark the full-space KKT system, using MA27 as the reference linear solver.
=#
benchmark_kkt(
    cases,
    Argos.FullSpace(),
    ntrials,
    save_results;
    print_level=print_level,
    linear_solver=Ma27Solver,
    use_gpu=use_gpu,
)


#=
Benchmark Biegler's reduction.
=#
benchmark_kkt(
    cases,
    Argos.BieglerReduction(),
    ntrials,
    save_results;
    print_level=print_level,
    linear_solver=LapackGPUSolver,
    use_gpu=use_gpu,
)
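
# With save_results = true, each sweep above writes one row per case
# (nbus, build, factorization, backsolve, accuracy) to
# results/benchmark_kkt_<form>_<dev>.txt next to this script, e.g.
# benchmark_kkt_full_cuda.txt and benchmark_kkt_biegler_cuda.txt when use_gpu = true.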

89 changes: 89 additions & 0 deletions scripts/kkt/common.jl
@@ -0,0 +1,89 @@
using DelimitedFiles
using LazyArtifacts
using LinearAlgebra
using Printf
using Random

using NLPModels
using Argos
using ExaPF
using MadNLP

# HSL
using MadNLPHSL

# GPU
using CUDA
using KernelAbstractions
using ArgosCUDA
using MadNLPGPU

const PS = ExaPF.PowerSystem

const DATA = joinpath(artifact"ExaData", "ExaData")
RESULTS_DIR = "results"

if CUDA.has_cuda()
    CUDA.allowscalar(false)
end

function refresh_memory()
    GC.gc(true)
    CUDA.has_cuda() && CUDA.reclaim()
    return
end

function init_model!(blk)
    # Warm-up: evaluate the Jacobian once at the initial point to initialize the model.
    x0 = NLPModels.get_x0(blk)
    nnzj = NLPModels.get_nnzj(blk)
    jac = zeros(nnzj)
    NLPModels.jac_coord!(blk, x0, jac)
    return
end

function build_opf_model(model; use_gpu=false)
    if use_gpu
        model_gpu = PolarForm(model, CUDABackend())
        nlp = Argos.FullSpaceEvaluator(model_gpu)
        blk = Argos.OPFModel(Argos.bridge(nlp))
    else
        nlp = Argos.FullSpaceEvaluator(model)
        blk = Argos.OPFModel(nlp)
    end
    init_model!(blk)
    return blk
end

function build_madnlp(
    blk::Argos.OPFModel,
    ::Argos.FullSpace;
    max_iter,
    dual_initialized=true,
    tol=1e-5,
    print_level=MadNLP.ERROR,
    linear_solver=Ma27Solver,
)
    return MadNLP.MadNLPSolver(
        blk;
        max_iter=max_iter,
        dual_initialized=dual_initialized,
        tol=tol,
        print_level=print_level,
        linear_solver=linear_solver,
    )
end

function build_madnlp(
    blk::Argos.OPFModel,
    ::Argos.BieglerReduction;
    max_iter,
    dual_initialized=true,
    tol=1e-5,
    print_level=MadNLP.ERROR,
    linear_solver=nothing,  # accepted for API symmetry, ignored below
)
    # The Biegler reduction condenses the KKT system into a dense matrix, which is
    # factorized on the GPU with a Cholesky decomposition (LapackGPUSolver).
    madnlp_options = Dict{Symbol, Any}()
    madnlp_options[:linear_solver] = LapackGPUSolver
    madnlp_options[:lapack_algorithm] = MadNLP.CHOLESKY
    madnlp_options[:dual_initialized] = dual_initialized
    madnlp_options[:max_iter] = max_iter
    madnlp_options[:print_level] = print_level
    madnlp_options[:tol] = tol
    opt_ipm, opt_linear, logger = MadNLP.load_options(; madnlp_options...)

    KKT = Argos.BieglerKKTSystem{Float64, CuVector{Int}, CuVector{Float64}, CuMatrix{Float64}}
    return MadNLP.MadNLPSolver{Float64, KKT}(blk, opt_ipm, opt_linear; logger=logger)
end
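
# Illustrative usage from a benchmark script (see scripts/kkt/benchmark_kkt.jl);
# the case file is only an example:
#
#     include("common.jl")
#     model = ExaPF.PolarForm(joinpath(DATA, "case118.m"))
#     blk = build_opf_model(model; use_gpu=false)
#     solver = build_madnlp(blk, Argos.FullSpace(); max_iter=1, linear_solver=Ma27Solver)
#     MadNLP.solve!(solver)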
