diff --git a/docs/src/apireference.md b/docs/src/apireference.md index d2741fc8a..26ed0823e 100644 --- a/docs/src/apireference.md +++ b/docs/src/apireference.md @@ -78,6 +78,7 @@ SDDP.DefaultForwardPass SDDP.RevisitingForwardPass SDDP.RiskAdjustedForwardPass SDDP.AlternativeForwardPass +SDDP.AlternativePostIterationCallback ``` ### Risk Measures diff --git a/docs/src/examples/air_conditioning_forward.jl b/docs/src/examples/air_conditioning_forward.jl index 80787c1a6..bc1be57a5 100644 --- a/docs/src/examples/air_conditioning_forward.jl +++ b/docs/src/examples/air_conditioning_forward.jl @@ -35,7 +35,7 @@ non_convex = create_air_conditioning_model(; convex = false) SDDP.train( convex; forward_pass = SDDP.AlternativeForwardPass(non_convex), - parallel_scheme = SDDP.AlternativeParallelScheme(non_convex), + post_iteration_callback = SDDP.AlternativePostIterationCallback(non_convex), iteration_limit = 10, ) Test.@test isapprox(SDDP.calculate_bound(non_convex), 62_500.0, atol = 0.1) diff --git a/docs/src/examples/pglib_opf.jl b/docs/src/examples/pglib_opf.jl index a9952ccf0..633bcc920 100644 --- a/docs/src/examples/pglib_opf.jl +++ b/docs/src/examples/pglib_opf.jl @@ -5,12 +5,25 @@ # # Alternative forward models +# This example demonstrates how to train convex and non-convex models. + +# This example uses the following packages: + using SDDP import Ipopt import PowerModels import Test -function build_model(filename, model_type) +# ## Formulation + +# For our model, we build a simple optimal power flow model with a single +# hydro-electric generator. + +# The formulation of our optimal power flow problem depends on `model_type`, +# which must be one of the `PowerModels` formulations. 
+ +function build_model(model_type) + filename = joinpath(@__DIR__, "pglib_opf_case5_pjm.m") data = PowerModels.parse_file(filename) return SDDP.PolicyGraph( SDDP.UnicyclicGraph(0.95); @@ -39,18 +52,38 @@ function build_model(filename, model_type) end end -filename = joinpath(@__DIR__, "pglib_opf_case5_pjm.m") -convex = build_model(filename, PowerModels.DCPPowerModel) +# ## Training a convex model + +# We can build and train a convex approximation of the optimal power flow +# problem. + +# The problem with the convex model is that it does not accurately simulate the +# true dynamics of the problem. Therefore, it under-estimates the true cost of +# operation. + +convex = build_model(PowerModels.DCPPowerModel) SDDP.train(convex; iteration_limit = 10) -non_convex = build_model(filename, PowerModels.ACPPowerModel) +# ## Training a non-convex model + +# We can also build and train the true non-convex formulation of the optimal +# power flow problem. + +# The problem with the non-convex model is that because it is non-convex, +# SDDP.jl may find a sub-optimal policy. Therefore, it may over-estimate the +# true cost of operation. + +non_convex = build_model(PowerModels.ACPPowerModel) SDDP.train(non_convex; iteration_limit = 10) -convex = build_model(filename, PowerModels.DCPPowerModel) +# ## Combining convex and non-convex models + +# As a compromise, we can combine the convex and non-convex models. 
+convex = build_model(PowerModels.DCPPowerModel) -non_convex = build_model(filename, PowerModels.ACPPowerModel) +non_convex = build_model(PowerModels.ACPPowerModel) SDDP.train( convex; forward_pass = SDDP.AlternativeForwardPass(non_convex), - parallel_scheme = SDDP.AlternativeParallelScheme(non_convex), + post_iteration_callback = SDDP.AlternativePostIterationCallback(non_convex), iteration_limit = 10, ) diff --git a/src/algorithm.jl b/src/algorithm.jl index cf166bb1f..0586da3da 100644 --- a/src/algorithm.jl +++ b/src/algorithm.jl @@ -99,7 +99,7 @@ struct Options{T} duality_handler::AbstractDualityHandler # A callback called after the forward pass. forward_pass_callback::Any - + post_iteration_callback::Any # Internal function: users should never construct this themselves. function Options( model::PolicyGraph{T}, @@ -119,6 +119,7 @@ struct Options{T} forward_pass::AbstractForwardPass = DefaultForwardPass(), duality_handler::AbstractDualityHandler = ContinuousConicDuality(), forward_pass_callback = x -> nothing, + post_iteration_callback = result -> nothing ) where {T} return new{T}( initial_state, @@ -140,6 +141,7 @@ struct Options{T} forward_pass, duality_handler, forward_pass_callback, + post_iteration_callback, ) end end @@ -913,6 +915,10 @@ Train the policy for `model`. - `duality_handler::AbstractDualityHandler`: specify a duality handler to use when creating cuts. + - `post_iteration_callback::Function`: a callback with the signature + `post_iteration_callback(::IterationResult)` that is evaluated after each + iteration of the algorithm. 
+ There is also a special option for infinite horizon problems - `cycle_discretization_delta`: the maximum distance between states allowed on @@ -943,6 +949,7 @@ function train( add_to_existing_cuts::Bool = false, duality_handler::AbstractDualityHandler = SDDP.ContinuousConicDuality(), forward_pass_callback::Function = (x) -> nothing, + post_iteration_callback = result -> nothing, ) function log_frequency_f(log::Vector{Log}) if mod(length(log), log_frequency) != 0 @@ -1063,6 +1070,7 @@ function train( forward_pass, duality_handler, forward_pass_callback, + post_iteration_callback, ) status = :not_solved try diff --git a/src/alternative_forward.jl b/src/alternative_forward.jl index 50af605d1..9697389ac 100644 --- a/src/alternative_forward.jl +++ b/src/alternative_forward.jl @@ -11,6 +11,10 @@ A forward pass that simulates using `forward_model`, which may be different to the model used in the backwards pass. +When using this forward pass, you should almost always pass +[`SDDP.AlternativePostIterationCallback`](@ref) to the `post_iteration_callback` +argument of [`SDDP.train`](@ref). + This forward pass is most useful when the `forward_model` is non-convex and we use a convex approximation of the model in the backward pass. @@ -44,27 +48,17 @@ function forward_pass( return forward_pass(pass.model, options, pass.forward_pass) end -struct AlternativeParallelScheme{T} <: AbstractParallelScheme +""" + AlternativePostIterationCallback(forward_model::PolicyGraph) + +A post-iteration callback that should be used whenever [`SDDP.AlternativeForwardPass`](@ref) +is used. 
+""" +struct AlternativePostIterationCallback{T} model::PolicyGraph{T} end -Base.show(io::IO, ::AlternativeParallelScheme) = print(io, "alternative") - -interrupt(::AlternativeParallelScheme) = nothing - -function master_loop( - scheme::AlternativeParallelScheme{T}, - model::PolicyGraph{T}, - options::Options, -) where {T} - _initialize_solver(model; throw_error = false) - while true - result = iteration(model, options) - slave_update(scheme.model, result) - log_iteration(options) - if result.has_converged - return result.status - end - end +function (callback::AlternativePostIterationCallback)(result::IterationResult) + slave_update(callback.model, result) return end diff --git a/src/plugins/parallel_schemes.jl b/src/plugins/parallel_schemes.jl index 631bd3008..951e9298d 100644 --- a/src/plugins/parallel_schemes.jl +++ b/src/plugins/parallel_schemes.jl @@ -40,6 +40,7 @@ function master_loop( _initialize_solver(model; throw_error = false) while true result = iteration(model, options) + options.post_iteration_callback(result) log_iteration(options) if result.has_converged return result.status @@ -167,6 +168,7 @@ function slave_loop( results_to_add = IterationResult{T}[] while true result = iteration(model, options) + options.post_iteration_callback(result) # The next four lines are subject to a race condition: if the master closes # `results` _after_ the call to `isopen` and _before_` the call to `put!` has # executed, we get an `InvalidStateException`. This gets trapped in the outer @@ -243,6 +245,7 @@ function master_loop( # implementation anyway. while async.use_master && !isready(results) result = iteration(model, options) + options.post_iteration_callback(result) for (_, ch) in updates put!(ch, result) end