From 82e16af187924ac0fa65d31f87b00723aa059fdd Mon Sep 17 00:00:00 2001 From: Oscar Dowson Date: Sun, 26 Sep 2021 13:53:37 +1300 Subject: [PATCH] Minor formatting updates (#473) --- src/Experimental.jl | 108 +++++++++------ src/JuMP.jl | 1 + src/algorithm.jl | 95 +++++++------ src/cyclic.jl | 14 +- src/deterministic_equivalent.jl | 12 +- src/modeling_aids.jl | 33 +++-- src/plugins/bellman_functions.jl | 40 ++++-- src/plugins/duality_handlers.jl | 2 +- src/plugins/forward_passes.jl | 2 +- src/plugins/headers.jl | 59 ++++---- src/plugins/parallel_schemes.jl | 11 +- src/plugins/risk_measures.jl | 5 +- src/plugins/sampling_schemes.jl | 44 +++--- src/plugins/stopping_rules.jl | 22 +-- src/print.jl | 32 +++-- src/user_interface.jl | 188 +++++++++++++++----------- src/visualization/publication_plot.jl | 2 +- src/visualization/spaghetti_plot.jl | 14 +- src/visualization/value_functions.jl | 4 +- 19 files changed, 412 insertions(+), 276 deletions(-) diff --git a/src/Experimental.jl b/src/Experimental.jl index 4efda532a..a4de88cf3 100644 --- a/src/Experimental.jl +++ b/src/Experimental.jl @@ -6,7 +6,7 @@ import SHA """ - TestScenario{T, S}(probability::Float64, scenario::Vector{Tuple{T, S}}) + TestScenario{T,S}(probability::Float64, scenario::Vector{Tuple{T,S}}) A single scenario for testing. @@ -18,7 +18,7 @@ struct TestScenario{T,S} end """ - TestScenarios{T, S}(scenarios::Vector{TestScenario{T, S}}) + TestScenarios{T,S}(scenarios::Vector{TestScenario{T,S}}) An [`AbstractSamplingScheme`](@ref) based on a vector of scenarios. @@ -55,6 +55,7 @@ function _throw_if_belief_states(model::PolicyGraph) if length(model.belief_partition) != 0 error("StochOptFormat does not support belief states.") end + return end function _throw_if_objective_states(model::PolicyGraph) @@ -63,6 +64,7 @@ function _throw_if_objective_states(model::PolicyGraph) error("StochOptFormat does not support objective states.") end end + return end function _throw_if_exisiting_cuts(model::PolicyGraph) @@ -74,6 +76,7 @@ function _throw_if_exisiting_cuts(model::PolicyGraph) ) end end + return end function _test_scenarios(model::PolicyGraph, test_scenarios::Int, scenario_map) @@ -88,6 +91,7 @@ function _test_scenarios(model::PolicyGraph, test_scenarios::Int, scenario_map) scenario_map, ) end + function _test_scenarios( ::PolicyGraph, test_scenarios::TestScenarios, @@ -139,19 +143,21 @@ possible modifications are supported. These include: If your model uses something other than this, this function will silently write an incorrect formulation of the problem. -## Example - - open("my_model.sof.json", "w") do io - write( - io, - model; - test_scenarios = 10, - name = "MyModel", - author = "@odow", - date = "2020-07-20", - description = "Example problem for the SDDP.jl documentation", - ) - end +## Examples + +```julia +open("my_model.sof.json", "w") do io + write( + io, + model; + test_scenarios = 10, + name = "MyModel", + author = "@odow", + date = "2020-07-20", + description = "Example problem for the SDDP.jl documentation", + ) +end +``` """ function Base.write( io::IO, @@ -205,6 +211,7 @@ function _add_edges( ), ) end + return end function _add_node_to_dict(dest::Dict, node::Node, node_name::String) @@ -238,11 +245,13 @@ end """ _reformulate_uncertainty( - node::Node, realizations, random_variables + node::Node, + realizations, + random_variables, ) -Convert any lower and upper variable_bound_storage than depend on the uncertainty into linear -constraints with a random variable. 
+Convert any lower and upper variable bounds that depend on the
+uncertainty into linear constraints with a random variable.

Fixed variables are recorded as random variables, but no transformation is
done.
@@ -544,6 +553,7 @@ function _reformulate_lower_bound(
    for (realization, bound) in zip(realizations, variable_bound_storage)
        realization["support"][new_name] = bound[x].l
    end
+    return
end

function _reformulate_upper_bound(
@@ -568,6 +578,7 @@ function _reformulate_upper_bound(
    for (realization, bound) in zip(realizations, variable_bound_storage)
        realization["support"][new_name] = bound[x].u
    end
+    return
end

function _reformulate_constraint_rhs(
@@ -648,15 +659,16 @@ end
        io::IO,
        ::Type{PolicyGraph};
        bound::Float64 = 1e6,
    )::Tuple{PolicyGraph,TestScenarios}

Return a tuple containing a [`PolicyGraph`](@ref) object and a
[`TestScenarios`](@ref) read from `io` in the StochOptFormat file format.

See also: [`evaluate`](@ref).

-WARNING: THIS FUNCTION IS EXPERIMENTAL. THINGS MAY CHANGE BETWEEN COMMITS. YOU
-SHOULD NOT RELY ON THIS FUNCTIONALITY AS A LONG-TERM FILE FORMAT (YET).
+!!! warning
+    This function is experimental. Things may change between commits. You should
+    not rely on this functionality as a long-term file format (yet).

In addition to potential changes to the underlying format, only a subset of
possible modifications are supported. These include:
@@ -666,11 +678,13 @@ possible modifications are supported. These include:
If your model uses something other than this, this function may throw an error
or silently build a non-convex model.

-## Example
+## Examples

-    open("my_model.sof.json", "r") do io
-        model, test_scenarios = read(io, PolicyGraph)
-    end
+```julia
+open("my_model.sof.json", "r") do io
+    model, test_scenarios = read(io, PolicyGraph)
+end
+```
"""
function Base.read(io::IO, ::Type{PolicyGraph}; bound::Float64 = 1e6)
    data = JSON.parse(io; dicttype = Dict{String,Any})
@@ -773,12 +787,12 @@ function _convert_objective_function(sp::Model, rvs::Vector{String})
    return _convert_objective_function(sp, rvs, objective_function(sp))
end

-function _convert_objective_function(sp::Model, ::Vector{String}, objf)
+function _convert_objective_function(::Model, ::Vector{String}, objf)
    return Dict{String,Any}(), objf
end

function _convert_objective_function(
-    sp::Model,
+    ::Model,
    rvs::Vector{String},
    objf::QuadExpr,
)
@@ -797,9 +811,8 @@ function _convert_objective_function(
    end
    if length(terms) == length(objf.terms)
        return terms, aff_obj
-    else
-        return terms, QuadExpr(aff_obj, quad_terms)
    end
+    return terms, QuadExpr(aff_obj, quad_terms)
end

"""
@@ -819,12 +832,15 @@ detecting the file compression to use based on the extension of `filename`.

See [`Base.write(::IO, ::PolicyGraph)`](@ref) for information on the keyword
arguments that can be provided.

-WARNING: THIS FUNCTION IS EXPERIMENTAL. SEE THE FULL WARNING IN
-[`Base.write(::IO, ::PolicyGraph)`](@ref).
+!!! warning
+    This function is experimental. See the full warning in
+    [`Base.write(::IO, ::PolicyGraph)`](@ref).

-## Example
+## Examples

-    write_to_file(model, "my_model.sof.json"; test_scenarios = 10)
+```julia
+write_to_file(model, "my_model.sof.json"; test_scenarios = 10)
+```
"""
function write_to_file(
    model::PolicyGraph,
@@ -854,12 +870,15 @@ detecting the file compression to use based on the extension of `filename`.

See [`Base.read(::IO, ::Type{PolicyGraph})`](@ref) for information on the
keyword arguments that can be provided.

-WARNING: THIS FUNCTION IS EXPERIMENTAL. 
SEE THE FULL WARNING IN -[`Base.read(::IO, ::Type{PolicyGraph})`](@ref). +!!! warning + This function is experimental. See the full warning in + [`Base.read(::IO, ::Type{PolicyGraph})`](@ref). -## Example +## Examples - model, test_scenarios = read_from_file("my_model.sof.json") +```julia +model, test_scenarios = read_from_file("my_model.sof.json") +``` """ function read_from_file( filename::String; @@ -873,17 +892,20 @@ end """ evaluate( - model::PolicyGraph{T}, test_scenarios::TestScenarios{T, S} - ) where {T, S} + model::PolicyGraph{T}, + test_scenarios::TestScenarios{T,S}, + ) where {T,S} Evaluate the performance of the policy contained in `model` after a call to [`train`](@ref) on the scenarios specified by `test_scenarios`. -## Example +## Examples - model, test_scenarios = read_from_file("my_model.sof.json") - train(model; iteration_limit = 100) - simulations = evaluate(model, test_scenarios) +```julia +model, test_scenarios = read_from_file("my_model.sof.json") +train(model; iteration_limit = 100) +simulations = evaluate(model, test_scenarios) +``` """ function evaluate( model::PolicyGraph{T}, diff --git a/src/JuMP.jl b/src/JuMP.jl index c2ef20c3f..219dcaf95 100644 --- a/src/JuMP.jl +++ b/src/JuMP.jl @@ -116,4 +116,5 @@ function JuMP.set_optimizer(model::SDDP.PolicyGraph, optimizer) for node in values(model.nodes) set_optimizer(node.subproblem, optimizer) end + return end diff --git a/src/algorithm.jl b/src/algorithm.jl index 72f38dc3d..0557d5e2d 100644 --- a/src/algorithm.jl +++ b/src/algorithm.jl @@ -216,7 +216,11 @@ stage_objective_value(stage_objective::Real) = stage_objective stage_objective_value(stage_objective) = JuMP.value(stage_objective) """ - write_subproblem_to_file(node::Node, filename::String; throw_error::Bool = false) + write_subproblem_to_file( + node::Node, + filename::String; + throw_error::Bool = false, + ) Write the subproblem contained in `node` to the file `filename`. """ @@ -239,6 +243,7 @@ function write_subproblem_to_file( "\nfor more information.", ) end + return end """ @@ -276,7 +281,8 @@ end """ _initialize_solver(node::Node; throw_error::Bool) -After passing a model to a different process, we need to set the optimizer again. +After passing a model to a different process, we need to set the optimizer +again. If `throw_error`, throw an error if the model is in direct mode. @@ -307,7 +313,8 @@ end """ _initialize_solver(model::PolicyGraph; throw_error::Bool) -After passing a model to a different process, we need to set the optimizer again. +After passing a model to a different process, we need to set the optimizer +again. If `throw_error`, throw an error if the model is in direct mode. @@ -413,7 +420,8 @@ end # Internal function: update the objective state given incoming `current_state` # and `noise`. -update_objective_state(obj_state::Nothing, current_state, noise) = nothing +update_objective_state(::Nothing, ::Any, ::Any) = nothing + function update_objective_state(obj_state, current_state, noise) if length(current_state) == 1 obj_state.state = (obj_state.update(current_state[1], noise),) @@ -439,9 +447,8 @@ function distance( ) if length(starting_states) == 0 return Inf - else - return minimum(norm.(starting_states, Ref(state))) end + return minimum(norm.(starting_states, Ref(state))) end # Internal function: the norm to use when checking the distance between two @@ -679,11 +686,15 @@ function solve_all_children( # Drop the last element (i.e., the one we added). 
        pop!(scenario_path)
    end
+    return
end

"""
-    SDDP.calculate_bound(model::PolicyGraph, state::Dict{Symbol, Float64},
-                         risk_measure=Expectation())
+    SDDP.calculate_bound(
+        model::PolicyGraph,
+        state::Dict{Symbol,Float64},
+        risk_measure = Expectation(),
+    )

Calculate the lower bound (if minimizing, otherwise upper bound) of the
problem `model` at the point `state`, assuming the risk measure at the root
node is
@@ -699,7 +710,6 @@ function calculate_bound(
    probabilities = Float64[]
    objectives = Float64[]
    current_belief = initialize_belief(model)
-
    # Solve all problems that are children of the root node.
    for child in model.root_children
        if isapprox(child.probability, 0.0, atol = 1e-6)
@@ -812,15 +822,16 @@ Query the reason why the training stopped.
function termination_status(model::PolicyGraph)
    if model.most_recent_training_results === nothing
        return :model_not_solved
-    else
-        return model.most_recent_training_results.status
    end
+    return model.most_recent_training_results.status
end

"""
    SDDP.train(model::PolicyGraph; kwargs...)

-Train the policy for `model`. Keyword arguments:
+Train the policy for `model`.
+
+## Keyword arguments

 - `iteration_limit::Int`: number of iterations to conduct before termination.

@@ -831,14 +842,14 @@ Train the policy for `model`. Keyword arguments:
 - `print_level::Int`: control the level of printing to the screen. Defaults to
   `1`. Set to `0` to disable all printing.

- - `log_file::String`: filepath at which to write a log of the training progress.
-   Defaults to `SDDP.log`.
+ - `log_file::String`: filepath at which to write a log of the training
+   progress. Defaults to `SDDP.log`.

 - `log_frequency::Int`: control the frequency with which the logging is
   outputted (iterations/log). Defaults to `1`.

- - `run_numerical_stability_report::Bool`: generate (and print) a numerical stability
-   report prior to solve. Defaults to `true`.
+ - `run_numerical_stability_report::Bool`: generate (and print) a numerical
+   stability report prior to solve. Defaults to `true`.

 - `refine_at_similar_nodes::Bool`: if SDDP can detect that two nodes have the
   same children, it can cheaply add a cut discovered at one to the other. In
@@ -846,10 +857,12 @@ Train the policy for `model`. Keyword arguments:

 - `cut_deletion_minimum::Int`: the minimum number of cuts to cache before
   deleting cuts from the subproblem. The impact on performance is solver
-   specific; however, smaller values result in smaller subproblems (and therefore
-   quicker solves), at the expense of more time spent performing cut selection.
+   specific; however, smaller values result in smaller subproblems (and
+   therefore quicker solves), at the expense of more time spent performing cut
+   selection.

- - `risk_measure`: the risk measure to use at each node. Defaults to [`Expectation`](@ref).
+ - `risk_measure`: the risk measure to use at each node. Defaults to
+   [`Expectation`](@ref).

 - `sampling_scheme`: a sampling scheme to use on the forward pass of the
   algorithm. Defaults to [`InSampleMonteCarlo`](@ref).
@@ -857,13 +870,14 @@ Train the policy for `model`. Keyword arguments:
 - `backward_sampling_scheme`: a backward pass sampling scheme to use on the
   backward pass of the algorithm. Defaults to `CompleteSampler`.

- - `cut_type`: choose between `SDDP.SINGLE_CUT` and `SDDP.MULTI_CUT` versions of SDDP.
+ - `cut_type`: choose between `SDDP.SINGLE_CUT` and `SDDP.MULTI_CUT` versions of
+   SDDP.

 - `dashboard::Bool`: open a visualization of the training over time. Defaults
   to `false`.
- - `parallel_scheme::AbstractParallelScheme`: specify a scheme for solving in parallel. - Defaults to `Serial()`. + - `parallel_scheme::AbstractParallelScheme`: specify a scheme for solving in + parallel. Defaults to `Serial()`. - `forward_pass::AbstractForwardPass`: specify a scheme to use for the forward passes. @@ -949,7 +963,6 @@ function train( sampling_scheme, ) end - if run_numerical_stability_report report = sprint( io -> numerical_stability_report( @@ -960,7 +973,6 @@ function train( ) print_helper(print, log_file_handle, report) end - if print_level > 0 print_helper(print_iteration_header, log_file_handle) end @@ -982,7 +994,6 @@ function train( "the call to SDDP.train via a keyboard interrupt ([CTRL+C])." ) end - # Update the nodes with the selected cut type (SINGLE_CUT or MULTI_CUT) # and the cut deletion minimum. if cut_deletion_minimum < 0 @@ -996,13 +1007,11 @@ function train( oracle.cut_oracle.deletion_minimum = cut_deletion_minimum end end - dashboard_callback = if dashboard launch_dashboard() else (::Any, ::Any) -> nothing end - options = Options( model, model.initial_root_state, @@ -1022,7 +1031,6 @@ function train( duality_handler, forward_pass_callback, ) - status = :not_solved try status = master_loop(parallel_scheme, model, options) @@ -1065,8 +1073,7 @@ function _simulate( incoming_state::Dict{Symbol,Float64}, ) where {T} # Sample a scenario path. - scenario_path, terminated_due_to_cycle = - sample_scenario(model, sampling_scheme) + scenario_path, _ = sample_scenario(model, sampling_scheme) # Storage for the simulation results. simulation = Dict{Symbol,Any}[] @@ -1176,7 +1183,7 @@ end skip_undefined_variables::Bool = false, parallel_scheme::AbstractParallelScheme = Serial(), incoming_state::Dict{String,Float64} = _intial_state(model), - )::Vector{Vector{Dict{Symbol, Any}}} + )::Vector{Vector{Dict{Symbol,Any}}} Perform a simulation of the policy model with `number_replications` replications using the sampling scheme `sampling_scheme`. @@ -1204,24 +1211,30 @@ useful to obtain the primal value of the state and control variables. For more complicated data, the `custom_recorders` keyword argument can be used. - data = Dict{Symbol, Any}() - for (key, recorder) in custom_recorders - data[key] = foo(subproblem) - end +```julia +data = Dict{Symbol, Any}() +for (key, recorder) in custom_recorders + data[key] = foo(subproblem) +end +``` For example, to record the dual of a constraint named `my_constraint`, pass the following: - simulation_results = SDDP.simulate(model, 2; - custom_recorders = Dict{Symbol, Function}( - :constraint_dual => (sp) -> JuMP.dual(sp[:my_constraint]) - ) +```julia +simulation_results = SDDP.simulate(model, 2; + custom_recorders = Dict{Symbol, Function}( + :constraint_dual => (sp) -> JuMP.dual(sp[:my_constraint]) ) +) +``` The value of the dual in the first stage of the second replication can be accessed as: - simulation_results[2][1][:constraint_dual] +```julia +simulation_results[2][1][:constraint_dual] +``` If you do not require dual variables (or if they are not available), pass `duality_handler = nothing`. @@ -1278,7 +1291,7 @@ end """ evaluate( rule::DecisionRule; - incoming_state::Dict{Symbol, Float64}, + incoming_state::Dict{Symbol,Float64}, noise = nothing, controls_to_record = Symbol[], ) diff --git a/src/cyclic.jl b/src/cyclic.jl index b88bad994..e61ae847f 100644 --- a/src/cyclic.jl +++ b/src/cyclic.jl @@ -7,16 +7,16 @@ Return `true` or `false` if the graph `G` contains a cycle. 
-We implement Tarjan's strongly connected components algorithm to detect
-cycles in a directed graph in O(|V| + |E|) time. See this Wiki for details
+We implement Tarjan's strongly connected components algorithm to detect cycles
+in a directed graph in O(|V| + |E|) time. See this Wiki for details
https://en.wikipedia.org/wiki/Tarjan%27s_strongly_connected_components_algorithm

-The notation here follows the pseudocode in the Wikipedia article, rather
-than the typical JuMP style guide.
+The notation here follows the pseudocode in the Wikipedia article, rather than
+the typical JuMP style guide.

-Since we're only checking for cyclic graphs, we can stop as soon as on is
+Since we're only checking for cyclic graphs, we can stop as soon as one is
found. A cyclic graph has a strongly connected component with at least two
-components, or it has a node with connects to itself. That means we don't
-need to store the set of all strongly connected components.
+nodes, or it has a node that connects to itself. That means we don't need
+to store the set of all strongly connected components.
"""
function is_cyclic(G::PolicyGraph{T}) where {T}
    index_counter = 0
diff --git a/src/deterministic_equivalent.jl b/src/deterministic_equivalent.jl
index 301032c6a..c44866361 100644
--- a/src/deterministic_equivalent.jl
+++ b/src/deterministic_equivalent.jl
@@ -173,21 +173,27 @@ function add_linking_constraints(
        end
        add_linking_constraints(model, child, check_time_limit)
    end
+    return
end

"""
    deterministic_equivalent(
        pg::PolicyGraph{T},
        optimizer = nothing;
-        time_limit::Union{Real, Nothing} = 60.0
+        time_limit::Union{Real,Nothing} = 60.0,
    )

Form a JuMP model that represents the deterministic equivalent of the problem.

## Examples

-    deterministic_equivalent(model)
-    deterministic_equivalent(model, GLPK.Optimizer)
+```julia
+deterministic_equivalent(model)
+```
+
+```julia
+deterministic_equivalent(model, GLPK.Optimizer)
+```
"""
function deterministic_equivalent(
    pg::PolicyGraph{T},
diff --git a/src/modeling_aids.jl b/src/modeling_aids.jl
index 5cf6ae4ab..ca82896a4 100644
--- a/src/modeling_aids.jl
+++ b/src/modeling_aids.jl
@@ -65,8 +65,8 @@ function lattice_approximation(f::Function, states::Vector{Int}, scenarios::Int)
    p ./= sum(p, dims = 2)
    if any(isnan, p)
        @warn(
-            "Too few scenarios to form an approximation of the lattice. Restarting " *
-            "the approximation with $(10 * scenarios) scenarios."
+            "Too few scenarios to form an approximation of the lattice. " *
+            "Restarting the approximation with $(10 * scenarios) scenarios.",
        )
        return lattice_approximation(f, states, 10 * scenarios)
    end
@@ -77,8 +77,8 @@ end
"""
    allocate_support_budget(f, budget, scenarios)

-Allocate the `budget` nodes amongst the stages for a Markovian approximation. By default,
-we distribute nodes based on the relative variance of the stages.
+Allocate the `budget` nodes amongst the stages for a Markovian approximation.
+By default, we distribute nodes based on the relative variance of the stages.
"""
function allocate_support_budget(
    f::Function,
@@ -89,14 +89,16 @@ function allocate_support_budget(
    states = ones(Int, length(stage_var))
    if budget < length(stage_var)
        @warn(
-            "Budget for nodes is less than the number of stages. Using one node per stage."
+            "Budget for nodes is less than the number of stages. Using one " *
+            "node per stage.",
        )
        return states
    end
    s = sum(stage_var)
    if s ≈ 0.0
-        # If the sum of the variances is 0, then the simulator must be deterministic.
-        # Regardless of the budget, return a single Markov state for each stage.
+        # If the sum of the variances is 0, then the simulator must be
+        # deterministic. Regardless of the budget, return a single Markov state
+        # for each stage.
        return states
    end
    for i in 1:length(states)
@@ -111,6 +113,7 @@ function allocate_support_budget(
    end
    return states
end
+
function allocate_support_budget(
    f::Function,
    budget::Vector{Int},
@@ -121,15 +124,19 @@ end

"""
    MarkovianGraph(
-        simulator::Function; budget::Union{Int, Vector{Int}}, scenarios::Int = 1000
+        simulator::Function;
+        budget::Union{Int,Vector{Int}},
+        scenarios::Int = 1000,
    )

-Construct a Markovian graph by fitting Markov chain to scenarios generated by `simulator()`.
+Construct a Markovian graph by fitting a Markov chain to scenarios generated by
+`simulator()`.

-`budget` is the total number of nodes in the resulting Markov chain. This can either be
-specified as a single `Int`, in which case we will attempt to intelligently distributed the
-nodes between stages. Alternatively, `budget` can be a `Vector{Int}`, which details the
-number of Markov state to have in each stage.
+`budget` is the total number of nodes in the resulting Markov chain. This can
+either be specified as a single `Int`, in which case we will attempt to
+intelligently distribute the nodes between stages. Alternatively, `budget` can
+be a `Vector{Int}`, which details the number of Markov states to have in each
+stage.
"""
function MarkovianGraph(
    simulator::Function;
diff --git a/src/plugins/bellman_functions.jl b/src/plugins/bellman_functions.jl
index f3c37865c..6cedd2748 100644
--- a/src/plugins/bellman_functions.jl
+++ b/src/plugins/bellman_functions.jl
@@ -53,6 +53,7 @@ mutable struct ConvexApproximation
end

_magnitude(x) = x ≈ 0 ? 0 : log10(abs(x))
+
function _dynamic_range_warning(intercept, coefficients)
    lo = hi = _magnitude(intercept)
    lo_v = hi_v = intercept
@@ -80,9 +81,12 @@ function _dynamic_range_warning(intercept, coefficients)
            maxlog = 1,
        )
    end
+    return
end

-# Add the cut `V.θ ≥ θᵏ + ⟨πᵏ, x′ - xᵏ⟩`.
+"""
+Add the cut `V.θ ≥ θᵏ + ⟨πᵏ, x′ - xᵏ⟩`.
+"""
function _add_cut(
    V::ConvexApproximation,
    θᵏ::Float64,
@@ -129,7 +133,9 @@ function add_cut_constraint_to_model(V::ConvexApproximation, cut::Cut)
    return
end

-# Internal function: calculate the height of `cut` evaluated at `state`.
+"""
+Internal function: calculate the height of `cut` evaluated at `state`.
+"""
function _eval_height(cut::Cut, state::Dict{Symbol,Float64})
    height = cut.intercept
    for (key, value) in cut.coefficients
@@ -138,12 +144,17 @@ function _eval_height(cut::Cut, state::Dict{Symbol,Float64})
    return height
end

-# Internal function: check if the candidate point dominates the incumbent.
+"""
+Internal function: check if the candidate point dominates the incumbent.
+"""
function _dominates(candidate, incumbent, minimization::Bool)
    return minimization ? candidate >= incumbent : candidate <= incumbent
end

-# Internal function: update the Level-One datastructures inside `bellman_function`.
+"""
+Internal function: update the Level-One datastructures inside
+`bellman_function`.
+"""
function _cut_selection_update(
    V::ConvexApproximation,
    cut::Cut,
@@ -157,10 +168,10 @@ function _cut_selection_update(
    model = JuMP.owner_model(V.theta)
    is_minimization = JuMP.objective_sense(model) == MOI.MIN_SENSE
    oracle = V.cut_oracle
-
    sampled_state = SampledState(state, cut, _eval_height(cut, state))
-    # Loop through previously sampled states and compare the height of the most recent cut
-    # against the current best. If this new cut is an improvement, store this one instead.
+    # Loop through previously sampled states and compare the height of the most
+    # recent cut against the current best. If this new cut is an improvement,
+    # store this one instead.
    for old_state in oracle.states
        height = _eval_height(cut, old_state.state)
        if _dominates(height, old_state.best_objective, is_minimization)
@@ -171,9 +182,9 @@ function _cut_selection_update(
        end
    end
    push!(oracle.states, sampled_state)
-
    # Now loop through previously discovered cuts and compare their height at
-    # `sampled_state`. If a cut is an improvement, add it to a queue to be added.
+    # `sampled_state`. If a cut is an improvement, add it to a queue to be
+    # added.
    for old_cut in oracle.cuts
        if old_cut.constraint_ref !== nothing
            # We only care about cuts not currently in the model.
@@ -189,7 +200,6 @@ function _cut_selection_update(
        end
    end
    push!(oracle.cuts, cut)
-
    # Delete cuts that need to be deleted.
    for cut in V.cut_oracle.cuts
        if cut.non_dominated_count < 1
@@ -229,8 +239,11 @@ end

"""
    BellmanFunction(;
-        lower_bound = -Inf, upper_bound = Inf, deletion_minimum::Int = 1,
-        cut_type::CutType = MULTI_CUT)
+        lower_bound = -Inf,
+        upper_bound = Inf,
+        deletion_minimum::Int = 1,
+        cut_type::CutType = MULTI_CUT,
+    )
"""
function BellmanFunction(;
    lower_bound = -Inf,
@@ -332,6 +345,7 @@ end
# adding 2^N constraints where N = |μ|. This is only feasible for
# low-dimensional problems, e.g., N < 5.
_add_initial_bounds(obj_state::Nothing, theta) = nothing
+
function _add_initial_bounds(obj_state::ObjectiveState, theta)
    model = JuMP.owner_model(theta)
    if length(obj_state.μ) < 5
@@ -596,7 +610,9 @@ function write_cuts_to_file(model::PolicyGraph{T}, filename::String) where {T}
end

_node_name_parser(::Type{Int}, name::String) = parse(Int, name)
+
_node_name_parser(::Type{Symbol}, name::String) = Symbol(name)
+
function _node_name_parser(::Type{NTuple{N,Int}}, name::String) where {N}
    keys = parse.(Int, strip.(split(name[2:end-1], ",")))
    if length(keys) != N
diff --git a/src/plugins/duality_handlers.jl b/src/plugins/duality_handlers.jl
index 29c793403..fb872133e 100644
--- a/src/plugins/duality_handlers.jl
+++ b/src/plugins/duality_handlers.jl
@@ -142,7 +142,7 @@ end
        iteration_limit::Int = 100,
        atol::Float64 = 1e-8,
        rtol::Float64 = 1e-8,
-        optimizer = nothing
+        optimizer = nothing,
    )

Obtain dual variables in the backward pass using Lagrangian duality and Kelley's
diff --git a/src/plugins/forward_passes.jl b/src/plugins/forward_passes.jl
index 0137fdf53..753478451 100644
--- a/src/plugins/forward_passes.jl
+++ b/src/plugins/forward_passes.jl
@@ -136,7 +136,7 @@ end
"""
    RevisitingForwardPass(
        period::Int = 500;
-        sub_pass::AbstractForwardPass = DefaultForwardPass()
+        sub_pass::AbstractForwardPass = DefaultForwardPass(),
    )

A forward pass scheme that generates `period` new forward passes (using
diff --git a/src/plugins/headers.jl b/src/plugins/headers.jl
index 4693e8724..30ee02137 100644
--- a/src/plugins/headers.jl
+++ b/src/plugins/headers.jl
@@ -16,12 +16,14 @@ You need to define the following methods:
abstract type AbstractRiskMeasure end

"""
    adjust_probability(
        measure::Expectation,
        risk_adjusted_probability::Vector{Float64},
        original_probability::Vector{Float64},
        noise_support::Vector{Noise{T}},
        
objective_realizations::Vector{Float64}, + is_minimization::Bool, + ) where {T} """ function adjust_probability end @@ -38,7 +40,7 @@ You need to define the following methods: abstract type AbstractSamplingScheme end """ - sample_scenario(graph::PolicyGraph{T}, ::AbstractSamplingScheme) where T + sample_scenario(graph::PolicyGraph{T}, ::AbstractSamplingScheme) where {T} Sample a scenario from the policy graph `graph` based on the sampling scheme. @@ -68,8 +70,10 @@ abstract type AbstractBellmanFunction end """ initialize_bellman_function( - ::Type{F}, graph::PolicyGraph{T}, node::Node{T} - ) where {F<:AbstractBellmanFunction, T} + ::Type{F}, + graph::PolicyGraph{T}, + node::Node{T}, + ) where {F<:AbstractBellmanFunction,T} Return an instance of the Bellman function F for `node` in the policy graph `graph`. @@ -77,16 +81,17 @@ Return an instance of the Bellman function F for `node` in the policy graph function initialize_bellman_function end """ - refine_bellman_function(graph::PolicyGraph{T}, - node::Node{T}, - bellman_function::AbstractBellmanFunction, - risk_measure::AbstractRiskMeasure, - state::Dict{Symbol, Float64}, - dual_variables::Vector{Dict{Symbol, Float64}}, - noise_supports::Vector{<:Noise}, - original_probability::Vector{Float64}, - objective_realizations::Vector{Float64} - ) where T + refine_bellman_function( + graph::PolicyGraph{T}, + node::Node{T}, + bellman_function::AbstractBellmanFunction, + risk_measure::AbstractRiskMeasure, + state::Dict{Symbol, Float64}, + dual_variables::Vector{Dict{Symbol,Float64}}, + noise_supports::Vector{<:Noise}, + original_probability::Vector{Float64}, + objective_realizations::Vector{Float64}, + ) where {T} """ function refine_bellman_function end @@ -118,12 +123,14 @@ Return a symbol describing the stopping rule. function stopping_rule_status end """ - convergence_test(model::PolicyGraph, log::Vector{Log}, ::AbstractStoppingRule)::Bool + convergence_test( + model::PolicyGraph, + log::Vector{Log}, + ::AbstractStoppingRule, + )::Bool Return a `Bool` indicating if the algorithm should terminate the training. """ -function convergence_test end - function convergence_test( graph::PolicyGraph, log::Vector{Log}, @@ -152,7 +159,7 @@ abstract type AbstractBackwardSamplingScheme end """ sample_backward_noise_terms( backward_sampling_scheme::AbstractBackwardSamplingScheme, - node::Node{T} + node::Node{T}, )::Vector{Noise} Returns a `Vector{Noise}` of noises sampled from `node.noise_terms` using @@ -202,7 +209,9 @@ abstract type AbstractParallelScheme end """ master_loop( - ::AbstractParallelScheme, model::PolicyGraph{T}, options::Options + ::AbstractParallelScheme, + model::PolicyGraph{T}, + options::Options, )::Symbol where {T} The solve loop of the SDDP algorithm. Returns a symbol corresponding to the @@ -245,6 +254,6 @@ Return a forward pass as a named tuple with the following fields: cumulative_value, ) -See `DefaultForwardPass` for details. +See [`DefaultForwardPass`](@ref) for details. """ function forward_pass end diff --git a/src/plugins/parallel_schemes.jl b/src/plugins/parallel_schemes.jl index 07f309c58..b456a9582 100644 --- a/src/plugins/parallel_schemes.jl +++ b/src/plugins/parallel_schemes.jl @@ -17,7 +17,9 @@ end Run SDDP in serial mode. 
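## Examples

A minimal hedged sketch (`model` is assumed to be an existing `PolicyGraph`);
`Serial()` is the default `parallel_scheme` of [`SDDP.train`](@ref), so passing
it explicitly is optional:

```julia
SDDP.train(model; iteration_limit = 10, parallel_scheme = SDDP.Serial())
```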
""" struct Serial <: AbstractParallelScheme end + Base.show(io::IO, ::Serial) = print(io, "serial mode") + interrupt(::Serial) = nothing function master_loop( @@ -33,6 +35,7 @@ function master_loop( return result.status end end + return end function _simulate( @@ -60,8 +63,9 @@ end Run SDDP in asynchronous mode workers with pid's `slave_pids`. -After initializing the models on each worker, call `init_callback(model)`. Note that -`init_callback` is run _locally on the worker_ and _not_ on the master thread. +After initializing the models on each worker, call `init_callback(model)`. Note +that `init_callback` is run _locally on the worker_ and _not_ on the master +thread. """ function Asynchronous( init_callback::Function, @@ -154,6 +158,7 @@ function slave_loop( catch ex trap_error(ex) end + return end trap_error(ex::Exception) = throw(ex) @@ -255,6 +260,7 @@ function master_loop( return status end end + return end function _simulate( @@ -280,4 +286,5 @@ function _simulate( return _simulate(model, variables; kwargs...) end end + return end diff --git a/src/plugins/risk_measures.jl b/src/plugins/risk_measures.jl index e5d1fec2d..8b7876b98 100644 --- a/src/plugins/risk_measures.jl +++ b/src/plugins/risk_measures.jl @@ -135,7 +135,7 @@ end Create a weighted combination of risk measures. -### Examples +## Examples SDDP.ConvexCombination( (0.5, SDDP.Expectation()), @@ -161,6 +161,7 @@ function Base.show(io::IO, measure::ConvexCombination) print(io, m[1], " * ", m[2]) is_first = false end + return end import Base: +, * @@ -270,7 +271,7 @@ where 3. r is a user specified radius - the larger the radius, the more conservative the policy. -## Notes +## Notes The largest radius that will work with S scenarios is sqrt((S-1)/S). diff --git a/src/plugins/sampling_schemes.jl b/src/plugins/sampling_schemes.jl index 38d863930..83f99ac62 100644 --- a/src/plugins/sampling_schemes.jl +++ b/src/plugins/sampling_schemes.jl @@ -17,7 +17,7 @@ end max_depth::Int = 0, terminate_on_cycle::Function = false, terminate_on_dummy_leaf::Function = true, - rollout_limit::Function = (i::Int) -> typemax(Int) + rollout_limit::Function = (i::Int) -> typemax(Int), ) A Monte Carlo sampling scheme using the in-sample data from the policy graph @@ -107,25 +107,31 @@ then `max_depth` must be set > 0. You can use `rollout_limit` to set iteration specific depth limits. For example: - OutOfSampleMonteCarlo(rollout_limit = i -> 2 * i) +```julia +OutOfSampleMonteCarlo(rollout_limit = i -> 2 * i) +``` -### Example +## Examples - # Given linear policy graph `graph` with `T` stages: - sampler = OutOfSampleMonteCarlo(graph) do node - if node == 0 - return [SDDP.Noise(1, 1.0)] - else - noise_terms = [SDDP.Noise(node, 0.3), SDDP.Noise(node + 1, 0.7)] - children = node < T ? [SDDP.Noise(node + 1, 0.9)] : SDDP.Noise{Int}[] - return children, noise_terms - end +Given linear policy graph `graph` with `T` stages: +```julia +sampler = OutOfSampleMonteCarlo(graph) do node + if node == 0 + return [SDDP.Noise(1, 1.0)] + else + noise_terms = [SDDP.Noise(node, 0.3), SDDP.Noise(node + 1, 0.7)] + children = node < T ? 
        return children, noise_terms
    end
end
```

Given a linear policy graph `graph` with `T` stages:
```julia
sampler = OutOfSampleMonteCarlo(graph, use_insample_transition=true) do node
    return [SDDP.Noise(node, 0.3), SDDP.Noise(node + 1, 0.7)]
end
```
"""
function OutOfSampleMonteCarlo(
    f::Function,
@@ -313,7 +319,7 @@ end

A sampling scheme that samples a scenario from the vector of scenarios
`scenarios` according to `probability`.

-### Example
+## Examples

```julia
Historical(
@@ -346,7 +352,7 @@ end

A deterministic sampling scheme that iterates through the vector of provided
`scenarios`.

-## Example
+## Examples

```julia
Historical([
@@ -365,7 +371,7 @@ end

A deterministic sampling scheme that always samples `scenario`.

-## Example
+## Examples

```julia
Historical([(1, 0.5), (2, 1.5), (3, 0.75)])
diff --git a/src/plugins/stopping_rules.jl b/src/plugins/stopping_rules.jl
index 0afde2d94..1d760d471 100644
--- a/src/plugins/stopping_rules.jl
+++ b/src/plugins/stopping_rules.jl
@@ -16,11 +16,7 @@ end

stopping_rule_status(::IterationLimit) = :iteration_limit

-function convergence_test(
-    graph::PolicyGraph,
-    log::Vector{Log},
-    rule::IterationLimit,
-)
+function convergence_test(::PolicyGraph, log::Vector{Log}, rule::IterationLimit)
    return log[end].iteration >= rule.limit
end

@@ -37,15 +33,19 @@ end

stopping_rule_status(::TimeLimit) = :time_limit

-function convergence_test(graph::PolicyGraph, log::Vector{Log}, rule::TimeLimit)
+function convergence_test(::PolicyGraph, log::Vector{Log}, rule::TimeLimit)
    return log[end].time >= rule.limit
end

# ========================= Statistical Stopping Rule ======================== #

"""
-    Statistical(; num_replications, iteration_period = 1, z_score = 1.96,
-        verbose = true)
+    Statistical(;
+        num_replications,
+        iteration_period = 1,
+        z_score = 1.96,
+        verbose = true,
+    )

Perform an in-sample Monte Carlo simulation of the policy with
`num_replications` replications every `iteration_period` iterations. Terminate
if the
@@ -105,7 +105,8 @@ function convergence_test(
    elseif graph.objective_sense == MOI.MAX_SENSE
        return current_bound <= sample_mean + sample_ci
    else
-        #If sense is none of the above for some awkward reason, return to previous criteria
+        # If sense is none of the above for some awkward reason, return to
+        # previous criteria
        return sample_mean - sample_ci <=
               current_bound <=
               sample_mean + sample_ci
@@ -167,7 +168,7 @@ Terminate once all of the `rules` are satisfied.

This stopping rule short-circuits, so subsequent rules are only tested if the
previous rules pass.

-## Example
+## Examples

A stopping rule that runs 100 iterations, then checks for the bound stalling:
```julia
StoppingChain(IterationLimit(100), BoundStalling(5, 0.1))
```
"""
struct StoppingChain <: AbstractStoppingRule
    rules::Vector{AbstractStoppingRule}
+
    function StoppingChain(rules::AbstractStoppingRule...)
        return new(collect(rules))
    end
diff --git a/src/print.jl b/src/print.jl
index 1a3080c13..86d95fe98 100644
--- a/src/print.jl
+++ b/src/print.jl
@@ -129,10 +129,11 @@ function print_problem_statistics(
end

function print_iteration_header(io)
-    return println(
+    println(
        io,
        " Iteration Simulation Bound Time (s) Proc. 
ID # Solves", ) + return end print_value(x::Real) = lpad(Printf.@sprintf("%1.6e", x), 13) @@ -145,7 +146,8 @@ function print_iteration(io, log::Log) print(io, " ", print_value(log.time)) print(io, " ", print_value(log.pid)) print(io, " ", print_value(log.total_solves)) - return println(io) + println(io) + return end function print_footer(io, training_results::TrainingResults) @@ -283,23 +285,28 @@ function _update_range(range::Vector{Float64}, func::JuMP.GenericAffExpr) for coefficient in values(func.terms) _update_range(range, coefficient) end + return end function _update_range(range::Vector{Float64}, func::MOI.LessThan) - return _update_range(range, func.upper) + _update_range(range, func.upper) + return end function _update_range(range::Vector{Float64}, func::MOI.GreaterThan) - return _update_range(range, func.lower) + _update_range(range, func.lower) + return end function _update_range(range::Vector{Float64}, func::MOI.EqualTo) - return _update_range(range, func.value) + _update_range(range, func.value) + return end function _update_range(range::Vector{Float64}, func::MOI.Interval) _update_range(range, func.upper) - return _update_range(range, func.lower) + _update_range(range, func.lower) + return end # Default fallback for unsupported constraints. @@ -328,8 +335,13 @@ function _coefficient_ranges(model::JuMP.Model) end """ - numerical_stability_report([io::IO=stdout,] model::PolicyGraph, - by_node::Bool=false, print=true, warn::Bool=true) + numerical_stability_report( + [io::IO=stdout,] + model::PolicyGraph, + by_node::Bool = false, + print::Bool = true, + warn::Bool = true, + ) Print a report identifying possible numeric stability issues. @@ -397,7 +409,8 @@ Assumes that the model has been trained via [`SDDP.train`](@ref). function write_log_to_csv(model::PolicyGraph, filename::String) if model.most_recent_training_results === nothing error( - "Unable to write the log to file because the model has not been trained.", + "Unable to write the log to file because the model has not " * + "been trained.", ) end open(filename, "w") do io @@ -415,4 +428,5 @@ function write_log_to_csv(model::PolicyGraph, filename::String) ) end end + return end diff --git a/src/user_interface.jl b/src/user_interface.jl index 241658171..ee5f379f1 100644 --- a/src/user_interface.jl +++ b/src/user_interface.jl @@ -64,6 +64,7 @@ function Base.show(io::IO, graph::Graph) end println(io) end + return end # Internal function used to validate the structure of a graph @@ -95,16 +96,19 @@ function _validate_graph(graph::Graph) ) end end + return end """ - add_node(graph::Graph{T}, node::T) where T + add_node(graph::Graph{T}, node::T) where {T} Add a node to the graph `graph`. -### Examples +## Examples - add_node(graph, :A) +```julia +add_node(graph, :A) +``` """ function add_node(graph::Graph{T}, node::T) where {T} if haskey(graph.nodes, node) || node == graph.root_node @@ -113,19 +117,22 @@ function add_node(graph::Graph{T}, node::T) where {T} graph.nodes[node] = Tuple{T,Float64}[] return end + function add_node(graph::Graph{T}, node) where {T} return error("Unable to add node $(node). Nodes must be of type $(T).") end """ - add_edge(graph::Graph{T}, edge::Pair{T, T}, probability::Float64) where T + add_edge(graph::Graph{T}, edge::Pair{T, T}, probability::Float64) where {T} Add an edge to the graph `graph`. 
-### Examples
+## Examples

-    add_edge(graph, 1 => 2, 0.9)
-    add_edge(graph, :root => :A, 1.0)
+```julia
+add_edge(graph, 1 => 2, 0.9)
+add_edge(graph, :root => :A, 1.0)
+```
"""
function add_edge(
    graph::Graph{T},
@@ -146,7 +153,11 @@ function add_edge(
end

"""
    add_ambiguity_set(
        graph::Graph{T},
        set::Vector{T},
        lipschitz::Vector{Float64},
    ) where {T}

Add `set` to the belief partition of `graph`.

`lipschitz` is a vector of Lipschitz constants, one for each node
in `set`. The Lipschitz constant is the maximum slope of the cost-to-go
function with respect to the belief state associated with each node at any
point in the state-space.

-### Examples
+## Examples

-    graph = LinearGraph(3)
-    add_ambiguity_set(graph, [1, 2], [1e3, 1e2])
-    add_ambiguity_set(graph, [3], [1e5])
+```julia
+graph = LinearGraph(3)
+add_ambiguity_set(graph, [1, 2], [1e3, 1e2])
+add_ambiguity_set(graph, [3], [1e5])
+```
"""
function add_ambiguity_set(
    graph::Graph{T},
@@ -188,11 +201,13 @@ Add `set` to the belief partition of `graph`.
constant is the maximum slope of the cost-to-go function with respect to the
belief state associated with each node at any point in the state-space.

-### Examples
+## Examples

-    graph = LinearGraph(3)
-    add_ambiguity_set(graph, [1, 2], 1e3)
-    add_ambiguity_set(graph, [3], 1e5)
+```julia
+graph = LinearGraph(3)
+add_ambiguity_set(graph, [1, 2], 1e3)
+add_ambiguity_set(graph, [3], 1e5)
+```
"""
function add_ambiguity_set(
    graph::Graph{T},
@@ -234,12 +249,12 @@ end

Construct a Markovian graph from the vector of transition matrices.

-`transition_matrices[t][i, j]` gives the probability of transitioning from Markov state `i`
-in stage `t - 1` to Markov state `j` in stage `t`.
+`transition_matrices[t][i, j]` gives the probability of transitioning from
+Markov state `i` in stage `t - 1` to Markov state `j` in stage `t`.

The dimension of the first transition matrix should be `(1, N)`, and
-`transition_matrics[1][1, i]` is the probability of transitioning from the root node to the
-Markov state `i`.
+`transition_matrices[1][1, i]` is the probability of transitioning from the root
+node to the Markov state `i`.
"""
function MarkovianGraph(transition_matrices::Vector{Matrix{Float64}})
    if size(transition_matrices[1], 1) != 1
@@ -292,18 +307,18 @@ end
    MarkovianGraph(;
        stages::Int,
        transition_matrix::Matrix{Float64},
-        root_node_transition::Vector{Float64}
+        root_node_transition::Vector{Float64},
    )

-Construct a Markovian graph object with `stages` number of stages and time-independent
-Markov transition probabilities.
+Construct a Markovian graph object with `stages` number of stages and
+time-independent Markov transition probabilities.

-`transition_matrix` must be a square matrix, and the probability of transitioning from
-Markov state `i` in stage `t` to Markov state `j` in stage `t + 1` is given by
-`transition_matrix[i, j]`.
+`transition_matrix` must be a square matrix, and the probability of
+transitioning from Markov state `i` in stage `t` to Markov state `j` in stage
+`t + 1` is given by `transition_matrix[i, j]`.

-`root_node_transition[i]` is the probability of transitioning from the root node to Markov
-state `i` in the first stage.
+`root_node_transition[i]` is the probability of transitioning from the root node
+to Markov state `i` in the first stage.
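## Examples

A hedged sketch of a three-stage graph with two Markov states per stage; the
probabilities below are purely illustrative:

```julia
graph = SDDP.MarkovianGraph(;
    stages = 3,
    transition_matrix = [0.8 0.2; 0.2 0.8],
    root_node_transition = [0.5, 0.5],
)
```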
""" function MarkovianGraph(; stages::Int = 1, @@ -402,7 +417,8 @@ function Base.show(io::IO, node::Node) println(io, "Node $(node.index)") println(io, " # State variables : ", length(node.states)) println(io, " # Children : ", length(node.children)) - return println(io, " # Noise terms : ", length(node.noise_terms)) + println(io, " # Noise terms : ", length(node.noise_terms)) + return end function pre_optimize_hook(f::Function, node::Node) @@ -477,6 +493,7 @@ function Base.show(io::IO, graph::PolicyGraph) else println(io, " Node indices: ", nodes[1], ", ..., ", nodes[end]) end + return end # So we can query nodes in the graph as graph[node]. @@ -489,12 +506,12 @@ function construct_subproblem(optimizer_factory, direct_mode::Bool) if direct_mode return JuMP.direct_model(MOI.instantiate(optimizer_factory)) else - return JuMP.Model() # optimizer_factory) + return JuMP.Model() end end # Work around different JuMP modes (Automatic / Manual / Direct). -function construct_subproblem(optimizer_factory::Nothing, direct_mode::Bool) +function construct_subproblem(::Nothing, direct_mode::Bool) if direct_mode error( "You must specify an optimizer in the form:\n" * @@ -522,21 +539,23 @@ end """ MarkovianPolicyGraph( builder::Function; - transition_matrices::Vector{Array{Float64, 2}}, + transition_matrices::Vector{Array{Float64,2}}, kwargs... ) Create a Markovian policy graph based on the transition matrices given in `transition_matrices`. -`transition_matrices[t][i, j]` gives the probability of transitioning from Markov state `i` -in stage `t - 1` to Markov state `j` in stage `t`. +`transition_matrices[t][i, j]` gives the probability of transitioning from +Markov state `i` in stage `t - 1` to Markov state `j` in stage `t`. The dimension of the first transition matrix should be `(1, N)`, and -`transition_matrics[1][1, i]` is the probability of transitioning from the root node to the -Markov state `i`. +`transition_matrics[1][1, i]` is the probability of transitioning from the root +node to the Markov state `i`. + +See [`SDDP.MarkovianGraph`](@ref) for other ways of specifying a Markovian +policy graph. -See [`SDDP.MarkovianGraph`](@ref) for other ways of specifying a Markovian policy graph. See [`SDDP.PolicyGraph`](@ref) for the other keyword arguments. """ function MarkovianPolicyGraph( @@ -562,30 +581,34 @@ end Construct a policy graph based on the graph structure of `graph`. (See [`SDDP.Graph`](@ref) for details.) -# Example +## Examples - function builder(subproblem::JuMP.Model, index) - # ... subproblem definition ... - end +```julia +function builder(subproblem::JuMP.Model, index) + # ... subproblem definition ... +end - model = PolicyGraph( - builder, - graph; - lower_bound = 0.0, - optimizer = GLPK.Optimizer, - direct_mode = false - ) +model = PolicyGraph( + builder, + graph; + lower_bound = 0.0, + optimizer = GLPK.Optimizer, + direct_mode = false +) +``` Or, using the Julia `do ... end` syntax: - model = PolicyGraph( - graph; - lower_bound = 0.0, - optimizer = GLPK.Optimizer, - direct_mode = true - ) do subproblem, index - # ... subproblem definitions ... - end +```julia +model = PolicyGraph( + graph; + lower_bound = 0.0, + optimizer = GLPK.Optimizer, + direct_mode = true +) do subproblem, index + # ... subproblem definitions ... 
end
```
"""
function PolicyGraph(
    builder::Function,
@@ -744,6 +767,7 @@ function initialize_belief_states(
            end
        end
    end
+    return
end

# Internal function: When created, θ has bounds of [-M, M], but, since we are
@@ -754,7 +778,7 @@ function add_initial_bounds(node, μ::Dict)
    θ = bellman_term(node.bellman_function)
    lower_bound = JuMP.has_lower_bound(θ) ? JuMP.lower_bound(θ) : -Inf
    upper_bound = JuMP.has_upper_bound(θ) ? JuMP.upper_bound(θ) : Inf
-    for (key, variable) in μ
+    for (_, variable) in μ
        if lower_bound > -Inf
            @constraint(node.subproblem, variable + θ >= lower_bound)
        end
@@ -762,6 +786,7 @@ function add_initial_bounds(node, μ::Dict)
        if upper_bound < Inf
            @constraint(node.subproblem, variable + θ <= upper_bound)
        end
    end
+    return
end

# Internal function: helper to get the node given a subproblem.
@@ -775,11 +800,12 @@ function get_policy_graph(subproblem::JuMP.Model)
end

"""
    parameterize(
        modify::Function,
        subproblem::JuMP.Model,
        realizations::Vector{T},
        probability::Vector{Float64} = fill(1.0 / length(realizations), length(realizations)),
    ) where {T}

Add a parameterization function `modify` to `subproblem`. The `modify` function
takes one argument and modifies `subproblem` based on the realization of the
noise sampled from `realizations` with corresponding probabilities
`probability`.

In order to conduct an out-of-sample simulation, `modify` should accept
arguments that are not in `realizations` (but still of type `T`).

-# Example
+## Examples

-    SDDP.parameterize(subproblem, [1, 2, 3], [0.4, 0.3, 0.3]) do ω
-        JuMP.set_upper_bound(x, ω)
-    end
+```julia
+SDDP.parameterize(subproblem, [1, 2, 3], [0.4, 0.3, 0.3]) do ω
+    JuMP.set_upper_bound(x, ω)
+end
+```
"""
function parameterize(
    modify::Function,
@@ -823,9 +851,11 @@ end

Set the stage-objective of `subproblem` to `stage_objective`.

-# Example
+## Examples

-    SDDP.set_stage_objective(subproblem, 2x + 1)
+```julia
+SDDP.set_stage_objective(subproblem, 2x + 1)
+```
"""
function set_stage_objective(
    subproblem::JuMP.Model,
@@ -849,9 +879,11 @@ end

Set the stage-objective of `subproblem` to `expr`.

-### Example
+## Examples

-    @stageobjective(subproblem, 2x + y)
+```julia
+@stageobjective(subproblem, 2x + y)
+```
"""
macro stageobjective(subproblem, expr)
    code = MutableArithmetics.rewrite_and_return(expr)
@@ -885,7 +917,7 @@ Setting tight values for these optional variables can significantly improve the
speed of convergence.

If the objective state is `N`-dimensional, each keyword argument must be an
-`NTuple{N, Float64}`. For example, `initial_value = (0.0, 1.0)`.
+`NTuple{N,Float64}`. For example, `initial_value = (0.0, 1.0)`.
"""
function add_objective_state(
    update::Function,
@@ -963,14 +995,12 @@ Can only be called from [`SDDP.parameterize`](@ref).
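## Examples

A hedged sketch: it assumes an objective state (a price, say) was created with
[`add_objective_state`](@ref), and that `x` is a state variable of
`subproblem`:

```julia
SDDP.parameterize(subproblem, [1, 2, 3], [0.4, 0.3, 0.3]) do ω
    price = SDDP.objective_state(subproblem)
    @stageobjective(subproblem, price * x.out + ω)
end
```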
""" function objective_state(subproblem::JuMP.Model) objective_state = get_node(subproblem).objective_state - if objective_state !== nothing - if length(objective_state.state) == 1 - return objective_state.state[1] - else - return objective_state.state - end - else + if objective_state === nothing error("No objective state defined.") + elseif length(objective_state.state) == 1 + return objective_state.state[1] + else + return objective_state.state end end @@ -1009,7 +1039,7 @@ following signature and returns the outgoing belief: incoming_belief::Dict{T, Float64}, observed_partition::Int, observed_noise - )::Dict{T, Float64} + )::Dict{T,Float64} We use Bayes theorem: P(X′ | Y) = P(Y | X′) × P(X′) / P(Y), where P(Xᵢ′ | Y) is the probability of being in node i given the observation of ω. In addition diff --git a/src/visualization/publication_plot.jl b/src/visualization/publication_plot.jl index 1a35a8a04..5dc9257bf 100644 --- a/src/visualization/publication_plot.jl +++ b/src/visualization/publication_plot.jl @@ -29,7 +29,7 @@ Create a `Plots.jl` recipe plot of the simulations. See `Plots.jl` for the list of keyword arguments. -### Example +## Examples SDDP.publication_plot(simulations; title = "My title") do data return data[:stage_objective] diff --git a/src/visualization/spaghetti_plot.jl b/src/visualization/spaghetti_plot.jl index a955e65e7..606ba1a8b 100644 --- a/src/visualization/spaghetti_plot.jl +++ b/src/visualization/spaghetti_plot.jl @@ -67,13 +67,15 @@ Add a new figure to the SpaghettiPlot `plt`, where the y-value of the Defaults to `"linear"` see [the d3 docs](https://github.com/d3/d3-3.x-api-reference/blob/master/SVG-Shapes.md#line_interpolate) for all options. -# Examples +## Examples - simulations = simulate(model, 10) - plt = SDDP.spaghetti_plot(simulations) - SDDP.add_spaghetti(plt; title = "Stage objective") do data - return data[:stage_objective] - end +```julia +simulations = simulate(model, 10) +plt = SDDP.spaghetti_plot(simulations) +SDDP.add_spaghetti(plt; title = "Stage objective") do data + return data[:stage_objective] +end +``` """ function add_spaghetti( data_function::Function, diff --git a/src/visualization/value_functions.jl b/src/visualization/value_functions.jl index 254a67727..f48133eee 100644 --- a/src/visualization/value_functions.jl +++ b/src/visualization/value_functions.jl @@ -193,7 +193,7 @@ Evaluate the value function `V` at `point` in the state-space. Returns a tuple containing the height of the function, and the subgradient w.r.t. the convex state-variables. -## Example +## Examples ```julia evaluate(V, Dict(:volume => 1.0)) @@ -259,7 +259,7 @@ end Evalute the value function `V` at the point in the state-space specified by `kwargs`. -## Example +## Examples evaluate(V; volume = 1) """