diff --git a/docs/src/tutorial/15_performance.md b/docs/src/tutorial/15_performance.md index b9f19a964..ba88100de 100644 --- a/docs/src/tutorial/15_performance.md +++ b/docs/src/tutorial/15_performance.md @@ -36,3 +36,17 @@ speed-ups. pay to change these. In particular, forcing solvers to use the dual simplex algorithm (e.g., [`Method=1` in Gurobi](https://www.gurobi.com/documentation/8.1/refman/method.html) ) is usually a performance win. + +## Average cut vs multi-cut + +There are two competing ways that cuts can be created in SDDP: _average_ cut and +_multi_-cut. These can be specified as follows +```julia +SDDP.train(model; cut_type = SDDP.AVERAGE_CUT) +SDDP.train(model; cut_type = SDDP.MULTI_CUT) +``` + +The performance of each method is problem-dependent. We recommend that you try +both in order to see which one performs better. In general, the _average_ cut +method works better when the number of realizations of the stagewise-independent +random variable is large. diff --git a/docs/src/upgrading_guide.md b/docs/src/upgrading_guide.md index e98db0c0e..f2e97f579 100644 --- a/docs/src/upgrading_guide.md +++ b/docs/src/upgrading_guide.md @@ -5,6 +5,7 @@ v1.0. Some of the highlights of the new release include + - Support for "multi-cut"s - Support for stagewise-independent noise in the constraint matrix - A way to simulate out-of-sample realizations of the stagewise-independent noise terms diff --git a/examples/FAST_hydro_thermal.jl b/examples/FAST_hydro_thermal.jl index f0b0ca942..2ddc8e5a4 100644 --- a/examples/FAST_hydro_thermal.jl +++ b/examples/FAST_hydro_thermal.jl @@ -12,7 +12,7 @@ using SDDP, GLPK, Test function fast_hydro_thermal() model = SDDP.PolicyGraph(SDDP.LinearGraph(2), - bellman_function = SDDP.AverageCut(lower_bound = 0.0), + bellman_function = SDDP.BellmanFunction(lower_bound = 0.0), optimizer = with_optimizer(GLPK.Optimizer) ) do sp, t @variable(sp, 0 <= x <= 8, SDDP.State, initial_value = 0.0) diff --git a/examples/FAST_production_management.jl b/examples/FAST_production_management.jl index 8d1effedc..b7db60ee6 100644 --- a/examples/FAST_production_management.jl +++ b/examples/FAST_production_management.jl @@ -10,14 +10,15 @@ using SDDP, GLPK, Test -function fast_production_management() +function fast_production_management(;cut_type) DEMAND = [2, 10] H = 3 N = 2 C = [0.2, 0.7] S = 2 .+ [0.33, 0.54] model = SDDP.PolicyGraph(SDDP.LinearGraph(H), - bellman_function = SDDP.AverageCut(lower_bound = -50.0), + bellman_function = SDDP.BellmanFunction( + lower_bound = -50.0, cut_type = cut_type), optimizer = with_optimizer(GLPK.Optimizer) ) do sp, t @variable(sp, x[1:N] >= 0, SDDP.State, initial_value = 0.0) @@ -38,4 +39,5 @@ function fast_production_management() @test SDDP.calculate_bound(model) ≈ -23.96 atol = 1e-2 end -fast_production_management() +fast_production_management(cut_type = SDDP.AVERAGE_CUT) +fast_production_management(cut_type = SDDP.MULTI_CUT) diff --git a/examples/FAST_quickstart.jl b/examples/FAST_quickstart.jl index 36248937b..d8112f829 100644 --- a/examples/FAST_quickstart.jl +++ b/examples/FAST_quickstart.jl @@ -12,7 +12,7 @@ using SDDP, GLPK, Test function fast_quickstart() model = SDDP.PolicyGraph(SDDP.LinearGraph(2), - bellman_function = SDDP.AverageCut(lower_bound = -5), + bellman_function = SDDP.BellmanFunction(lower_bound = -5), optimizer = with_optimizer(GLPK.Optimizer) ) do sp, t @variable(sp, x >= 0, SDDP.State, initial_value = 0.0) diff --git a/examples/StochDynamicProgramming.jl_multistock.jl b/examples/StochDynamicProgramming.jl_multistock.jl 
index ba534f80e..c7e6bb3b0 100644 --- a/examples/StochDynamicProgramming.jl_multistock.jl +++ b/examples/StochDynamicProgramming.jl_multistock.jl @@ -23,7 +23,7 @@ using SDDP, GLPK, Test function test_multistock_example() model = SDDP.PolicyGraph(SDDP.LinearGraph(5), optimizer = with_optimizer(GLPK.Optimizer), - bellman_function = SDDP.AverageCut(lower_bound = -5) + bellman_function = SDDP.BellmanFunction(lower_bound = -5) ) do subproblem, stage @variable(subproblem, 0 <= stock[i=1:3] <= 1, SDDP.State, initial_value = 0.5) diff --git a/examples/StochDynamicProgramming.jl_stock.jl b/examples/StochDynamicProgramming.jl_stock.jl index be0bcb435..bdf7f185e 100644 --- a/examples/StochDynamicProgramming.jl_stock.jl +++ b/examples/StochDynamicProgramming.jl_stock.jl @@ -22,7 +22,7 @@ using SDDP, GLPK, Test function stock_example() model = SDDP.PolicyGraph(SDDP.LinearGraph(5), - bellman_function = SDDP.AverageCut(lower_bound = -2), + bellman_function = SDDP.BellmanFunction(lower_bound = -2), optimizer = with_optimizer(GLPK.Optimizer) ) do sp, stage diff --git a/examples/StructDualDynProg.jl_prob5.2_2stages.jl b/examples/StructDualDynProg.jl_prob5.2_2stages.jl index 3f7e68e2d..4d10ec7d1 100644 --- a/examples/StructDualDynProg.jl_prob5.2_2stages.jl +++ b/examples/StructDualDynProg.jl_prob5.2_2stages.jl @@ -12,7 +12,7 @@ using SDDP, GLPK, Test function test_prob52_2stages() model = SDDP.PolicyGraph(SDDP.LinearGraph(2), - bellman_function = SDDP.AverageCut(lower_bound = 0.0), + bellman_function = SDDP.BellmanFunction(lower_bound = 0.0), optimizer = with_optimizer(GLPK.Optimizer) ) do subproblem, stage # ========== Problem data ========== diff --git a/examples/StructDualDynProg.jl_prob5.2_3stages.jl b/examples/StructDualDynProg.jl_prob5.2_3stages.jl index 02905d6d3..d814330a0 100644 --- a/examples/StructDualDynProg.jl_prob5.2_3stages.jl +++ b/examples/StructDualDynProg.jl_prob5.2_3stages.jl @@ -12,7 +12,7 @@ using SDDP, GLPK, Test function test_prob52_3stages() model = SDDP.PolicyGraph(SDDP.LinearGraph(3), - bellman_function = SDDP.AverageCut(lower_bound = 0.0), + bellman_function = SDDP.BellmanFunction(lower_bound = 0.0), optimizer = with_optimizer(GLPK.Optimizer) ) do sp, t n = 4 diff --git a/examples/agriculture_mccardle_farm.jl b/examples/agriculture_mccardle_farm.jl index 86ccccf3a..a1c5c8e12 100644 --- a/examples/agriculture_mccardle_farm.jl +++ b/examples/agriculture_mccardle_farm.jl @@ -55,7 +55,7 @@ function test_mccardle_farm_model() ]) model = SDDP.PolicyGraph(graph, - bellman_function = SDDP.AverageCut(lower_bound = 0.0), + bellman_function = SDDP.BellmanFunction(lower_bound = 0.0), optimizer = with_optimizer(GLPK.Optimizer) ) do subproblem, index stage, weather = index diff --git a/examples/asset_management_simple.jl b/examples/asset_management_simple.jl index d84878802..04e12d960 100644 --- a/examples/asset_management_simple.jl +++ b/examples/asset_management_simple.jl @@ -21,7 +21,7 @@ function asset_management_simple() [0.5 0.5; 0.5 0.5], [0.5 0.5; 0.5 0.5] ]), - bellman_function = SDDP.AverageCut(lower_bound = -1_000.0), + bellman_function = SDDP.BellmanFunction(lower_bound = -1_000.0), optimizer = with_optimizer(GLPK.Optimizer) ) do subproblem, index (stage, markov_state) = index diff --git a/examples/asset_management_stagewise.jl b/examples/asset_management_stagewise.jl new file mode 100644 index 000000000..188503990 --- /dev/null +++ b/examples/asset_management_stagewise.jl @@ -0,0 +1,64 @@ +# Copyright 2018, Oscar Dowson +# This Source Code Form is subject to the terms of the 
Mozilla Public +# License, v. 2.0. If a copy of the MPL was not distributed with this +# file, You can obtain one at http://mozilla.org/MPL/2.0/. +############################################################################# + +#== +Modified version of the Asset Management problem taken from + + J. R. Birge, F. Louveaux, Introduction to Stochastic Programming, + Springer Series in Operations Research and Financial Engineering, + Springer New York, New York, NY, 2011 +==# + +using SDDP, GLPK, Test + +function asset_management_stagewise(; cut_type) + ws = [1.25, 1.06] + wb = [1.14, 1.12] + Phi = [-1, 5] + Psi = [0.02, 0.0] + + model = SDDP.MarkovianPolicyGraph( + sense = :Max, + transition_matrices = Array{Float64, 2}[ + [1.0]', [0.5 0.5], [0.5 0.5; 0.5 0.5], [0.5 0.5; 0.5 0.5]], + bellman_function = SDDP.BellmanFunction( + upper_bound = 1000.0, cut_type = cut_type), + optimizer = with_optimizer(GLPK.Optimizer)) do subproblem, node + t, i = node + @variable(subproblem, xs >= 0, SDDP.State, initial_value=0) + @variable(subproblem, xb >= 0, SDDP.State, initial_value=0) + if t == 1 + @constraint(subproblem, xs.out + xb.out == 55 + xs.in + xb.in) + @stageobjective(subproblem, 0) + elseif t == 2 || t == 3 + @variable(subproblem, phi) + @constraint(subproblem, ws[i] * xs.in + wb[i] * xb.in + phi == xs.out + xb.out) + SDDP.parameterize(subproblem, [1, 2], [0.6, 0.4]) do ω + JuMP.fix(phi, Phi[ω]) + @stageobjective(subproblem, Psi[ω] * xs.out) + end + else + @variable(subproblem, u >= 0) + @variable(subproblem, v >= 0) + @constraint(subproblem, ws[i] * xs.in + wb[i] * xb.in + u - v == 80) + @stageobjective(subproblem, -4u + v) + end + end + SDDP.train(model; + iteration_limit = 100, + print_level = 0, + risk_measure = (node) -> begin + if node[1] != 3 + SDDP.Expectation() + else + SDDP.EAVaR(lambda = 0.5, beta=0.5) + end + end) + @test SDDP.calculate_bound(model) ≈ 1.278 atol=1e-3 +end + +asset_management_stagewise(cut_type = SDDP.AVERAGE_CUT) +asset_management_stagewise(cut_type = SDDP.MULTI_CUT) diff --git a/examples/biobjective_hydro.jl b/examples/biobjective_hydro.jl index 71503c7e2..218927079 100644 --- a/examples/biobjective_hydro.jl +++ b/examples/biobjective_hydro.jl @@ -7,7 +7,7 @@ using SDDP, GLPK, Statistics, Test function biobjective_hydro() model = SDDP.PolicyGraph(SDDP.LinearGraph(3), - bellman_function = SDDP.AverageCut(lower_bound = 0.0), + bellman_function = SDDP.BellmanFunction(lower_bound = 0.0), optimizer = with_optimizer(GLPK.Optimizer) ) do subproblem, stage @variable(subproblem, 0 <= v <= 200, SDDP.State, initial_value = 50) diff --git a/examples/complicated_hydro.jl b/examples/complicated_hydro.jl index dea712d54..e32543172 100644 --- a/examples/complicated_hydro.jl +++ b/examples/complicated_hydro.jl @@ -10,7 +10,7 @@ const POWER_KNOTS = [55.0, 65.0, 70.0] model = SDDP.LinearPolicyGraph( stages = T, sense = :Min, - bellman_function = SDDP.AverageCut( + bellman_function = SDDP.BellmanFunction( lower_bound = 0, deletion_minimum = 1_000_000 ), diff --git a/examples/infinite_horizon/ball_on_beam.jl b/examples/infinite_horizon/ball_on_beam.jl index e74a208ab..7ff46170f 100644 --- a/examples/infinite_horizon/ball_on_beam.jl +++ b/examples/infinite_horizon/ball_on_beam.jl @@ -13,7 +13,7 @@ function infinite_ball_on_beam() [(:root_node => :time_step, 1.0), (:time_step => :time_step, 0.999)] ) model = SDDP.PolicyGraph(graph, - bellman_function = SDDP.AverageCut(lower_bound = 0.0), + bellman_function = SDDP.BellmanFunction(lower_bound = 0.0), optimizer = with_optimizer(Ipopt.Optimizer, 
print_level = 0), direct_mode = false, sense = :Min diff --git a/examples/infinite_horizon/powder.jl b/examples/infinite_horizon/powder.jl index acea55f58..19d155b1c 100644 --- a/examples/infinite_horizon/powder.jl +++ b/examples/infinite_horizon/powder.jl @@ -44,7 +44,7 @@ function infinite_powder(; discount_factor = 0.75, stocking_rate::Float64 = NaN, model = SDDP.PolicyGraph(graph, sense = :Max, - bellman_function = SDDP.AverageCut(upper_bound = 1e5), + bellman_function = SDDP.BellmanFunction(upper_bound = 1e5), optimizer = with_optimizer(Gurobi.Optimizer, OutputFlag = 0) ) do subproblem, index # Unpack the node index. diff --git a/examples/infinite_horizon_hydro_thermal.jl b/examples/infinite_horizon_hydro_thermal.jl index 5c6fb80ae..ae80a2345 100644 --- a/examples/infinite_horizon_hydro_thermal.jl +++ b/examples/infinite_horizon_hydro_thermal.jl @@ -5,7 +5,7 @@ using SDDP, GLPK, Test, Statistics -function infinite_hydro_thermal() +function infinite_hydro_thermal(; cut_type) Ω = [ (inflow = 0.0, demand = 7.5), (inflow = 5.0, demand = 5), @@ -17,7 +17,8 @@ function infinite_hydro_thermal() [(:root_node => :week, 1.0), (:week => :week, 0.9)] ) model = SDDP.PolicyGraph(graph, - bellman_function = SDDP.AverageCut(lower_bound = 0), + bellman_function = SDDP.BellmanFunction( + lower_bound = 0, cut_type = cut_type), optimizer = with_optimizer(GLPK.Optimizer) ) do subproblem, node @variable(subproblem, @@ -55,4 +56,5 @@ function infinite_hydro_thermal() @test sample_mean - sample_ci <= 119.167 <= sample_mean + sample_ci end -infinite_hydro_thermal() +infinite_hydro_thermal(cut_type = SDDP.AVERAGE_CUT) +infinite_hydro_thermal(cut_type = SDDP.MULTI_CUT) diff --git a/examples/infinite_horizon_trivial.jl b/examples/infinite_horizon_trivial.jl index 855fa4cb7..852c7f3ff 100644 --- a/examples/infinite_horizon_trivial.jl +++ b/examples/infinite_horizon_trivial.jl @@ -12,7 +12,7 @@ function infinite_trivial() [(:root_node => :week, 1.0), (:week => :week, 0.9)] ) model = SDDP.PolicyGraph(graph, - bellman_function = SDDP.AverageCut(lower_bound = 0), + bellman_function = SDDP.BellmanFunction(lower_bound = 0), optimizer = with_optimizer(GLPK.Optimizer) ) do subproblem, node @variable(subproblem, state, SDDP.State, initial_value = 0) diff --git a/examples/inventory_management.jl b/examples/inventory_management.jl index 83d8e4d1f..989d15122 100644 --- a/examples/inventory_management.jl +++ b/examples/inventory_management.jl @@ -12,7 +12,7 @@ function infinite_lin_HD() [(:root_node => :week, 1.0), (:week => :week, 0.95)] ) model = SDDP.PolicyGraph(graph, - bellman_function = SDDP.AverageCut(lower_bound = 0), + bellman_function = SDDP.BellmanFunction(lower_bound = 0), optimizer = with_optimizer(GLPK.Optimizer) ) do subproblem, node @@ -63,7 +63,7 @@ function infinite_lin_DH() ] ) model = SDDP.PolicyGraph(graph, - bellman_function = SDDP.AverageCut(lower_bound = 0), + bellman_function = SDDP.BellmanFunction(lower_bound = 0), optimizer = with_optimizer(GLPK.Optimizer) ) do subproblem, node @variable(subproblem, -10 <= state <= 10, SDDP.State, initial_value = 0) diff --git a/examples/objective_state_newsvendor.jl b/examples/objective_state_newsvendor.jl index f3da40e21..a5ebd3aa1 100644 --- a/examples/objective_state_newsvendor.jl +++ b/examples/objective_state_newsvendor.jl @@ -31,11 +31,11 @@ function joint_distribution(; kwargs...) 
return distribution[:] end -function newsvendor_example() +function newsvendor_example(;cut_type) model = SDDP.PolicyGraph( SDDP.LinearGraph(3), sense = :Max, - bellman_function = SDDP.AverageCut(upper_bound = 50.0), + upper_bound = 50.0, optimizer = with_optimizer(GLPK.Optimizer) ) do subproblem, stage @variables(subproblem, begin @@ -57,17 +57,16 @@ function newsvendor_example() @stageobjective(subproblem, price * u) end end - return model + SDDP.train(model, + iteration_limit = 100, print_level = 0, time_limit = 20.0, + cut_type = cut_type) + @test SDDP.calculate_bound(model) ≈ 4.04 atol=0.05 + results = SDDP.simulate(model, 500) + objectives = [ + sum(s[:stage_objective] for s in simulation) for simulation in results + ] + @test round(Statistics.mean(objectives); digits = 2) ≈ 4.04 atol=0.1 end -model = newsvendor_example() -SDDP.train(model, iteration_limit = 100, print_level = 0) - -results = SDDP.simulate(model, 500) -objectives = [ - sum(s[:stage_objective] for s in simulation) for simulation in results -] -sample_mean = round(Statistics.mean(objectives); digits = 2) -sample_ci = round(1.96 * Statistics.std(objectives) / sqrt(500); digits = 2) -println("Confidence_interval = $(sample_mean) ± $(sample_ci)") -@test SDDP.calculate_bound(model) ≈ sample_mean atol = sample_ci +newsvendor_example(cut_type = SDDP.AVERAGE_CUT) +newsvendor_example(cut_type = SDDP.MULTI_CUT) diff --git a/examples/sddp.jl_not_updated/AssetManagement/asset_management_stagewise.jl b/examples/sddp.jl_not_updated/AssetManagement/asset_management_stagewise.jl deleted file mode 100644 index 764317627..000000000 --- a/examples/sddp.jl_not_updated/AssetManagement/asset_management_stagewise.jl +++ /dev/null @@ -1,84 +0,0 @@ -# Copyright 2018, Oscar Dowson -# This Source Code Form is subject to the terms of the Mozilla Public -# License, v. 2.0. If a copy of the MPL was not distributed with this -# file, You can obtain one at http://mozilla.org/MPL/2.0/. -############################################################################# - -#== -Modified version of the Asset Management problem taken from - - J. R. Birge, F. 
Louveaux, Introduction to Stochastic Programming, - Springer Series in Operations Research and Financial Engineering, - Springer New York, New York, NY, 2011 -==# - -using SDDP, JuMP, Clp, Base.Test - -ws = [1.25, 1.06] -wb = [1.14, 1.12] -Phi = [-1, 5] -Psi = [0.02, 0.0] - -m = SDDPModel( - sense = :Min, - stages = 4, - objective_bound = -1000.0, - solver = ClpSolver(), - markov_transition = Array{Float64, 2}[ - [1.0]', - [0.5 0.5], - [0.5 0.5; 0.5 0.5], - [0.5 0.5; 0.5 0.5] - ], - risk_measure = [Expectation(), Expectation(), EAVaR(lambda = 0.5, beta=0.5), Expectation()] - ) do sp, t, i - @state(sp, xs >= 0, xsbar==0) - @state(sp, xb >= 0, xbbar==0) - if t == 1 - @constraint(sp, xs + xb == 55 + xsbar + xbbar) - @stageobjective(sp, 0) - elseif t == 2 || t == 3 - @rhsnoise(sp, phi=Phi, ws[i] * xsbar + - wb[i] * xbbar + phi == xs + xb) - @stageobjective(sp, psi = Psi, -psi * xs) - setnoiseprobability!(sp, [0.6, 0.4]) - else - @variable(sp, u >= 0) - @variable(sp, v >= 0) - @constraint(sp, ws[i] * xsbar + wb[i] * xbbar + - u - v == 80) - @stageobjective(sp, 4u - v) - end -end - -srand(111) -status = solve(m, - iteration_limit = 100, - simulation = MonteCarloSimulation( - frequency = 5, - min = 100, - step = 100, - max = 500, - terminate = false - ), - bound_stalling = BoundStalling( - iterations = 5, - rtol = 0.0, - atol = 0.001 - ), - log_file="asset.log" -) -rm("asset.log") -@test status == :bound_stalling -@test isapprox(getbound(m), -1.278, atol=1e-3) - -# results = simulate(m, 100, [:xs, :xb]) -# -# p = SDDP.newplot() -# SDDP.addplot!(p, 1:100, 1:4, (i, t)->round(results[i][:stageobjective][t], 2), -# title="Objective", ylabel="Profit (\$)", cumulative=true) -# SDDP.addplot!(p, 1:100, 1:4, (i, t)->round(results[i][:xs][t], 2), -# title="Stocks") -# SDDP.addplot!(p, 1:100, 1:4, (i, t)->round(results[i][:xb][t], 2), -# title="Bonds") -# SDDP.show(p) diff --git a/src/algorithm.jl b/src/algorithm.jl index e2e44e9b5..dbee5269f 100644 --- a/src/algorithm.jl +++ b/src/algorithm.jl @@ -583,6 +583,7 @@ function train(model::PolicyGraph; stopping_rules = AbstractStoppingRule[], risk_measure = SDDP.Expectation(), sampling_scheme = SDDP.InSampleMonteCarlo(), + cut_type = SDDP.AVERAGE_CUT, cycle_discretization_delta = 0.0, refine_at_similar_nodes = true ) @@ -630,6 +631,10 @@ function train(model::PolicyGraph; cycle_discretization_delta, refine_at_similar_nodes ) + # Update the nodes with the selected cut type (AVERAGE_CUT or MULTI_CUT). + for (key, node) in model.nodes + node.bellman_function.cut_type = cut_type + end # The default status. This should never be seen by the user. status = :not_solved log = Log[] diff --git a/src/plugins/bellman_functions.jl b/src/plugins/bellman_functions.jl index 27dd5ac91..918152ff1 100644 --- a/src/plugins/bellman_functions.jl +++ b/src/plugins/bellman_functions.jl @@ -3,76 +3,172 @@ # v. 2.0. If a copy of the MPL was not distributed with this file, You can # obtain one at http://mozilla.org/MPL/2.0/. -# ============================== SDDP.AverageCut =============================== - mutable struct Cut - # The intercept of the cut. intercept::Float64 - # The coefficients on the state variables. coefficients::Dict{Symbol, Float64} - # A count of the number of points at which the cut is non-dominated. non_dominated_count::Int - # The constraint reference (if it is currently in the model). - constraint_ref::Union{Nothing, JuMP.ConstraintRef} # TODO(odow): improve type. 
+ constraint_ref::Union{Nothing, JuMP.ConstraintRef} end mutable struct SampledState - # A sampled point in state-space. state::Dict{Symbol, Float64} - # Current dominating cut. dominating_cut::Cut - # Current evaluation of the dominating cut at `state`. best_objective::Float64 end -struct AverageCut <: AbstractBellmanFunction - # The cost-to-go variable. - variable::JuMP.VariableRef - # Data for Level-One cut selection. +struct LevelOneOracle cuts::Vector{Cut} states::Vector{SampledState} - # Storage for cuts that can be purged. cuts_to_be_deleted::Vector{Cut} - # Minimum number of cuts to purge before activating JuMP.delete. deletion_minimum::Int + function LevelOneOracle(deletion_minimum) + return new(Cut[], SampledState[], Cut[], deletion_minimum) + end +end + +struct ConvexApproximation + theta::JuMP.VariableRef + states::Dict{Symbol, JuMP.VariableRef} + cut_oracle::LevelOneOracle + function ConvexApproximation(theta, states, deletion_minimum) + return new(theta, states, LevelOneOracle(deletion_minimum)) + end +end + +# Add the cut `V.θ ≥ θᵏ + ⟨πᵏ, x′ - xᵏ⟩`. +function _add_cut(V::ConvexApproximation, θᵏ, πᵏ, xᵏ, μᵀy=JuMP.AffExpr(0.0); cut_selection::Bool=true) + model = JuMP.owner_model(V.theta) + for (key, x) in xᵏ + θᵏ -= πᵏ[key] * xᵏ[key] + end + is_minimization = JuMP.objective_sense(model) == MOI.MIN_SENSE + cut = Cut(θᵏ, πᵏ, 1, nothing) + if μᵀy == JuMP.AffExpr(0.0) && cut_selection + _level_one_update(V.cut_oracle, cut, xᵏ, is_minimization) + _purge_cuts(V) + end + cut.constraint_ref = if is_minimization + @constraint(model, V.theta + μᵀy >= θᵏ + sum(πᵏ[i] * x for (i, x) in V.states)) + else + @constraint(model, V.theta + μᵀy <= θᵏ + sum(πᵏ[i] * x for (i, x) in V.states)) + end + return +end + +function _purge_cuts(V::ConvexApproximation) + model = JuMP.owner_model(V.theta) + if length(V.cut_oracle.cuts_to_be_deleted) >= V.cut_oracle.deletion_minimum + for cut in V.cut_oracle.cuts_to_be_deleted + JuMP.delete(model, cut.constraint_ref) + end + empty!(V.cut_oracle.cuts_to_be_deleted) + end + return end + +# Internal function: calculate the height of `cut` evaluated at `state`. +function _eval_height(cut::Cut, state::Dict{Symbol, Float64}) + height = cut.intercept + for (key, value) in cut.coefficients + height += value * state[key] + end + return height +end + +# Internal function: check if the candidate point dominates the incumbent. +function _dominates(candidate, incumbent, minimization::Bool) + return minimization ? candidate > incumbent : candidate < incumbent +end + +# Internal function: update the Level-One datastructures inside +# `bellman_function`. +function _level_one_update(oracle::LevelOneOracle, cut::Cut, + state::Dict{Symbol, Float64}, is_minimization::Bool) + sampled_state = SampledState(state, cut, _eval_height(cut, state)) + # Loop through previously sampled states and compare the height of the most + # recent cut against the current best. If this cut is an improvement, store + # this one instead. + for old_state in oracle.states + height = _eval_height(cut, old_state.state) + if _dominates(height, old_state.best_objective, is_minimization) + old_state.dominating_cut.non_dominated_count -= 1 + if old_state.dominating_cut.non_dominated_count <= 0 + push!(oracle.cuts_to_be_deleted, old_state.dominating_cut) + end + cut.non_dominated_count += 1 + old_state.dominating_cut = cut + old_state.best_objective = height + end + end + # Now loop through previously discovered cuts and compare their height at + # the most recent sampled point in the state-space. 
If this cut is an + improvement, store this one instead. Note that we have to do this because + # we might have previously thrown out a cut that is now relevant. + for old_cut in oracle.cuts + # If the constraint ref is not nothing, this cut is already in the + # model, so it can't be better than the one we just found. + if old_cut.constraint_ref !== nothing + continue + elseif !JuMP.is_valid(old_cut.constraint_ref) + old_cut.constraint_ref = nothing + continue + end + height = _eval_height(old_cut, state) + if _dominates(height, sampled_state.best_objective, is_minimization) + sampled_state.dominating_cut.non_dominated_count -= 1 + if sampled_state.dominating_cut.non_dominated_count <= 0 + push!(oracle.cuts_to_be_deleted, sampled_state.dominating_cut) + end + old_cut.non_dominated_count += 1 + sampled_state.dominating_cut = old_cut + sampled_state.best_objective = height + end + end + push!(oracle.cuts, cut) + push!(oracle.states, sampled_state) + return +end + +@enum(CutType, AVERAGE_CUT, MULTI_CUT) + # Internal struct: this struct is just a cache for arguments until we can build # an actual instance of the type T at a later point. -struct BellmanFactory{T} +struct InstanceFactory{T} args kwargs - BellmanFactory{T}(args...; kwargs...) where {T} = new{T}(args, kwargs) + InstanceFactory{T}(args...; kwargs...) where {T} = new{T}(args, kwargs) end -""" - AverageCut(; lower_bound = -Inf, upper_bound = Inf, deletion_minimum = 1) - -The AverageCut Bellman function. Provide a `lower_bound` if minimizing, or an -`upper_bound` if maximizing. +mutable struct BellmanFunction <: AbstractBellmanFunction + global_theta::ConvexApproximation + local_thetas::Vector{ConvexApproximation} + cut_type::CutType +end -This Bellman function also implements Level-One cut selection. This requires a -solver that supports constraint deletion. If the solver doesn't support -constraint deletion (e.g., Ipopt), JuMP must be used in Automatic mode instead -of direct mode. In automatic mode, constraint deletion incurs a high cost -because a new model is copied to the solver with every change. Thus, it is -possible to cache constraint deletions until `deletion_minimum` are ready to -improve performance. Cut selection can be "turned off" by setting -`deletion_minimum` to a very large positive integer.
""" -function AverageCut(; lower_bound = -Inf, upper_bound = Inf, - deletion_minimum::Int = 1) - return BellmanFactory{AverageCut}(lower_bound = lower_bound, - upper_bound = upper_bound, deletion_minimum = deletion_minimum) + BellmanFunction(; + lower_bound = -Inf, upper_bound = Inf, deletion_minimum::Int = 1, + cut_type::CutType = AVERAGE_CUT) +""" +function BellmanFunction(; + lower_bound = -Inf, upper_bound = Inf, deletion_minimum::Int = 1, + cut_type::CutType = AVERAGE_CUT) + return InstanceFactory{BellmanFunction}( + lower_bound = lower_bound, upper_bound = upper_bound, + deletion_minimum = deletion_minimum, cut_type = cut_type) +end + +function bellman_term(bellman_function::BellmanFunction) + return bellman_function.global_theta.theta end -function initialize_bellman_function(factory::BellmanFactory{AverageCut}, - graph::PolicyGraph{T}, - node::Node{T}) where {T} - lower_bound, upper_bound = -Inf, Inf - deletion_minimum = 0 +function initialize_bellman_function( + factory::InstanceFactory{BellmanFunction}, model::PolicyGraph{T}, + node::Node{T}) where {T} + lower_bound, upper_bound, deletion_minimum, cut_type = -Inf, Inf, 0, AVERAGE_CUT if length(factory.args) > 0 - error("Positional arguments $(factory.args) ignored in AverageCut.") + error("Positional arguments $(factory.args) ignored in BellmanFunction.") end for (kw, value) in factory.kwargs if kw == :lower_bound @@ -81,48 +177,44 @@ function initialize_bellman_function(factory::BellmanFactory{AverageCut}, upper_bound = value elseif kw == :deletion_minimum deletion_minimum = value + elseif kw == :cut_type + cut_type = value else - error("Keyword $(kw) not recognised as argument to AverageCut.") + error("Keyword $(kw) not recognised as argument to BellmanFunction.") end end + if lower_bound == -Inf && upper_bound == Inf + error("You must specify a finite bound on the cost-to-go term.") + end if length(node.children) == 0 lower_bound = upper_bound = 0.0 end - bellman_variable = if lower_bound == -Inf && upper_bound < Inf - @variable(node.subproblem, upper_bound = upper_bound) - elseif lower_bound > -Inf && upper_bound == Inf - @variable(node.subproblem, lower_bound = lower_bound) - elseif lower_bound > -Inf && upper_bound < Inf - @variable(node.subproblem, - lower_bound = lower_bound, upper_bound = upper_bound) - else - error("You must supply a non-infinite bound for the cost-to-go variable.") - end + Θᴳ = @variable(node.subproblem) + lower_bound > -Inf && JuMP.set_lower_bound(Θᴳ, lower_bound) + upper_bound < Inf && JuMP.set_upper_bound(Θᴳ, upper_bound) # Initialize bounds for the objective states. If objective_state==nothing, # this check will be skipped by dispatch. - add_initial_bounds(node.objective_state, node.subproblem, bellman_variable) - return AverageCut( - bellman_variable, Cut[], SampledState[], Cut[], deletion_minimum - ) -end - -# Internal function: helper used in add_objective_state_constraint. -function _dot(y::NTuple{N, Float64}, μ::NTuple{N, JuMP.VariableRef}) where {N} - return sum(y[i] * μ[i] for i in 1:N) + _add_initial_bounds(node.objective_state, Θᴳ) + x′ = Dict(key => var.out for (key, var) in node.states) + return BellmanFunction(ConvexApproximation(Θᴳ, x′, deletion_minimum), + ConvexApproximation[], cut_type) end -# Internal function: helper used in add_initial_bounds. -function add_objective_state_constraint(subproblem, y, μ, θ) - lower_bound = JuMP.has_lower_bound(θ) ? JuMP.lower_bound(θ) : -Inf - upper_bound = JuMP.has_upper_bound(θ) ? 
JuMP.upper_bound(θ) : Inf +# Internal function: helper used in _add_initial_bounds. +function _add_objective_state_constraint( + theta::JuMP.VariableRef, y::NTuple{N, Float64}, + μ::NTuple{N, JuMP.VariableRef}) where {N} + model = JuMP.owner_model(theta) + lower_bound = JuMP.has_lower_bound(theta) ? JuMP.lower_bound(theta) : -Inf + upper_bound = JuMP.has_upper_bound(theta) ? JuMP.upper_bound(theta) : Inf if lower_bound > -Inf - @constraint(subproblem, _dot(y, μ) + θ >= lower_bound) + @constraint(model, sum(y[i] * μ[i] for i in 1:N) + theta >= lower_bound) end if upper_bound < Inf - @constraint(subproblem, _dot(y, μ) + θ <= upper_bound) + @constraint(model, sum(y[i] * μ[i] for i in 1:N) + theta <= upper_bound) end if lower_bound ≈ upper_bound ≈ 0.0 - @constraint(subproblem, [i=1:length(μ)], μ[i] == 0.0) + @constraint(model, [i=1:N], μ[i] == 0.0) end return end @@ -133,170 +225,138 @@ end # rectangular, we want to add a constraint at each extreme point. This involves # adding 2^N constraints where N = |μ|. This is only feasible for # low-dimensional problems, e.g., N < 5. -add_initial_bounds(obj_state::Nothing, subproblem, θ) = nothing -function add_initial_bounds(obj_state::ObjectiveState, subproblem, θ) +_add_initial_bounds(obj_state::Nothing, theta) = nothing +function _add_initial_bounds(obj_state::ObjectiveState, theta) + model = JuMP.owner_model(theta) if length(obj_state.μ) < 5 for y in Base.product(zip(obj_state.lower_bound, obj_state.upper_bound)...) - add_objective_state_constraint(subproblem, y, obj_state.μ, θ) + _add_objective_state_constraint(theta, y, obj_state.μ) end else - add_objective_state_constraint( - subproblem, obj_state.lower_bound, obj_state.μ, θ) - add_objective_state_constraint( - subproblem, obj_state.upper_bound, obj_state.μ, θ) + _add_objective_state_constraint(theta, obj_state.lower_bound, obj_state.μ) + _add_objective_state_constraint(theta, obj_state.upper_bound, obj_state.μ) end end -bellman_term(bellman::AverageCut) = bellman.variable - -function refine_bellman_function(graph::PolicyGraph{T}, - node::Node{T}, - bellman_function::AverageCut, - risk_measure::AbstractRiskMeasure, - outgoing_state::Dict{Symbol, Float64}, - dual_variables::Vector{Dict{Symbol, Float64}}, - noise_supports::Vector, - original_probability::Vector{Float64}, - objective_realizations::Vector{Float64} - ) where {T} - is_minimization = graph.objective_sense == MOI.MIN_SENSE - risk_adjusted_probability = similar(original_probability) - adjust_probability(risk_measure, - risk_adjusted_probability, - original_probability, - noise_supports, - objective_realizations, - is_minimization) - # Initialize average cut coefficients. - intercept = 0.0 - coefficients = Dict{Symbol, Float64}() - for state in keys(outgoing_state) - coefficients[state] = 0.0 - end - # Gather up coefficients for cut calculation. - # β = F[λ] - # α = F[θ] - βᵀ ̄x' - # θ ≥ α + βᵀ x' - for (objective, dual, prob) in zip(objective_realizations, dual_variables, - risk_adjusted_probability) - intercept += prob * objective - for state in keys(outgoing_state) - coefficients[state] += prob * dual[state] - end +function refine_bellman_function( + model::PolicyGraph{T}, + node::Node{T}, + bellman_function::BellmanFunction, + risk_measure::AbstractRiskMeasure, + outgoing_state::Dict{Symbol, Float64}, + dual_variables::Vector{Dict{Symbol, Float64}}, + noise_supports::Vector, + nominal_probability::Vector{Float64}, + objective_realizations::Vector{Float64}) where {T} + # Sanity checks. 
+ @assert length(dual_variables) == length(noise_supports) == + length(nominal_probability) == length(objective_realizations) + # Preliminaries that are common to all cut types. + risk_adjusted_probability = similar(nominal_probability) + adjust_probability( + risk_measure, risk_adjusted_probability, nominal_probability, + noise_supports, objective_realizations, + model.objective_sense == MOI.MIN_SENSE) + # The meat of the function. + if bellman_function.cut_type == AVERAGE_CUT + _add_average_cut( + node, + outgoing_state, + risk_adjusted_probability, + objective_realizations, + dual_variables) + else # Add a multi-cut + @assert bellman_function.cut_type == MULTI_CUT + _add_locals_if_necessary(bellman_function, length(dual_variables)) + _add_multi_cut( + node, + outgoing_state, + risk_adjusted_probability, + objective_realizations, + dual_variables) end - # Height of the cut at outgoing_state. We cache the value here for the - # tolerance check that happens later. - current_height = intercept - # Calculate the intercept of the cut. - for (name, value) in outgoing_state - intercept -= coefficients[name] * value - end - # Coefficients in the objective state dimension. - objective_state_component = get_objective_state_component(node) - # Initialize the cut struct. It gets initialized with a non_dominated_count - # of 1 because if there is cut selection, it dominates at the most recent - # point (`outgoing_state`). - cut = Cut(intercept, coefficients, 1, nothing) - # No cut selection if there is objective-state interpolation :(. - if objective_state_component == JuMP.AffExpr(0.0) - levelone_update(bellman_function, cut, outgoing_state, is_minimization) - purge_cuts(node, bellman_function) - end - add_new_cut(node, bellman_function.variable, cut, objective_state_component, - is_minimization) - return end -# Internal function: delete dominated cuts. -function purge_cuts(node::Node, bellman::AverageCut) - if length(bellman.cuts_to_be_deleted) >= bellman.deletion_minimum - for existing_cut in bellman.cuts_to_be_deleted - JuMP.delete(node.subproblem, existing_cut.constraint_ref) +function _add_average_cut(node::Node, outgoing_state::Dict{Symbol, Float64}, + risk_adjusted_probability::Vector{Float64}, + objective_realizations::Vector{Float64}, + dual_variables::Vector{Dict{Symbol, Float64}}) + N = length(risk_adjusted_probability) + @assert N == length(objective_realizations) == length(dual_variables) + # Calculate the expected intercept and dual variables with respect to the + # risk-adjusted probability distribution. + πᵏ = Dict(key => 0.0 for key in keys(outgoing_state)) + θᵏ = 0.0 + for i in 1:length(objective_realizations) + p = risk_adjusted_probability[i] + θᵏ += p * objective_realizations[i] + for (key, dual) in dual_variables[i] + πᵏ[key] += p * dual end - empty!(bellman.cuts_to_be_deleted) end + # Now add the average-cut to the subproblem. We include the objective-state + # component μᵀy. + _add_cut(node.bellman_function.global_theta, θᵏ, πᵏ, outgoing_state, + get_objective_state_component(node)) return end -# Internal function: add a new cut to the model.
-function add_new_cut(node::Node, theta::JuMP.VariableRef, cut::Cut, - objective_state, is_minimization::Bool) - if is_minimization - cut.constraint_ref = @constraint(node.subproblem, - theta + objective_state >= cut.intercept + - sum(cut.coefficients[name] * state.out - for (name, state) in node.states)) +function _add_multi_cut(node::Node, outgoing_state::Dict{Symbol, Float64}, + risk_adjusted_probability::Vector{Float64}, + objective_realizations::Vector{Float64}, + dual_variables::Vector{Dict{Symbol, Float64}}) + N = length(risk_adjusted_probability) + @assert N == length(objective_realizations) == length(dual_variables) + bellman_function = node.bellman_function + μᵀy = get_objective_state_component(node) + for i in 1:length(dual_variables) + _add_cut(bellman_function.local_thetas[i], objective_realizations[i], + dual_variables[i], outgoing_state, μᵀy) + end + model = JuMP.owner_model(bellman_function.global_theta.theta) + cut_expr = @expression(model, sum( + risk_adjusted_probability[i] * bellman_function.local_thetas[i].theta + for i in 1:N) - (1 - sum(risk_adjusted_probability)) * μᵀy + ) + # TODO(odow): hash the risk_adjusted_probability (or cut expression if + # sum(ξ)<1 and μᵀy != 0) and only add if it's a new constraint. + if JuMP.objective_sense(model) == MOI.MIN_SENSE + @constraint(model, bellman_function.global_theta.theta >= cut_expr) else - cut.constraint_ref = @constraint(node.subproblem, - theta + objective_state <= cut.intercept + - sum(cut.coefficients[name] * state.out - for (name, state) in node.states)) + @constraint(model, bellman_function.global_theta.theta <= cut_expr) end return end -# Internal function: calculate the height of `cut` evaluated at `state`. -function eval_height(cut::Cut, state::Dict{Symbol, Float64}) - height = cut.intercept - for (key, value) in cut.coefficients - height += value * state[key] - end - return height -end - -# Internal function: check if the candidate point dominates the incumbent. -function dominates(candidate, incumbent, minimization::Bool) - return minimization ? candidate > incumbent : candidate < incumbent -end - -# Internal function: update the Level-One datastructures inside -# `bellman_function`. -function levelone_update(bellman_function::AverageCut, cut::Cut, - state::Dict{Symbol, Float64}, is_minimization) - sampled_state = SampledState(state, cut, eval_height(cut, state)) - # Loop through previously sampled states and compare the height of the most - # recent cut against the current best. If this cut is an improvement, store - # this one instead. - for old_state in bellman_function.states - height = eval_height(cut, old_state.state) - if dominates(height, old_state.best_objective, is_minimization) - old_state.dominating_cut.non_dominated_count -= 1 - if old_state.dominating_cut.non_dominated_count <= 0 - push!(bellman_function.cuts_to_be_deleted, - old_state.dominating_cut) - end - cut.non_dominated_count += 1 - old_state.dominating_cut = cut - old_state.best_objective = height +# If we are adding a multi-cut for the first time, then the local θ variables +# won't have been added. +# TODO(odow): a way to set different bounds for each variable in the multi-cut. +function _add_locals_if_necessary(bellman_function::BellmanFunction, N::Int) + num_local_thetas = length(bellman_function.local_thetas) + if num_local_thetas == N + # Do nothing. Already initialized. 
+ elseif num_local_thetas == 0 + global_theta = bellman_function.global_theta + model = JuMP.owner_model(global_theta.theta) + local_thetas = @variable(model, [1:N]) + if JuMP.has_lower_bound(global_theta.theta) + JuMP.set_lower_bound.(local_thetas, JuMP.lower_bound(global_theta.theta)) + end - end - # Now loop through previously discovered cuts and compare their height at - # the most recent sampled point in the state-space. If this cut is an - # improvement, store this one instead. Note that we have to do this because - # we might have previously thrown out a cut that is now relevant. - for old_cut in bellman_function.cuts - # If the constriant ref is not nothing, this cut is already in the - # model, so it can't be better than the one we just found. - if old_cut.constraint_ref !== nothing - continue - elseif !JuMP.is_valid(old_cut.constraint_ref) - old_cut.constraint_ref = nothing - continue + if JuMP.has_upper_bound(global_theta.theta) + JuMP.set_upper_bound.(local_thetas, JuMP.upper_bound(global_theta.theta)) end - height = eval_height(old_cut, state) - if dominates(height, sampled_state.best_objective, is_minimization) - sampled_state.dominating_cut.non_dominated_count -= 1 - if sampled_state.dominating_cut.non_dominated_count <= 0 - push!(bellman_function.cuts_to_be_deleted, - sampled_state.dominating_cut) - end - old_cut.non_dominated_count += 1 - sampled_state.dominating_cut = old_cut - sampled_state.best_objective = height + for local_theta in local_thetas + push!( + bellman_function.local_thetas, + ConvexApproximation( + local_theta, global_theta.states, + global_theta.cut_oracle.deletion_minimum + ) + ) end + else + error("Expected $(N) local θ variables but there were $(num_local_thetas).") end - push!(bellman_function.cuts, cut) - push!(bellman_function.states, sampled_state) return end @@ -311,11 +371,14 @@ function write_cuts_to_file(model::PolicyGraph{T}, filename::String) where {T} cuts = Dict{T, Vector{Dict{Symbol, Float64}}}() for (node_name, node) in model.nodes if node.objective_state !== nothing - error("Unable to write cuts to file because it contains objective" * - " states.") + error("Unable to write cuts to file because model contains " * + "objective states.") + elseif length(node.bellman_function.local_thetas) > 0 + error("Unable to write cuts to file because model contains " * + "multi-cuts.") end cuts[node_name] = Dict{String, Float64}[] - for cut in node.bellman_function.cuts + for cut in node.bellman_function.global_theta.cut_oracle.cuts cut_dict = copy(cut.coefficients) cut_dict[:cut_intercept] = cut.intercept push!(cuts[node_name], cut_dict) @@ -376,12 +439,17 @@ function read_cuts_from_file( coefficients[Symbol(name)] = coef end end - add_new_cut( - node, - node.bellman_function.variable, - Cut(intercept, coefficients, 1, nothing), - JuMP.AffExpr(0.0), - model.objective_sense == MOI.MIN_SENSE) + # The cuts are written to file after they have been normalized + # to `θᴳ ≥ [θᵏ - ⟨πᵏ, xᵏ⟩] + ⟨πᵏ, x′⟩`. Thus, we pass `xᵏ=0` so that + # everything works out okay. + # Importantly, don't run cut selection when adding these cuts. + _add_cut( + node.bellman_function.global_theta, + intercept, + coefficients, + Dict(key=>0.0 for key in keys(coefficients)), + cut_selection = false + ) end end return diff --git a/src/plugins/headers.jl b/src/plugins/headers.jl index f4b687a18..4f64f3e98 100644 --- a/src/plugins/headers.jl +++ b/src/plugins/headers.jl @@ -115,7 +115,7 @@ end Return a JuMP expression representing the Bellman function.
""" function bellman_term(bellman::AbstractBellmanFunction) - error("SDDP.bellman term not implemented for $(bellman).") + error("SDDP.bellman_term not implemented for $(bellman).") end # =============================== stopping_rules ============================= # diff --git a/src/user_interface.jl b/src/user_interface.jl index e59e0b2fa..a1de05452 100644 --- a/src/user_interface.jl +++ b/src/user_interface.jl @@ -290,7 +290,7 @@ end """ PolicyGraph(builder::Function, graph::Graph{T}; - bellman_function = AverageCut, + bellman_function = BellmanFunction, optimizer = nothing, direct_mode = true) where T @@ -303,14 +303,14 @@ for details.) # ... subproblem definition ... end model = PolicyGraph(builder, graph; - bellman_function = AverageCut, + bellman_function = BellmanFunction, optimizer = with_optimizer(GLPK.Optimizer), direct_mode = false) Or, using the Julia `do ... end` syntax: model = PolicyGraph(graph; - bellman_function = AverageCut, + bellman_function = BellmanFunction, optimizer = with_optimizer(GLPK.Optimizer), direct_mode = true) do subproblem, index # ... subproblem definitions ... @@ -331,7 +331,7 @@ function PolicyGraph(builder::Function, graph::Graph{T}; error("You must specify a bound on the objective value, through " * "`lower_bound` if minimizing, or `upper_bound` if maximizing.") else - bellman_function = AverageCut( + bellman_function = BellmanFunction( lower_bound = lower_bound, upper_bound = upper_bound) end end diff --git a/test/algorithm.jl b/test/algorithm.jl index 316b30072..d5092d156 100644 --- a/test/algorithm.jl +++ b/test/algorithm.jl @@ -8,7 +8,7 @@ using SDDP, Test, GLPK @testset "Forward Pass" begin model = SDDP.PolicyGraph(SDDP.LinearGraph(2); sense = :Max, - bellman_function = SDDP.AverageCut(upper_bound = 100.0), + bellman_function = SDDP.BellmanFunction(upper_bound = 100.0), optimizer = with_optimizer(GLPK.Optimizer) ) do node, stage @variable(node, x, SDDP.State, initial_value = 0.0) @@ -39,7 +39,7 @@ end @testset "to nodal forms" begin model = SDDP.PolicyGraph(SDDP.LinearGraph(2), - bellman_function = SDDP.AverageCut(lower_bound = 0.0), + bellman_function = SDDP.BellmanFunction(lower_bound = 0.0), optimizer = with_optimizer(GLPK.Optimizer) ) do node, stage @variable(node, x >= 0, SDDP.State, initial_value = 0.0) @@ -61,7 +61,7 @@ end @testset "solve" begin model = SDDP.PolicyGraph(SDDP.LinearGraph(2), - bellman_function = SDDP.AverageCut(lower_bound = 0.0), + bellman_function = SDDP.BellmanFunction(lower_bound = 0.0), optimizer = with_optimizer(GLPK.Optimizer) ) do node, stage @variable(node, x >= 0, SDDP.State, initial_value = 0.0) diff --git a/test/plugins/stopping_rules.jl b/test/plugins/stopping_rules.jl index 9f0828af7..3e861653b 100644 --- a/test/plugins/stopping_rules.jl +++ b/test/plugins/stopping_rules.jl @@ -32,7 +32,7 @@ end @testset "Statistical" begin model = SDDP.PolicyGraph(SDDP.LinearGraph(2), - bellman_function = SDDP.AverageCut(lower_bound = 0.0), + bellman_function = SDDP.BellmanFunction(lower_bound = 0.0), optimizer = with_optimizer(GLPK.Optimizer), sense = :Min ) do node, stage diff --git a/test/runtests.jl b/test/runtests.jl index 35eabcdf3..89b39b2ed 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -9,15 +9,6 @@ function read_dir(dir, exclude = String[]) return filter(s->!(s in exclude) && endswith(s, ".jl"), readdir(dir)) end -@testset "Unit Tests" begin - @testset "plugins/$(file)" for file in read_dir("plugins") - include(joinpath("plugins", file)) - end - @testset "$(file)" for file in read_dir(".", ["runtests.jl"]) 
- include(file) - end -end - const EXCLUDED_EXAMPLES = [ "inventory_management.jl", "daniel_hydro_complete.jl" @@ -25,9 +16,20 @@ const EXCLUDED_EXAMPLES = [ const EXAMPLES_DIR = joinpath(dirname(dirname(@__FILE__)), "examples") -@testset "Examples" begin - @testset "$example" for example in read_dir(EXAMPLES_DIR, EXCLUDED_EXAMPLES) - Random.seed!(12345) - include(joinpath(EXAMPLES_DIR, example)) +@testset "SDDP.jl" begin + @testset "Unit Tests" begin + @testset "plugins/$(file)" for file in read_dir("plugins") + include(joinpath("plugins", file)) + end + @testset "$(file)" for file in read_dir(".", ["runtests.jl"]) + include(file) + end + end + + @testset "Examples" begin + @testset "$example" for example in read_dir(EXAMPLES_DIR, EXCLUDED_EXAMPLES) + Random.seed!(12345) + include(joinpath(EXAMPLES_DIR, example)) + end end end
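The patch exposes the new cut-type machinery in two places: the `cut_type` keyword of `SDDP.BellmanFunction` and the `cut_type` keyword of `SDDP.train` (which, as `algorithm.jl` now stands, overwrites each node's `bellman_function.cut_type` and defaults to `SDDP.AVERAGE_CUT`, so the value passed to `train` is the one that takes effect). The sketch below is not part of the diff; it is a minimal illustration of both entry points using only calls that appear in the changes above, with a hypothetical two-stage toy model, bounds, and iteration limit chosen purely for demonstration.

```julia
using SDDP, GLPK

# Hypothetical two-stage toy model; the point is the `cut_type` keyword
# introduced in this patch, not the model itself.
model = SDDP.LinearPolicyGraph(
    stages = 2, sense = :Min,
    bellman_function = SDDP.BellmanFunction(
        lower_bound = 0.0, cut_type = SDDP.MULTI_CUT),
    optimizer = with_optimizer(GLPK.Optimizer)
) do sp, t
    @variable(sp, 0 <= x <= 10, SDDP.State, initial_value = 5.0)
    @variable(sp, u >= 0)
    @constraint(sp, x.out == x.in - u)
    SDDP.parameterize(sp, [1.0, 2.0], [0.5, 0.5]) do ω
        @stageobjective(sp, ω * u)
    end
end

# Select the cut type at training time. Note that `train` sets every node's
# cut type, so omitting `cut_type` here would fall back to SDDP.AVERAGE_CUT
# even though the Bellman function above asked for multi-cuts.
SDDP.train(model; iteration_limit = 10, print_level = 0,
           cut_type = SDDP.MULTI_CUT)
```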