Add tests for ValueFunction
odow committed Dec 17, 2019
1 parent 865d43a commit 8ec45d8
Showing 5 changed files with 161 additions and 24 deletions.
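
For orientation, the workflow exercised by the new tests is roughly the following (a condensed sketch assembled from the test file added below; GLPK and the toy two-stage model are the same assumptions the tests make):

using SDDP, GLPK
model = SDDP.LinearPolicyGraph(
    stages = 2,
    lower_bound = 0.0,
    optimizer = with_optimizer(GLPK.Optimizer),
) do sp, t
    @variable(sp, x >= 0, SDDP.State, initial_value = 1.5)
    @constraint(sp, x.out == x.in)
    @stageobjective(sp, 2 * x.out)
end
SDDP.train(model, iteration_limit = 2, print_level = 0)
# Build a stand-alone JuMP model of the stage-1 cost-to-go function, then
# evaluate it at a point in the state space, recovering the objective value
# and the duals (a subgradient) with respect to each state variable.
V1 = SDDP.ValueFunction(model[1])
y, duals = SDDP.evaluate(V1, Dict(:x => 1.0))  # (2.0, Dict(:x => 2.0))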
13 changes: 8 additions & 5 deletions src/plugins/bellman_functions.jl
@@ -270,17 +270,20 @@ function _add_objective_state_constraint(
y::NTuple{N,Float64},
μ::NTuple{N,JuMP.VariableRef},
) where {N}
is_finite = [-Inf < y[i] < Inf for i = 1:N]
model = JuMP.owner_model(theta)
lower_bound = JuMP.has_lower_bound(theta) ? JuMP.lower_bound(theta) : -Inf
upper_bound = JuMP.has_upper_bound(theta) ? JuMP.upper_bound(theta) : Inf
if lower_bound ≈ upper_bound ≈ 0.0
@constraint(model, [i = 1:N], μ[i] == 0.0)
return
end
expr = @expression(model, sum(y[i] * μ[i] for i = 1:N if is_finite[i]) + theta)
if lower_bound > -Inf
@constraint(model, sum(y[i] * μ[i] for i = 1:N) + theta >= lower_bound)
@constraint(model, expr >= lower_bound)
end
if upper_bound < Inf
@constraint(model, sum(y[i] * μ[i] for i = 1:N) + theta <= upper_bound)
end
if lower_bound ≈ upper_bound ≈ 0.0
@constraint(model, [i = 1:N], μ[i] == 0.0)
@constraint(model, expr <= upper_bound)
end
return
end
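A hedged aside on the hunk above: the rewrite builds the bound expression once, dropping any objective-state component whose value y[i] is not finite, and returns early when theta is fixed at zero. A standalone JuMP sketch of the filtering pattern (the variables and the Inf value here are invented for illustration, not SDDP internals):

using JuMP
model = Model()
@variable(model, μ[1:2])
@variable(model, theta >= 0.0)
y = (1.5, Inf)  # hypothetical objective-state values; the second is not finite
is_finite = [-Inf < y[i] < Inf for i = 1:2]
# Only finite coefficients enter the expression, so the constraint stays well posed.
expr = @expression(model, sum(y[i] * μ[i] for i = 1:2 if is_finite[i]) + theta)
@constraint(model, expr >= 0.0)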
50 changes: 32 additions & 18 deletions src/visualization/value_functions.jl
@@ -44,6 +44,8 @@ struct ValueFunction{
belief_state::B
end

JuMP.set_optimizer(v::ValueFunction, optimizer) = set_optimizer(v.model, optimizer)

function _add_to_value_function(
model::JuMP.Model,
states::Dict{Symbol,JuMP.VariableRef},
@@ -59,24 +61,23 @@ function _add_to_value_function(
set_upper_bound(theta, upper_bound(convex_approximation.theta))
end
for cut in convex_approximation.cut_oracle.cuts
cut_expr = AffExpr(cut.intercept)
for (key, coef) in cut.coefficients
if !haskey(states, key)
states[key] = @variable(model, base_name = "$(key)")
end
add_to_expression!(cut_expr, coef, states[key])
end
cut_expr = @expression(
model,
cut.intercept + sum(coef * states[key] for (key, coef) in cut.coefficients)
)
if objective_state !== nothing
@assert cut.obj_y !== nothing
for (y, μ) in zip(cut.obj_y, objective_state)
add_to_expression!(cut_expr, -y, μ)
end
cut_expr = @expression(
model,
cut_expr - sum(y * μ for (y, μ) in zip(cut.obj_y, objective_state))
)
end
if belief_state !== nothing
@assert cut.belief_y !== nothing
for (key, μ) in belief_state
add_to_expression!(cut_expr, -cut.belief_y[key], μ)
end
cut_expr = @expression(
model,
cut_expr - sum(cut.belief_y[key] * μ for (key, μ) in belief_state)
)
end
if objective_sense(model) == MOI.MIN_SENSE
@constraint(model, theta >= cut_expr)
@@ -87,16 +88,28 @@
return theta
end

function ValueFunction(node::Node{T}, optimizer) where {T}
function ValueFunction(node::Node{T}) where {T}
b = node.bellman_function
sense = objective_sense(node.subproblem)
model = Model(optimizer)
model = Model()
if node.optimizer !== nothing
set_optimizer(model, node.optimizer)
end
set_objective_sense(model, sense)
states = Dict{Symbol,VariableRef}()
states = Dict{Symbol,VariableRef}(
key => @variable(model, base_name = "$(key)") for (key, x) in node.states
)
objective_state = if node.objective_state === nothing
nothing
else
VariableRef[@variable(model, lower_bound = lower_bound(μ), upper_bound = upper_bound(μ)) for μ in node.objective_state.μ]
tuple(
VariableRef[@variable(
model,
lower_bound = lower_bound(μ),
upper_bound = upper_bound(μ),
base_name = "_objective_state_$(i)"
) for (i, μ) in enumerate(node.objective_state.μ)]...,
)
end
belief_state = if node.belief_state === nothing
nothing
@@ -177,8 +190,9 @@ function evaluate(
optimize!(V.model)
obj = objective_value(V.model)
duals = Dict{Symbol,Float64}()
sign = objective_sense(V.model) == MOI.MIN_SENSE ? 1.0 : -1.0
for (key, var) in V.states
duals[key] = dual(FixRef(var))
duals[key] = sign * dual(FixRef(var))
end
return obj, duals
end
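
One note on the evaluate change above: for maximization models the raw dual of the FixRef constraint comes back with the opposite sign, so the new sign factor flips it. The apparent intent (a hedged reading, consistent with the Max test added below) is that the returned duals always give the slope of the value function, for example:

using SDDP, GLPK
model = SDDP.LinearPolicyGraph(
    stages = 2,
    sense = :Max,
    upper_bound = 0.0,
    optimizer = with_optimizer(GLPK.Optimizer),
) do sp, t
    @variable(sp, x >= 0, SDDP.State, initial_value = 1.5)
    @constraint(sp, x.out == x.in)
    @stageobjective(sp, -2 * x.out)
end
SDDP.train(model, iteration_limit = 2, print_level = 0)
V1 = SDDP.ValueFunction(model[1])
# Here V1(x) = -2x, so the reported dual is the slope -2.0 rather than the
# raw +2.0 that dual(FixRef(...)) returns for a Max-sense model.
y, duals = SDDP.evaluate(V1, Dict(:x => 1.0))  # (-2.0, Dict(:x => -2.0))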
4 changes: 3 additions & 1 deletion test/runtests.jl
@@ -22,6 +22,9 @@ const EXAMPLES_DIR = joinpath(dirname(dirname(@__FILE__)), "examples")
@testset "plugins/$(file)" for file in read_dir("plugins", ["parallel_schemes.jl"])
include(joinpath("plugins", file))
end
@testset "visualization/$(file)" for file in read_dir("visualization")
include(joinpath("visualization", file))
end
@testset "$(file)" for file in read_dir(".", ["runtests.jl"])
include(file)
end
@@ -48,4 +51,3 @@ JuliaFormatter.format(joinpath(dirname(@__DIR__), "src"))
JuliaFormatter.format(@__DIR__)
# Format /examples
JuliaFormatter.format(joinpath(dirname(@__DIR__), "examples"))

118 changes: 118 additions & 0 deletions test/visualization/value_functions.jl
@@ -0,0 +1,118 @@
# Copyright 2017-19, Oscar Dowson.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

using SDDP
using Test
using GLPK

@testset "Min" begin
model = SDDP.LinearPolicyGraph(
stages = 2,
lower_bound = 0.0,
optimizer = with_optimizer(GLPK.Optimizer),
) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 1.5)
@constraint(sp, x.out == x.in)
@stageobjective(sp, 2 * x.out)
end
V1 = SDDP.ValueFunction(model[1])
@test SDDP.evaluate(V1, Dict(:x => 1.0)) == (0.0, Dict(:x => 0.0))
SDDP.train(model, iteration_limit = 2, print_level = 0)
V1 = SDDP.ValueFunction(model[1])
for (xhat, yhat, pihat) in [(0.0, 0.0, 0.0), (1.0, 2.0, 2.0), (2.0, 4.0, 2.0)]
@test SDDP.evaluate(V1, Dict(:x => xhat)) == (yhat, Dict(:x => pihat))
end
end

@testset "Max" begin
model = SDDP.LinearPolicyGraph(
stages = 2,
sense = :Max,
upper_bound = 0.0,
optimizer = with_optimizer(GLPK.Optimizer),
) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 1.5)
@constraint(sp, x.out == x.in)
@stageobjective(sp, -2 * x.out)
end
SDDP.train(model, iteration_limit = 2, print_level = 0)
V1 = SDDP.ValueFunction(model[1])
for (xhat, yhat, pihat) in [(0.0, 0.0, 0.0), (1.0, 2.0, 2.0), (2.0, 4.0, 2.0)]
(y, duals) = SDDP.evaluate(V1, Dict(:x => xhat))
@test y == -yhat
@test duals == Dict(:x => -pihat)
end
end

@testset "optimizer" begin
model = SDDP.LinearPolicyGraph(
stages = 2,
lower_bound = 0.0,
optimizer = with_optimizer(GLPK.Optimizer),
direct_mode = true,
) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 1.5)
@constraint(sp, x.out == x.in)
@stageobjective(sp, 2 * x.out)
end
SDDP.train(model, iteration_limit = 2, print_level = 0)
V1 = SDDP.ValueFunction(model[1])
@test_throws JuMP.NoOptimizer() SDDP.evaluate(V1, Dict(:x => 1.0))
JuMP.set_optimizer(V1, with_optimizer(GLPK.Optimizer))
(y, _) = SDDP.evaluate(V1, Dict(:x => 1.0))
@test y == 2.0
end

@testset "objective state" begin
model = SDDP.LinearPolicyGraph(
stages = 2,
lower_bound = 0.0,
optimizer = with_optimizer(GLPK.Optimizer),
) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 1.5)
SDDP.add_objective_state(sp; initial_value = 0.0, lipschitz = 10.0) do p, ω
return p + ω
end
@constraint(sp, x.out == x.in)
SDDP.parameterize(sp, [1, 2]) do ω
price = SDDP.objective_state(sp)
@stageobjective(sp, price * x.out)
end
end
SDDP.train(model, iteration_limit = 2, print_level = 0)
V1 = SDDP.ValueFunction(model[1])
@test_throws AssertionError SDDP.evaluate(V1, Dict(:x => 1.0))
@test SDDP.evaluate(V1, Dict(:x => 1.0); objective_state = 1) == (2.5, Dict(:x => 2.5))
@test SDDP.evaluate(V1, Dict(:x => 0.0); objective_state = 2) == (0.0, Dict(:x => 3.5))
end

@testset "belief state" begin
graph = SDDP.MarkovianGraph(Matrix{Float64}[[0.5 0.5], [1.0 0.0; 0.0 1.0]])
SDDP.add_ambiguity_set(graph, [(1, 1), (1, 2)])
SDDP.add_ambiguity_set(graph, [(2, 1), (2, 2)])
model = SDDP.PolicyGraph(
graph,
lower_bound = 0.0,
optimizer = with_optimizer(GLPK.Optimizer),
) do sp, node
(t, i) = node
@variable(sp, x >= 0, SDDP.State, initial_value = 1.5)
@constraint(sp, x.out == x.in)
P = [[0.2, 0.8], [0.8, 0.2]]
SDDP.parameterize(sp, [1, 2], P[i]) do ω
@stageobjective(sp, ω * x.out)
end
end
SDDP.train(model, iteration_limit = 10, print_level = 0)
V11 = SDDP.ValueFunction(model[(1, 1)])
@test_throws AssertionError SDDP.evaluate(V11, Dict(:x => 1.0))
b = Dict((1, 1) => 0.8, (1, 2) => 0.2)
(y, duals) = SDDP.evaluate(V11, Dict(:x => 1.0); belief_state = b)
@test duals[:x] ≈ y ≈ 1.68

V12 = SDDP.ValueFunction(model[(1, 2)])
(y, duals) = SDDP.evaluate(V12, Dict(:x => 1.0); belief_state = b)
@test duals[:x] ≈ y ≈ 1.68
end
File renamed without changes.
