Add tests for ValueFunction
odow committed Dec 17, 2019
1 parent 8c6fb06 commit dcd8009
Showing 8 changed files with 174 additions and 38 deletions.
13 changes: 8 additions & 5 deletions src/modeling_aids.jl
@@ -24,7 +24,7 @@
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.

function find_min(x::Vector{T}, y::T) where {T <: Real}
function find_min(x::Vector{T}, y::T) where {T<:Real}
best_i = 0
best_z = Inf
for i = 1:length(x)
@@ -40,7 +40,7 @@ end
function lattice_approximation(f::Function, states::Vector{Int}, scenarios::Int)
path = f()::Vector{Float64}
support = [fill(path[t], states[t]) for t = 1:length(states)]
probability = [zeros(states[t - 1], states[t]) for t = 2:length(states)]
probability = [zeros(states[t-1], states[t]) for t = 2:length(states)]
prepend!(probability, Ref(zeros(1, states[1])))
distance = 0.0
for n = 1:scenarios
@@ -55,7 +55,8 @@ function lattice_approximation(f::Function, states::Vector{Int}, scenarios::Int)
min_dist, best_idx = find_min(support[t], path[t])
dist += min_dist^2
probability[t][last_index, best_idx] += 1.0
support[t][best_idx] -= min_dist * (support[t][best_idx] - path[t]) / (3000 + n)^0.75
support[t][best_idx] -=
min_dist * (support[t][best_idx] - path[t]) / (3000 + n)^0.75
last_index = best_idx
end
distance = (distance * (n - 1) + dist) / n
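The update two lines above is a stochastic-approximation step: each sampled path pulls its nearest support point toward itself with a step size that decays like `(3000 + n)^-0.75`. A standalone toy sketch of the same idea (illustrative code, not part of the commit):

```julia
# Toy sketch of the update used in `lattice_approximation` above: quantize a
# scalar random variable onto K support points.
function toy_quantize(sample::Function, K::Int, N::Int)
    support = [sample() for _ = 1:K]
    for n = 1:N
        x = sample()
        d, i = findmin(abs.(support .- x))  # distance to the nearest support point
        # Pull the nearest point toward the sample; the step decays with n.
        support[i] -= d * (support[i] - x) / (3000 + n)^0.75
    end
    return sort(support)
end

# e.g. toy_quantize(rand, 3, 10_000) should approach roughly [1/6, 1/2, 5/6],
# the optimal 3-point quantizer of U(0, 1).
```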
@@ -114,7 +115,9 @@ nodes between stages. Alternatively, `budget` can be a `Vector{Int}`, which details the
number of Markov states to have in each stage.
"""
function MarkovianGraph(
simulator::Function; budget::Union{Int, Vector{Int}}, scenarios::Int = 1000
simulator::Function;
budget::Union{Int,Vector{Int}},
scenarios::Int = 1000,
)
scenarios = max(scenarios, 10)
states = allocate_support_budget(simulator, budget, scenarios)
@@ -127,7 +130,7 @@ function MarkovianGraph(
for t = 2:length(support)
for (j, sj) in enumerate(support[t])
add_node(g, (t, sj))
for (i, si) in enumerate(support[t - 1])
for (i, si) in enumerate(support[t-1])
add_edge(g, (t - 1, si) => (t, sj), probability[t][i, j])
end
end
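The simulator-based constructor reformatted above fits a Markovian lattice to sample paths. A hedged usage sketch (the random-walk simulator below is illustrative, not from the commit):

```julia
using SDDP

# A simulator must return one sample path as a Vector{Float64}.
simulator() = accumulate(+, rand([-0.1, 0.1], 5); init = 1.0)

graph = SDDP.MarkovianGraph(
    simulator;
    budget = 8,       # number of Markov nodes; per the docstring, a
                      # Vector{Int} gives per-stage counts instead
    scenarios = 100,  # sample paths used to fit the lattice (floored at 10)
)
```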
13 changes: 8 additions & 5 deletions src/plugins/bellman_functions.jl
@@ -270,17 +270,20 @@ function _add_objective_state_constraint(
y::NTuple{N,Float64},
μ::NTuple{N,JuMP.VariableRef},
) where {N}
is_finite = [-Inf < y[i] < Inf for i = 1:N]
model = JuMP.owner_model(theta)
lower_bound = JuMP.has_lower_bound(theta) ? JuMP.lower_bound(theta) : -Inf
upper_bound = JuMP.has_upper_bound(theta) ? JuMP.upper_bound(theta) : Inf
if lower_bound ≈ upper_bound ≈ 0.0
@constraint(model, [i = 1:N], μ[i] == 0.0)
return
end
expr = @expression(model, sum(y[i] * μ[i] for i = 1:N if is_finite[i]) + theta)
if lower_bound > -Inf
@constraint(model, sum(y[i] * μ[i] for i = 1:N) + theta >= lower_bound)
@constraint(model, expr >= lower_bound)
end
if upper_bound < Inf
@constraint(model, sum(y[i] * μ[i] for i = 1:N) + theta <= upper_bound)
end
if lower_bound ≈ upper_bound ≈ 0.0
@constraint(model, [i = 1:N], μ[i] == 0.0)
@constraint(model, expr <= upper_bound)
end
return
end
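The reorganized function now returns early when both bounds are approximately zero, and builds `expr` once, skipping non-finite components of `y`. A toy JuMP sketch (hypothetical data) of why that filter matters:

```julia
using JuMP

model = Model()
@variable(model, theta >= 0)
@variable(model, μ[1:2])
y = (1.5, -Inf)  # suppose the second objective-state component is unbounded
is_finite = [-Inf < y[i] < Inf for i = 1:2]
# Without the `if is_finite[i]` filter, -Inf would enter the coefficients.
expr = @expression(model, sum(y[i] * μ[i] for i = 1:2 if is_finite[i]) + theta)
@constraint(model, expr >= 0.0)  # e.g. when lower_bound = 0.0
```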
10 changes: 4 additions & 6 deletions src/user_interface.jl
@@ -305,12 +305,10 @@ function MarkovianGraph(;
)
@assert size(transition_matrix, 1) == size(transition_matrix, 2)
@assert length(root_node_transition) == size(transition_matrix, 1)
return MarkovianGraph(
vcat(
[Base.reshape(root_node_transition, 1, length(root_node_transition))],
[transition_matrix for stage = 1:(stages-1)],
)
)
return MarkovianGraph(vcat(
[Base.reshape(root_node_transition, 1, length(root_node_transition))],
[transition_matrix for stage = 1:(stages-1)],
))
end
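This hunk only re-wraps the call, but a hedged usage sketch of the keyword constructor may help (the two-state chain is illustrative):

```julia
using SDDP

graph = SDDP.MarkovianGraph(;
    stages = 3,
    transition_matrix = [0.4 0.6; 0.25 0.75],  # square, rows sum to 1
    root_node_transition = [0.5, 0.5],         # length matches the matrix
)
```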

"""
50 changes: 32 additions & 18 deletions src/visualization/value_functions.jl
@@ -44,6 +44,8 @@ struct ValueFunction{
belief_state::B
end

JuMP.set_optimizer(v::ValueFunction, optimizer) = set_optimizer(v.model, optimizer)

function _add_to_value_function(
model::JuMP.Model,
states::Dict{Symbol,JuMP.VariableRef},
@@ -59,24 +61,23 @@ function _add_to_value_function(
set_upper_bound(theta, upper_bound(convex_approximation.theta))
end
for cut in convex_approximation.cut_oracle.cuts
cut_expr = AffExpr(cut.intercept)
for (key, coef) in cut.coefficients
if !haskey(states, key)
states[key] = @variable(model, base_name = "$(key)")
end
add_to_expression!(cut_expr, coef, states[key])
end
cut_expr = @expression(
model,
cut.intercept + sum(coef * states[key] for (key, coef) in cut.coefficients)
)
if objective_state !== nothing
@assert cut.obj_y !== nothing
for (y, μ) in zip(cut.obj_y, objective_state)
add_to_expression!(cut_expr, -y, μ)
end
cut_expr = @expression(
model,
cut_expr - sum(y * μ for (y, μ) in zip(cut.obj_y, objective_state))
)
end
if belief_state !== nothing
@assert cut.belief_y !== nothing
for (key, μ) in belief_state
add_to_expression!(cut_expr, -cut.belief_y[key], μ)
end
cut_expr = @expression(
model,
cut_expr - sum(cut.belief_y[key] * μ for (key, μ) in belief_state)
)
end
if objective_sense(model) == MOI.MIN_SENSE
@constraint(model, theta >= cut_expr)
@@ -87,16 +88,28 @@
return theta
end

function ValueFunction(node::Node{T}, optimizer) where {T}
function ValueFunction(node::Node{T}) where {T}
b = node.bellman_function
sense = objective_sense(node.subproblem)
model = Model(optimizer)
model = Model()
if node.optimizer !== nothing
set_optimizer(model, node.optimizer)
end
set_objective_sense(model, sense)
states = Dict{Symbol,VariableRef}()
states = Dict{Symbol,VariableRef}(
key => @variable(model, base_name = "$(key)") for (key, x) in node.states
)
objective_state = if node.objective_state === nothing
nothing
else
VariableRef[@variable(model, lower_bound = lower_bound(μ), upper_bound = upper_bound(μ)) for μ in node.objective_state.μ]
tuple(
VariableRef[@variable(
model,
lower_bound = lower_bound(μ),
upper_bound = upper_bound(μ),
base_name = "_objective_state_$(i)"
) for (i, μ) in enumerate(node.objective_state.μ)]...,
)
end
belief_state = if node.belief_state === nothing
nothing
@@ -177,8 +190,9 @@ function evaluate(
optimize!(V.model)
obj = objective_value(V.model)
duals = Dict{Symbol,Float64}()
sign = objective_sense(V.model) == MOI.MIN_SENSE ? 1.0 : -1.0
for (key, var) in V.states
duals[key] = dual(FixRef(var))
duals[key] = sign * dual(FixRef(var))
end
return obj, duals
end
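The `sign` flip above makes the returned duals a consistent subgradient: JuMP reports `FixRef` duals relative to the objective sense, so negating them in a `Max` model means `duals[key]` always equals the slope d(value)/d(state). A hedged sketch of the convention, building on the `Max` testset below (where `model` is defined and trained):

```julia
# For the Max-sense model in the "Max" testset below, V1(x) = -2x, so both
# the value and the reported sensitivity come back negative:
V1 = SDDP.ValueFunction(model[1])
y, duals = SDDP.evaluate(V1, Dict(:x => 1.0))
# y == -2.0 and duals[:x] == -2.0, i.e. duals match dV/dx.
```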
4 changes: 1 addition & 3 deletions test/modeling_aids.jl
@@ -34,9 +34,7 @@ end
end

@testset "lattice_approximation" begin
support, probability = SDDP.lattice_approximation(
() -> rand(5), [1, 2, 3, 4, 5], 100
)
support, probability = SDDP.lattice_approximation(() -> rand(5), [1, 2, 3, 4, 5], 100)
for (t, s) in enumerate(support)
@test length(s) == t
@test all(x -> 0 <= x <= 1, s)
4 changes: 3 additions & 1 deletion test/runtests.jl
@@ -22,6 +22,9 @@ const EXAMPLES_DIR = joinpath(dirname(dirname(@__FILE__)), "examples")
@testset "plugins/$(file)" for file in read_dir("plugins", ["parallel_schemes.jl"])
include(joinpath("plugins", file))
end
@testset "visualization/$(file)" for file in read_dir("visualization")
include(joinpath("visualization", file))
end
@testset "$(file)" for file in read_dir(".", ["runtests.jl"])
include(file)
end
@@ -48,4 +51,3 @@ JuliaFormatter.format(joinpath(dirname(@__DIR__), "src"))
JuliaFormatter.format(@__DIR__)
# Format /examples
JuliaFormatter.format(joinpath(dirname(@__DIR__), "examples"))

118 changes: 118 additions & 0 deletions test/visualization/value_functions.jl
@@ -0,0 +1,118 @@
# Copyright 2017-19, Oscar Dowson.
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

using SDDP
using Test
using GLPK

@testset "Min" begin
model = SDDP.LinearPolicyGraph(
stages = 2,
lower_bound = 0.0,
optimizer = with_optimizer(GLPK.Optimizer),
) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 1.5)
@constraint(sp, x.out == x.in)
@stageobjective(sp, 2 * x.out)
end
V1 = SDDP.ValueFunction(model[1])
@test SDDP.evaluate(V1, Dict(:x => 1.0)) == (0.0, Dict(:x => 0.0))
SDDP.train(model, iteration_limit = 2, print_level = 0)
V1 = SDDP.ValueFunction(model[1])
for (xhat, yhat, pihat) in [(0.0, 0.0, 0.0), (1.0, 2.0, 2.0), (2.0, 4.0, 2.0)]
@test SDDP.evaluate(V1, Dict(:x => xhat)) == (yhat, Dict(:x => pihat))
end
end
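(For reference: in this model the stage-2 cost is 2 * x.out with x.out == x.in, so after training the first-stage cost-to-go should be V1(x̂) = 2x̂ with slope 2; at x̂ = 0 the lower bound of 0 is active, which is presumably why the asserted dual there is 0 rather than 2.)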

@testset "Max" begin
model = SDDP.LinearPolicyGraph(
stages = 2,
sense = :Max,
upper_bound = 0.0,
optimizer = with_optimizer(GLPK.Optimizer),
) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 1.5)
@constraint(sp, x.out == x.in)
@stageobjective(sp, -2 * x.out)
end
SDDP.train(model, iteration_limit = 2, print_level = 0)
V1 = SDDP.ValueFunction(model[1])
for (xhat, yhat, pihat) in [(0.0, 0.0, 0.0), (1.0, 2.0, 2.0), (2.0, 4.0, 2.0)]
(y, duals) = SDDP.evaluate(V1, Dict(:x => xhat))
@test y == -yhat
@test duals == Dict(:x => -pihat)
end
end

@testset "optimizer" begin
model = SDDP.LinearPolicyGraph(
stages = 2,
lower_bound = 0.0,
optimizer = with_optimizer(GLPK.Optimizer),
direct_mode = true,
) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 1.5)
@constraint(sp, x.out == x.in)
@stageobjective(sp, 2 * x.out)
end
SDDP.train(model, iteration_limit = 2, print_level = 0)
V1 = SDDP.ValueFunction(model[1])
@test_throws JuMP.NoOptimizer() SDDP.evaluate(V1, Dict(:x => 1.0))
JuMP.set_optimizer(V1, with_optimizer(GLPK.Optimizer))
(y, _) = SDDP.evaluate(V1, Dict(:x => 1.0))
@test y == 2.0
end

@testset "objective state" begin
model = SDDP.LinearPolicyGraph(
stages = 2,
lower_bound = 0.0,
optimizer = with_optimizer(GLPK.Optimizer),
) do sp, t
@variable(sp, x >= 0, SDDP.State, initial_value = 1.5)
SDDP.add_objective_state(sp; initial_value = 0.0, lipschitz = 10.0) do p, ω
return p + ω
end
@constraint(sp, x.out == x.in)
SDDP.parameterize(sp, [1, 2]) do ω
price = SDDP.objective_state(sp)
@stageobjective(sp, price * x.out)
end
end
SDDP.train(model, iteration_limit = 2, print_level = 0)
V1 = SDDP.ValueFunction(model[1])
@test_throws AssertionError SDDP.evaluate(V1, Dict(:x => 1.0))
@test SDDP.evaluate(V1, Dict(:x => 1.0); objective_state = 1) == (2.5, Dict(:x => 2.5))
@test SDDP.evaluate(V1, Dict(:x => 0.0); objective_state = 2) == (0.0, Dict(:x => 3.5))
end
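(A quick sanity check of these numbers: the objective state evolves as p' = p + ω with ω uniform on {1, 2}, so given a first-stage price p̂, the expected stage-2 price is p̂ + 1.5. With p̂ = 1 that gives 2.5, and with p̂ = 2 it gives 3.5, matching the values and duals asserted above.)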

@testset "belief state" begin
graph = SDDP.MarkovianGraph(Matrix{Float64}[[0.5 0.5], [1.0 0.0; 0.0 1.0]])
SDDP.add_ambiguity_set(graph, [(1, 1), (1, 2)])
SDDP.add_ambiguity_set(graph, [(2, 1), (2, 2)])
model = SDDP.PolicyGraph(
graph,
lower_bound = 0.0,
optimizer = with_optimizer(GLPK.Optimizer),
) do sp, node
(t, i) = node
@variable(sp, x >= 0, SDDP.State, initial_value = 1.5)
@constraint(sp, x.out == x.in)
P = [[0.2, 0.8], [0.8, 0.2]]
SDDP.parameterize(sp, [1, 2], P[i]) do ω
@stageobjective(sp, ω * x.out)
end
end
SDDP.train(model, iteration_limit = 10, print_level = 0)
V11 = SDDP.ValueFunction(model[(1, 1)])
@test_throws AssertionError SDDP.evaluate(V11, Dict(:x => 1.0))
b = Dict((1, 1) => 0.8, (1, 2) => 0.2)
(y, duals) = SDDP.evaluate(V11, Dict(:x => 1.0); belief_state = b)
@test duals[:x] ≈ y ≈ 1.68

V12 = SDDP.ValueFunction(model[(1, 2)])
(y, duals) = SDDP.evaluate(V12, Dict(:x => 1.0); belief_state = b)
@test duals[:x] ≈ y ≈ 1.68
end
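(Sanity check of the 1.68: under the belief (0.8, 0.2), the expected stage-2 noise is 0.8 * (0.2*1 + 0.8*2) + 0.2 * (0.8*1 + 0.2*2) = 0.8*1.8 + 0.2*1.2 = 1.68; since x is fixed at 1 and x.out == x.in, both the value and its slope equal 1.68. The cost-to-go depends only on the belief, which is presumably why V11 and V12 agree.)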
File renamed without changes.
