Remove JADEForwardPass and _TerminateOnCycle and update to SDDP v0.4.8
odow committed Dec 19, 2022
1 parent c4621f9 commit f8bb244
Showing 2 changed files with 2 additions and 124 deletions.
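
The custom `_TerminateOnCycle` sampling wrapper and `JADEForwardPass` shown below are removed in favour of options built into SDDP.jl v0.4.8. A minimal sketch of the replacement, mirroring the `src/solve.jl` changes further down (`sample_paths` comes from the surrounding JADE code):

# Built-in SDDP.jl v0.4.8 equivalents of the removed JADE customisations:
sampling_scheme = SDDP.Historical(sample_paths; terminate_on_cycle = true)
forward_pass = SDDP.DefaultForwardPass(; include_last_node = false)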
121 changes: 0 additions & 121 deletions src/sddp_modifications.jl
@@ -192,124 +192,3 @@ function write_cuts_to_file(
    end
    return
end

struct _TerminateOnCycle{T<:SDDP.AbstractSamplingScheme} <: SDDP.AbstractSamplingScheme
    scheme::T
end

function SDDP.sample_scenario(
    graph::SDDP.PolicyGraph,
    scheme::_TerminateOnCycle;
    kwargs...,
)
    sample, _ = SDDP.sample_scenario(graph, scheme.scheme; kwargs...)
    return sample, true
end
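
# Context: `_TerminateOnCycle` delegated sampling to the wrapped scheme and
# unconditionally reported that the sampled path terminated due to a cycle.
# A hypothetical usage sketch, mirroring the old call in `src/solve.jl` below:
#
#     sampling_scheme = _TerminateOnCycle(SDDP.Historical(sample_paths))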

struct JADEForwardPass <: SDDP.AbstractForwardPass end

"""
SDDP.forward_pass(
model::SDDP.PolicyGraph{T},
options::SDDP.Options,
::JADEForwardPass,
) where {T}
This custom forward pass method for JADE enables historical sequences of inflows to
wrap back, giving new starting states for the next iteration. In order to fit with SDDP.jl,
if we wish to enable this functionality, there is an additional node given in the historical
scenario path. This is used to determine `final_node`. We are not using Markov states, so this code
is a bit more complicated than is currently necessary, but should work if Markov states were
to be introduced.
"""
function SDDP.forward_pass(
    model::SDDP.PolicyGraph{T},
    options::SDDP.Options,
    ::JADEForwardPass,
) where {T}
    # First up, sample a scenario. Note that if a cycle is detected, this will
    # return the cycle node as well.
    SDDP.TimerOutputs.@timeit SDDP.SDDP_TIMER "sample_scenario" begin
        scenario_path, terminated_due_to_cycle =
            SDDP.sample_scenario(model, options.sampling_scheme)
    end

    if terminated_due_to_cycle
        final_node = scenario_path[end]
        scenario_path = scenario_path[1:end-1]
    end

    # Storage for the list of outgoing states that we visit on the forward pass.
    sampled_states = Dict{Symbol,Float64}[]

    # Our initial incoming state.
    incoming_state_value = copy(options.initial_state)

    # A cumulator for the stage-objectives.
    cumulative_value = 0.0

    # Iterate down the scenario.
    for (depth, (node_index, noise)) in enumerate(scenario_path)
        node = model[node_index]

        # ===== Begin: starting state for infinite horizon =====
        starting_states = options.starting_states[node_index]
        if length(starting_states) > 0
            # There is at least one other possible starting state. If our
            # incoming state is more than δ away from the other states, add it
            # as a possible starting state.
            if SDDP.distance(starting_states, incoming_state_value) >
               options.cycle_discretization_delta
                push!(starting_states, incoming_state_value)
            end
            incoming_state_value = splice!(starting_states, rand(1:length(starting_states)))
        end
        # ===== End: starting state for infinite horizon =====
        # Solve the subproblem. Note that `duality_handler = nothing`, so no
        # duals are computed on the forward pass.
        SDDP.TimerOutputs.@timeit SDDP.SDDP_TIMER "solve_subproblem" begin
            subproblem_results = SDDP.solve_subproblem(
                model,
                node,
                incoming_state_value,
                noise,
                scenario_path[1:depth],
                duality_handler = nothing,
            )
        end
        # Cumulate the stage_objective.
        cumulative_value += subproblem_results.stage_objective

        # Set the outgoing state value as the incoming state value for the next
        # node.
        incoming_state_value = copy(subproblem_results.state)
        # Add the outgoing state variable to the list of states we have sampled
        # on this forward pass.
        push!(sampled_states, incoming_state_value)
    end
    # ===== Begin: drop off starting state if terminated due to cycle =====
    if terminated_due_to_cycle
        # Get the last node in the scenario.
        final_node_index = final_node[1]
        # We terminated due to a cycle. Here is the list of possible starting
        # states for that node:
        starting_states = options.starting_states[final_node_index]
        # We also need the incoming state variable to the final node, which is
        # the outgoing state value of the last node:
        incoming_state_value = sampled_states[end]
        # If this incoming state value is more than δ away from another state,
        # add it to the list.
        if SDDP.distance(starting_states, incoming_state_value) >
           options.cycle_discretization_delta
            push!(starting_states, incoming_state_value)
        end
    end
    # ===== End: drop off starting state if terminated due to cycle =====
    return (
        scenario_path = scenario_path,
        sampled_states = sampled_states,
        # objective_states = objective_states,
        # belief_states = belief_states,
        objective_states = NTuple{1,Float64}[],
        belief_states = Tuple{Int,Dict{T,Float64}}[],
        cumulative_value = cumulative_value,
    )
end
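
To make the wrap-around convention concrete, here is a minimal hypothetical sketch (the node indices and inflow values are invented for illustration): the historical path carries one extra trailing entry that wraps back to the first node, and the forward pass pops it off to determine `final_node`.

# Hypothetical three-stage path that wraps back to node 1; the trailing
# (1, 0.0) entry is stripped off by the forward pass and used as `final_node`,
# so the final sampled state becomes a candidate starting state for node 1.
path = [(1, 120.0), (2, 95.5), (3, 88.0), (1, 0.0)]
final_node = path[end]           # (1, 0.0)
scenario_path = path[1:end-1]    # the nodes actually solved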
5 changes: 2 additions & 3 deletions src/solve.jl
@@ -260,13 +260,13 @@ function optimize_policy!(
            sddpm,
            iteration_limit = solveoptions.iterations,
            cut_deletion_minimum = solveoptions.cutselection,
-           sampling_scheme = _TerminateOnCycle(SDDP.Historical(sample_paths)),
+           sampling_scheme = SDDP.Historical(sample_paths; terminate_on_cycle = true),
            cycle_discretization_delta = 10.0,
            dashboard = false,
            risk_measure = solveoptions.riskmeasure,
            parallel_scheme = parallel_scheme,
            print_level = print_level,
-           forward_pass = JADEForwardPass(),
+           forward_pass = SDDP.DefaultForwardPass(; include_last_node = false),
        )
    else
        solveresults = SDDP.train(
@@ -278,7 +278,6 @@
            risk_measure = solveoptions.riskmeasure,
            parallel_scheme = parallel_scheme,
            print_level = print_level,
-           forward_pass = JADEForwardPass(),
        )
    end
    # Save cuts to a file
