Proposal: More Exotic Policy Graphs #92

Closed

odow opened this issue Feb 21, 2018 · 0 comments

odow commented Feb 21, 2018

We can elaborate on this once my thesis is written; saving this here while I remember.

This is a nice way to avoid the confusion around stages vs. nodes, and around the do sp, t vs. do sp, t, i signatures.

struct PolicyGraph{T}
    root::T
    # Maps each node to a vector of (child node, transition probability) pairs.
    subproblems::Dict{T, Vector{Tuple{T, Float64}}}
end

function LinearGraph(stages::Int)
    subproblems = Dict{Int, Vector{Tuple{Int, Float64}}}()
    subproblems[0] = Tuple{Int, Float64}[]
    for t in 1:stages
        subproblems[t] = Tuple{Int, Float64}[]
        push!(subproblems[t-1], (t, 1.0))
    end
    PolicyGraph(0, subproblems)
end
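
A quick sketch of what this produces (just reading off the constructor above): LinearGraph(3) is the chain 0 -> 1 -> 2 -> 3.

g = LinearGraph(3)
g.root               # 0
g.subproblems[0]     # [(1, 1.0)]
g.subproblems[2]     # [(3, 1.0)]
g.subproblems[3]     # [] (leaf node, no children)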

function MarkovianGraph(transition_probabilities::AbstractVector{<:AbstractMatrix{Float64}})
    # Check that the matrices chain together (columns of one = rows of the next)
    # and that every row is a probability distribution.
    i = 1
    for T in transition_probabilities
        @assert size(T, 1) == i
        i = size(T, 2)
        for j in 1:size(T, 1)
            @assert isapprox(sum(T[j,:]), 1.0)
        end
    end

    typ = Tuple{Int, Int}
    subproblems = Dict{typ, Vector{Tuple{typ, Float64}}}()
    subproblems[(0,1)] = Tuple{typ, Float64}[]
    for (t, T) in enumerate(transition_probabilities)
        for j in 1:size(T, 2)
            subproblems[(t, j)] = Tuple{typ, Float64}[]
        end
        for i in 1:size(T, 1)
            for j in 1:size(T, 2)
                push!(subproblems[(t-1, i)], ((t, j), T[i,j]))
            end
        end
    end
    PolicyGraph((0,1), subproblems)
end
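
For the transition matrices used in the example below, MarkovianGraph builds the following edges (again, just a sketch of what the constructor does):

g = MarkovianGraph([
    [1.0]',
    [0.2 0.8],
    [0.5 0.5; 0.1 0.9]
])
g.root                 # (0, 1)
g.subproblems[(0, 1)]  # [((1, 1), 1.0)]
g.subproblems[(1, 1)]  # [((2, 1), 0.2), ((2, 2), 0.8)]
g.subproblems[(2, 2)]  # [((3, 1), 0.1), ((3, 2), 0.9)]
g.subproblems[(3, 1)]  # [] (leaf node)
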
SDDPModel(
    policy_graph = LinearGraph(3)
) do sp, index
    stage = index
    # stage = 1,2,3
end

SDDPModel(
    policy_graph = MarkovianGraph([ 
        [1.0]',
        [0.2 0.8],
        [0.5 0.5; 0.1 0.9]
    ])
) do sp, index
    (stage, markov_state) = index
    # (stage, markov_state) = (1,1), (2,1), (2,2), (3,1), (3,2)

end

# Return the child nodes of a subproblem (the probabilities are dropped).
function children(g::PolicyGraph{T}, subproblem::T) where T
    [s[1] for s in g.subproblems[subproblem]]
end

g = LinearGraph(3)
children(g, 0) # [1]
children(g, 2) # [3]
children(g, 3) # []

g = MarkovianGraph([
    [1.0]',
    [0.2 0.8],
    [0.5 0.5; 0.1 0.9]
])
children(g, (0,1)) # [(1,1)]
children(g, (2,2)) # [(3,1), (3,2)]
children(g, (3,1)) # []
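
Nothing here restricts the graph to a chain or a tree. As a sketch of something more exotic (not spelled out above), a node that loops back on itself with some probability gives an infinite-horizon, discounted graph:

# Sketch: node 1 returns to itself with probability 0.9.
g = PolicyGraph(0, Dict(
    0 => [(1, 1.0)],
    1 => [(1, 0.9)],
))
children(g, 0)  # [1]
children(g, 1)  # [1]
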
odow changed the title from "Proposal: More Exotic Node Graphs" to "Proposal: More Exotic Staging Graphs" on Feb 22, 2018
odow changed the title from "Proposal: More Exotic Staging Graphs" to "Proposal: More Exotic Policy Graphs" on Mar 2, 2018
odow added this to the v2.0.0 milestone on Mar 6, 2018
odow added the proposal label on Apr 26, 2018
odow mentioned this issue on Mar 13, 2019
odow removed this from the v2.0.0 milestone on Mar 13, 2019
odow closed this as completed in #180 on Mar 20, 2019