Hi! I have a question about an error I'm getting with my code. I was trying to mimic the objective states example in the SDDP.jl documentation and include some integer variables, but I got the error: LoadError: MethodError: no method matching zero(::Type{SDDP.State{VariableRef}}). I'm confused about what this error refers to. Thanks in advance! My code is attached below:
using SDDP, Gurobi
model = SDDP.LinearPolicyGraph(;
    stages = 5,
    sense = :Max,
    lower_bound = 0.0,
    upper_bound = 500.0,
    optimizer = Gurobi.Optimizer,
) do subproblem, t
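    # State variables: invest_decision is a binary flag for whether we keep
    # investing, and total_invest accumulates the amount invested so far.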
    @variable(subproblem, 0 <= invest_decision <= 1, SDDP.State, initial_value = 1, integer = true)
    @variable(subproblem, 0 <= total_invest <= 100, SDDP.State, initial_value = 10)
    @variable(subproblem, downturn)
    @variable(subproblem, 0 <= exit_decision <= 1, integer = true)
    @constraints(
        subproblem,
        begin
            invest_decision.out <= invest_decision.in
            exit_decision + invest_decision.out == 1
            total_invest.out == total_invest.in + 10 * invest_decision.out
        end
    )
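    # The objective state tracks the firm value, which is updated each stage by
    # the fv_noise component of the noise term.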
    SDDP.add_objective_state(
        subproblem;
        initial_value = 100.0,
        lipschitz = 10_000.0,
    ) do firm_value, ω
        return ω.fv_noise + firm_value
    end
    Ω = [
        (fv_noise = f, downturn = w) for f in [10, 20, 30, 40] for
        w in [0.0, 0.0, 0.0, -50.0, -100.0]
    ]
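    # For each sampled noise, set the stage objective and fix the downturn variable.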
    SDDP.parameterize(subproblem, Ω) do ω
        # Query the current firm_value.
        firm_value = SDDP.objective_state(subproblem)
        @stageobjective(subproblem, firm_value * exit_decision - total_invest - downturn)
        return JuMP.fix(downturn, ω.downturn)
    end
end
SDDP.train(model)
simulations = SDDP.simulate(model, 1)